diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml index 11d59e9073cef..2435d074d1167 100644 --- a/.github/actionlint.yaml +++ b/.github/actionlint.yaml @@ -1,4 +1,4 @@ self-hosted-runner: # Labels of self-hosted runners in array of string labels: - - runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + - runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} diff --git a/.github/workflows/bump-release-version.yaml b/.github/workflows/bump-release-version.yaml index 11e09273dd098..ff82352270be6 100644 --- a/.github/workflows/bump-release-version.yaml +++ b/.github/workflows/bump-release-version.yaml @@ -85,6 +85,13 @@ jobs: core.setOutput("newVersion", newVersion); core.setOutput("releaseBranch", branch); core.setOutput("currentVersion", currentVersion); + + - uses: dtolnay/rust-toolchain@stable + + - name: Update Cargo.lock + run: | + cargo update -p aptos-node + - name: Create Pull Request id: create-pr uses: peter-evans/create-pull-request@v7 @@ -95,7 +102,7 @@ jobs: title: "[${{ steps.determine-version.outputs.releaseBranch }}] Bump version to ${{ steps.determine-version.outputs.newVersion }}" body: "This PR bumps the aptos-node version to ${{ steps.determine-version.outputs.newVersion }} in ${{ steps.determine-version.outputs.releaseBranch }}." commit-message: "[${{ steps.determine-version.outputs.releaseBranch }}] Bump version to ${{ steps.determine-version.outputs.newVersion }}" - add-paths: "aptos-node/Cargo.toml" + add-paths: "aptos-node/Cargo.toml, Cargo.lock" - name: Log PR URL if: ${{ steps.create-pr.outputs.pull-request-number }} run: | diff --git a/.github/workflows/cli-e2e-tests.yaml b/.github/workflows/cli-e2e-tests.yaml index 7b9f32f8fb62c..5217a47c283e5 100644 --- a/.github/workflows/cli-e2e-tests.yaml +++ b/.github/workflows/cli-e2e-tests.yaml @@ -25,7 +25,7 @@ jobs: # we ensure that the Aptos CLI works with all 3 prod networks, at least # based on the tests in the test suite. 
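Review note on the bump-release-version change above: the workflow now installs a stable Rust toolchain and runs `cargo update -p aptos-node` so Cargo.lock is refreshed alongside aptos-node/Cargo.toml (and Cargo.lock is added to the PR's add-paths); `cargo update -p <pkg>` rewrites only that package's lockfile entry, so without it the bump PR would ship a stale lockfile. The version arithmetic itself lives in the JavaScript `determine-version` step, which is not fully shown here; purely as an illustrative sketch (hypothetical helper, not code from this PR, and assuming a minor-component bump), the computation amounts to:

```rust
/// Hypothetical sketch of a semver minor bump (e.g. "7.5.0" -> "7.6.0").
/// The real computation lives in the workflow's JavaScript step above;
/// this is illustration only.
fn bump_minor(version: &str) -> Option<String> {
    let mut parts = version.splitn(3, '.');
    let major: u64 = parts.next()?.parse().ok()?;
    let minor: u64 = parts.next()?.parse().ok()?;
    // The patch component resets to zero on a minor bump.
    Some(format!("{}.{}.0", major, minor + 1))
}
```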
run-cli-tests: - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} permissions: contents: read id-token: write diff --git a/.github/workflows/cli-release.yaml b/.github/workflows/cli-release.yaml index a367ce75a00fd..1f81f17032aef 100644 --- a/.github/workflows/cli-release.yaml +++ b/.github/workflows/cli-release.yaml @@ -131,7 +131,7 @@ jobs: build-windows-binary: name: "Build Windows binary" - runs-on: windows-latest + runs-on: windows-2025 steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/copy-images-to-dockerhub.yaml b/.github/workflows/copy-images-to-dockerhub.yaml index 6ead9db5a4481..b02b60cb0ec52 100644 --- a/.github/workflows/copy-images-to-dockerhub.yaml +++ b/.github/workflows/copy-images-to-dockerhub.yaml @@ -39,7 +39,7 @@ permissions: jobs: copy-images: # Run on a machine with more local storage for large docker images - runs-on: runs-on,cpu=16,family=m6id,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=16,family=m6id,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/coverage-move-only.yaml b/.github/workflows/coverage-move-only.yaml index 52358b73717a6..ec01b417041c0 100644 --- a/.github/workflows/coverage-move-only.yaml +++ b/.github/workflows/coverage-move-only.yaml @@ -30,7 +30,7 @@ concurrency: jobs: rust-move-unit-coverage: timeout-minutes: 60 - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 006d837a200cc..d4dd559c20a26 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -27,7 +27,7 @@ jobs: (github.event_name == 'schedule' && github.ref_name == 'main') # Note the tests run slowly due to instrumentation.
It takes CI 10 hrs timeout-minutes: 720 - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 with: @@ -57,7 +57,7 @@ jobs: contains(github.event.pull_request.labels.*.name, 'CICD:run-coverage') || (github.event_name == 'schedule' && github.ref_name == 'main') timeout-minutes: 720 # incremented from 240 due to execution time limit hit in cron - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/docker-build-rosetta.yaml b/.github/workflows/docker-build-rosetta.yaml index 1aed7aa668014..f7eb3d12cfd1f 100644 --- a/.github/workflows/docker-build-rosetta.yaml +++ b/.github/workflows/docker-build-rosetta.yaml @@ -17,7 +17,7 @@ permissions: jobs: build: - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/execution-performance.yaml b/.github/workflows/execution-performance.yaml index 1597d9ecbc863..ee9fc902c15dc 100644 --- a/.github/workflows/execution-performance.yaml +++ b/.github/workflows/execution-performance.yaml @@ -2,7 +2,7 @@ name: "execution-performance" on: workflow_dispatch: pull_request: - types: [labeled, opened, synchronize, reopened, auto_merge_enabled] + types: [ labeled, opened, synchronize, reopened, auto_merge_enabled ] schedule: - cron: "0 */4 * * *" # This runs every four hours @@ -21,8 +21,8 @@ jobs: secrets: inherit with: GIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} - RUNNER_NAME: executor-benchmark-runner - # Run all tests only on the scheduled cadence, or explicitly requested + RUNNER_NAME: benchmark-c3d-60 + # Run all tests only on the scheduled cadence, or explicitly requested FLOW: ${{ (github.event_name == 'schedule' || contains(github.event.pull_request.labels.*.name, 'CICD:run-execution-performance-full-test')) && 'CONTINUOUS' || 'LAND_BLOCKING' }} # Ignore target determination if on the scheduled cadence, or explicitly requested IGNORE_TARGET_DETERMINATION: ${{ github.event_name == 'schedule' || contains(github.event.pull_request.labels.*.name, 'CICD:run-execution-performance-test') || contains(github.event.pull_request.labels.*.name, 'CICD:run-execution-performance-full-test') }} diff --git a/.github/workflows/faucet-tests-main.yaml b/.github/workflows/faucet-tests-main.yaml index dafa08ff5927b..6b0275260c739 100644 --- a/.github/workflows/faucet-tests-main.yaml +++ b/.github/workflows/faucet-tests-main.yaml @@ -49,7 +49,7 @@ jobs: # be compatible in production. 
run-tests-main: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 if: ${{ !inputs.SKIP_JOB }} diff --git a/.github/workflows/faucet-tests-prod.yaml b/.github/workflows/faucet-tests-prod.yaml index c731966c09015..8cb64d9f859b6 100644 --- a/.github/workflows/faucet-tests-prod.yaml +++ b/.github/workflows/faucet-tests-prod.yaml @@ -37,7 +37,7 @@ jobs: run-tests-devnet: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') needs: [permission-check] - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main @@ -57,7 +57,7 @@ jobs: run-tests-testnet: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') needs: [permission-check] - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} permissions: contents: read id-token: write diff --git a/.github/workflows/find-packages-with-undeclared-feature-dependencies.yaml b/.github/workflows/find-packages-with-undeclared-feature-dependencies.yaml index 0087168d29054..646655c421366 100644 --- a/.github/workflows/find-packages-with-undeclared-feature-dependencies.yaml +++ b/.github/workflows/find-packages-with-undeclared-feature-dependencies.yaml @@ -4,7 +4,7 @@ on: jobs: find-packages-with-undeclared-feature-dependencies: - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main diff --git a/.github/workflows/forge-continuous-land-blocking-test.yaml b/.github/workflows/forge-continuous-land-blocking-test.yaml index 54371b5fd6a59..d3d3693c480c3 100644 --- a/.github/workflows/forge-continuous-land-blocking-test.yaml +++ b/.github/workflows/forge-continuous-land-blocking-test.yaml @@ -12,6 +12,11 @@ permissions: on: # Allow triggering manually workflow_dispatch: + inputs: + FORGE_CLUSTER_NAME: + required: false + type: string + description: The Forge k8s cluster to be used for test push: branches: # Use this branch for canary @@ -85,6 +90,7 @@ jobs: FORGE_TEST_SUITE: realistic_env_max_load IMAGE_TAG: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} FORGE_RUNNER_DURATION_SECS: 480 + FORGE_CLUSTER_NAME: ${{ inputs.FORGE_CLUSTER_NAME }} FORGE_NAMESPACE: forge-e2e-${{ needs.determine-docker-build-metadata.outputs.FORGE_NAMESPACE_SUFFIX }} SEND_RESULTS_TO_TRUNK: true @@ -144,6 +150,7 @@ jobs: FORGE_TEST_SUITE: compat IMAGE_TAG: ${{ needs.fetch-last-released-docker-image-tag.outputs.IMAGE_TAG }} FORGE_RUNNER_DURATION_SECS: 300 + FORGE_CLUSTER_NAME: ${{ inputs.FORGE_CLUSTER_NAME }} FORGE_NAMESPACE: forge-compat-${{ needs.determine-docker-build-metadata.outputs.FORGE_NAMESPACE_SUFFIX }} SEND_RESULTS_TO_TRUNK: true @@ -161,5 +168,6 @@ jobs: FORGE_TEST_SUITE: framework_upgrade IMAGE_TAG: ${{ needs.fetch-last-released-docker-image-tag.outputs.IMAGE_TAG }} 
FORGE_RUNNER_DURATION_SECS: 300 + FORGE_CLUSTER_NAME: ${{ inputs.FORGE_CLUSTER_NAME }} FORGE_NAMESPACE: forge-framework-upgrade-${{ needs.determine-docker-build-metadata.outputs.FORGE_NAMESPACE_SUFFIX }} SEND_RESULTS_TO_TRUNK: true diff --git a/.github/workflows/forge-stable.yaml b/.github/workflows/forge-stable.yaml index 6dc2509d8563a..92bf1e6fbc537 100644 --- a/.github/workflows/forge-stable.yaml +++ b/.github/workflows/forge-stable.yaml @@ -287,4 +287,4 @@ jobs: FORGE_ENABLE_PERFORMANCE: ${{ matrix.FORGE_ENABLE_PERFORMANCE || false }} FORGE_ENABLE_FAILPOINTS: ${{ matrix.FORGE_ENABLE_FAILPOINTS || false }} POST_TO_SLACK: true - SEND_RESULTS_TO_TRUNK: true \ No newline at end of file + SEND_RESULTS_TO_TRUNK: true diff --git a/.github/workflows/fuzzer-data-update.yml b/.github/workflows/fuzzer-data-update.yml index b58011e3571dd..02e83ee841af2 100644 --- a/.github/workflows/fuzzer-data-update.yml +++ b/.github/workflows/fuzzer-data-update.yml @@ -14,7 +14,7 @@ permissions: jobs: update-fuzzer-data: - runs-on: runs-on,cpu=16,family=m6id,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=16,family=m6id,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - name: Checkout repository uses: actions/checkout@v4 diff --git a/.github/workflows/indexer-processor-testing.yaml b/.github/workflows/indexer-processor-testing.yaml index f3c8a5f207243..6d2a2bbff1204 100644 --- a/.github/workflows/indexer-processor-testing.yaml +++ b/.github/workflows/indexer-processor-testing.yaml @@ -16,7 +16,7 @@ permissions: jobs: dispatch_event: - runs-on: runs-on,cpu=16,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=16,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - name: Checkout the repository @@ -164,4 +164,4 @@ jobs: . scripts/indexer_processor_tests_status_poll.sh env: GITHUB_TOKEN: ${{ steps.secrets.outputs.token }} # Pass the correct GitHub token - GITHUB_SHA: ${{ github.sha }} \ No newline at end of file + GITHUB_SHA: ${{ github.sha }} diff --git a/.github/workflows/lint-test.yaml b/.github/workflows/lint-test.yaml index cca74b716e9e1..a771259083e4a 100644 --- a/.github/workflows/lint-test.yaml +++ b/.github/workflows/lint-test.yaml @@ -56,7 +56,7 @@ jobs: # Run the crypto hasher domain separation checks rust-cryptohasher-domain-separation-check: needs: file_change_determinator - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') steps: - uses: actions/checkout@v4 @@ -65,7 +65,7 @@ jobs: # Run all rust lints. This is a PR required job. 
rust-lints: needs: file_change_determinator - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 if: needs.file_change_determinator.outputs.only_docs_changed != 'true' @@ -101,7 +101,7 @@ jobs: contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || github.event.pull_request.auto_merge != null ) - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 - name: Run rust doc tests @@ -120,7 +120,7 @@ jobs: github.event.pull_request.auto_merge != null) || contains(github.event.pull_request.body, '#e2e' ) - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 if: needs.file_change_determinator.outputs.only_docs_changed != 'true' @@ -139,7 +139,7 @@ jobs: !contains(github.event.pull_request.base.ref, '-release-') ) needs: file_change_determinator - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 with: @@ -157,7 +157,7 @@ jobs: !contains(github.event.pull_request.base.ref, '-release-') ) needs: file_change_determinator - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 with: @@ -182,7 +182,7 @@ jobs: contains(github.event.pull_request.labels.*.name, 'CICD:run-all-unit-tests') || contains(github.event.pull_request.base.ref, '-release-') ) - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 # Install Move Prover tools @@ -205,7 +205,7 @@ jobs: contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || github.event.pull_request.auto_merge != null ) - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 if: needs.file_change_determinator.outputs.only_docs_changed != 'true' @@ -217,7 +217,7 @@ jobs: # Run the consensus only unit tests rust-consensus-only-unit-test: - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} if: contains(github.event.pull_request.labels.*.name, 'CICD:build-consensus-only-image') steps: - uses: actions/checkout@v4 @@ -227,7 +227,7 @@ jobs: # Run the consensus only smoke test rust-consensus-only-smoke-test: - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} if: contains(github.event.pull_request.labels.*.name, 'CICD:build-consensus-only-image') steps: - uses: 
actions/checkout@v4 diff --git a/.github/workflows/node-api-compatibility-tests.yaml b/.github/workflows/node-api-compatibility-tests.yaml index 1d5631e6514d0..2bc6c6bf0df4c 100644 --- a/.github/workflows/node-api-compatibility-tests.yaml +++ b/.github/workflows/node-api-compatibility-tests.yaml @@ -43,7 +43,7 @@ jobs: # if there are any changes that would affect it within the PR / commit. If # everything is checked in, run tests, build the SDK, and upload it to npmjs. node-api-compatibility-tests: - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} permissions: contents: read id-token: write @@ -108,4 +108,4 @@ jobs: git diff --no-index --ignore-space-at-eol --ignore-blank-lines ${{ runner.temp }}/specs/spec.json api/doc/spec.json if: ${{ !inputs.SKIP_JOB }} - # TODO: Need to use the other SDKs here to verify correctness \ No newline at end of file + # TODO: Need to use the other SDKs here to verify correctness diff --git a/.github/workflows/prover-daily-test.yaml b/.github/workflows/prover-daily-test.yaml index fcaece5daf22b..c222e58815efc 100644 --- a/.github/workflows/prover-daily-test.yaml +++ b/.github/workflows/prover-daily-test.yaml @@ -22,7 +22,7 @@ concurrency: jobs: prover-inconsistency-test: - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} timeout-minutes: ${{ github.event_name == 'pull_request' && 10 || 480}} steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/run-gas-calibration.yaml b/.github/workflows/run-gas-calibration.yaml index 9d613140b8a13..9c17e0fd74f18 100644 --- a/.github/workflows/run-gas-calibration.yaml +++ b/.github/workflows/run-gas-calibration.yaml @@ -25,7 +25,7 @@ concurrency: jobs: run-gas-calibration: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/rust-client-tests.yaml b/.github/workflows/rust-client-tests.yaml index d612fc824c546..6738f7ef780a7 100644 --- a/.github/workflows/rust-client-tests.yaml +++ b/.github/workflows/rust-client-tests.yaml @@ -31,7 +31,7 @@ jobs: run-tests-devnet: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') needs: [permission-check] - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main @@ -50,7 +50,7 @@ jobs: run-tests-testnet: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') needs: [permission-check] - runs-on: runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main @@ -69,7 +69,7 @@ jobs: run-tests-mainnet: if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests') needs: [permission-check] - runs-on: 
runs-on,cpu=64,family=c7,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} + runs-on: runs-on,cpu=64,family=c7,disk=large,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main diff --git a/.github/workflows/windows-build.yaml b/.github/workflows/windows-build.yaml index 124cc3e334160..83dc4c6e43b3e 100644 --- a/.github/workflows/windows-build.yaml +++ b/.github/workflows/windows-build.yaml @@ -13,7 +13,7 @@ on: jobs: windows-build: - runs-on: windows-latest + runs-on: windows-2025 if: | # Only run on each PR once an appropriate event occurs ( github.event_name == 'workflow_dispatch' || diff --git a/.github/workflows/workflow-run-docker-rust-build.yaml b/.github/workflows/workflow-run-docker-rust-build.yaml index 50e6e14ee1963..ba98ebef6513c 100644 --- a/.github/workflows/workflow-run-docker-rust-build.yaml +++ b/.github/workflows/workflow-run-docker-rust-build.yaml @@ -73,7 +73,7 @@ permissions: jobs: rust-all: - runs-on: runs-on,cpu=64,family=c7,hdd=1024,image=aptos-ubuntu-x64,run-id=${{ github.run_id }},spot=co + runs-on: runs-on,cpu=64,family=c7,image=aptos-ubuntu-x64,run-id=${{ github.run_id }},spot=co,disk=large steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/workflow-run-replay-verify-on-archive.yaml b/.github/workflows/workflow-run-replay-verify-on-archive.yaml index f1fce9a8006ea..dadcb2aafb666 100644 --- a/.github/workflows/workflow-run-replay-verify-on-archive.yaml +++ b/.github/workflows/workflow-run-replay-verify-on-archive.yaml @@ -58,7 +58,7 @@ jobs: GCP_SERVICE_ACCOUNT_EMAIL: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }} EXPORT_GCP_PROJECT_VARIABLES: "false" GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} - GCP_AUTH_DURATION: "10800" + GCP_AUTH_DURATION: "18000" # Authenticate to Google Cloud; the project is aptos-ci, with credentials files generated - name: Authenticate to Google Cloud @@ -109,7 +109,7 @@ jobs: fi eval $CMD - timeout-minutes: 300 + timeout-minutes: 420 # This is in case the user manually cancels the step above; we still want to clean up the resources - name: Post-run cleanup env: diff --git a/Cargo.lock b/Cargo.lock index 7e4ac01391b19..1bb092c5208f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -304,7 +304,7 @@ dependencies = [ [[package]] name = "aptos" -version = "7.4.0" +version = "7.6.0" dependencies = [ "anyhow", "aptos-api-types", @@ -352,7 +352,7 @@ dependencies = [ "clap 4.5.21", "clap_complete", "colored", - "dashmap", + "dashmap 7.0.0-rc2", "diesel", "diesel-async", "dirs 5.0.1", @@ -486,6 +486,7 @@ dependencies = [ "aptos-runtimes", "aptos-sdk", "aptos-storage-interface", + "aptos-transaction-filters", "aptos-types", "aptos-vm", "bcs 0.1.4", @@ -754,7 +755,7 @@ dependencies = [ "bcs 0.1.4", "clap 4.5.21", "criterion", - "dashmap", + "dashmap 7.0.0-rc2", "itertools 0.13.0", "jemallocator", "move-core-types", @@ -893,6 +894,7 @@ dependencies = [ "aptos-secure-storage", "aptos-short-hex-str", "aptos-temppath", + "aptos-transaction-filters", "aptos-types", "arr_macro", "bcs 0.1.4", @@ -954,6 +956,7 @@ dependencies = [ "aptos-storage-interface", "aptos-temppath", "aptos-time-service", + "aptos-transaction-filters", "aptos-types", "aptos-validator-transaction-pool", "aptos-vm", @@ -965,7 +968,7 @@ dependencies = [ "chrono", "claims", "clap 4.5.21", - "dashmap", + "dashmap 7.0.0-rc2", "enum_dispatch", "fail", "futures", @@ -990,7 +993,6 @@ dependencies = [ "serde", "serde_bytes", "serde_json", - "serde_yaml 0.8.26", "sha3 0.9.1", "strum
0.27.1", "strum_macros 0.27.1", @@ -1154,7 +1156,7 @@ dependencies = [ "async-trait", "bcs 0.1.4", "claims", - "dashmap", + "dashmap 7.0.0-rc2", "futures", "itertools 0.13.0", "maplit", @@ -1226,7 +1228,7 @@ dependencies = [ "claims", "clap 4.5.21", "crossbeam-channel", - "dashmap", + "dashmap 7.0.0-rc2", "either", "hex", "indicatif 0.15.0", @@ -1263,7 +1265,7 @@ dependencies = [ "aptos-types", "bcs 0.1.4", "bytes", - "dashmap", + "dashmap 7.0.0-rc2", "move-core-types", "once_cell", "rand 0.7.3", @@ -1546,7 +1548,7 @@ dependencies = [ "bytes", "chrono", "clap 4.5.21", - "dashmap", + "dashmap 7.0.0-rc2", "derivative", "indicatif 0.15.0", "itertools 0.13.0", @@ -1586,7 +1588,7 @@ dependencies = [ "clap 4.5.21", "crossbeam-channel", "ctrlc", - "dashmap", + "dashmap 7.0.0-rc2", "itertools 0.13.0", "num_cpus", "once_cell", @@ -2225,7 +2227,7 @@ dependencies = [ "async-trait", "build_html", "clap 4.5.21", - "dashmap", + "dashmap 7.0.0-rc2", "futures", "jemallocator", "once_cell", @@ -2383,7 +2385,7 @@ dependencies = [ "async-trait", "build_html", "clap 4.5.21", - "dashmap", + "dashmap 7.0.0-rc2", "futures", "jemallocator", "once_cell", @@ -2467,7 +2469,7 @@ dependencies = [ "bytesize", "chrono", "cloud-storage", - "dashmap", + "dashmap 7.0.0-rc2", "futures", "itertools 0.13.0", "lz4", @@ -2511,7 +2513,7 @@ dependencies = [ [[package]] name = "aptos-indexer-processor-sdk" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=c63f6ee891e42cc50f92480c21d4afa86012b157#c63f6ee891e42cc50f92480c21d4afa86012b157" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?tag=aptos-indexer-processor-sdk-v2.1.1#6eb78b8fdadae14551e886185105ad5fffa29f2d" dependencies = [ "ahash 0.8.11", "anyhow", @@ -2596,10 +2598,10 @@ dependencies = [ [[package]] name = "aptos-indexer-transaction-stream" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=c63f6ee891e42cc50f92480c21d4afa86012b157#c63f6ee891e42cc50f92480c21d4afa86012b157" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?tag=aptos-indexer-processor-sdk-v2.1.1#6eb78b8fdadae14551e886185105ad5fffa29f2d" dependencies = [ "anyhow", - "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=c63f6ee891e42cc50f92480c21d4afa86012b157)", + "aptos-moving-average 0.1.0 (git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?tag=aptos-indexer-processor-sdk-v2.1.1)", "aptos-protos 1.3.1 (git+https://github.com/aptos-labs/aptos-core.git?rev=2dd9c73b27fdcbe78c7391fd43c9a5d00b93e686)", "aptos-transaction-filter 0.1.0 (git+https://github.com/aptos-labs/aptos-core.git?rev=2dd9c73b27fdcbe78c7391fd43c9a5d00b93e686)", "chrono", @@ -2801,7 +2803,7 @@ dependencies = [ "ark-groth16", "ark-serialize", "bcs 0.1.4", - "dashmap", + "dashmap 7.0.0-rc2", "firestore", "hex", "hyper 0.14.28", @@ -2971,6 +2973,7 @@ dependencies = [ "aptos-short-hex-str", "aptos-storage-interface", "aptos-time-service", + "aptos-transaction-filters", "aptos-types", "aptos-vm-validator", "bcs 0.1.4", @@ -3106,7 +3109,7 @@ dependencies = [ [[package]] name = "aptos-moving-average" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=62beedc881d1b76632318ceb186ee9065236468e#62beedc881d1b76632318ceb186ee9065236468e" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?tag=aptos-indexer-processor-sdk-v2.1.1#6eb78b8fdadae14551e886185105ad5fffa29f2d" dependencies 
= [ "chrono", ] @@ -3114,7 +3117,7 @@ dependencies = [ [[package]] name = "aptos-moving-average" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=c63f6ee891e42cc50f92480c21d4afa86012b157#c63f6ee891e42cc50f92480c21d4afa86012b157" +source = "git+https://github.com/aptos-labs/aptos-indexer-processors.git?rev=62beedc881d1b76632318ceb186ee9065236468e#62beedc881d1b76632318ceb186ee9065236468e" dependencies = [ "chrono", ] @@ -3125,16 +3128,21 @@ version = "0.1.0" dependencies = [ "anyhow", "aptos-aggregator", + "aptos-infallible", "aptos-types", "aptos-vm-types", "bytes", "claims", + "concurrent-queue", "crossbeam", - "dashmap", + "dashmap 7.0.0-rc2", + "equivalent", + "fail", "move-binary-format", "move-core-types", "move-vm-runtime", "move-vm-types", + "num_cpus", "proptest", "proptest-derive", "rayon", @@ -3149,8 +3157,6 @@ dependencies = [ "aptos-gas-algebra", "aptos-gas-schedule", "aptos-types", - "bcs 0.1.4", - "bytes", "move-binary-format", "move-core-types", "move-vm-runtime", @@ -3939,7 +3945,7 @@ dependencies = [ "aptos-vm", "bitvec 1.0.1", "criterion", - "dashmap", + "dashmap 7.0.0-rc2", "itertools 0.13.0", "jemallocator", "once_cell", @@ -4124,7 +4130,7 @@ dependencies = [ "aptos-types", "arr_macro", "bcs 0.1.4", - "dashmap", + "dashmap 7.0.0-rc2", "derive_more 0.99.17", "itertools 0.13.0", "once_cell", @@ -4184,7 +4190,7 @@ dependencies = [ "bcs 0.1.4", "bytes", "claims", - "dashmap", + "dashmap 7.0.0-rc2", "futures", "maplit", "mini-moka", @@ -4509,6 +4515,18 @@ dependencies = [ "thiserror", ] +[[package]] +name = "aptos-transaction-filters" +version = "0.1.0" +dependencies = [ + "aptos-crypto", + "aptos-types", + "move-core-types", + "rand 0.7.3", + "serde", + "serde_yaml 0.8.26", +] + [[package]] name = "aptos-transaction-generator-lib" version = "0.0.0" @@ -4632,7 +4650,7 @@ dependencies = [ "claims", "coset", "criterion", - "dashmap", + "dashmap 7.0.0-rc2", "derivative", "derive_more 0.99.17", "fixed", @@ -7550,7 +7568,21 @@ dependencies = [ "hashbrown 0.14.3", "lock_api", "once_cell", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", +] + +[[package]] +name = "dashmap" +version = "7.0.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4a1e35a65fe0538a60167f0ada6e195ad5d477f6ddae273943596d4a1a5730b" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "equivalent", + "hashbrown 0.15.3", + "lock_api", + "parking_lot_core 0.9.10", ] [[package]] @@ -9779,9 +9811,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.1" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" [[package]] name = "hashers" @@ -10496,7 +10528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.15.1", + "hashbrown 0.15.3", "serde", ] @@ -10541,7 +10573,7 @@ dependencies = [ "clap 4.5.21", "crossbeam-channel", "crossbeam-utils", - "dashmap", + "dashmap 5.5.3", "env_logger", "indexmap 2.7.0", "is-terminal", @@ -10584,7 +10616,7 @@ dependencies = [ [[package]] name = "instrumented-channel" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=c63f6ee891e42cc50f92480c21d4afa86012b157#c63f6ee891e42cc50f92480c21d4afa86012b157" +source = 
"git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?tag=aptos-indexer-processor-sdk-v2.1.1#6eb78b8fdadae14551e886185105ad5fffa29f2d" dependencies = [ "delegate", "derive_builder", @@ -10607,7 +10639,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ab388864246d58a276e60e7569a833d9cc4cd75c66e5ca77c177dad38e59996" dependencies = [ "ahash 0.7.8", - "dashmap", + "dashmap 5.5.3", "hashbrown 0.12.3", "once_cell", "parking_lot 0.12.1", @@ -11395,9 +11427,9 @@ checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -11666,7 +11698,7 @@ checksum = "c325dfab65f261f386debee8b0969da215b3fa0037e74c8a1234db7ba986d803" dependencies = [ "crossbeam-channel", "crossbeam-utils", - "dashmap", + "dashmap 5.5.3", "skeptic", "smallvec", "tagptr", @@ -12678,7 +12710,7 @@ dependencies = [ "bytes", "claims", "crossbeam", - "dashmap", + "dashmap 7.0.0-rc2", "derivative", "hashbrown 0.14.3", "itertools 0.13.0", @@ -13517,7 +13549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -13536,15 +13568,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.11", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -14596,7 +14628,7 @@ dependencies = [ [[package]] name = "processor" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processors-v2.git?rev=8628f1534efa28ada547bbcd34afec1af55697d4#8628f1534efa28ada547bbcd34afec1af55697d4" +source = "git+https://github.com/aptos-labs/aptos-indexer-processors-v2.git?tag=aptos-indexer-processors-v2.1.4#768c3433275cc4a4d191b8e97442c78408642849" dependencies = [ "ahash 0.8.11", "allocative", @@ -15287,6 +15319,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" +dependencies = [ + "bitflags 2.6.0", +] + [[package]] name = "redox_users" version = "0.4.4" @@ -16020,7 +16061,7 @@ dependencies = [ [[package]] name = "sample" version = "0.1.0" -source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?rev=c63f6ee891e42cc50f92480c21d4afa86012b157#c63f6ee891e42cc50f92480c21d4afa86012b157" +source = "git+https://github.com/aptos-labs/aptos-indexer-processor-sdk.git?tag=aptos-indexer-processor-sdk-v2.1.1#6eb78b8fdadae14551e886185105ad5fffa29f2d" dependencies = [ "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index 4c7e4fc0d759d..f2428cb034bc0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -100,6 +100,7 @@ members = [ "crates/aptos-telemetry-service", "crates/aptos-temppath", 
"crates/aptos-time-service", + "crates/aptos-transaction-filters", "crates/aptos-warp-webserver", "crates/bounded-executor", "crates/channel", @@ -447,6 +448,7 @@ aptos-storage-service-types = { path = "state-sync/storage-service/types" } aptos-storage-service-server = { path = "state-sync/storage-service/server" } aptos-system-utils = { path = "crates/aptos-system-utils" } aptos-transaction-filter = { path = "ecosystem/indexer-grpc/transaction-filter" } +aptos-transaction-filters = { path = "crates/aptos-transaction-filters" } aptos-telemetry = { path = "crates/aptos-telemetry" } aptos-telemetry-service = { path = "crates/aptos-telemetry-service" } aptos-temppath = { path = "crates/aptos-temppath" } @@ -496,7 +498,7 @@ ark-groth16 = "0.4.0" ark-relations = "0.4.0" ark-serialize = "0.4.0" ark-std = { version = "0.4.0", features = ["getrandom"] } -aptos-indexer-processor-sdk = { git = "https://github.com/aptos-labs/aptos-indexer-processor-sdk.git", rev = "c63f6ee891e42cc50f92480c21d4afa86012b157", features = [ +aptos-indexer-processor-sdk = { git = "https://github.com/aptos-labs/aptos-indexer-processor-sdk.git", tag = "aptos-indexer-processor-sdk-v2.1.1", features = [ "postgres_partial", ] } aptos-moving-average = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "62beedc881d1b76632318ceb186ee9065236468e" } @@ -558,7 +560,7 @@ crossterm = "0.26.1" csv = "1.2.1" curve25519-dalek = "3" curve25519-dalek-ng = "4" -dashmap = { version = "5.5.3", features = ["inline"] } +dashmap = { version = "7.0.0-rc2", features = ["inline-more"] } datatest-stable = "0.1.1" debug-ignore = { version = "1.0.3", features = ["serde"] } derivative = "2.2.0" @@ -587,6 +589,7 @@ ed25519-dalek-bip32 = "0.2.0" either = "1.6.1" enum_dispatch = "0.3.12" env_logger = "0.10.0" +equivalent = "1.0" erased-serde = "0.3.13" ethnum = "1.5.0" ethers = { version = "2" } @@ -695,7 +698,7 @@ pretty = "0.10.0" pretty_assertions = "1.2.1" # We set default-features to false so we don't onboard the libpq dep. 
See more here: # https://github.com/aptos-labs/aptos-core/pull/12568 -processor = { git = "https://github.com/aptos-labs/aptos-indexer-processors-v2.git", rev = "8628f1534efa28ada547bbcd34afec1af55697d4", default-features = false } +processor = { git = "https://github.com/aptos-labs/aptos-indexer-processors-v2.git", tag = "aptos-indexer-processors-v2.1.4", default-features = false } procfs = "0.14.1" proc-macro2 = "1.0.38" project-root = "0.2.2" diff --git a/api/Cargo.toml b/api/Cargo.toml index 4bda07ed5528d..21f2d61cec939 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -56,6 +56,7 @@ aptos-gas-meter = { workspace = true } aptos-gas-schedule = { workspace = true, features = ["testing"] } aptos-move-stdlib = { workspace = true } aptos-proptest-helpers = { workspace = true } +aptos-transaction-filters = { workspace = true, features = ["fuzzing"] } move-package = { workspace = true } passkey-types = { workspace = true } percent-encoding = { workspace = true } diff --git a/api/doc/spec.json b/api/doc/spec.json index 5e1cd2d23c404..97e664df52334 100644 --- a/api/doc/spec.json +++ b/api/doc/spec.json @@ -15423,6 +15423,7 @@ "invalid_transaction_update", "sequence_number_too_old", "vm_error", + "rejected_by_filter", "health_check_failed", "mempool_is_full", "internal_error", diff --git a/api/doc/spec.yaml b/api/doc/spec.yaml index 4698a0f04585f..5881f396169a3 100644 --- a/api/doc/spec.yaml +++ b/api/doc/spec.yaml @@ -11533,6 +11533,7 @@ components: - invalid_transaction_update - sequence_number_too_old - vm_error + - rejected_by_filter - health_check_failed - mempool_is_full - internal_error diff --git a/api/goldens/aptos_api__tests__transactions_test__test_get_transaction_by_hash_with_delayed_internal_indexer.json b/api/goldens/aptos_api__tests__transactions_test__test_get_transaction_by_hash_with_delayed_internal_indexer.json deleted file mode 100644 index 8340f651426b2..0000000000000 --- a/api/goldens/aptos_api__tests__transactions_test__test_get_transaction_by_hash_with_delayed_internal_indexer.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "hash": "", - "sender": "0x34bf7e2d17674feb234371a7ea58efd715f0e56ba20ebf13789480d9d643afaf", - "sequence_number": "0", - "max_gas_amount": "100000000", - "gas_unit_price": "0", - "expiration_timestamp_secs": "18446744073709551615", - "payload": { - "function": "0x1::aptos_account::transfer", - "type_arguments": [], - "arguments": [ - "0x1", - "1" - ], - "type": "entry_function_payload" - }, - "signature": { - "public_key": "0xd5a781494d2bf1a174ddffde1e02cb8881cff6dab70e61cbdef393deac0ce639", - "signature": "0xbdc9e553e86cdee876de3318bccd8c6499923b719ab5f189e8b43ba91771645f01c3ded4061e20b3bb85767e475dfe24f76b4aed46860c9328baf28d11d2c701", - "type": "ed25519_signature" - }, - "replay_protection_nonce": null, - "type": "pending_transaction" -} diff --git a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json index bf0f423086f9d..97fa4e4307885 100644 --- a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json +++ b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json @@ -295,8 +295,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 
9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 2045 }, "type": "block_epilogue_transaction" } diff --git a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json index 6acff6c759393..c0223234beee1 100644 --- a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json +++ b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json @@ -14,8 +14,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" }, @@ -306,8 +306,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" }, @@ -598,8 +598,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" }, @@ -890,8 +890,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" }, @@ -1182,8 +1182,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" }, @@ -1474,8 +1474,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" }, @@ -1766,8 +1766,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" }, @@ -2058,8 +2058,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" }, @@ -2350,8 +2350,8 @@ "block_end_info": { "block_gas_limit_reached": false, "block_output_limit_reached": false, - "block_effective_block_gas_units": 9, - "block_approx_output_size": 0 + "block_effective_block_gas_units": 18, + "block_approx_output_size": 1923 }, "type": "block_epilogue_transaction" } diff --git a/api/goldens/aptos_api__tests__view_function__test_simple_view_invalid.json 
b/api/goldens/aptos_api__tests__view_function__test_simple_view_invalid.json index 7d32adc125baa..e7bf7fd793095 100644 --- a/api/goldens/aptos_api__tests__view_function__test_simple_view_invalid.json +++ b/api/goldens/aptos_api__tests__view_function__test_simple_view_invalid.json @@ -1 +1 @@ -{"message":"PartialVMError with status INVALID_MAIN_FUNCTION_SIGNATURE and message 'function not marked as view function'","error_code":"invalid_input","vm_error_code":null} \ No newline at end of file +{"message":"function not marked as view function","error_code":"invalid_input","vm_error_code":1011} \ No newline at end of file diff --git a/api/goldens/aptos_api__tests__view_function__test_view_does_not_exist.json b/api/goldens/aptos_api__tests__view_function__test_view_does_not_exist.json new file mode 100644 index 0000000000000..8c34a57827311 --- /dev/null +++ b/api/goldens/aptos_api__tests__view_function__test_view_does_not_exist.json @@ -0,0 +1 @@ +{"message":"could not find view function by 0x1::aptos_account::fake_function","error_code":"invalid_input","vm_error_code":null} \ No newline at end of file diff --git a/api/goldens/aptos_api__tests__view_function__test_view_error_move_abort.json b/api/goldens/aptos_api__tests__view_function__test_view_error_move_abort.json new file mode 100644 index 0000000000000..9a7bb75ef2a1f --- /dev/null +++ b/api/goldens/aptos_api__tests__view_function__test_view_error_move_abort.json @@ -0,0 +1 @@ +{"message":"Move abort in 0x1::account: ENO_SUCH_ROTATION_CAPABILITY_OFFER(0x60012): The specified rotation capability offer does not exist at the specified offerer address","error_code":"invalid_input","vm_error_code":4016} \ No newline at end of file diff --git a/api/goldens/aptos_api__tests__view_function__test_view_error_type_resolution_error.json b/api/goldens/aptos_api__tests__view_function__test_view_error_type_resolution_error.json new file mode 100644 index 0000000000000..175a627741044 --- /dev/null +++ b/api/goldens/aptos_api__tests__view_function__test_view_error_type_resolution_error.json @@ -0,0 +1 @@ +{"message":"Struct 0x1::aptos_coin::NewCoin does not exist","error_code":"invalid_input","vm_error_code":2021} \ No newline at end of file diff --git a/api/src/accounts.rs b/api/src/accounts.rs index 887f8e02f3013..25171dfd30547 100644 --- a/api/src/accounts.rs +++ b/api/src/accounts.rs @@ -664,7 +664,8 @@ impl Account { .find_resource(&state_view, self.address, resource_type) .context(format!( "Failed to query DB to check for {} at {}", - resource_type, self.address + resource_type.to_canonical_string(), + self.address )) .map_err(|err| { BasicErrorWith404::internal_with_code( diff --git a/api/src/response.rs b/api/src/response.rs index ab94491b083bc..e7082f4ed73a6 100644 --- a/api/src/response.rs +++ b/api/src/response.rs @@ -91,6 +91,14 @@ macro_rules! generate_error_traits { error_code: aptos_api_types::AptosErrorCode, ) -> Self where Self: Sized; + #[allow(unused)] + fn [<$trait_name:snake _with_optional_vm_status_and_ledger_info>]( + err: Err, + error_code: aptos_api_types::AptosErrorCode, + vm_status: Option, + ledger_info: Option<&aptos_api_types::LedgerInfo> + ) -> Self where Self: Sized; + #[allow(unused)] fn [<$trait_name:snake _with_vm_status>]( err: Err, @@ -199,6 +207,30 @@ macro_rules! 
generate_error_response { None, )) } + fn [<$name:snake _with_optional_vm_status_and_ledger_info>]( + err: Err, + error_code: aptos_api_types::AptosErrorCode, + vm_status: Option, + ledger_info: Option<&aptos_api_types::LedgerInfo> + ) -> Self where Self: Sized { + let error = if let Some(vm_status) = vm_status { + aptos_api_types::AptosError::new_with_vm_status(err, error_code, vm_status) + } else { + aptos_api_types::AptosError::new_with_error_code(err, error_code) + }; + let payload = poem_openapi::payload::Json(Box::new(error)); + Self::from($enum_name::$name( + payload, + ledger_info.map(|info| info.chain_id), + ledger_info.map(|info| info.ledger_version.into()), + ledger_info.map(|info| info.oldest_ledger_version.into()), + ledger_info.map(|info| info.ledger_timestamp.into()), + ledger_info.map(|info| info.epoch.into()), + ledger_info.map(|info| info.block_height.into()), + ledger_info.map(|info| info.oldest_block_height.into()), + None, + )) + } fn [<$name:snake _with_vm_status>]( err: Err, @@ -663,7 +695,9 @@ pub fn resource_not_found( "Resource", format!( "Address({}), Struct tag({}) and Ledger version({})", - address, struct_tag, ledger_version + address, + struct_tag.to_canonical_string(), + ledger_version ), AptosErrorCode::ResourceNotFound, ledger_info, @@ -698,7 +732,10 @@ pub fn struct_field_not_found( "Struct Field", format!( "Address({}), Struct tag({}), Field name({}) and Ledger version({})", - address, struct_tag, field_name, ledger_version + address, + struct_tag.to_canonical_string(), + field_name, + ledger_version ), AptosErrorCode::StructFieldNotFound, ledger_info, diff --git a/api/src/state.rs b/api/src/state.rs index 79095ab2f14ea..acb5a793bc037 100644 --- a/api/src/state.rs +++ b/api/src/state.rs @@ -291,7 +291,8 @@ impl StateApi { .find_resource(&state_view, address, &tag) .context(format!( "Failed to query DB to check for {} at {}", - tag, address + tag.to_canonical_string(), + address )) .map_err(|err| { BasicErrorWith404::internal_with_code( diff --git a/api/src/tests/accounts_test.rs b/api/src/tests/accounts_test.rs index c8122a4b18559..ecc6354416417 100644 --- a/api/src/tests/accounts_test.rs +++ b/api/src/tests/accounts_test.rs @@ -3,7 +3,6 @@ // SPDX-License-Identifier: Apache-2.0 use super::new_test_context; -use crate::tests::new_test_context_with_db_sharding_and_internal_indexer; use aptos_api_test_context::{current_function_name, find_value, TestContext}; use aptos_api_types::{MoveModuleBytecode, MoveResource, MoveStructTag, StateKeyWrapper}; use aptos_cached_packages::aptos_stdlib; @@ -55,8 +54,7 @@ async fn test_get_account_resources_by_valid_account_address() { res.push(resp); } - let shard_context = - new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let shard_context = new_test_context(current_function_name!()); let mut shard_res = vec![]; for address in &addresses { let resp = shard_context.get(&account_resources(address)).await; @@ -159,8 +157,7 @@ async fn test_get_account_resources_by_ledger_version() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_get_account_resources_by_ledger_version_with_shard_context() { - let shard_context = - new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let shard_context = new_test_context(current_function_name!()); test_account_resources_by_ledger_version_with_context(shard_context).await; } @@ -311,8 +308,7 @@ async fn test_get_account_modules_by_ledger_version_with_context(mut context: Te async fn 
test_get_account_modules_by_ledger_version() { let context = new_test_context(current_function_name!()); test_get_account_modules_by_ledger_version_with_context(context).await; - let shard_context = - new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let shard_context = new_test_context(current_function_name!()); test_get_account_modules_by_ledger_version_with_context(shard_context).await; } diff --git a/api/src/tests/event_v2_translation_test.rs b/api/src/tests/event_v2_translation_test.rs index c63e683704e73..995fa0f92c553 100644 --- a/api/src/tests/event_v2_translation_test.rs +++ b/api/src/tests/event_v2_translation_test.rs @@ -2,7 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::{new_test_context, new_test_context_with_db_sharding_and_internal_indexer}; +use super::new_test_context; use aptos_api_test_context::{current_function_name, TestContext}; use aptos_crypto::{ed25519::Ed25519PrivateKey, SigningKey, ValidCryptoMaterial}; use aptos_sdk::types::LocalAccount; @@ -44,8 +44,7 @@ fn matches_event_details( #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore] async fn test_event_v2_translation_coin_deposit_event() { - let context = - &mut new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let context = &mut new_test_context(current_function_name!()); // Start with the MODULE_EVENT_MIGRATION feature disabled context.disable_feature(MODULE_EVENT_MIGRATION).await; @@ -157,8 +156,7 @@ async fn test_event_v2_translation_coin_deposit_event() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore] async fn test_event_v2_translation_coin_withdraw_event() { - let context = - &mut new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let context = &mut new_test_context(current_function_name!()); // Start with the MODULE_EVENT_MIGRATION feature disabled context.disable_feature(MODULE_EVENT_MIGRATION).await; @@ -278,8 +276,7 @@ async fn test_event_v2_translation_coin_withdraw_event() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[ignore] async fn test_event_v2_translation_account_coin_register_event() { - let context = - &mut new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let context = &mut new_test_context(current_function_name!()); // Make sure that the MODULE_EVENT_MIGRATION feature is enabled context.enable_feature(MODULE_EVENT_MIGRATION).await; @@ -421,8 +418,7 @@ fn rotate_authentication_key_payload( #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_event_v2_translation_account_key_rotation_event() { - let context = - &mut new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let context = &mut new_test_context(current_function_name!()); // Make sure that the MODULE_EVENT_MIGRATION feature is enabled context.enable_feature(MODULE_EVENT_MIGRATION).await; @@ -531,8 +527,7 @@ async fn test_event_v2_translation_account_key_rotation_event() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_event_v2_translation_token_objects() { - let context = - &mut new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let context = &mut new_test_context(current_function_name!()); // Make sure that the MODULE_EVENT_MIGRATION feature is enabled context.enable_feature(MODULE_EVENT_MIGRATION).await; @@ -684,8 +679,7 @@ async fn 
test_event_v2_translation_token_objects() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_event_v2_translation_token_v1() { - let context = - &mut new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let context = &mut new_test_context(current_function_name!()); // Make sure that the MODULE_EVENT_MIGRATION feature is enabled context.enable_feature(MODULE_EVENT_MIGRATION).await; diff --git a/api/src/tests/events_test.rs b/api/src/tests/events_test.rs index 1c1f69830cc06..127c7aa4594f4 100644 --- a/api/src/tests/events_test.rs +++ b/api/src/tests/events_test.rs @@ -3,7 +3,6 @@ // SPDX-License-Identifier: Apache-2.0 use super::new_test_context; -use crate::tests::new_test_context_with_db_sharding_and_internal_indexer; use aptos_api_test_context::{current_function_name, TestContext}; use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC}; use serde_json::json; @@ -39,8 +38,7 @@ async fn test_get_events_filter_by_start_sequence_number() { context.check_golden_output(resp.clone()); // assert the same resp after db sharding migration with internal indexer turned on - let shard_context = - new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let shard_context = new_test_context(current_function_name!()); let new_resp = shard_context .get( format!( @@ -101,8 +99,7 @@ async fn test_get_events_by_account_event_handle() { .await; context.check_golden_output(resp.clone()); - let shard_context = - new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let shard_context = new_test_context(current_function_name!()); let new_resp = shard_context .get("/accounts/0x1/events/0x1::reconfiguration::Configuration/events") .await; diff --git a/api/src/tests/function_value_test.rs b/api/src/tests/function_value_test.rs index 9c3659873f92b..3f48ab337fa6d 100644 --- a/api/src/tests/function_value_test.rs +++ b/api/src/tests/function_value_test.rs @@ -82,3 +82,32 @@ async fn test_function_values() { .unwrap()["data"]; assert_eq!(state, &json!({"__variant__": "Value", "_0": "33"})); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_function_values_with_references() { + let mut context = new_test_context(current_function_name!()); + let mut account = context.create_account().await; + let addr = account.address(); + + let named_addresses = vec![("account".to_string(), addr)]; + let txn = futures::executor::block_on(async move { + let path = + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("src/tests/move/pack_function_values"); + TestContext::build_package_with_latest_language(path, named_addresses) + }); + context.publish_package(&mut account, txn).await; + + let resource = format!("{}::test::FunctionStore", addr); + let response = &context.gen_resource(&addr, &resource).await.unwrap(); + + let expected_name = format!("{}::test::freeze_ref", addr); + assert_eq!( + response["data"], + json!({ + "f": { + "__fun_name__": &expected_name, + "__mask__": "0", + } + }) + ); +} diff --git a/api/src/tests/mod.rs b/api/src/tests/mod.rs index b3aeb0f2980f8..ce3a06c54acdc 100644 --- a/api/src/tests/mod.rs +++ b/api/src/tests/mod.rs @@ -27,18 +27,8 @@ mod webauthn_secp256r1_ecdsa; use aptos_api_test_context::{new_test_context_inner as super_new_test_context, TestContext}; use aptos_config::config::{internal_indexer_db_config::InternalIndexerDBConfig, NodeConfig}; -fn new_test_context(test_name: String) -> TestContext { - new_test_context_with_config(test_name, 
NodeConfig::default()) -} - -fn new_test_context_with_config(test_name: String, node_config: NodeConfig) -> TestContext { - super_new_test_context(test_name, node_config, false, None) -} - #[cfg(test)] -fn new_test_context_with_db_sharding_and_internal_indexer(test_name: String) -> TestContext { - let mut node_config = NodeConfig::default(); - node_config.storage.rocksdb_configs.enable_storage_sharding = true; +fn new_test_context_with_config(test_name: String, mut node_config: NodeConfig) -> TestContext { node_config.indexer_db_config = InternalIndexerDBConfig::new(true, true, true, 0, true, 10); let test_context = super_new_test_context(test_name, node_config, false, None); let _ = test_context @@ -48,12 +38,7 @@ fn new_test_context_with_db_sharding_and_internal_indexer(test_name: String) -> test_context } -fn new_test_context_with_sharding_and_delayed_internal_indexer( - test_name: String, - end_version: Option, -) -> TestContext { - let mut node_config = NodeConfig::default(); - node_config.storage.rocksdb_configs.enable_storage_sharding = true; - node_config.indexer_db_config = InternalIndexerDBConfig::new(true, true, true, 0, true, 1); - super_new_test_context(test_name, node_config, false, end_version) +#[cfg(test)] +fn new_test_context(test_name: String) -> TestContext { + new_test_context_with_config(test_name, NodeConfig::default()) } diff --git a/api/src/tests/move/pack_function_values/Move.toml b/api/src/tests/move/pack_function_values/Move.toml new file mode 100644 index 0000000000000..bcd2fe040c39a --- /dev/null +++ b/api/src/tests/move/pack_function_values/Move.toml @@ -0,0 +1,9 @@ +[package] +name = "pack_function_values" +version = "0.0.0" + +[dependencies] +AptosFramework = { local = "../../../../../aptos-move/framework/aptos-framework" } + +[addresses] +account = "_" diff --git a/api/src/tests/move/pack_function_values/sources/test.move b/api/src/tests/move/pack_function_values/sources/test.move new file mode 100644 index 0000000000000..60eeefa4d4c70 --- /dev/null +++ b/api/src/tests/move/pack_function_values/sources/test.move @@ -0,0 +1,15 @@ +module account::test { + + struct FunctionStore has key { + f: |&mut u64|&u64 has copy+drop+store, + } + + public fun freeze_ref(x: &mut u64): &u64 { + x + } + + fun init_module(account: &signer) { + let f: |&mut u64|&u64 has copy+drop+store = |s| freeze_ref(s); + move_to(account, FunctionStore { f }); + } +} diff --git a/api/src/tests/transactions_test.rs b/api/src/tests/transactions_test.rs index 829c863227fd8..ccf71bf23781f 100644 --- a/api/src/tests/transactions_test.rs +++ b/api/src/tests/transactions_test.rs @@ -2,19 +2,16 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use super::new_test_context; -use crate::tests::{ - new_test_context_with_config, new_test_context_with_db_sharding_and_internal_indexer, - new_test_context_with_sharding_and_delayed_internal_indexer, -}; +use crate::tests::{new_test_context, new_test_context_with_config}; use aptos_api_test_context::{assert_json, current_function_name, pretty, TestContext}; -use aptos_config::config::{GasEstimationStaticOverride, NodeConfig}; +use aptos_config::config::{GasEstimationStaticOverride, NodeConfig, TransactionFilterConfig}; use aptos_crypto::{ ed25519::{Ed25519PrivateKey, Ed25519Signature}, multi_ed25519::{MultiEd25519PrivateKey, MultiEd25519PublicKey}, PrivateKey, SigningKey, Uniform, }; use aptos_sdk::types::{AccountKey, LocalAccount}; +use aptos_transaction_filters::transaction_filter::TransactionFilter; use aptos_types::{ account_address::AccountAddress, account_config::aptos_test_root_address, @@ -492,34 +489,6 @@ async fn test_get_transaction_by_hash() { assert_json(resp, txns[0].clone()); } -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_get_transaction_by_hash_with_delayed_internal_indexer() { - let mut context = new_test_context_with_sharding_and_delayed_internal_indexer( - current_function_name!(), - Some(2), - ); - - let mut account = context.gen_account(); - let txn = context.create_user_account(&account).await; - context.commit_block(&vec![txn.clone()]).await; - let txn1 = context.account_transfer_to( - &mut account, - AccountAddress::from_hex_literal("0x1").unwrap(), - 1, - ); - context.commit_block(&vec![txn1.clone()]).await; - let committed_hash = txn1.committed_hash().to_hex_literal(); - - let _ = context - .get_indexer_reader() - .unwrap() - .wait_for_internal_indexer(1); - let resp = context - .get(&format!("/transactions/by_hash/{}", committed_hash)) - .await; - context.check_golden_output(resp); -} - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_get_transaction_by_hash_not_found() { let mut context = new_test_context(current_function_name!()); @@ -808,8 +777,7 @@ async fn test_account_transaction_with_context(mut context: TestContext) { async fn test_get_account_transactions() { let context = new_test_context(current_function_name!()); test_account_transaction_with_context(context).await; - let shard_context = - new_test_context_with_db_sharding_and_internal_indexer(current_function_name!()); + let shard_context = new_test_context(current_function_name!()); test_account_transaction_with_context(shard_context).await; } @@ -1665,9 +1633,9 @@ async fn test_simulation_filter_deny() { let mut node_config = NodeConfig::default(); // Blocklist the balance function. - let mut filter = node_config.api.simulation_filter.clone(); - filter = filter.add_all_filter(false); - node_config.api.simulation_filter = filter; + let transaction_filter = TransactionFilter::empty().add_all_filter(false); + let transaction_filter_config = TransactionFilterConfig::new(true, transaction_filter); + node_config.transaction_filters.api_filter = transaction_filter_config; let mut context = new_test_context_with_config(current_function_name!(), node_config); @@ -1690,10 +1658,11 @@ async fn test_simulation_filter_allow_sender() { let mut node_config = NodeConfig::default(); // Allow the root sender only. 
- let mut filter = node_config.api.simulation_filter.clone(); - filter = filter.add_sender_filter(true, aptos_test_root_address()); - filter = filter.add_all_filter(false); - node_config.api.simulation_filter = filter; + let transaction_filter = TransactionFilter::empty() + .add_sender_filter(true, aptos_test_root_address()) + .add_all_filter(false); + let transaction_filter_config = TransactionFilterConfig::new(true, transaction_filter); + node_config.transaction_filters.api_filter = transaction_filter_config; let mut context = new_test_context_with_config(current_function_name!(), node_config); diff --git a/api/src/tests/view_function.rs b/api/src/tests/view_function.rs index 2637385020023..5929482370ad8 100644 --- a/api/src/tests/view_function.rs +++ b/api/src/tests/view_function.rs @@ -52,6 +52,7 @@ async fn test_view_gas_used_header() { let txn2 = context.account_transfer(creator, owner, 100_000); context.commit_block(&vec![txn1, txn2]).await; + context.wait_for_internal_indexer_caught_up().await; let req = warp::test::request() .method("POST") @@ -144,6 +145,79 @@ async fn test_view_blocklist() { context.check_golden_output_no_prune(json!(vec![resp1, resp2])); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_view_error_type_resolution_error() { + let mut context = new_test_context(current_function_name!()); + let creator = &mut context.gen_account(); + let owner = &mut context.gen_account(); + let txn1 = context.mint_user_account(creator).await; + let txn2 = context.account_transfer(creator, owner, 100_000); + + context.commit_block(&vec![txn1, txn2]).await; + + let resp = context + .expect_status_code(400) + .post( + "/view", + json!({ + "function":"0x1::coin::is_account_registered", + "arguments": vec![AccountAddress::random().to_string()], + "type_arguments": ["0x1::aptos_coin::NewCoin"], // Does not exist + }), + ) + .await; + context.check_golden_output_no_prune(resp); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_view_error_move_abort() { + let mut context = new_test_context(current_function_name!()); + let creator = &mut context.gen_account(); + let owner = &mut context.gen_account(); + let txn1 = context.mint_user_account(creator).await; + let txn2 = context.account_transfer(creator, owner, 100_000); + + context.commit_block(&vec![txn1, txn2]).await; + + let resp = context + .expect_status_code(400) + .post( + "/view", + json!({ + "function":"0x1::account::get_rotation_capability_offer_for", // Rotation capability does not exist + "arguments": vec![owner.address().to_string()], + "type_arguments": [], + }), + ) + .await; + context.check_golden_output_no_prune(resp); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_view_does_not_exist() { + let mut context = new_test_context(current_function_name!()); + let creator = &mut context.gen_account(); + let owner = &mut context.gen_account(); + let txn1 = context.mint_user_account(creator).await; + let txn2 = context.account_transfer(creator, owner, 100_000); + + context.commit_block(&vec![txn1, txn2]).await; + + let resp = context + .expect_status_code(400) + .post( + "/view", + json!({ + "function":"0x1::aptos_account::fake_function", + "arguments": vec![owner.address().to_string()], + "type_arguments": [], + }), + ) + .await; + + context.check_golden_output_no_prune(resp); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_simple_view_invalid() { let mut context = new_test_context(current_function_name!()); 
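The simulation-filter tests above capture the migration from the deprecated `node_config.api.simulation_filter` to the shared `transaction_filters` section of the node config. A minimal sketch of the new configuration pattern, using only the constructors that appear in this diff (the first-match-wins ordering of rules is an assumption inferred from the tests):

    use aptos_config::config::{NodeConfig, TransactionFilterConfig};
    use aptos_transaction_filters::transaction_filter::TransactionFilter;
    use aptos_types::account_config::aptos_test_root_address;

    fn node_config_allowing_root_only() -> NodeConfig {
        let mut node_config = NodeConfig::default();
        // Allow the root sender, then deny every other transaction.
        let filter = TransactionFilter::empty()
            .add_sender_filter(true, aptos_test_root_address())
            .add_all_filter(false);
        // The boolean enables the filter; a disabled config lets everything through.
        node_config.transaction_filters.api_filter = TransactionFilterConfig::new(true, filter);
        node_config
    }

As the `transactions.rs` change below shows, the API consults the filter only when `is_enabled()` is true, so the default config leaves simulation unrestricted.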
diff --git a/api/src/transactions.rs b/api/src/transactions.rs index f435cd51415c5..de22059321b03 100644 --- a/api/src/transactions.rs +++ b/api/src/transactions.rs @@ -17,6 +17,7 @@ use crate::{ BasicErrorWith404, BasicResponse, BasicResponseStatus, BasicResult, BasicResultWith404, ForbiddenError, InsufficientStorageError, InternalError, }, + view_function::convert_view_function_error, ApiTags, }; use anyhow::Context as AnyhowContext; @@ -580,15 +581,13 @@ impl TransactionsApi { let ledger_info = context.get_latest_ledger_info()?; let mut signed_transaction = api.get_signed_transaction(&ledger_info, data)?; - // Confirm the simulation filter allows the transaction. We use HashValue::zero() - // here for the block ID because we don't allow filtering by block ID for the - // simulation filters. See the ConfigSanitizer for ApiConfig. - if !context.node_config.api.simulation_filter.allows( - aptos_crypto::HashValue::zero(), - ledger_info.epoch(), - ledger_info.timestamp(), - &signed_transaction, - ) { + // Confirm the API simulation filter allows the transaction + let api_filter = &context.node_config.transaction_filters.api_filter; + if api_filter.is_enabled() + && !api_filter + .transaction_filter() + .allows_transaction(&signed_transaction) + { return Err(SubmitTransactionError::forbidden_with_code( "Transaction not allowed by simulation filter", AptosErrorCode::InvalidInput, @@ -641,10 +640,14 @@ impl TransactionsApi { vec![signed_transaction.sender().to_vec()], context.node_config.api.max_gas_view_function, ); - let values = output.values.map_err(|err| { - SubmitTransactionError::bad_request_with_code_no_info( - err, + let values = output.values.map_err(|status| { + let (err_string, vm_error_code) = + convert_view_function_error(&status, &state_view, &context); + SubmitTransactionError::bad_request_with_optional_vm_status_and_ledger_info( + anyhow::anyhow!(err_string), AptosErrorCode::InvalidInput, + vm_error_code, + Some(&ledger_info), ) })?; let balance: u64 = bcs::from_bytes(&values[0]).map_err(|err| { @@ -1405,6 +1408,10 @@ impl TransactionsApi { format!("Transaction was rejected with status {}", mempool_status,), AptosErrorCode::InternalError, )), + MempoolStatusCode::RejectedByFilter => Err(AptosError::new_with_error_code( + mempool_status.message, + AptosErrorCode::RejectedByFilter, + )), } } @@ -1612,6 +1619,7 @@ impl TransactionsApi { None, output.gas_used(), exe_status, + None, ); let mut events = output.events().to_vec(); let _ = self diff --git a/api/src/view_function.rs b/api/src/view_function.rs index cc640c5ff1f7c..6e2ef0969c119 100644 --- a/api/src/view_function.rs +++ b/api/src/view_function.rs @@ -18,6 +18,7 @@ use aptos_api_types::{ U64, }; use aptos_bcs_utils::serialize_uleb128; +use aptos_types::{state_store::StateView, transaction::ViewFunctionError, vm_status::StatusCode}; use aptos_vm::AptosVM; use itertools::Itertools; use move_core_types::language_storage::TypeTag; @@ -30,6 +31,24 @@ pub struct ViewFunctionApi { pub context: Arc, } +pub fn convert_view_function_error( + error: &ViewFunctionError, + state_view: &impl StateView, + context: &Context, +) -> (String, Option) { + match error { + ViewFunctionError::MoveAbort(status, vm_error_code) => { + let vm_status = state_view + .as_converter(context.db.clone(), context.indexer_reader.clone()) + .explain_vm_status(status, None); + (vm_status, *vm_error_code) + }, + ViewFunctionError::ErrorMessage(message, vm_error_code) => { + (message.clone(), *vm_error_code) + }, + } +} + #[derive(ApiRequest, Debug)] pub enum 
ViewFunctionRequest { #[oai(content_type = "application/json")] @@ -140,8 +159,16 @@ fn view_request( view_function.args.clone(), context.node_config.api.max_gas_view_function, ); - let values = output.values.map_err(|err| { - BasicErrorWith404::bad_request_with_code_no_info(err, AptosErrorCode::InvalidInput) + + let values = output.values.map_err(|status| { + let (err_string, vm_error_code) = + convert_view_function_error(&status, &state_view, &context); + BasicErrorWith404::bad_request_with_optional_vm_status_and_ledger_info( + anyhow::anyhow!(err_string), + AptosErrorCode::InvalidInput, + vm_error_code, + Some(&ledger_info), + ) })?; let result = match accept_type { AcceptType::Bcs => { diff --git a/api/test-context/src/test_context.rs b/api/test-context/src/test_context.rs index 264ad7ea4a505..4ddafdc1de530 100644 --- a/api/test-context/src/test_context.rs +++ b/api/test-context/src/test_context.rs @@ -830,7 +830,7 @@ impl TestContext { .execute_block( (metadata.id(), into_signature_verified_block(txns.clone())).into(), parent_id, - BlockExecutorConfigFromOnchain::new_no_block_limit(), + BlockExecutorConfigFromOnchain::on_but_large_for_test(), ) .unwrap(); let compute_status = result.compute_status_for_input_txns().clone(); @@ -1194,6 +1194,7 @@ impl TestContext { } pub async fn execute(&self, req: warp::test::RequestBuilder) -> Value { + self.wait_for_internal_indexer_caught_up().await; let resp = self.reply(req).await; let headers = resp.headers(); diff --git a/api/types/src/convert.rs b/api/types/src/convert.rs index 420f414a214a9..439fcd7bf3a40 100644 --- a/api/types/src/convert.rs +++ b/api/types/src/convert.rs @@ -32,8 +32,8 @@ use aptos_types::{ StateView, }, transaction::{ - BlockEndInfo, BlockEpiloguePayload, EntryFunction, ExecutionStatus, Multisig, - RawTransaction, Script, SignedTransaction, TransactionAuxiliaryData, + BlockEndInfo, EntryFunction, ExecutionStatus, Multisig, RawTransaction, Script, + SignedTransaction, TransactionAuxiliaryData, }, vm::module_metadata::get_metadata, vm_status::AbortLocation, @@ -218,26 +218,27 @@ impl<'a, S: StateView> MoveConverter<'a, S> { }) }, BlockEpilogue(block_epilogue_payload) => { + let block_end_info = block_epilogue_payload + .try_as_block_end_info() + .unwrap() + .clone(); + let block_end_info = match block_end_info { + BlockEndInfo::V0 { + block_gas_limit_reached, + block_output_limit_reached, + block_effective_block_gas_units, + block_approx_output_size, + } => Some(crate::transaction::BlockEndInfo { + block_gas_limit_reached, + block_output_limit_reached, + block_effective_block_gas_units, + block_approx_output_size, + }), + }; Transaction::BlockEpilogueTransaction(BlockEpilogueTransaction { info, timestamp: timestamp.into(), - block_end_info: match block_epilogue_payload { - BlockEpiloguePayload::V0 { - block_end_info: - BlockEndInfo::V0 { - block_gas_limit_reached, - block_output_limit_reached, - block_effective_block_gas_units, - block_approx_output_size, - }, - .. 
- } => Some(crate::transaction::BlockEndInfo { - block_gas_limit_reached, - block_output_limit_reached, - block_effective_block_gas_units, - block_approx_output_size, - }), - }, + block_end_info, }) }, aptos_types::transaction::Transaction::ValidatorTransaction(txn) => { @@ -573,9 +574,9 @@ impl<'a, S: StateView> MoveConverter<'a, S> { Ok(Some(DecodedTableData { key: key.json().unwrap(), - key_type: table_info.key_type.to_string(), + key_type: table_info.key_type.to_canonical_string(), value: value.json().unwrap(), - value_type: table_info.value_type.to_string(), + value_type: table_info.value_type.to_canonical_string(), })) } @@ -596,7 +597,7 @@ impl<'a, S: StateView> MoveConverter<'a, S> { Ok(Some(DeletedTableData { key: key.json().unwrap(), - key_type: table_info.key_type.to_string(), + key_type: table_info.key_type.to_canonical_string(), })) } @@ -1028,10 +1029,10 @@ impl<'a, S: StateView> MoveConverter<'a, S> { let code = self.inner.view_existing_module(&module.clone().into())? as Arc; let func = code .find_function(function.name.0.as_ident_str()) - .ok_or_else(|| format_err!("could not find entry function by {}", function))?; + .ok_or_else(|| format_err!("could not find view function by {}", function))?; ensure!( func.generic_type_params.len() == type_arguments.len(), - "expected {} type arguments for entry function {}, but got {}", + "expected {} type arguments for view function {}, but got {}", func.generic_type_params.len(), function, type_arguments.len() @@ -1060,7 +1061,7 @@ impl<'a, S: StateView> MoveConverter<'a, S> { Ok(None) } - fn explain_vm_status( + pub fn explain_vm_status( &self, status: &ExecutionStatus, txn_aux_data: Option, diff --git a/api/types/src/error.rs b/api/types/src/error.rs index 3f7454f50c9b8..a2d4c2fd80c92 100644 --- a/api/types/src/error.rs +++ b/api/types/src/error.rs @@ -96,6 +96,8 @@ pub enum AptosErrorCode { SequenceNumberTooOld = 402, /// The submitted transaction failed VM checks. VmError = 403, + /// The transaction was rejected due to a transaction filter. + RejectedByFilter = 404, /// Health check failed. 
HealthCheckFailed = 500, diff --git a/api/types/src/move_types.rs b/api/types/src/move_types.rs index 325d2b9a10a2b..21d65bde1e286 100644 --- a/api/types/src/move_types.rs +++ b/api/types/src/move_types.rs @@ -16,7 +16,7 @@ use move_core_types::{ ability::{Ability, AbilitySet}, account_address::AccountAddress, identifier::Identifier, - language_storage::{FunctionTag, ModuleId, StructTag, TypeTag}, + language_storage::{FunctionParamOrReturnTag, FunctionTag, ModuleId, StructTag, TypeTag}, parser::{parse_struct_tag, parse_type_tag}, transaction_argument::TransactionArgument, }; @@ -807,7 +807,21 @@ fn from_function_tag(f: &FunctionTag) -> MoveType { results, abilities, } = f; - let from_vec = |tys: &[TypeTag]| tys.iter().map(MoveType::from).collect::>(); + let from_vec = |ts: &[FunctionParamOrReturnTag]| { + ts.iter() + .map(|t| match t { + FunctionParamOrReturnTag::Reference(t) => MoveType::Reference { + mutable: false, + to: Box::new(MoveType::from(t)), + }, + FunctionParamOrReturnTag::MutableReference(t) => MoveType::Reference { + mutable: true, + to: Box::new(MoveType::from(t)), + }, + FunctionParamOrReturnTag::Value(t) => MoveType::from(t), + }) + .collect::>() + }; MoveType::Function { args: from_vec(args), results: from_vec(results), @@ -838,7 +852,19 @@ impl TryFrom<&MoveType> for TypeTag { } => { let try_vec = |tys: &[MoveType]| { tys.iter() - .map(Self::try_from) + .map(|t| { + Ok(match t { + MoveType::Reference { mutable, to } => { + let tag = to.as_ref().try_into()?; + if *mutable { + FunctionParamOrReturnTag::MutableReference(tag) + } else { + FunctionParamOrReturnTag::Reference(tag) + } + }, + t => FunctionParamOrReturnTag::Value(t.try_into()?), + }) + }) .collect::>() }; TypeTag::Function(Box::new(FunctionTag { @@ -848,7 +874,7 @@ impl TryFrom<&MoveType> for TypeTag { })) }, MoveType::GenericTypeParam { index: _ } => TypeTag::Address, // Dummy type, allows for Object - _ => { + MoveType::Reference { .. 
} | MoveType::Unparsable(_) => { return Err(anyhow::anyhow!( "Invalid move type for converting into `TypeTag`: {:?}", &tag diff --git a/api/types/src/transaction.rs b/api/types/src/transaction.rs index 39d77ab717965..57ebf47291e55 100755 --- a/api/types/src/transaction.rs +++ b/api/types/src/transaction.rs @@ -1815,6 +1815,27 @@ pub struct MultiKeySignature { impl VerifyInput for MultiKeySignature { fn verify(&self) -> anyhow::Result<()> { + if self.public_keys.is_empty() { + bail!("MultiKey signature has no public keys") + } else if self.signatures.is_empty() { + bail!("MultiKey signature has no signatures") + } else if self.public_keys.len() > MAX_NUM_OF_KEYS { + bail!( + "MultiKey signature has over the maximum number of public keys {}", + MAX_NUM_OF_KEYS + ) + } else if self.signatures.len() > MAX_NUM_OF_SIGS { + bail!( + "MultiKey signature has over the maximum number of signatures {}", + MAX_NUM_OF_SIGS + ) + } else if self.signatures.len() != self.signatures_required as usize { + bail!("MultiKey signature does not match the number of signatures required") + } else if self.signatures_required == 0 { + bail!("MultiKey signature threshold must be greater than 0") + } else if self.signatures_required > MAX_NUM_OF_SIGS as u8 { + bail!("MultiKey signature threshold is greater than the maximum number of signatures") + } let _: AccountAuthenticator = self.try_into()?; Ok(()) } diff --git a/aptos-move/aptos-debugger/src/aptos_debugger.rs b/aptos-move/aptos-debugger/src/aptos_debugger.rs index 9a77ebfeb4da7..790fa90d48b15 100644 --- a/aptos-move/aptos-debugger/src/aptos_debugger.rs +++ b/aptos-move/aptos-debugger/src/aptos_debugger.rs @@ -68,7 +68,8 @@ impl AptosDebugger { ) -> anyhow::Result<Vec<TransactionOutput>> { let sig_verified_txns: Vec<SignatureVerifiedTransaction> = txns.into_iter().map(|x| x.into()).collect::<Vec<_>>(); - let txn_provider = DefaultTxnProvider::new(sig_verified_txns); + // TODO(grao): Pass in persisted info. + let txn_provider = DefaultTxnProvider::new_without_info(sig_verified_txns); let state_view = DebuggerStateView::new(self.debugger.clone(), version); print_transaction_stats(txn_provider.get_txns(), version); diff --git a/aptos-move/aptos-e2e-comparison-testing/src/data_collection.rs b/aptos-move/aptos-e2e-comparison-testing/src/data_collection.rs index 4cb63d52a4748..5de4548d7b507 100644 --- a/aptos-move/aptos-e2e-comparison-testing/src/data_collection.rs +++ b/aptos-move/aptos-e2e-comparison-testing/src/data_collection.rs @@ -93,7 +93,8 @@ impl DataCollection { // FIXME(#10412): remove the assert let val = debugger_state_view.get_state_value(TOTAL_SUPPLY_STATE_KEY.deref()); assert!(val.is_ok() && val.unwrap().is_some()); - let txn_provider = DefaultTxnProvider::new(sig_verified_txns); + // TODO(grao): Pass in persisted info here if necessary.
+ let txn_provider = DefaultTxnProvider::new_without_info(sig_verified_txns); AptosVMBlockExecutor::new() .execute_block_no_limit(&txn_provider, debugger_state_view) .map_err(|err| format_err!("Unexpected VM Error: {:?}", err)) diff --git a/aptos-move/aptos-gas-meter/src/meter.rs b/aptos-move/aptos-gas-meter/src/meter.rs index 788aebfaf1ae9..8ed5e36641511 100644 --- a/aptos-move/aptos-gas-meter/src/meter.rs +++ b/aptos-move/aptos-gas-meter/src/meter.rs @@ -22,7 +22,7 @@ use move_core_types::{ vm_status::StatusCode, }; use move_vm_types::{ - gas::{GasMeter as MoveGasMeter, SimpleInstruction}, + gas::{DependencyGasMeter, GasMeter, NativeGasMeter, SimpleInstruction}, views::{TypeView, ValueView}, }; @@ -46,7 +46,52 @@ where } } -impl<A> MoveGasMeter for StandardGasMeter<A> +impl<A> DependencyGasMeter for StandardGasMeter<A> +where + A: GasAlgebra, +{ + #[inline] + fn charge_dependency( + &mut self, + _is_new: bool, + addr: &AccountAddress, + _name: &IdentStr, + size: NumBytes, + ) -> PartialVMResult<()> { + // Modules under special addresses are considered system modules that should always + // be loaded, and are therefore excluded from gas charging. + // + // TODO: 0xA550C18 is a legacy system address we used, but it is currently not covered by + // `.is_special()`. We should double check if this address still needs special + // treatment. + if self.feature_version() >= 15 && !addr.is_special() { + self.algebra + .charge_execution(DEPENDENCY_PER_MODULE + DEPENDENCY_PER_BYTE * size)?; + self.algebra.count_dependency(size)?; + } + Ok(()) + } +} + +impl<A> NativeGasMeter for StandardGasMeter<A> +where + A: GasAlgebra, +{ + fn legacy_gas_budget_in_native_context(&self) -> InternalGas { + self.algebra.balance_internal() + } + + fn charge_native_execution(&mut self, amount: InternalGas) -> PartialVMResult<()> { + self.algebra.charge_execution(amount) + } + + #[inline] + fn use_heap_memory_in_native_context(&mut self, _amount: u64) -> PartialVMResult<()> { + Ok(()) + } +} + +impl<A> GasMeter for StandardGasMeter<A> where A: GasAlgebra, { @@ -241,7 +286,7 @@ where .vm_gas_params() .misc .abs_val - .abstract_value_size_stack_and_heap(val, self.feature_version()); + .abstract_value_size_stack_and_heap(val, self.feature_version())?; // Note(Gas): this makes a deep copy so we need to charge for the full value size self.algebra @@ -294,13 +339,31 @@ where } } + #[inline] + fn charge_pack_closure( + &mut self, + is_generic: bool, + args: impl ExactSizeIterator<Item = impl ValueView>, + ) -> PartialVMResult<()> { + let num_args = NumArgs::new(args.len() as u64); + + match is_generic { + false => self + .algebra + .charge_execution(PACK_CLOSURE_BASE + PACK_CLOSURE_PER_ARG * num_args), + true => self.algebra.charge_execution( + PACK_CLOSURE_GENERIC_BASE + PACK_CLOSURE_GENERIC_PER_ARG * num_args, + ), + } + } + #[inline] fn charge_read_ref(&mut self, val: impl ValueView) -> PartialVMResult<()> { let (stack_size, heap_size) = self .vm_gas_params() .misc .abs_val - .abstract_value_size_stack_and_heap(val, self.feature_version()); + .abstract_value_size_stack_and_heap(val, self.feature_version())?; // Note(Gas): this makes a deep copy so we need to charge for the full value size self.algebra @@ -322,8 +385,9 @@ where let cost = EQ_BASE + EQ_PER_ABS_VAL_UNIT - * (abs_val_params.abstract_value_size_dereferenced(lhs, self.feature_version()) - + abs_val_params.abstract_value_size_dereferenced(rhs, self.feature_version())); + * (abs_val_params.abstract_value_size_dereferenced(lhs, self.feature_version())?
+ + abs_val_params + .abstract_value_size_dereferenced(rhs, self.feature_version())?); self.algebra.charge_execution(cost) } @@ -334,8 +398,9 @@ where let cost = NEQ_BASE + NEQ_PER_ABS_VAL_UNIT - * (abs_val_params.abstract_value_size_dereferenced(lhs, self.feature_version()) - + abs_val_params.abstract_value_size_dereferenced(rhs, self.feature_version())); + * (abs_val_params.abstract_value_size_dereferenced(lhs, self.feature_version())? + + abs_val_params + .abstract_value_size_dereferenced(rhs, self.feature_version())?); self.algebra.charge_execution(cost) } @@ -482,33 +547,6 @@ where self.algebra.charge_execution(cost) } - - #[inline] - fn charge_dependency( - &mut self, - _is_new: bool, - addr: &AccountAddress, - _name: &IdentStr, - size: NumBytes, - ) -> PartialVMResult<()> { - // Modules under special addresses are considered system modules that should always - // be loaded, and are therefore excluded from gas charging. - // - // TODO: 0xA550C18 is a legacy system address we used, but it is currently not covered by - // `.is_special()`. We should double check if this address still needs special - // treatment. - if self.feature_version() >= 15 && !addr.is_special() { - self.algebra - .charge_execution(DEPENDENCY_PER_MODULE + DEPENDENCY_PER_BYTE * size)?; - self.algebra.count_dependency(size)?; - } - Ok(()) - } - - #[inline] - fn charge_heap_memory(&mut self, _amount: u64) -> PartialVMResult<()> { - Ok(()) - } } impl AptosGasMeter for StandardGasMeter diff --git a/aptos-move/aptos-gas-profiling/src/aggregate.rs b/aptos-move/aptos-gas-profiling/src/aggregate.rs index 0095dad224670..5c9b23f63200f 100644 --- a/aptos-move/aptos-gas-profiling/src/aggregate.rs +++ b/aptos-move/aptos-gas-profiling/src/aggregate.rs @@ -99,7 +99,7 @@ impl ExecutionAndIOCosts { addr: _addr, ty, cost, - } => insert_or_add(&mut storage_reads, format!("{}", ty), *cost), + } => insert_or_add(&mut storage_reads, ty.to_canonical_string(), *cost), CreateTy { cost } => insert_or_add(&mut ops, "create_ty".to_string(), *cost), } } diff --git a/aptos-move/aptos-gas-profiling/src/erased.rs b/aptos-move/aptos-gas-profiling/src/erased.rs index febdee5874026..df4da854f2225 100644 --- a/aptos-move/aptos-gas-profiling/src/erased.rs +++ b/aptos-move/aptos-gas-profiling/src/erased.rs @@ -148,9 +148,10 @@ impl ExecutionGasEvent { ), *cost, ), - LoadResource { addr, ty, cost } => { - Node::new(format!("load<{}::{}>", Render(addr), ty), *cost) - }, + LoadResource { addr, ty, cost } => Node::new( + format!("load<{}::{}>", Render(addr), ty.to_canonical_string()), + *cost, + ), CreateTy { cost } => Node::new("create_ty", *cost), } } @@ -272,7 +273,7 @@ impl WriteStorage { impl EventStorage { fn to_erased(&self) -> Node { - Node::new(format!("{}", self.ty), (self.cost, Fee::zero())) + Node::new(self.ty.to_canonical_string(), (self.cost, Fee::zero())) } } diff --git a/aptos-move/aptos-gas-profiling/src/flamegraph.rs b/aptos-move/aptos-gas-profiling/src/flamegraph.rs index 3dc78e7995d1a..c2459f8341e33 100644 --- a/aptos-move/aptos-gas-profiling/src/flamegraph.rs +++ b/aptos-move/aptos-gas-profiling/src/flamegraph.rs @@ -47,7 +47,10 @@ impl StorageFees { for event in &self.events { // TODO: Handle discounts. 
- lines.push(format!("events;{}", event.ty), event.cost) + lines.push( + format!("events;{}", event.ty.to_canonical_string()), + event.cost, + ) } lines.into_inner() @@ -148,7 +151,12 @@ impl ExecutionAndIOCosts { *cost, ), LoadResource { addr, ty, cost } => self.lines.push( - format!("{};load<{}::{}>", self.path(), Render(addr), ty), + format!( + "{};load<{}::{}>", + self.path(), + Render(addr), + ty.to_canonical_string() + ), *cost, ), } diff --git a/aptos-move/aptos-gas-profiling/src/log.rs b/aptos-move/aptos-gas-profiling/src/log.rs index ae7e36f37e82b..b198d27353653 100644 --- a/aptos-move/aptos-gas-profiling/src/log.rs +++ b/aptos-move/aptos-gas-profiling/src/log.rs @@ -58,6 +58,8 @@ pub enum FrameName { pub struct CallFrame { pub name: FrameName, pub events: Vec<ExecutionGasEvent>, + /// Accumulates gas charged by native functions. For frames of non-native functions, kept as 0. + pub native_gas: InternalGas, } /// The type of an operation performed on a storage item. @@ -183,6 +185,7 @@ impl CallFrame { ty_args, }, events: vec![], + native_gas: 0.into(), } } @@ -190,6 +193,7 @@ impl CallFrame { Self { name: FrameName::Script, events: vec![], + native_gas: 0.into(), } } } diff --git a/aptos-move/aptos-gas-profiling/src/profiler.rs b/aptos-move/aptos-gas-profiling/src/profiler.rs index 095982e1509ef..3ee2b7ac684e1 100644 --- a/aptos-move/aptos-gas-profiling/src/profiler.rs +++ b/aptos-move/aptos-gas-profiling/src/profiler.rs @@ -26,7 +26,7 @@ use move_core_types::{ language_storage::{ModuleId, TypeTag}, }; use move_vm_types::{ - gas::{GasMeter, SimpleInstruction}, + gas::{DependencyGasMeter, GasMeter, NativeGasMeter, SimpleInstruction}, views::{TypeView, ValueView}, }; @@ -158,6 +158,62 @@ where } } +impl<G> DependencyGasMeter for GasProfiler<G> +where + G: AptosGasMeter, +{ + fn charge_dependency( + &mut self, + is_new: bool, + addr: &AccountAddress, + name: &IdentStr, + size: NumBytes, + ) -> PartialVMResult<()> { + let (cost, res) = + self.delegate_charge(|base| base.charge_dependency(is_new, addr, name, size)); + + if !cost.is_zero() { + self.dependencies.push(Dependency { + is_new, + id: ModuleId::new(*addr, name.to_owned()), + size, + cost, + }); + } + + res + } +} + +impl<G> NativeGasMeter for GasProfiler<G> +where + G: AptosGasMeter, +{ + delegate!
{ + fn legacy_gas_budget_in_native_context(&self) -> InternalGas; + } + + fn use_heap_memory_in_native_context(&mut self, amount: u64) -> PartialVMResult<()> { + let (cost, res) = + self.delegate_charge(|base| base.use_heap_memory_in_native_context(amount)); + assert_eq!( + cost, + 0.into(), + "Using heap memory does not incur any gas costs" + ); + res + } + + fn charge_native_execution(&mut self, amount: InternalGas) -> PartialVMResult<()> { + self.frames + .last_mut() + .expect("Native function must have recorded the frame") + .native_gas += amount; + + self.base.charge_native_execution(amount) + } +} + impl<G> GasMeter for GasProfiler<G> where G: AptosGasMeter, { @@ -211,6 +267,13 @@ where args: impl ExactSizeIterator<Item = impl ValueView> + Clone, ) -> PartialVMResult<()>; + [PACK_CLOSURE] + fn charge_pack_closure( + &mut self, + is_generic: bool, + args: impl ExactSizeIterator<Item = impl ValueView> + Clone, + ) -> PartialVMResult<()>; + [READ_REF] fn charge_read_ref(&mut self, val: impl ValueView) -> PartialVMResult<()>; @@ -321,9 +384,6 @@ where amount: InternalGas, ret_vals: Option<impl ExactSizeIterator<Item = impl ValueView> + Clone>, ) -> PartialVMResult<()> { - let (cost, res) = - self.delegate_charge(|base| base.charge_native_function(amount, ret_vals)); - // Whenever a function gets called, the VM will notify the gas profiler // via `charge_call/charge_call_generic`. // // Later when it realizes the function is native, it will transform the original frame // into a native-specific event that does not contain recursive structures. - let cur = self.frames.pop().expect("frame must exist"); - let (module_id, name, ty_args) = match cur.name { + let frame = self.frames.pop().expect("frame must exist"); + + // Add native gas accumulated per frame. + let (mut cost, res) = + self.delegate_charge(|base| base.charge_native_function(amount, ret_vals)); + cost += frame.native_gas; + + let (module_id, name, ty_args) = match frame.name { FrameName::Function { module_id, name, @@ -341,11 +407,12 @@ where } => (module_id, name, ty_args), FrameName::Script => unreachable!(), }; + // The following line of code is needed for correctness. // // This is because additional gas events may be produced after the frame has been // created and these events need to be preserved.
- self.active_event_stream().extend(cur.events); + self.active_event_stream().extend(frame.events); self.record_gas_event(ExecutionGasEvent::CallNative { module_id, @@ -482,32 +549,6 @@ where res } - - fn charge_dependency( - &mut self, - is_new: bool, - addr: &AccountAddress, - name: &IdentStr, - size: NumBytes, - ) -> PartialVMResult<()> { - let (cost, res) = - self.delegate_charge(|base| base.charge_dependency(is_new, addr, name, size)); - - if !cost.is_zero() { - self.dependencies.push(Dependency { - is_new, - id: ModuleId::new(*addr, name.to_owned()), - size, - cost, - }); - } - - res - } - - fn charge_heap_memory(&mut self, amount: u64) -> PartialVMResult<()> { - self.base.charge_heap_memory(amount) - } } fn write_op_type(op: &WriteOpSize) -> WriteOpType { diff --git a/aptos-move/aptos-gas-profiling/src/render.rs b/aptos-move/aptos-gas-profiling/src/render.rs index a73b1e09e292f..e19dc982f752a 100644 --- a/aptos-move/aptos-gas-profiling/src/render.rs +++ b/aptos-move/aptos-gas-profiling/src/render.rs @@ -45,9 +45,9 @@ impl<'a> Display for Render<'a, (&'a ModuleId, &'a IdentStr, &'a [TypeTag])> { self.0 .2 .iter() - .map(|ty| format!("{}", ty)) + .map(|ty| ty.to_canonical_string()) .collect::>() - .join(",") + .join(", ") )?; } Ok(()) @@ -75,8 +75,8 @@ impl Display for Render<'_, Path> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.0 { Path::Code(module_id) => write!(f, "{}", Render(module_id)), - Path::Resource(struct_ty) => write!(f, "{}", struct_ty), - Path::ResourceGroup(struct_ty) => write!(f, "{}", struct_ty), + Path::Resource(struct_ty) => write!(f, "{}", struct_ty.to_canonical_string()), + Path::ResourceGroup(struct_ty) => write!(f, "{}", struct_ty.to_canonical_string()), } } } @@ -130,6 +130,6 @@ impl Display for Render<'_, WriteOpType> { impl Display for Render<'_, TypeTag> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) + write!(f, "{}", self.0.to_canonical_string()) } } diff --git a/aptos-move/aptos-gas-profiling/src/report.rs b/aptos-move/aptos-gas-profiling/src/report.rs index fc2aa9c25c3b1..83eb1499985af 100644 --- a/aptos-move/aptos-gas-profiling/src/report.rs +++ b/aptos-move/aptos-gas-profiling/src/report.rs @@ -285,7 +285,7 @@ impl TransactionGasLog { .iter() .map(|event| { json!({ - "name": format!("{}", event.ty), + "name": format!("{}", event.ty.to_canonical_string()), "cost": fmt_storage_fee(event.cost), "cost-percentage": fmt_storage_fee_percentage(event.cost), }) diff --git a/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs b/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs index 1e7f1a4860103..204015314c0ce 100644 --- a/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs +++ b/aptos-move/aptos-gas-schedule/src/gas_schedule/instr.rs @@ -3,7 +3,10 @@ //! This module defines the gas parameters for all Move instructions. -use crate::{gas_feature_versions::RELEASE_V1_18, gas_schedule::VMGasParameters}; +use crate::{ + gas_feature_versions::{RELEASE_V1_18, RELEASE_V1_33}, + gas_schedule::VMGasParameters, +}; use aptos_gas_algebra::{ InternalGas, InternalGasPerAbstractValueUnit, InternalGasPerArg, InternalGasPerByte, InternalGasPerTypeNode, @@ -82,6 +85,10 @@ crate::gas_schedule::macros::define_gas_parameters!( [unpack_per_field: InternalGasPerArg, "unpack.per_field", 147], [unpack_generic_base: InternalGas, "unpack_generic.base", 808], [unpack_generic_per_field: InternalGasPerArg, "unpack_generic.per_field", 147], + [pack_closure_base: InternalGas, { RELEASE_V1_33.. 
=> "pack_closure.base" }, 908], + [pack_closure_per_arg: InternalGasPerArg, { RELEASE_V1_33.. => "pack.closure.per_arg" }, 147], + [pack_closure_generic_base: InternalGas, { RELEASE_V1_33.. => "pack_closure_generic.base" }, 908], + [pack_closure_generic_per_arg: InternalGasPerArg, { RELEASE_V1_33.. => "pack_closure_generic.per_arg" }, 147], // ref [read_ref_base: InternalGas, "read_ref.base", 735], [read_ref_per_abs_val_unit: InternalGasPerAbstractValueUnit, "read_ref.per_abs_val_unit", 14], diff --git a/aptos-move/aptos-gas-schedule/src/gas_schedule/misc.rs b/aptos-move/aptos-gas-schedule/src/gas_schedule/misc.rs index 60818143b0403..79a4653faaf79 100644 --- a/aptos-move/aptos-gas-schedule/src/gas_schedule/misc.rs +++ b/aptos-move/aptos-gas-schedule/src/gas_schedule/misc.rs @@ -7,11 +7,16 @@ use crate::{ gas_schedule::VMGasParameters, traits::{FromOnChainGasSchedule, InitialGasSchedule, ToOnChainGasSchedule}, + ver::gas_feature_versions::RELEASE_V1_33, }; use aptos_gas_algebra::{AbstractValueSize, AbstractValueSizePerArg}; -use move_core_types::{account_address::AccountAddress, gas_algebra::NumArgs, u256::U256}; +use move_core_types::{ + account_address::AccountAddress, gas_algebra::NumArgs, u256::U256, vm_status::StatusCode, +}; use move_vm_types::{ delayed_values::delayed_field_id::DelayedFieldID, + natives::function::{PartialVMError, PartialVMResult}, + values::DEFAULT_MAX_VM_VALUE_NESTED_DEPTH, views::{ValueView, ValueVisitor}, }; use std::collections::BTreeMap; @@ -31,6 +36,7 @@ crate::gas_schedule::macros::define_gas_parameters!( [bool: AbstractValueSize, "bool", 40], [address: AbstractValueSize, "address", 40], [struct_: AbstractValueSize, "struct", 40], + [closure: AbstractValueSize, { RELEASE_V1_33.. => "closure" }, 40], [vector: AbstractValueSize, "vector", 40], [reference: AbstractValueSize, "reference", 40], [per_u8_packed: AbstractValueSizePerArg, "per_u8_packed", 1], @@ -81,8 +87,9 @@ macro_rules! deref_visitor_delegate_simple { ($([$fn: ident, $ty: ty $(,)?]),+ $(,)?) => { $( #[inline] - fn $fn(&mut self, depth: usize, val: $ty) { - self.inner.$fn(depth - self.offset, val); + fn $fn(&mut self, depth: u64, val: $ty) -> PartialVMResult<()> { + self.inner.$fn(depth - self.offset as u64, val)?; + Ok(()) } )* }; @@ -110,40 +117,60 @@ where ); #[inline] - fn visit_struct(&mut self, depth: usize, len: usize) -> bool { - self.inner.visit_struct(depth - self.offset, len) + fn visit_struct(&mut self, depth: u64, len: usize) -> PartialVMResult { + self.inner.visit_struct(depth - self.offset as u64, len) } #[inline] - fn visit_vec(&mut self, depth: usize, len: usize) -> bool { - self.inner.visit_vec(depth - self.offset, len) + fn visit_vec(&mut self, depth: u64, len: usize) -> PartialVMResult { + self.inner.visit_vec(depth - self.offset as u64, len) } #[inline] - fn visit_ref(&mut self, depth: usize, _is_global: bool) -> bool { + fn visit_ref(&mut self, depth: u64, _is_global: bool) -> PartialVMResult { assert_eq!(depth, 0, "There shouldn't be inner refs"); self.offset = 1; - true + Ok(true) } #[inline] - fn visit_closure(&mut self, depth: usize, len: usize) -> bool { + fn visit_closure(&mut self, depth: u64, len: usize) -> PartialVMResult { self.inner.visit_closure(depth, len) } } +/// Checks that the provided depth is not too deep. Used to bound recursion, preventing stack from +/// overflowing. +macro_rules! 
check_depth_impl { + () => { + fn check_depth(&self, depth: u64) -> PartialVMResult<()> { + if self + .max_value_nest_depth + .map_or(false, |max_value_nest_depth| depth > max_value_nest_depth) + { + return Err(PartialVMError::new(StatusCode::VM_MAX_VALUE_DEPTH_REACHED)); + } + Ok(()) + } + }; +} + struct AbstractValueSizeVisitor<'a> { feature_version: u64, params: &'a AbstractValueSizeGasParameters, size: AbstractValueSize, + max_value_nest_depth: Option, } impl<'a> AbstractValueSizeVisitor<'a> { + check_depth_impl!(); + fn new(params: &'a AbstractValueSizeGasParameters, feature_version: u64) -> Self { Self { feature_version, params, size: 0.into(), + max_value_nest_depth: Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH), } } @@ -154,137 +181,173 @@ impl<'a> AbstractValueSizeVisitor<'a> { impl ValueVisitor for AbstractValueSizeVisitor<'_> { #[inline] - fn visit_delayed(&mut self, _depth: usize, _id: DelayedFieldID) { - // TODO[agg_v2](cleanup): add a new abstract value size parameter? + fn visit_delayed(&mut self, depth: u64, _id: DelayedFieldID) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.u64; + Ok(()) } #[inline] - fn visit_u8(&mut self, _depth: usize, _val: u8) { + fn visit_u8(&mut self, depth: u64, _val: u8) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.u8; + Ok(()) } #[inline] - fn visit_u16(&mut self, _depth: usize, _val: u16) { - self.size += self.params.u16 + fn visit_u16(&mut self, depth: u64, _val: u16) -> PartialVMResult<()> { + self.check_depth(depth)?; + self.size += self.params.u16; + Ok(()) } #[inline] - fn visit_u32(&mut self, _depth: usize, _val: u32) { - self.size += self.params.u32 + fn visit_u32(&mut self, depth: u64, _val: u32) -> PartialVMResult<()> { + self.check_depth(depth)?; + self.size += self.params.u32; + Ok(()) } #[inline] - fn visit_u64(&mut self, _depth: usize, _val: u64) { + fn visit_u64(&mut self, depth: u64, _val: u64) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.u64; + Ok(()) } #[inline] - fn visit_u128(&mut self, _depth: usize, _val: u128) { + fn visit_u128(&mut self, depth: u64, _val: u128) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.u128; + Ok(()) } #[inline] - fn visit_u256(&mut self, _depth: usize, _val: U256) { - self.size += self.params.u256 + fn visit_u256(&mut self, depth: u64, _val: U256) -> PartialVMResult<()> { + self.check_depth(depth)?; + self.size += self.params.u256; + Ok(()) } #[inline] - fn visit_bool(&mut self, _depth: usize, _val: bool) { + fn visit_bool(&mut self, depth: u64, _val: bool) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.bool; + Ok(()) } #[inline] - fn visit_address(&mut self, _depth: usize, _val: AccountAddress) { + fn visit_address(&mut self, depth: u64, _val: AccountAddress) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.address; + Ok(()) } #[inline] - fn visit_struct(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_struct(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; self.size += self.params.struct_; - true + Ok(true) } #[inline] - fn visit_closure(&mut self, _depth: usize, _len: usize) -> bool { - // TODO(#15664): introduce a dedicated gas parameter? 
- self.size += self.params.struct_; - true + fn visit_closure(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; + self.size += self.params.closure; + Ok(true) } #[inline] - fn visit_vec(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_vec(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; self.size += self.params.vector; - true + Ok(true) } #[inline] - fn visit_vec_u8(&mut self, _depth: usize, vals: &[u8]) { + fn visit_vec_u8(&mut self, depth: u64, vals: &[u8]) -> PartialVMResult<()> { + self.check_depth(depth)?; let mut size = self.params.per_u8_packed * NumArgs::new(vals.len() as u64); if self.feature_version >= 3 { size += self.params.vector; } self.size += size; + Ok(()) } #[inline] - fn visit_vec_u16(&mut self, _depth: usize, vals: &[u16]) { + fn visit_vec_u16(&mut self, depth: u64, vals: &[u16]) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.vector + self.params.per_u16_packed * NumArgs::new(vals.len() as u64); + Ok(()) } #[inline] - fn visit_vec_u32(&mut self, _depth: usize, vals: &[u32]) { + fn visit_vec_u32(&mut self, depth: u64, vals: &[u32]) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.vector + self.params.per_u32_packed * NumArgs::new(vals.len() as u64); + Ok(()) } #[inline] - fn visit_vec_u64(&mut self, _depth: usize, vals: &[u64]) { + fn visit_vec_u64(&mut self, depth: u64, vals: &[u64]) -> PartialVMResult<()> { + self.check_depth(depth)?; let mut size = self.params.per_u64_packed * NumArgs::new(vals.len() as u64); if self.feature_version >= 3 { size += self.params.vector; } self.size += size; + Ok(()) } #[inline] - fn visit_vec_u128(&mut self, _depth: usize, vals: &[u128]) { + fn visit_vec_u128(&mut self, depth: u64, vals: &[u128]) -> PartialVMResult<()> { + self.check_depth(depth)?; let mut size = self.params.per_u128_packed * NumArgs::new(vals.len() as u64); if self.feature_version >= 3 { size += self.params.vector; } self.size += size; + Ok(()) } #[inline] - fn visit_vec_u256(&mut self, _depth: usize, vals: &[U256]) { + fn visit_vec_u256(&mut self, depth: u64, vals: &[U256]) -> PartialVMResult<()> { + self.check_depth(depth)?; self.size += self.params.vector + self.params.per_u256_packed * NumArgs::new(vals.len() as u64); + Ok(()) } #[inline] - fn visit_vec_bool(&mut self, _depth: usize, vals: &[bool]) { + fn visit_vec_bool(&mut self, depth: u64, vals: &[bool]) -> PartialVMResult<()> { + self.check_depth(depth)?; let mut size = self.params.per_bool_packed * NumArgs::new(vals.len() as u64); if self.feature_version >= 3 { size += self.params.vector; } self.size += size; + Ok(()) } #[inline] - fn visit_vec_address(&mut self, _depth: usize, vals: &[AccountAddress]) { + fn visit_vec_address(&mut self, depth: u64, vals: &[AccountAddress]) -> PartialVMResult<()> { + self.check_depth(depth)?; let mut size = self.params.per_address_packed * NumArgs::new(vals.len() as u64); if self.feature_version >= 3 { size += self.params.vector; } self.size += size; + Ok(()) } #[inline] - fn visit_ref(&mut self, _depth: usize, _is_global: bool) -> bool { + fn visit_ref(&mut self, depth: u64, _is_global: bool) -> PartialVMResult { + self.check_depth(depth)?; self.size += self.params.reference; - false + Ok(false) } } @@ -294,10 +357,10 @@ impl AbstractValueSizeGasParameters { &self, val: impl ValueView, feature_version: u64, - ) -> AbstractValueSize { + ) -> PartialVMResult { let mut visitor = AbstractValueSizeVisitor::new(self, feature_version); - 
val.visit(&mut visitor); - visitor.finish() + val.visit(&mut visitor)?; + Ok(visitor.finish()) } /// Calculates the abstract size of the given value. @@ -306,10 +369,10 @@ impl AbstractValueSizeGasParameters { &self, val: impl ValueView, feature_version: u64, - ) -> AbstractValueSize { + ) -> PartialVMResult { let mut visitor = DerefVisitor::new(AbstractValueSizeVisitor::new(self, feature_version)); - val.visit(&mut visitor); - visitor.into_inner().finish() + val.visit(&mut visitor)?; + Ok(visitor.into_inner().finish()) } } @@ -318,145 +381,182 @@ impl AbstractValueSizeGasParameters { &self, val: impl ValueView, feature_version: u64, - ) -> AbstractValueSize { + ) -> PartialVMResult { struct Visitor<'a> { feature_version: u64, params: &'a AbstractValueSizeGasParameters, res: Option, + max_value_nest_depth: Option, + } + + impl Visitor<'_> { + check_depth_impl!(); } impl ValueVisitor for Visitor<'_> { #[inline] - fn visit_delayed(&mut self, _depth: usize, _val: DelayedFieldID) { - // TODO[agg_v2](cleanup): add a new abstract value size parameter? + fn visit_delayed(&mut self, depth: u64, _val: DelayedFieldID) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.u64); + Ok(()) } #[inline] - fn visit_u8(&mut self, _depth: usize, _val: u8) { + fn visit_u8(&mut self, depth: u64, _val: u8) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.u8); + Ok(()) } #[inline] - fn visit_u16(&mut self, _depth: usize, _val: u16) { + fn visit_u16(&mut self, depth: u64, _val: u16) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.u16); + Ok(()) } #[inline] - fn visit_u32(&mut self, _depth: usize, _val: u32) { + fn visit_u32(&mut self, depth: u64, _val: u32) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.u32); + Ok(()) } #[inline] - fn visit_u64(&mut self, _depth: usize, _val: u64) { + fn visit_u64(&mut self, depth: u64, _val: u64) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.u64); + Ok(()) } #[inline] - fn visit_u128(&mut self, _depth: usize, _val: u128) { + fn visit_u128(&mut self, depth: u64, _val: u128) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.u128); + Ok(()) } #[inline] - fn visit_u256(&mut self, _depth: usize, _val: U256) { + fn visit_u256(&mut self, depth: u64, _val: U256) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.u256); + Ok(()) } #[inline] - fn visit_bool(&mut self, _depth: usize, _val: bool) { + fn visit_bool(&mut self, depth: u64, _val: bool) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.bool); + Ok(()) } #[inline] - fn visit_address(&mut self, _depth: usize, _val: AccountAddress) { + fn visit_address(&mut self, depth: u64, _val: AccountAddress) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.address); + Ok(()) } #[inline] - fn visit_struct(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_struct(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; self.res = Some(self.params.struct_); - false + Ok(false) } #[inline] - fn visit_closure(&mut self, _depth: usize, _len: usize) -> bool { - // TODO(#15664): independent gas parameter for closures? 
- self.res = Some(self.params.struct_); - false + fn visit_closure(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; + self.res = Some(self.params.closure); + Ok(false) } #[inline] - fn visit_vec(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_vec(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; self.res = Some(self.params.vector); - false + Ok(false) } #[inline] - fn visit_ref(&mut self, _depth: usize, _is_global: bool) -> bool { + fn visit_ref(&mut self, depth: u64, _is_global: bool) -> PartialVMResult { + self.check_depth(depth)?; self.res = Some(self.params.reference); - false + Ok(false) } // TODO(Gas): The following function impls are necessary due to a bug upstream. // Remove them once the bug is fixed. #[inline] - fn visit_vec_u8(&mut self, depth: usize, vals: &[u8]) { + fn visit_vec_u8(&mut self, depth: u64, vals: &[u8]) -> PartialVMResult<()> { if self.feature_version < 3 { self.res = Some(0.into()); } else { - self.visit_vec(depth, vals.len()); + self.visit_vec(depth, vals.len())?; } + Ok(()) } #[inline] - fn visit_vec_u16(&mut self, depth: usize, vals: &[u16]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u16(&mut self, depth: u64, vals: &[u16]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_u32(&mut self, depth: usize, vals: &[u32]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u32(&mut self, depth: u64, vals: &[u32]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_u64(&mut self, depth: usize, vals: &[u64]) { + fn visit_vec_u64(&mut self, depth: u64, vals: &[u64]) -> PartialVMResult<()> { if self.feature_version < 3 { self.res = Some(0.into()); } else { - self.visit_vec(depth, vals.len()); + self.visit_vec(depth, vals.len())?; } + Ok(()) } #[inline] - fn visit_vec_u128(&mut self, depth: usize, vals: &[u128]) { + fn visit_vec_u128(&mut self, depth: u64, vals: &[u128]) -> PartialVMResult<()> { if self.feature_version < 3 { self.res = Some(0.into()); } else { - self.visit_vec(depth, vals.len()); + self.visit_vec(depth, vals.len())?; } + Ok(()) } #[inline] - fn visit_vec_u256(&mut self, depth: usize, vals: &[U256]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u256(&mut self, depth: u64, vals: &[U256]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_bool(&mut self, depth: usize, vals: &[bool]) { + fn visit_vec_bool(&mut self, depth: u64, vals: &[bool]) -> PartialVMResult<()> { if self.feature_version < 3 { self.res = Some(0.into()); } else { - self.visit_vec(depth, vals.len()); + self.visit_vec(depth, vals.len())?; } + Ok(()) } #[inline] - fn visit_vec_address(&mut self, depth: usize, vals: &[AccountAddress]) { + fn visit_vec_address( + &mut self, + depth: u64, + vals: &[AccountAddress], + ) -> PartialVMResult<()> { if self.feature_version < 3 { self.res = Some(0.into()); } else { - self.visit_vec(depth, vals.len()); + self.visit_vec(depth, vals.len())?; } + Ok(()) } } @@ -464,162 +564,205 @@ impl AbstractValueSizeGasParameters { feature_version, params: self, res: None, + max_value_nest_depth: Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH), }; - val.visit(&mut visitor); - visitor.res.unwrap() + val.visit(&mut visitor)?; + visitor.res.ok_or_else(|| { + PartialVMError::new_invariant_violation("Visitor should have set the `res` value") + }) } - pub fn abstract_packed_size(&self, val: impl ValueView) -> AbstractValueSize { + pub fn 
abstract_packed_size(&self, val: impl ValueView) -> PartialVMResult { struct Visitor<'a> { params: &'a AbstractValueSizeGasParameters, res: Option, + max_value_nest_depth: Option, + } + + impl Visitor<'_> { + check_depth_impl!(); } impl ValueVisitor for Visitor<'_> { #[inline] - fn visit_delayed(&mut self, _depth: usize, _val: DelayedFieldID) { - // TODO[agg_v2](cleanup): add a new abstract value size parameter? + fn visit_delayed(&mut self, depth: u64, _val: DelayedFieldID) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_u64_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_u8(&mut self, _depth: usize, _val: u8) { + fn visit_u8(&mut self, depth: u64, _val: u8) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_u8_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_u16(&mut self, _depth: usize, _val: u16) { + fn visit_u16(&mut self, depth: u64, _val: u16) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_u16_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_u32(&mut self, _depth: usize, _val: u32) { + fn visit_u32(&mut self, depth: u64, _val: u32) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_u32_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_u64(&mut self, _depth: usize, _val: u64) { + fn visit_u64(&mut self, depth: u64, _val: u64) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_u64_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_u128(&mut self, _depth: usize, _val: u128) { + fn visit_u128(&mut self, depth: u64, _val: u128) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_u128_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_u256(&mut self, _depth: usize, _val: U256) { + fn visit_u256(&mut self, depth: u64, _val: U256) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_u256_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_bool(&mut self, _depth: usize, _val: bool) { + fn visit_bool(&mut self, depth: u64, _val: bool) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_bool_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_address(&mut self, _depth: usize, _val: AccountAddress) { + fn visit_address(&mut self, depth: u64, _val: AccountAddress) -> PartialVMResult<()> { + self.check_depth(depth)?; self.res = Some(self.params.per_address_packed * NumArgs::from(1)); + Ok(()) } #[inline] - fn visit_struct(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_struct(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; self.res = Some(self.params.struct_); - false + Ok(false) } #[inline] - fn visit_closure(&mut self, _depth: usize, _len: usize) -> bool { - // TODO(#15664): independent gas parameter - self.res = Some(self.params.struct_); - false + fn visit_closure(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; + self.res = Some(self.params.closure); + Ok(false) } #[inline] - fn visit_vec(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_vec(&mut self, depth: u64, _len: usize) -> PartialVMResult { + self.check_depth(depth)?; self.res = Some(self.params.vector); - false + Ok(false) } #[inline] - fn visit_ref(&mut self, _depth: usize, _is_global: bool) -> bool { + fn visit_ref(&mut self, depth: u64, _is_global: bool) -> PartialVMResult { // TODO(Gas): 
This should be unreachable... // See if we can handle this in a more graceful way. + self.check_depth(depth)?; self.res = Some(self.params.reference); - false + Ok(false) } // TODO(Gas): The following function impls are necessary due to a bug upstream. // Remove them once the bug is fixed. #[inline] - fn visit_vec_u8(&mut self, depth: usize, vals: &[u8]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u8(&mut self, depth: u64, vals: &[u8]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_u16(&mut self, depth: usize, vals: &[u16]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u16(&mut self, depth: u64, vals: &[u16]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_u32(&mut self, depth: usize, vals: &[u32]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u32(&mut self, depth: u64, vals: &[u32]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_u64(&mut self, depth: usize, vals: &[u64]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u64(&mut self, depth: u64, vals: &[u64]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_u128(&mut self, depth: usize, vals: &[u128]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u128(&mut self, depth: u64, vals: &[u128]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } - fn visit_vec_u256(&mut self, depth: usize, vals: &[U256]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u256(&mut self, depth: u64, vals: &[U256]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_bool(&mut self, depth: usize, vals: &[bool]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_bool(&mut self, depth: u64, vals: &[bool]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } #[inline] - fn visit_vec_address(&mut self, depth: usize, vals: &[AccountAddress]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_address( + &mut self, + depth: u64, + vals: &[AccountAddress], + ) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; + Ok(()) } } let mut visitor = Visitor { params: self, res: None, + max_value_nest_depth: Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH), }; - val.visit(&mut visitor); - visitor.res.unwrap() + val.visit(&mut visitor)?; + visitor.res.ok_or_else(|| { + PartialVMError::new_invariant_violation("Visitor should have set the `res` value") + }) } pub fn abstract_value_size_stack_and_heap( &self, val: impl ValueView, feature_version: u64, - ) -> (AbstractValueSize, AbstractValueSize) { - let stack_size = self.abstract_stack_size(&val, feature_version); - let abs_size = self.abstract_value_size(val, feature_version); + ) -> PartialVMResult<(AbstractValueSize, AbstractValueSize)> { + let stack_size = self.abstract_stack_size(&val, feature_version)?; + let abs_size = self.abstract_value_size(val, feature_version)?; let heap_size = abs_size.checked_sub(stack_size).unwrap_or_else(|| 0.into()); - (stack_size, heap_size) + Ok((stack_size, heap_size)) }
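`abstract_value_size_stack_and_heap` above splits a value's total abstract size into a stack component and a heap remainder, clamping at zero so the subtraction can never underflow. A toy sketch of that arithmetic, with plain `u64`s standing in for the crate's `AbstractValueSize` quantities:

```rust
// Sketch of the stack/heap split: the heap portion is whatever of the total
// abstract size is not accounted for by the stack component, clamped at zero
// so the subtraction cannot underflow. Plain u64s stand in for the crate's
// AbstractValueSize quantities.
fn stack_and_heap(total: u64, stack: u64) -> (u64, u64) {
    let heap = total.checked_sub(stack).unwrap_or(0);
    (stack, heap)
}

fn main() {
    // A vector: large total size, small stack footprint.
    assert_eq!(stack_and_heap(1000, 40), (40, 960));
    // Degenerate case: stack estimate exceeds total; heap clamps to 0.
    assert_eq!(stack_and_heap(30, 40), (40, 0));
}
```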
pub fn abstract_heap_size( &self, val: impl ValueView, feature_version: u64, - ) -> AbstractValueSize { - let stack_size = self.abstract_stack_size(&val, feature_version); - let abs_size = self.abstract_value_size(val, feature_version); + ) -> PartialVMResult<AbstractValueSize> { + let stack_size = self.abstract_stack_size(&val, feature_version)?; + let abs_size = self.abstract_value_size(val, feature_version)?; - abs_size.checked_sub(stack_size).unwrap_or_else(|| 0.into()) + Ok(abs_size.checked_sub(stack_size).unwrap_or_else(|| 0.into())) } } diff --git a/aptos-move/aptos-gas-schedule/src/ver.rs b/aptos-move/aptos-gas-schedule/src/ver.rs index bfbf6ed55927f..afdd52a304b99 100644 --- a/aptos-move/aptos-gas-schedule/src/ver.rs +++ b/aptos-move/aptos-gas-schedule/src/ver.rs @@ -72,7 +72,7 @@ /// global operations. /// - V1 /// - TBA -pub const LATEST_GAS_FEATURE_VERSION: u64 = gas_feature_versions::RELEASE_V1_31; +pub const LATEST_GAS_FEATURE_VERSION: u64 = gas_feature_versions::RELEASE_V1_33; pub mod gas_feature_versions { pub const RELEASE_V1_8: u64 = 11; diff --git a/aptos-move/aptos-memory-usage-tracker/src/lib.rs b/aptos-move/aptos-memory-usage-tracker/src/lib.rs index 062ec2687eaeb..7ad4d05e2d253 100644 --- a/aptos-move/aptos-memory-usage-tracker/src/lib.rs +++ b/aptos-move/aptos-memory-usage-tracker/src/lib.rs @@ -18,7 +18,7 @@ use move_core_types::{ vm_status::StatusCode, }; use move_vm_types::{ - gas::{GasMeter as MoveGasMeter, SimpleInstruction}, + gas::{DependencyGasMeter, GasMeter, NativeGasMeter, SimpleInstruction}, views::{TypeView, ValueView}, }; @@ -94,7 +94,34 @@ macro_rules! delegate_mut { }; } -impl<G> MoveGasMeter for MemoryTrackedGasMeter<G> +impl<G> DependencyGasMeter for MemoryTrackedGasMeter<G> +where + G: AptosGasMeter, +{ + delegate_mut! { + fn charge_dependency(&mut self, is_new: bool, addr: &AccountAddress, name: &IdentStr, size: NumBytes) -> PartialVMResult<()>; + } +} + +impl<G> NativeGasMeter for MemoryTrackedGasMeter<G> +where + G: AptosGasMeter, +{ + delegate! { + fn legacy_gas_budget_in_native_context(&self) -> InternalGas; + } + + delegate_mut! { + fn charge_native_execution(&mut self, amount: InternalGas) -> PartialVMResult<()>; + } + + #[inline] + fn use_heap_memory_in_native_context(&mut self, amount: u64) -> PartialVMResult<()> { + self.use_heap_memory(amount.into()) + } +} + +impl<G> GasMeter for MemoryTrackedGasMeter<G> where G: AptosGasMeter, { @@ -164,8 +191,6 @@ where fn charge_vec_swap(&mut self, ty: impl TypeView) -> PartialVMResult<()>; fn charge_create_ty(&mut self, num_nodes: NumTypeNodes) -> PartialVMResult<()>; - - fn charge_dependency(&mut self, is_new: bool, addr: &AccountAddress, name: &IdentStr, size: NumBytes) -> PartialVMResult<()>; } #[inline] @@ -201,13 +226,17 @@ ) -> PartialVMResult<()> { // TODO(Gas): https://github.com/aptos-labs/aptos-core/issues/5485 if !self.should_leak_memory_for_native { - self.release_heap_memory(args.clone().fold(AbstractValueSize::zero(), |acc, val| { - acc + self - .vm_gas_params() - .misc - .abs_val - .abstract_heap_size(val, self.feature_version()) - })); + self.release_heap_memory(args.clone().try_fold( + AbstractValueSize::zero(), + |acc, val| { + let heap_size = self + .vm_gas_params() + .misc + .abs_val + .abstract_heap_size(val, self.feature_version())?; + Ok::<_, PartialVMError>(acc + heap_size) + }, + )?); } self.base @@ -220,14 +249,15 @@ where amount: InternalGas, ret_vals: Option<impl ExactSizeIterator<Item = impl ValueView> + Clone>, ) -> PartialVMResult<()> { - if let Some(ret_vals) = ret_vals.clone() { - self.use_heap_memory(ret_vals.fold(AbstractValueSize::zero(), |acc, val| { - acc + self + if let Some(mut ret_vals) = ret_vals.clone() { + self.use_heap_memory(ret_vals.try_fold(AbstractValueSize::zero(), |acc, val| { + let heap_size = self .vm_gas_params() .misc .abs_val - .abstract_heap_size(val, self.feature_version()) - }))?; + .abstract_heap_size(val, self.feature_version())?; + Ok::<_, PartialVMError>(acc + heap_size) + })?)?; } self.base.charge_native_function(amount, ret_vals)
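A pattern that recurs throughout this tracker: infallible `fold` accumulations become `try_fold` now that per-element size computation returns `PartialVMResult`, so the first failing computation short-circuits the sum. A standalone sketch with a simplified error type:

```rust
// Standalone sketch of the fold -> try_fold migration seen throughout this
// file: once the per-element size computation can fail, the accumulation has
// to short-circuit on the first error instead of summing infallibly.
fn abstract_heap_size(val: &u64) -> Result<u64, String> {
    if *val > 1_000 {
        return Err(format!("value {val} is too deeply nested"));
    }
    Ok(*val)
}

fn total_heap_size<'a>(vals: impl IntoIterator<Item = &'a u64>) -> Result<u64, String> {
    // try_fold stops at the first Err, mirroring the `?` inside the closures above.
    vals.into_iter()
        .try_fold(0u64, |acc, v| Ok(acc + abstract_heap_size(v)?))
}

fn main() {
    assert_eq!(total_heap_size([1u64, 2, 3].iter()), Ok(6));
    assert!(total_heap_size([1u64, 2_000, 3].iter()).is_err());
}
```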
@@ -236,7 +266,7 @@ where #[inline] fn charge_load_resource( &mut self, - addr: move_core_types::account_address::AccountAddress, + addr: AccountAddress, ty: impl TypeView, val: Option<impl ValueView>, bytes_loaded: NumBytes, @@ -248,7 +278,7 @@ where self.vm_gas_params() .misc .abs_val - .abstract_heap_size(val, self.feature_version()), + .abstract_heap_size(val, self.feature_version())?, )?; } } @@ -262,7 +292,7 @@ where self.vm_gas_params() .misc .abs_val - .abstract_heap_size(&popped_val, self.feature_version()), + .abstract_heap_size(&popped_val, self.feature_version())?, ); self.base.charge_pop(popped_val) @@ -277,7 +307,7 @@ where self.vm_gas_params() .misc .abs_val - .abstract_heap_size(&val, self.feature_version()), + .abstract_heap_size(&val, self.feature_version())?, )?; self.base.charge_ld_const_after_deserialization(val) @@ -289,7 +319,7 @@ where .vm_gas_params() .misc .abs_val - .abstract_heap_size(&val, self.feature_version()); + .abstract_heap_size(&val, self.feature_version())?; self.use_heap_memory(heap_size)?; @@ -302,13 +332,17 @@ where is_generic: bool, args: impl ExactSizeIterator<Item = impl ValueView> + Clone, ) -> PartialVMResult<()> { - self.use_heap_memory(args.clone().fold(AbstractValueSize::zero(), |acc, val| { - acc + self - .vm_gas_params() - .misc - .abs_val - .abstract_stack_size(val, self.feature_version()) - }))?; + self.use_heap_memory( + args.clone() + .try_fold(AbstractValueSize::zero(), |acc, val| { + let stack_size = self + .vm_gas_params() + .misc + .abs_val + .abstract_stack_size(val, self.feature_version())?; + Ok::<_, PartialVMError>(acc + stack_size) + })?, + )?; self.base.charge_pack(is_generic, args) } @@ -319,24 +353,49 @@ is_generic: bool, args: impl ExactSizeIterator<Item = impl ValueView> + Clone, ) -> PartialVMResult<()> { - self.release_heap_memory(args.clone().fold(AbstractValueSize::zero(), |acc, val| { - acc + self - .vm_gas_params() - .misc - .abs_val - .abstract_stack_size(val, self.feature_version()) - })); + self.release_heap_memory(args.clone().try_fold( + AbstractValueSize::zero(), + |acc, val| { + let stack_size = self + .vm_gas_params() + .misc + .abs_val + .abstract_stack_size(val, self.feature_version())?; + Ok::<_, PartialVMError>(acc + stack_size) + }, + )?); self.base.charge_unpack(is_generic, args) } + #[inline] + fn charge_pack_closure( + &mut self, + is_generic: bool, + args: impl ExactSizeIterator<Item = impl ValueView> + Clone, + ) -> PartialVMResult<()> { + self.use_heap_memory( + args.clone() + .try_fold(AbstractValueSize::zero(), |acc, val| { + let stack_size = self + .vm_gas_params() + .misc + .abs_val + .abstract_stack_size(val, self.feature_version())?; + Ok::<_, PartialVMError>(acc + stack_size) + })?, + )?; + + self.base.charge_pack_closure(is_generic, args) + } + #[inline] fn charge_read_ref(&mut self, val: impl ValueView) -> PartialVMResult<()> { let heap_size = self .vm_gas_params() .misc .abs_val - .abstract_heap_size(&val, self.feature_version()); + .abstract_heap_size(&val, self.feature_version())?; self.use_heap_memory(heap_size)?; @@ -353,7 +412,7 @@ where self.vm_gas_params() .misc .abs_val - .abstract_heap_size(&old_val, self.feature_version()), + .abstract_heap_size(&old_val, self.feature_version())?, ); self.base.charge_write_ref(new_val, old_val) @@ -365,13 +424,13 @@ where self.vm_gas_params() .misc .abs_val - .abstract_heap_size(&lhs, self.feature_version()), + .abstract_heap_size(&lhs, self.feature_version())?, ); self.release_heap_memory( self.vm_gas_params() .misc .abs_val - .abstract_heap_size(&rhs, 
self.feature_version()), + .abstract_heap_size(&rhs, self.feature_version())?, ); self.base.charge_eq(lhs, rhs) @@ -383,13 +442,13 @@ where self.vm_gas_params() .misc .abs_val - .abstract_heap_size(&lhs, self.feature_version()), + .abstract_heap_size(&lhs, self.feature_version())?, ); self.release_heap_memory( self.vm_gas_params() .misc .abs_val - .abstract_heap_size(&rhs, self.feature_version()), + .abstract_heap_size(&rhs, self.feature_version())?, ); self.base.charge_neq(lhs, rhs) @@ -401,9 +460,18 @@ where ty: impl TypeView + 'a, args: impl ExactSizeIterator<Item = impl ValueView> + Clone, ) -> PartialVMResult<()> { - self.use_heap_memory(args.clone().fold(AbstractValueSize::zero(), |acc, val| { - acc + self.vm_gas_params().misc.abs_val.abstract_packed_size(val) - }))?; + self.use_heap_memory( + args.clone() + .try_fold(AbstractValueSize::zero(), |acc, val| { + Ok::<_, PartialVMError>( + acc + self + .vm_gas_params() + .misc + .abs_val + .abstract_packed_size(val)?, + ) + })?, + )?; self.base.charge_vec_pack(ty, args) } @@ -415,9 +483,18 @@ expect_num_elements: NumArgs, elems: impl ExactSizeIterator<Item = impl ValueView> + Clone, ) -> PartialVMResult<()> { - self.release_heap_memory(elems.clone().fold(AbstractValueSize::zero(), |acc, val| { - acc + self.vm_gas_params().misc.abs_val.abstract_packed_size(val) - })); + self.release_heap_memory(elems.clone().try_fold( + AbstractValueSize::zero(), + |acc, val| { + Ok::<_, PartialVMError>( + acc + self + .vm_gas_params() + .misc + .abs_val + .abstract_packed_size(val)?, + ) + }, + )?); self.base.charge_vec_unpack(ty, expect_num_elements, elems) } @@ -428,7 +505,12 @@ where ty: impl TypeView, val: impl ValueView, ) -> PartialVMResult<()> { - self.use_heap_memory(self.vm_gas_params().misc.abs_val.abstract_packed_size(&val))?; + self.use_heap_memory( + self.vm_gas_params() + .misc + .abs_val + .abstract_packed_size(&val)?, + )?; self.base.charge_vec_push_back(ty, val) } @@ -440,7 +522,12 @@ where val: Option<impl ValueView>, ) -> PartialVMResult<()> { if let Some(val) = &val { - self.release_heap_memory(self.vm_gas_params().misc.abs_val.abstract_packed_size(val)); + self.release_heap_memory( + self.vm_gas_params() + .misc + .abs_val + .abstract_packed_size(val)?, + ); } self.base.charge_vec_pop_back(ty, val) @@ -451,21 +538,20 @@ where &mut self, locals: impl Iterator<Item = impl ValueView> + Clone, ) -> PartialVMResult<()> { - self.release_heap_memory(locals.clone().fold(AbstractValueSize::zero(), |acc, val| { - acc + self - .vm_gas_params() - .misc - .abs_val - .abstract_heap_size(val, self.feature_version()) - })); + self.release_heap_memory(locals.clone().try_fold( + AbstractValueSize::zero(), + |acc, val| { + let heap_size = self + .vm_gas_params() + .misc + .abs_val + .abstract_heap_size(val, self.feature_version())?; + Ok::<_, PartialVMError>(acc + heap_size) + }, + )?); self.base.charge_drop_frame(locals) } - - #[inline] - fn charge_heap_memory(&mut self, amount: u64) -> PartialVMResult<()> { - self.use_heap_memory(amount.into()) - } } impl<G> AptosGasMeter for MemoryTrackedGasMeter<G> diff --git a/aptos-move/aptos-native-interface/Cargo.toml b/aptos-move/aptos-native-interface/Cargo.toml index 733f2b375081f..413ea285ba54e 100644 --- a/aptos-move/aptos-native-interface/Cargo.toml +++ b/aptos-move/aptos-native-interface/Cargo.toml @@ -15,8 +15,6 @@ rust-version = { workspace = true } aptos-gas-algebra = { workspace = true } aptos-gas-schedule = { workspace = true } aptos-types = { workspace = true } -bcs = { workspace = true } -bytes = { workspace = true } move-binary-format = { workspace = true } move-core-types = { workspace = true } move-vm-runtime = { workspace = true }
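Stepping back to the memory tracker above: the old monolithic gas-meter impl is split into `DependencyGasMeter`, `NativeGasMeter` and `GasMeter`, with `delegate!`/`delegate_mut!` forwarding most methods to the wrapped meter while the wrapper layers its own memory bookkeeping on top. A minimal sketch of that wrapper-delegation shape; trait and method names are simplified stand-ins for the real split:

```rust
// Minimal sketch of the wrapper-delegation idea behind MemoryTrackedGasMeter:
// a tracking wrapper implements the gas trait by forwarding to the inner
// meter, layering its own bookkeeping on top. Trait and method names here are
// simplified stand-ins for the real DependencyGasMeter / NativeGasMeter /
// GasMeter split.
trait NativeMeter {
    fn charge_native(&mut self, amount: u64) -> Result<(), String>;
    fn use_heap_memory(&mut self, amount: u64) -> Result<(), String>;
}

struct BaseMeter {
    balance: u64,
}

impl NativeMeter for BaseMeter {
    fn charge_native(&mut self, amount: u64) -> Result<(), String> {
        self.balance = self.balance.checked_sub(amount).ok_or("out of gas")?;
        Ok(())
    }
    fn use_heap_memory(&mut self, _amount: u64) -> Result<(), String> {
        Ok(())
    }
}

// The wrapper tracks heap usage itself and delegates everything else.
struct MemoryTracked<G> {
    base: G,
    heap_in_use: u64,
}

impl<G: NativeMeter> NativeMeter for MemoryTracked<G> {
    fn charge_native(&mut self, amount: u64) -> Result<(), String> {
        self.base.charge_native(amount) // pure delegation, as delegate_mut! expands to
    }
    fn use_heap_memory(&mut self, amount: u64) -> Result<(), String> {
        self.heap_in_use += amount; // extra bookkeeping layered on top
        self.base.use_heap_memory(amount)
    }
}

fn main() {
    let mut meter = MemoryTracked { base: BaseMeter { balance: 100 }, heap_in_use: 0 };
    meter.charge_native(40).unwrap();
    meter.use_heap_memory(16).unwrap();
    assert_eq!(meter.heap_in_use, 16);
    assert!(meter.charge_native(1_000).is_err());
}
```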
diff --git a/aptos-move/aptos-native-interface/src/builder.rs b/aptos-move/aptos-native-interface/src/builder.rs index 140d9fbecc12d..b65e6fcb397a9 100644 --- a/aptos-move/aptos-native-interface/src/builder.rs +++ b/aptos-move/aptos-native-interface/src/builder.rs @@ -3,12 +3,11 @@ use crate::{ context::SafeNativeContext, - errors::{SafeNativeError, SafeNativeResult}, + errors::{LimitExceededError, SafeNativeError, SafeNativeResult}, }; use aptos_gas_algebra::DynamicExpression; -use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters, ToOnChainGasSchedule}; +use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters}; use aptos_types::on_chain_config::{Features, TimedFeatures}; -use bytes::Bytes; use move_vm_runtime::native_functions::{NativeContext, NativeFunction}; use move_vm_types::{ loaded_data::runtime_types::Type, natives::function::NativeResult, values::Value, @@ -60,15 +59,6 @@ impl SafeNativeBuilder { } } - /// Controls the default incremental gas charging behavior of the natives created from this builder. - /// - /// See [`SafeNativeContext::set_incremental_gas_charging()`] for details. - /// - /// Default: enabled. - pub fn set_incremental_gas_charging(&mut self, enable: bool) { - self.enable_incremental_gas_charging = enable; - } - /// Convenience function that allows one to set the incremental gas charging behavior only for /// natives created within the given closure. /// @@ -108,8 +98,6 @@ impl SafeNativeBuilder { let closure = move |context: &mut NativeContext, ty_args, args| { use SafeNativeError::*; - let gas_budget = context.gas_balance(); - let mut context = SafeNativeContext { inner: context, @@ -119,10 +107,9 @@ native_gas_params: &data.native_gas_params, misc_gas_params: &data.misc_gas_params, - gas_budget, - gas_used: 0.into(), - - enable_incremental_gas_charging, + legacy_gas_used: 0.into(), + legacy_enable_incremental_gas_charging: enable_incremental_gas_charging, + legacy_heap_memory_usage: 0, gas_hook: hook.as_deref(), }; @@ -130,21 +117,45 @@ let res: Result<SmallVec<[Value; 1]>, SafeNativeError> = native(&mut context, ty_args, args); + // If enabled, metering and memory tracking must have been done in the native! + let legacy_heap_memory_usage = context.legacy_heap_memory_usage; + if context.has_direct_gas_meter_access_in_native_context() { + assert_eq!(context.legacy_gas_used, 0.into()); + assert_eq!(legacy_heap_memory_usage, 0); + } + context + .inner + .gas_meter() + .use_heap_memory_in_native_context(legacy_heap_memory_usage)?; + match res { - Ok(ret_vals) => Ok(NativeResult::ok(context.gas_used, ret_vals)), + Ok(ret_vals) => Ok(NativeResult::ok(context.legacy_gas_used, ret_vals)), Err(err) => match err { - Abort { abort_code } => Ok(NativeResult::err(context.gas_used, abort_code)), - OutOfGas => Ok(NativeResult::out_of_gas(context.gas_used)), + Abort { abort_code } => { + Ok(NativeResult::err(context.legacy_gas_used, abort_code)) }, + LimitExceeded(err) => match err { + LimitExceededError::LegacyOutOfGas => { + assert!(!context.has_direct_gas_meter_access_in_native_context()); + Ok(NativeResult::out_of_gas(context.legacy_gas_used)) + }, + LimitExceededError::LimitExceeded(err) => { + // Return a VM error directly, so the native function returns early. + // There is no need to charge gas in the end because it was charged + // during the execution. 
+ assert!(context.has_direct_gas_meter_access_in_native_context()); + Err(err.unpack()) + }, + }, // TODO(Gas): Check if err is indeed an invariant violation. InvariantViolation(err) => Err(err), FunctionDispatch { - cost, module_name, func_name, ty_args, args, } => Ok(NativeResult::CallFunction { - cost, + cost: context.legacy_gas_used, module_name, func_name, ty_args, @@ -179,30 +190,4 @@ impl SafeNativeBuilder { .into_iter() .map(|(func_name, func)| (func_name.into(), self.make_native(func))) } - - pub fn id_bytes(&self) -> Bytes { - let Self { - data, - enable_incremental_gas_charging, - gas_hook: _gas_hook, - } = self; - let SharedData { - gas_feature_version, - native_gas_params, - misc_gas_params, - timed_features, - features, - } = data.as_ref(); - - bcs::to_bytes(&( - enable_incremental_gas_charging, - gas_feature_version, - native_gas_params.to_on_chain_gas_schedule(*gas_feature_version), - misc_gas_params.to_on_chain_gas_schedule(*gas_feature_version), - timed_features, - features, - )) - .expect("bcs::to_bytes() failed.") - .into() - } } diff --git a/aptos-move/aptos-native-interface/src/context.rs b/aptos-move/aptos-native-interface/src/context.rs index f35c4975cf014..606799a906429 100644 --- a/aptos-move/aptos-native-interface/src/context.rs +++ b/aptos-move/aptos-native-interface/src/context.rs @@ -1,13 +1,16 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::errors::{SafeNativeError, SafeNativeResult}; +use crate::errors::{LimitExceededError, SafeNativeError, SafeNativeResult}; use aptos_gas_algebra::{ AbstractValueSize, DynamicExpression, GasExpression, GasQuantity, InternalGasUnit, }; -use aptos_gas_schedule::{AbstractValueSizeGasParameters, MiscGasParameters, NativeGasParameters}; +use aptos_gas_schedule::{ + gas_feature_versions::RELEASE_V1_32, AbstractValueSizeGasParameters, MiscGasParameters, + NativeGasParameters, +}; use aptos_types::on_chain_config::{Features, TimedFeatureFlag, TimedFeatures}; -use move_binary_format::errors::VMResult; +use move_binary_format::errors::{PartialVMResult, VMResult}; use move_core_types::{ gas_algebra::InternalGas, identifier::Identifier, language_storage::ModuleId, }; @@ -25,39 +28,40 @@ use std::{ /// Major features include incremental gas charging and less ambiguous error handling. For this /// reason, native functions should always use [`SafeNativeContext`] instead of [`NativeContext`]. #[allow(unused)] -pub struct SafeNativeContext<'a, 'b, 'c> { - pub(crate) inner: &'c mut NativeContext<'a, 'b>, +pub struct SafeNativeContext<'a, 'b, 'c, 'd> { + pub(crate) inner: &'d mut NativeContext<'a, 'b, 'c>, - pub(crate) timed_features: &'c TimedFeatures, - pub(crate) features: &'c Features, + pub(crate) timed_features: &'d TimedFeatures, + pub(crate) features: &'d Features, pub(crate) gas_feature_version: u64, - pub(crate) native_gas_params: &'c NativeGasParameters, - pub(crate) misc_gas_params: &'c MiscGasParameters, - - pub(crate) gas_budget: InternalGas, - pub(crate) gas_used: InternalGas, + pub(crate) native_gas_params: &'d NativeGasParameters, + pub(crate) misc_gas_params: &'d MiscGasParameters, - pub(crate) enable_incremental_gas_charging: bool, + // The fields below were used when there was no access to gas meter in native context. This is + // no longer the case, so these can be removed when the feature is stable. 
+ pub(crate) legacy_gas_used: InternalGas, + pub(crate) legacy_enable_incremental_gas_charging: bool, + pub(crate) legacy_heap_memory_usage: u64, - pub(crate) gas_hook: Option<&'c (dyn Fn(DynamicExpression) + Send + Sync)>, + pub(crate) gas_hook: Option<&'d (dyn Fn(DynamicExpression) + Send + Sync)>, } -impl<'a, 'b> Deref for SafeNativeContext<'a, 'b, '_> { - type Target = NativeContext<'a, 'b>; +impl<'a, 'b, 'c> Deref for SafeNativeContext<'a, 'b, 'c, '_> { + type Target = NativeContext<'a, 'b, 'c>; fn deref(&self) -> &Self::Target { self.inner } } -impl DerefMut for SafeNativeContext<'_, '_, '_> { +impl DerefMut for SafeNativeContext<'_, '_, '_, '_> { fn deref_mut(&mut self) -> &mut Self::Target { self.inner } } -impl SafeNativeContext<'_, '_, '_> { +impl SafeNativeContext<'_, '_, '_, '_> { /// Always remember: first charge gas, then execute! /// /// In other words, this function **MUST** always be called **BEFORE** executing **any** @@ -74,15 +78,39 @@ impl SafeNativeContext<'_, '_, '_> { hook(node); } - self.gas_used += amount; - - if self.gas_used > self.gas_budget && self.enable_incremental_gas_charging { - Err(SafeNativeError::OutOfGas) - } else { + if self.has_direct_gas_meter_access_in_native_context() { + self.gas_meter() + .charge_native_execution(amount) + .map_err(LimitExceededError::from_err)?; Ok(()) + } else { + self.legacy_gas_used += amount; + if self.legacy_gas_used > self.legacy_gas_budget() + && self.legacy_enable_incremental_gas_charging + { + Err(SafeNativeError::LimitExceeded( + LimitExceededError::LegacyOutOfGas, + )) + } else { + Ok(()) + } } } + /// Returns true if native functions have access to the gas meter and can charge gas. Otherwise, + /// only the VM's interpreter needs to charge gas. + pub fn has_direct_gas_meter_access_in_native_context(&self) -> bool { + self.gas_feature_version >= RELEASE_V1_32 + } + + /// Charges gas for transitive dependencies of the specified module. Used for native dynamic + /// dispatch. + pub fn charge_gas_for_dependencies(&mut self, module_id: ModuleId) -> SafeNativeResult<()> { + self.inner + .charge_gas_for_dependencies(module_id) + .map_err(|err| LimitExceededError::from_err(err.to_partial())) + } + /// Evaluates the given gas expression within the current context immediately. /// /// This can be useful if you have branch conditions depending on gas parameters. @@ -94,14 +122,14 @@ impl SafeNativeContext<'_, '_, '_> { } /// Computes the abstract size of the input value. - pub fn abs_val_size(&self, val: &Value) -> AbstractValueSize { + pub fn abs_val_size(&self, val: &Value) -> PartialVMResult<AbstractValueSize> { self.misc_gas_params .abs_val .abstract_value_size(val, self.gas_feature_version) } /// Computes the abstract size of the input value. - pub fn abs_val_size_dereferenced(&self, val: &Value) -> AbstractValueSize { + pub fn abs_val_size_dereferenced(&self, val: &Value) -> PartialVMResult<AbstractValueSize> { self.misc_gas_params .abs_val .abstract_value_size_dereferenced(val, self.gas_feature_version) @@ -117,6 +145,20 @@ impl SafeNativeContext<'_, '_, '_> { self.gas_feature_version } + pub fn max_value_nest_depth(&self) -> Option<u64> { + self.module_storage() + .runtime_environment() + .vm_config() + .enable_depth_checks + .then(|| { + self.module_storage() + .runtime_environment() + .vm_config() + .max_value_nest_depth + }) + .flatten() + } +
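`SafeNativeContext::charge` above now branches on whether the native has direct gas meter access (a gas-feature-version gate): with access, the meter is charged eagerly; without it, gas accumulates locally and is compared against the legacy budget. A simplified sketch of the two modes; stand-in types, and the legacy branch here omits the incremental-charging toggle:

```rust
// Sketch of the two charging modes in SafeNativeContext::charge. Simplified
// stand-in types; the real code returns SafeNativeResult and also consults
// the legacy incremental-charging flag.
struct Charger {
    direct_access: bool, // has_direct_gas_meter_access_in_native_context()
    meter_balance: u64,  // stands in for the real gas meter
    legacy_budget: u64,
    legacy_gas_used: u64,
}

impl Charger {
    fn charge(&mut self, amount: u64) -> Result<(), String> {
        if self.direct_access {
            // Eager metering: fail the moment the balance is exhausted.
            self.meter_balance = self
                .meter_balance
                .checked_sub(amount)
                .ok_or("limit exceeded")?;
            Ok(())
        } else {
            // Legacy mode: accumulate and compare against the snapshot budget.
            self.legacy_gas_used += amount;
            if self.legacy_gas_used > self.legacy_budget {
                Err("legacy out of gas".to_string())
            } else {
                Ok(())
            }
        }
    }
}

fn main() {
    let mut legacy = Charger { direct_access: false, meter_balance: 0, legacy_budget: 10, legacy_gas_used: 0 };
    assert!(legacy.charge(7).is_ok());
    assert!(legacy.charge(7).is_err());

    let mut direct = Charger { direct_access: true, meter_balance: 10, legacy_budget: 0, legacy_gas_used: 0 };
    assert!(direct.charge(7).is_ok());
    assert!(direct.charge(7).is_err());
}
```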
/// Returns a reference to the struct representing on-chain features. pub fn get_feature_flags(&self) -> &Features { self.features @@ -127,22 +169,25 @@ impl SafeNativeContext<'_, '_, '_> { self.timed_features.is_enabled(flag) } - /// Signals to the VM (and by extension, the gas meter) that the native function has - /// incurred additional heap memory usage that should be tracked. - pub fn use_heap_memory(&mut self, amount: u64) { + /// If gas metering in native context is available: + /// - Records heap memory usage. If it exceeds the maximum allowed limit, an error is returned. + /// + /// If not available: + /// - Signals to the VM (and by extension, the gas meter) that the native function has + /// incurred additional heap memory usage that should be tracked. + /// - Charged by the VM after execution. + pub fn use_heap_memory(&mut self, amount: u64) -> SafeNativeResult<()> { if self.timed_feature_enabled(TimedFeatureFlag::FixMemoryUsageTracking) { - self.inner.use_heap_memory(amount); + if self.has_direct_gas_meter_access_in_native_context() { + self.gas_meter() + .use_heap_memory_in_native_context(amount) + .map_err(LimitExceededError::from_err)?; + } else { + self.legacy_heap_memory_usage = + self.legacy_heap_memory_usage.saturating_add(amount); + } } - } - - /// Configures the behavior of [`Self::charge()`]. - /// - If enabled, it will return an out of gas error as soon as the amount of gas used - /// exceeds the remaining balance. - /// - If disabled, it will not return early errors, but the gas usage is still recorded, - /// and the total amount will be reported back to the VM after the native function returns. - /// This should only be used for backward compatibility reasons. - pub fn set_incremental_gas_charging(&mut self, enable: bool) { - self.enable_incremental_gas_charging = enable; + Ok(()) } pub fn load_function( diff --git a/aptos-move/aptos-native-interface/src/errors.rs b/aptos-move/aptos-native-interface/src/errors.rs index d78df9e332c3b..5c269a50a9dc7 100644 --- a/aptos-move/aptos-native-interface/src/errors.rs +++ b/aptos-move/aptos-native-interface/src/errors.rs @@ -2,12 +2,51 @@ // SPDX-License-Identifier: Apache-2.0 use move_binary_format::errors::PartialVMError; -use move_core_types::{ - gas_algebra::InternalGas, identifier::Identifier, language_storage::ModuleId, -}; +use move_core_types::{identifier::Identifier, language_storage::ModuleId, vm_status::StatusCode}; use move_vm_types::{loaded_data::runtime_types::Type, values::Value}; use smallvec::SmallVec; +/// Wraps [PartialVMError] to ensure it cannot be constructed via public constructor when we create +/// a [LimitExceededError]. +pub struct MeteringError(PartialVMError); + +impl MeteringError { + pub fn unpack(self) -> PartialVMError { + self.0 + } +}
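`MeteringError` above deliberately has no public constructor, so the only way to obtain one is through the sanctioned routing function defined below it. A small sketch of the same visibility trick with stand-in types:

```rust
// Sketch of the visibility trick behind MeteringError: keeping the inner
// field private to its module means outside callers can only obtain one
// through the sanctioned entry point, never by wrapping an arbitrary error
// themselves. Types here are illustrative stand-ins.
mod errors {
    pub struct VmError(pub String);

    // No public constructor: the field is private.
    pub struct MeteringError(VmError);

    impl MeteringError {
        pub fn unpack(self) -> VmError {
            self.0
        }
    }

    pub enum LimitError {
        LegacyOutOfGas,
        LimitExceeded(MeteringError),
    }

    // The only way to build a LimitExceeded: route through a checked entry point.
    pub fn from_err(err: VmError) -> LimitError {
        LimitError::LimitExceeded(MeteringError(err))
    }
}

fn main() {
    // `errors::MeteringError(..)` would not compile here; this does:
    let e = errors::from_err(errors::VmError("out of gas".to_string()));
    if let errors::LimitError::LimitExceeded(m) = e {
        assert_eq!(m.unpack().0, "out of gas");
    }
}
```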
+ +/// Specifies different ways of exceeding the limit. +pub enum LimitExceededError { + /// Represents legacy out of gas status. Mapped to [StatusCode::OUT_OF_GAS]. Does not represent + /// anything else, e.g., reaching memory limits, loading too many dependencies. + LegacyOutOfGas, + /// Error due to metering. The inner value contains the VM error which can be later returned to + /// the interpreter. + LimitExceeded(MeteringError), +} + +impl LimitExceededError { + pub fn from_err(err: PartialVMError) -> SafeNativeError { + match err.major_status() { + StatusCode::OUT_OF_GAS + | StatusCode::EXECUTION_LIMIT_REACHED + | StatusCode::DEPENDENCY_LIMIT_REACHED + | StatusCode::MEMORY_LIMIT_EXCEEDED + | StatusCode::TOO_MANY_TYPE_NODES + | StatusCode::VM_MAX_VALUE_DEPTH_REACHED => SafeNativeError::LimitExceeded( + LimitExceededError::LimitExceeded(MeteringError(err)), + ), + // Treat all other codes as invariant violations and leave them for the VM to propagate + // further. Note that we do not remap the errors. For example, if there is a + // speculative error returned (signaling Block-STM to stop executing this transaction), + // we better not remap it. + // TODO(Gas): Have a single method to convert partial VM error to safe native error. + _ => SafeNativeError::InvariantViolation(err), + } + } +} + /// Saner representation of a native function error. #[allow(unused)] pub enum SafeNativeError { @@ -17,13 +56,16 @@ pub enum SafeNativeError { /// be followed. Abort { abort_code: u64 }, - /// Indicating that the native function has run out of gas during execution. + /// Indicating that the native function has exceeded execution limits. /// - /// This will cause the VM to deduct all the remaining balance and abort the transaction, - /// so use it carefully! - /// Normally this should only be triggered by `SafeNativeContext::charge()` and you should - /// not return this manually without a good reason. - OutOfGas, + /// If metering in native context is not enabled, this will cause the VM to deduct all the + /// remaining balance and abort the transaction, so use it carefully! Normally this should only + /// be triggered by `SafeNativeContext::charge()` and one should not return this variant + /// manually without a good reason. + /// + /// If metering in native context is enabled, this simply returns the error code that specifies + /// the limit that was exceeded. + LimitExceeded(LimitExceededError), /// Indicating that the native function ran into some internal errors that shall not normally /// be triggerable by user inputs. @@ -37,7 +79,6 @@ pub enum SafeNativeError { /// It is important to make sure the args are in the exact same order as passed in from the native argument input /// as the MoveVM relies on this ordering to perform paranoid mode stack transition. FunctionDispatch { - cost: InternalGas, module_name: ModuleId, func_name: Identifier, ty_args: Vec<Type>, @@ -46,8 +87,10 @@ pub enum SafeNativeError { /// Load up a module and charge the module accordingly. /// - /// It is critical to invoke this function before calling FunctionDispatch to make sure the module loading - /// is charged properly, otherwise it would be a potential gas issue. + /// It is critical to invoke this function before calling FunctionDispatch to make sure the + /// module loading is charged properly, otherwise it would be a potential gas issue. + /// + /// Note: not used once metering in native context is enabled. LoadModule { module_name: ModuleId }, } diff --git a/aptos-move/aptos-sdk-builder/src/common.rs b/aptos-move/aptos-sdk-builder/src/common.rs index 1711ecaf3414d..315229d6b239c 100644 --- a/aptos-move/aptos-sdk-builder/src/common.rs +++ b/aptos-move/aptos-sdk-builder/src/common.rs @@ -18,11 +18,11 @@ use std::{ pub(crate) fn type_not_allowed(type_tag: &TypeTag) -> ! 
{ panic!( "Transaction scripts cannot take arguments of type {}.", - type_tag + type_tag.to_canonical_string() ); } -/// Clean up doc comments extracter by the Move prover. +/// Clean up doc comments extracted by the Move prover. pub(crate) fn prepare_doc_string(doc: &str) -> String { doc.replace("\n ", "\n").trim().to_string() } diff --git a/aptos-move/aptos-sdk-builder/src/golang.rs b/aptos-move/aptos-sdk-builder/src/golang.rs index 17a847c0a8ceb..1953a8cd61c87 100644 --- a/aptos-move/aptos-sdk-builder/src/golang.rs +++ b/aptos-move/aptos-sdk-builder/src/golang.rs @@ -19,6 +19,7 @@ use serde_generate::{ }; use std::{ collections::BTreeMap, + io, io::{Result, Write}, path::PathBuf, str::FromStr, @@ -40,7 +41,7 @@ pub fn output( }; // Some functions have complex types which are not currently supported in bcs or in this - // generator. Disable those functiosn for now. + // generator. Disable those functions for now. let abis_vec = abis .iter() .filter(|abi| { @@ -468,8 +469,17 @@ func DecodeEntryFunctionPayload(script aptostypes.TransactionPayload) (EntryFunc for (index, arg) in abi.args().iter().enumerate() { let decoding = match Self::bcs_primitive_type_name(arg.type_tag()) { None => { - let type_tag_str = format!("{}", (arg.type_tag())); - if "vector<0x1::string::String>".eq(&type_tag_str) { + let vec_string_tag = + TypeTag::from_str("vector<0x1::string::String>").map_err(|err| { + io::Error::new( + io::ErrorKind::Other, + format!( + "Failed to construct a type tag for vector of strings: {:?}", + err + ), + ) + })?; + if arg.type_tag() == &vec_string_tag { format!( "bcs.NewDeserializer(script.Value.Args[{}]).DeserializeVecBytes()", index, diff --git a/aptos-move/aptos-transaction-benchmarks/src/transaction_bench_state.rs b/aptos-move/aptos-transaction-benchmarks/src/transaction_bench_state.rs index e6436f3329de6..56cf994e96d71 100644 --- a/aptos-move/aptos-transaction-benchmarks/src/transaction_bench_state.rs +++ b/aptos-move/aptos-transaction-benchmarks/src/transaction_bench_state.rs @@ -185,7 +185,7 @@ where pub(crate) fn execute_sequential(mut self) { // The output is ignored here since we're just testing transaction performance, not trying // to assert correctness. - let txn_provider = DefaultTxnProvider::new(self.gen_transaction()); + let txn_provider = DefaultTxnProvider::new_without_info(self.gen_transaction()); self.execute_benchmark_sequential(&txn_provider, None); } @@ -193,7 +193,7 @@ where pub(crate) fn execute_parallel(mut self) { // The output is ignored here since we're just testing transaction performance, not trying // to assert correctness. 
- let txn_provider = DefaultTxnProvider::new(self.gen_transaction()); + let txn_provider = DefaultTxnProvider::new_without_info(self.gen_transaction()); self.execute_benchmark_parallel(&txn_provider, num_cpus::get(), None); } @@ -285,7 +285,7 @@ where concurrency_level_per_shard: usize, maybe_block_gas_limit: Option<u64>, ) -> (usize, usize) { - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); let (output, par_tps) = if run_par { println!("Parallel execution starts..."); let (output, tps) = if self.is_shareded() { diff --git a/aptos-move/aptos-transaction-simulation/src/state_store.rs b/aptos-move/aptos-transaction-simulation/src/state_store.rs index b3965b130df80..765fc545a9635 100644 --- a/aptos-move/aptos-transaction-simulation/src/state_store.rs +++ b/aptos-move/aptos-transaction-simulation/src/state_store.rs @@ -249,6 +249,10 @@ impl TStateView for EmptyStateView { fn contains_state_value(&self, _state_key: &Self::Key) -> StateViewResult<bool> { Ok(false) } + + fn next_version(&self) -> Version { + 0 + } } /*************************************************************************************************** @@ -358,6 +362,10 @@ where self.base.contains_state_value(state_key) } + + fn next_version(&self) -> Version { + self.base.next_version() + } } impl<S> SimulationStateStore for DeltaStateStore<S> diff --git a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs index 655b06700ae20..f1ce82b8de3fc 100644 --- a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs +++ b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs @@ -58,7 +58,10 @@ use move_transactional_test_runner::{ vm_test_harness::{PrecompiledFilesModules, TestRunConfig}, }; use move_vm_runtime::{move_vm::SerializedReturnValues, AsFunctionValueExtension}; -use move_vm_types::{value_serde::ValueSerDeContext, values::Value}; +use move_vm_types::{ + value_serde::{FunctionValueExtension, ValueSerDeContext}, + values::Value, +}; use once_cell::sync::Lazy; use std::{ collections::{BTreeMap, BTreeSet}, @@ -501,7 +504,7 @@ impl AptosTestAdapter<'_> { fn run_transaction(&mut self, txn: Transaction) -> Result<TransactionOutput> { let txn_block = vec![txn]; let sig_verified_block = into_signature_verified_block(txn_block); - let txn_provider = DefaultTxnProvider::new(sig_verified_block); + let txn_provider = DefaultTxnProvider::new_without_info(sig_verified_block); let mut outputs = AptosVMBlockExecutor::new() .execute_block_no_limit(&txn_provider, &self.storage.clone())?; @@ -1031,8 +1034,10 @@ impl<'a> MoveTestAdapter<'a> for AptosTestAdapter<'a> { fn deserialize(&self, bytes: &[u8], layout: &MoveTypeLayout) -> Option<Value> { let environment = AptosEnvironment::new(&self.storage); let code_storage = self.storage.as_aptos_code_storage(&environment); - ValueSerDeContext::new() - .with_func_args_deserialization(&code_storage.as_function_value_extension()) + + let function_value_extension = code_storage.as_function_value_extension(); + ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) + .with_func_args_deserialization(&function_value_extension) + .deserialize(bytes, layout) } } @@ -1053,7 +1058,11 @@ impl fmt::Display for PrettyEvent<'_> { }, ContractEvent::V2(_v2) => (), } - writeln!(f, " type: {}", self.0.type_tag())?; + writeln!( + f, + " type: {}", + self.0.type_tag().to_canonical_string() + )?; writeln!(f, " data: {:?}", hex::encode(self.0.event_data()))?; write!(f, "}}") } }
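The harness change above reflects that `ValueSerDeContext::new` now takes the maximum value nesting depth up front, so overly deep values can be rejected during (de)serialization. A hedged sketch of that builder-with-depth-limit shape; all names are stand-ins, not the real move_vm_types API:

```rust
// Hedged sketch of a builder-style serde context that carries an optional
// maximum value nesting depth, mirroring the shape of the
// ValueSerDeContext::new(max_depth) calls above. Stand-in types only.
struct SerDeContext {
    max_depth: Option<u64>,
    #[allow(dead_code)]
    with_delayed_fields: bool,
}

impl SerDeContext {
    fn new(max_depth: Option<u64>) -> Self {
        Self { max_depth, with_delayed_fields: false }
    }

    fn with_delayed_fields_serde(mut self) -> Self {
        self.with_delayed_fields = true;
        self
    }

    // Toy "deserializer": rejects values nested deeper than the limit,
    // returning None just like a failed deserialization would.
    fn deserialize(&self, value_depth: u64) -> Option<&'static str> {
        match self.max_depth {
            Some(max) if value_depth > max => None,
            _ => Some("value"),
        }
    }
}

fn main() {
    let ctx = SerDeContext::new(Some(128)).with_delayed_fields_serde();
    assert!(ctx.deserialize(10).is_some());
    assert!(ctx.deserialize(200).is_none());
    assert!(SerDeContext::new(None).deserialize(200).is_some());
}
```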
diff --git a/aptos-move/aptos-vm-environment/src/prod_configs.rs b/aptos-move/aptos-vm-environment/src/prod_configs.rs index 4d912192a9b81..5131c622f39ef 100644 --- a/aptos-move/aptos-vm-environment/src/prod_configs.rs +++ b/aptos-move/aptos-vm-environment/src/prod_configs.rs @@ -16,7 +16,9 @@ use aptos_types::{ use move_binary_format::deserializer::DeserializerConfig; use move_bytecode_verifier::VerifierConfig; use move_vm_runtime::config::VMConfig; -use move_vm_types::loaded_data::runtime_types::TypeBuilder; +use move_vm_types::{ + loaded_data::runtime_types::TypeBuilder, values::DEFAULT_MAX_VM_VALUE_NESTED_DEPTH, +}; use once_cell::sync::OnceCell; static PARANOID_TYPE_CHECKS: OnceCell<bool> = OnceCell::new(); @@ -80,6 +82,7 @@ pub fn aptos_prod_verifier_config(features: &Features) -> VerifierConfig { let enable_resource_access_control = features.is_enabled(FeatureFlag::ENABLE_RESOURCE_ACCESS_CONTROL); let enable_function_values = features.is_enabled(FeatureFlag::ENABLE_FUNCTION_VALUES); + // Note: we reuse the `enable_function_values` flag to set various stricter limits on types. VerifierConfig { max_loop_depth: Some(5), @@ -87,7 +90,11 @@ max_function_parameters: Some(128), max_basic_blocks: Some(1024), max_value_stack_size: 1024, - max_type_nodes: Some(256), + max_type_nodes: if enable_function_values { + Some(128) + } else { + Some(256) + }, max_push_size: Some(10000), max_struct_definitions: None, max_struct_variants: None, @@ -103,6 +110,16 @@ enable_enum_types, enable_resource_access_control, enable_function_values, + max_function_return_values: if enable_function_values { + Some(128) + } else { + None + }, + max_type_depth: if enable_function_values { + Some(20) + } else { + None + }, } } @@ -127,12 +144,21 @@ pub fn aptos_prod_vm_config( 256 }; - VMConfig { + // Value runtime depth checks were introduced together with function values and are only + // enabled when function values are enabled. Previously, checks were performed over types + // to bound the value depth (checking the size of a packed struct type bounds the value), but + // this no longer applies once function values are enabled. With function values, types can be + // shallow while the value can be deeply nested, thanks to captured arguments that are not + // visible in the type. Hence, depth checks have been adjusted to operate on values. + let enable_depth_checks = features.is_enabled(FeatureFlag::ENABLE_FUNCTION_VALUES); + + let config = VMConfig { verifier_config, deserializer_config, paranoid_type_checks, check_invariant_in_swap_loc, - max_value_nest_depth: Some(128), + // Note: if updating, make sure the constant is in-sync. + max_value_nest_depth: Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH), layout_max_size, layout_max_depth: 128, // 5000 limits type tag total size < 5000 bytes and < 50 nodes. @@ -146,7 +172,17 @@ use_call_tree_and_instruction_cache: features .is_call_tree_and_instruction_vm_cache_enabled(), enable_lazy_loading: features.is_lazy_loading_enabled(), - } + enable_depth_checks, + }; + + // Note: if max_value_nest_depth changed, make sure the constant is in-sync. Do not remove this + // assertion as it ensures the constant value is set correctly. + assert_eq!( + config.max_value_nest_depth, + Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH) + ); + + config }
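The config builders above tighten limits when the function-values feature flag is on and pin `max_value_nest_depth` to the shared constant with an assertion. A condensed sketch of that feature-gated-config pattern, with simplified fields:

```rust
// Sketch of the feature-gated configuration pattern from
// aptos_prod_vm_config / aptos_prod_verifier_config above: limits tighten
// when a feature flag is on, and an assertion pins the configured value to
// the shared constant. Field names are simplified stand-ins.
const DEFAULT_MAX_VM_VALUE_NESTED_DEPTH: u64 = 128;

struct VmConfig {
    max_type_nodes: Option<u64>,
    max_value_nest_depth: Option<u64>,
    enable_depth_checks: bool,
}

fn prod_vm_config(enable_function_values: bool) -> VmConfig {
    let config = VmConfig {
        // Stricter type-size limit once function values are enabled.
        max_type_nodes: if enable_function_values { Some(128) } else { Some(256) },
        max_value_nest_depth: Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH),
        // Value-depth checks ship together with function values.
        enable_depth_checks: enable_function_values,
    };
    // Keep the config and the shared constant in sync, as the real code does.
    assert_eq!(config.max_value_nest_depth, Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH));
    config
}

fn main() {
    assert!(prod_vm_config(true).enable_depth_checks);
    assert_eq!(prod_vm_config(false).max_type_nodes, Some(256));
}
```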
/// A collection of on-chain randomness API configs that VM needs to be aware of. diff --git a/aptos-move/aptos-vm-logging/src/lib.rs b/aptos-move/aptos-vm-logging/src/lib.rs index 336f1160b7a3c..ca609d8aa413f 100644 --- a/aptos-move/aptos-vm-logging/src/lib.rs +++ b/aptos-move/aptos-vm-logging/src/lib.rs @@ -82,7 +82,8 @@ fn speculation_disabled() -> bool { /// Initializes the storage of speculative logs for num_txns many transactions. pub fn init_speculative_logs(num_txns: usize) { if !speculation_disabled() { - BUFFERED_LOG_EVENTS.swap(Some(Arc::new(SpeculativeEvents::new(num_txns)))); + // +1 for potential BlockEpilogue transaction. + BUFFERED_LOG_EVENTS.swap(Some(Arc::new(SpeculativeEvents::new(num_txns + 1)))); } } diff --git a/aptos-move/aptos-vm-profiling/src/bins/run_aptos_p2p.rs b/aptos-move/aptos-vm-profiling/src/bins/run_aptos_p2p.rs index 72f6bfe52d218..760fff7f9e431 100644 --- a/aptos-move/aptos-vm-profiling/src/bins/run_aptos_p2p.rs +++ b/aptos-move/aptos-vm-profiling/src/bins/run_aptos_p2p.rs @@ -46,7 +46,7 @@ fn main() -> Result<()> { }) .collect(); - let txn_provider = DefaultTxnProvider::new(txns); + let txn_provider = DefaultTxnProvider::new_without_info(txns); let outputs = AptosVMBlockExecutor::new().execute_block_no_limit(&txn_provider, &state_store)?; for i in 0..NUM_TXNS { diff --git a/aptos-move/aptos-vm-types/src/module_and_script_storage/module_storage.rs b/aptos-move/aptos-vm-types/src/module_and_script_storage/module_storage.rs index 39f02b8bcb047..1edce0e4aea6d 100644 --- a/aptos-move/aptos-vm-types/src/module_and_script_storage/module_storage.rs +++ b/aptos-move/aptos-vm-types/src/module_and_script_storage/module_storage.rs @@ -10,7 +10,7 @@ use move_vm_runtime::ModuleStorage; pub trait AptosModuleStorage: ModuleStorage { /// Returns the state value metadata associated with this module. The error is returned if /// there is a storage error. If the module does not exist, [None] is returned. - fn fetch_state_value_metadata( + fn get_module_state_value_metadata( &self, address: &AccountAddress, module_name: &IdentStr, diff --git a/aptos-move/aptos-vm-types/src/module_and_script_storage/state_view_adapter.rs b/aptos-move/aptos-vm-types/src/module_and_script_storage/state_view_adapter.rs index d439a3f3fad31..1da5fc62c2ba6 100644 --- a/aptos-move/aptos-vm-types/src/module_and_script_storage/state_view_adapter.rs +++ b/aptos-move/aptos-vm-types/src/module_and_script_storage/state_view_adapter.rs @@ -159,7 +159,7 @@ where impl<S, E> AptosModuleStorage for AptosCodeStorageAdapter<'_, S, E> { - fn fetch_state_value_metadata( + fn get_module_state_value_metadata( &self, address: &AccountAddress, module_name: &IdentStr, diff --git a/aptos-move/aptos-vm-types/src/module_write_set.rs b/aptos-move/aptos-vm-types/src/module_write_set.rs index 06ddf7b2669da..7348bc998fdfb 100644 --- a/aptos-move/aptos-vm-types/src/module_write_set.rs +++ b/aptos-move/aptos-vm-types/src/module_write_set.rs @@ -32,6 +32,10 @@ impl<V> ModuleWrite<V> { self.id.address() } + pub fn module_id(&self) -> &ModuleId { + &self.id + } + /// Returns the name of the module written. 
pub fn module_name(&self) -> &IdentStr { self.id.name() @@ -98,8 +102,15 @@ impl ModuleWriteSet { module_storage: &'a impl ModuleStorage, ) -> impl Iterator<Item = PartialVMResult<WriteOpInfo<'a>>> { self.writes.iter_mut().map(move |(key, write)| { + // The unmetered access to module size is fine because: + // + // INVARIANT: + // If there is a write to the module at key K, it means the module at K has been read + // (in order to perform backward-compatibility checks) if it existed. + // If module at K previously did not exist, the read of previous size returns None. + // Because module with key K has been read, it must have been loaded and metered. let prev_size = module_storage - .fetch_module_size_in_bytes(write.module_address(), write.module_name()) + .unmetered_get_module_size(write.module_address(), write.module_name()) .map_err(|e| e.to_partial())? .unwrap_or(0) as u64; Ok(WriteOpInfo { diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index 4bb209717bb84..339cfecdc6fea 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -29,7 +29,6 @@ use crate::{ }, VMBlockExecutor, VMValidator, }; -use anyhow::anyhow; use aptos_block_executor::{ code_cache_global_manager::AptosModuleCacheManager, txn_commit_hook::NoOpTransactionCommitHook, @@ -71,6 +70,7 @@ use aptos_types::{ state_store::{StateView, TStateView}, transaction::{ authenticator::{AbstractionAuthData, AnySignature, AuthenticationProof}, + block_epilogue::{BlockEpiloguePayload, FeeDistribution}, signature_verified_transaction::SignatureVerifiedTransaction, BlockOutput, EntryFunction, ExecutionError, ExecutionStatus, ModuleBundle, MultisigTransactionPayload, ReplayProtector, Script, SignedTransaction, Transaction, @@ -1380,7 +1380,7 @@ impl AptosVM { } let size_if_old_module_exists = module_storage - .fetch_module_size_in_bytes(addr, name)? + .unmetered_get_module_size(addr, name)? .map(|v| v as u64); if let Some(old_size) = size_if_old_module_exists { gas_meter @@ -2079,7 +2079,6 @@ impl AptosVM { &self, executor_view: &dyn ExecutorView, resource_group_view: &dyn ResourceGroupView, - module_storage: &impl AptosModuleStorage, change_set: &VMChangeSet, module_write_set: &ModuleWriteSet, ) -> PartialVMResult<()> { @@ -2089,12 +2088,13 @@ impl AptosVM { ); // All Move executions satisfy the read-before-write property. Thus, we need to read each - // access path that the write set is going to update. - for write in module_write_set.writes().values() { - // It is sufficient to simply get the size in order to enforce read-before-write. - module_storage - .fetch_module_size_in_bytes(write.module_address(), write.module_name()) - .map_err(|e| e.to_partial())?; + // access path that the write set is going to update (because the write set comes directly + // from the transaction payload). 
+ for state_key in module_write_set.writes().keys() { + executor_view.read_state_value(state_key).map_err(|err| { + PartialVMError::new(StatusCode::STORAGE_ERROR) + .with_message(format!("Cannot read module at {:?}: {:?}", state_key, err)) + })?; } for (state_key, write_op) in change_set.resource_write_set().iter() { executor_view.get_resource_state_value(state_key, None)?; @@ -2152,7 +2152,6 @@ impl AptosVM { self.read_change_set( resolver.as_executor_view(), resolver.as_resource_group_view(), - code_storage, &change_set, &module_write_set, ) @@ -2190,7 +2189,9 @@ impl AptosVM { &block_metadata.get_prologue_move_args(account_config::reserved_vm_address()), ); - let storage = TraversalStorage::new(); + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + session .execute_function_bypass_visibility( &BLOCK_MODULE, @@ -2198,7 +2199,7 @@ impl AptosVM { vec![], args, &mut gas_meter, - &mut TraversalContext::new(&storage), + &mut traversal_context, module_storage, ) .map(|_return_vals| ()) @@ -2271,7 +2272,8 @@ impl AptosVM { .as_move_value(), ]; - let storage = TraversalStorage::new(); + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); session .execute_function_bypass_visibility( @@ -2280,7 +2282,7 @@ impl AptosVM { vec![], serialize_values(&args), &mut gas_meter, - &mut TraversalContext::new(&storage), + &mut traversal_context, module_storage, ) .map(|_return_vals| ()) @@ -2297,6 +2299,82 @@ impl AptosVM { Ok((VMStatus::Executed, output)) } + fn process_block_epilogue( + &self, + resolver: &impl AptosMoveResolver, + module_storage: &impl AptosModuleStorage, + block_epilogue: BlockEpiloguePayload, + log_context: &AdapterLogSchema, + ) -> Result<(VMStatus, VMOutput), VMStatus> { + let (block_id, fee_distribution) = match block_epilogue { + BlockEpiloguePayload::V0 { .. } => { + let status = TransactionStatus::Keep(ExecutionStatus::Success); + let output = VMOutput::empty_with_status(status); + return Ok((VMStatus::Executed, output)); + }, + BlockEpiloguePayload::V1 { + block_id, + fee_distribution, + .. + } => (block_id, fee_distribution), + }; + + let mut gas_meter = UnmeteredGasMeter; + let mut session = self.new_session(resolver, SessionId::block_epilogue(block_id), None); + + let (validator_indices, amounts) = match fee_distribution { + FeeDistribution::V0 { amount } => amount + .into_iter() + .map(|(validator_index, amount)| { + (MoveValue::U64(validator_index), MoveValue::U64(amount)) + }) + .unzip(), + }; + + let args = vec![ + MoveValue::Signer(AccountAddress::ZERO), // Run as 0x0 + MoveValue::Vector(validator_indices), + MoveValue::Vector(amounts), + ]; + + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + + let output = match session + .execute_function_bypass_visibility( + &BLOCK_MODULE, + BLOCK_EPILOGUE, + vec![], + serialize_values(&args), + &mut gas_meter, + &mut traversal_context, + module_storage, + ) + .map(|_return_vals| ()) + .or_else(|e| expect_only_successful_execution(e, BLOCK_EPILOGUE.as_str(), log_context)) + { + Ok(_) => get_system_transaction_output( + session, + module_storage, + &self.storage_gas_params(log_context)?.change_set_configs, + )?, + Err(e) => { + error!( + "Unexpected error from BlockEpilogue txn: {e:?}, fallback to return success." 
+ ); + let status = TransactionStatus::Keep(ExecutionStatus::Success); + VMOutput::empty_with_status(status) + }, }; + + SYSTEM_TRANSACTIONS_EXECUTED.inc(); + + // TODO(HotState): generate an output according to the block end info in the + // transaction. (maybe resort to the move resolver, but for simplicity I would + // just include the full slot in both the transaction and the output). + Ok((VMStatus::Executed, output)) + } + pub fn execute_view_function( state_view: &impl StateView, module_id: ModuleId, @@ -2313,13 +2391,21 @@ impl AptosVM { let vm_gas_params = match vm.gas_params(&log_context) { Ok(gas_params) => gas_params.vm.clone(), Err(err) => { - return ViewFunctionOutput::new(Err(anyhow::Error::msg(format!("{}", err))), 0) + return ViewFunctionOutput::new_error_message( + format!("{}", err), + Some(err.status_code()), + 0, + ) }, }; let storage_gas_params = match vm.storage_gas_params(&log_context) { Ok(gas_params) => gas_params.clone(), Err(err) => { - return ViewFunctionOutput::new(Err(anyhow::Error::msg(format!("{}", err))), 0) + return ViewFunctionOutput::new_error_message( + format!("{}", err), + Some(err.status_code()), + 0, + ) }, }; @@ -2349,7 +2435,39 @@ impl AptosVM { let gas_used = Self::gas_used(max_gas_amount.into(), &gas_meter); match execution_result { Ok(result) => ViewFunctionOutput::new(Ok(result), gas_used), - Err(e) => ViewFunctionOutput::new(Err(e), gas_used), + Err(e) => { + let vm_status = e.clone().into_vm_status(); + match vm_status { + VMStatus::MoveAbort(_, _) => {}, + _ => { + let message = e + .message() + .map(|m| m.to_string()) + .unwrap_or_else(|| e.to_string()); + return ViewFunctionOutput::new_error_message( + message, + Some(vm_status.status_code()), + gas_used, + ); + }, + } + let txn_status = TransactionStatus::from_vm_status( + vm_status.clone(), + vm.features() + .is_enabled(FeatureFlag::CHARGE_INVARIANT_VIOLATION), + ); + let execution_status = match txn_status { + TransactionStatus::Keep(status) => status, + _ => ExecutionStatus::MiscellaneousError(Some(vm_status.status_code())), + }; + let status_with_abort_info = + vm.inject_abort_info_if_available(&module_storage, execution_status); + ViewFunctionOutput::new_move_abort_error( + status_with_abort_info, + Some(vm_status.status_code()), + gas_used, + ) + }, } } @@ -2369,7 +2487,7 @@ impl AptosVM { arguments: Vec<Vec<u8>>, gas_meter: &mut impl AptosGasMeter, module_storage: &impl AptosModuleStorage, - ) -> anyhow::Result<Vec<Vec<u8>>> { + ) -> Result<Vec<Vec<u8>>, VMError> { let traversal_storage = TraversalStorage::new(); let mut traversal_context = TraversalContext::new(&traversal_storage); @@ -2384,17 +2502,18 @@ impl AptosVM { &func, metadata.as_ref().map(Arc::as_ref), vm.features().is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS), + ) + .map_err(|e| e.finish(Location::Module(module_id)))?; + + let result = session.execute_loaded_function( + func, + arguments, + gas_meter, + &mut traversal_context, + module_storage, )?; - Ok(session - .execute_loaded_function( - func, - arguments, - gas_meter, - &mut traversal_context, - module_storage, - ) - .map_err(|err| anyhow!("Failed to execute function: {:?}", err))? + Ok(result .return_values .into_iter() .map(|(bytes, _ty)| bytes) @@ -2604,14 +2723,12 @@ impl AptosVM { let output = VMOutput::empty_with_status(status); (VMStatus::Executed, output) }, - Transaction::BlockEpilogue(_) => { - let status = TransactionStatus::Keep(ExecutionStatus::Success); - // TODO(HotState): generate an output according to the block end info in the - // transaction. 
(maybe resort to the move resolver, but for simplicity I would - // just include the full slot in both the transaction and the output). - let output = VMOutput::empty_with_status(status); - (VMStatus::Executed, output) - }, + Transaction::BlockEpilogue(block_epilogue) => self.process_block_epilogue( + resolver, + code_storage, + block_epilogue.clone(), + log_context, + )?, Transaction::ValidatorTransaction(txn) => { let (vm_status, output) = self.process_validator_transaction( resolver, diff --git a/aptos-move/aptos-vm/src/block_executor/mod.rs b/aptos-move/aptos-vm/src/block_executor/mod.rs index 285a326f78080..f3c278379cb69 100644 --- a/aptos-move/aptos-vm/src/block_executor/mod.rs +++ b/aptos-move/aptos-vm/src/block_executor/mod.rs @@ -195,13 +195,15 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { } /// Should never be called after incorporating materialized output, as that consumes vm_output. - fn module_write_set(&self) -> BTreeMap<StateKey, ModuleWrite<WriteOp>> { + fn module_write_set(&self) -> Vec<ModuleWrite<WriteOp>> { self.vm_output .lock() .as_ref() .expect("Output must be set to get module writes") .module_write_set() - .clone() + .values() + .cloned() + .collect() } /// Should never be called after incorporating materialized output, as that consumes vm_output. @@ -334,9 +336,16 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { ); } - /// Return the fee statement of the transaction. - /// Should never be called after vm_output is consumed. + /// Returns the fee statement of the transaction. + /// + /// TODO(gelash): Consider defensive access pattern to committed_output / vm_output. fn fee_statement(&self) -> FeeStatement { + if let Some(committed_output) = self.committed_output.get() { + if let Ok(Some(fee_statement)) = committed_output.try_extract_fee_statement() { + return fee_statement; + } + return FeeStatement::zero(); + } *self .vm_output .lock() @@ -367,6 +376,24 @@ impl BlockExecutorTransactionOutput for AptosTransactionOutput { .has_new_epoch_event() }
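`fee_statement` above (and `is_success` just below) read from `committed_output` when it is already set and only fall back to the still-pending `vm_output` otherwise. A sketch of that dual-source accessor pattern with the Mutex/OnceCell plumbing elided:

```rust
// Sketch of the dual-source accessor pattern used by fee_statement and
// is_success: once an output has been committed, read from it; otherwise
// fall back to the pending VM output. Simple Options stand in for both
// slots; the real code uses OnceCell and a Mutex.
struct TxnOutput {
    committed_output: Option<bool>, // Some(success) once committed
    vm_output: Option<bool>,        // pending output, consumed on commit
}

impl TxnOutput {
    fn is_success(&self) -> bool {
        if let Some(committed) = self.committed_output {
            committed
        } else {
            self.vm_output
                .expect("Either vm_output or committed_output must exist.")
        }
    }
}

fn main() {
    let pending = TxnOutput { committed_output: None, vm_output: Some(true) };
    assert!(pending.is_success());
    let committed = TxnOutput { committed_output: Some(false), vm_output: None };
    assert!(!committed.is_success());
}
```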
+ /// Returns true iff the execution status is Keep(Success). + fn is_success(&self) -> bool { + if let Some(committed_output) = self.committed_output.get() { + committed_output + .status() + .as_kept_status() + .map_or(false, |status| status.is_success()) + } else { + self.vm_output + .lock() + .as_ref() + .expect("Either vm_output or committed_output must exist.") + .status() + .as_kept_status() + .map_or(false, |status| status.is_success()) + } + } + fn output_approx_size(&self) -> u64 { let vm_output = self.vm_output.lock(); vm_output @@ -473,7 +500,7 @@ impl< ); match ret { Ok(block_output) => { - let (transaction_outputs, block_end_info) = block_output.into_inner(); + let (transaction_outputs, block_epilogue_txn) = block_output.into_inner(); let output_vec: Vec<_> = transaction_outputs .into_iter() .map(|output| output.take_output()) .collect(); @@ -488,7 +515,7 @@ flush_speculative_logs(pos); } - Ok(BlockOutput::new(output_vec, block_end_info)) + Ok(BlockOutput::new(output_vec, block_epilogue_txn)) }, Err(BlockExecutionError::FatalBlockExecutorError(PanicError::CodeInvariantError( err_msg, diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/mod.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/mod.rs index ddadbd137e91e..faf7895cf26d7 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/mod.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/mod.rs @@ -44,7 +44,11 @@ use move_vm_runtime::{ native_extensions::NativeContextExtensions, AsFunctionValueExtension, LoadedFunction, ModuleStorage, VerifiedModuleBundle, }; -use move_vm_types::{gas::GasMeter, value_serde::ValueSerDeContext, values::Value}; +use move_vm_types::{ + gas::GasMeter, + value_serde::{FunctionValueExtension, ValueSerDeContext}, + values::Value, +}; use std::{borrow::Borrow, collections::BTreeMap, sync::Arc}; pub mod respawned_session; @@ -176,7 +180,7 @@ where // We allow serialization of native values here because we want to // temporarily store native values (via encoding to ensure deterministic // gas charging) in block storage. - ValueSerDeContext::new() + ValueSerDeContext::new(function_extension.max_value_nest_depth()) .with_delayed_fields_serde() .with_func_args_deserialization(&function_extension) .serialize(&value, &layout)? @@ -184,7 +188,7 @@ where } else { // Otherwise, there should be no native values so ensure // serialization fails here if there are any. - ValueSerDeContext::new() + ValueSerDeContext::new(function_extension.max_value_nest_depth()) .with_func_args_deserialization(&function_extension) .serialize(&value, &layout)? 
.map(|bytes| (bytes.into(), None)) diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session/session_id.rs b/aptos-move/aptos-vm/src/move_vm_ext/session/session_id.rs index c1a7a9cd8a36a..2baaa3a241eec 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session/session_id.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session/session_id.rs @@ -74,6 +74,10 @@ pub enum SessionId { expiration_time: u64, script_hash: Vec, }, + BlockEpilogue { + // block id + id: HashValue, + }, } impl SessionId { @@ -109,6 +113,10 @@ impl SessionId { } } + pub fn block_epilogue(id: HashValue) -> Self { + Self::BlockEpilogue { id } + } + pub fn prologue_meta(txn_metadata: &TransactionMetadata) -> Self { match txn_metadata.replay_protector() { ReplayProtector::SequenceNumber(sequence_number) => Self::Prologue { @@ -185,6 +193,7 @@ impl SessionId { Self::BlockMeta { id: _ } | Self::Genesis { id: _ } | Self::Void + | Self::BlockEpilogue { id: _ } | Self::BlockMetaExt { id: _ } => vec![], } } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/write_op_converter.rs b/aptos-move/aptos-vm/src/move_vm_ext/write_op_converter.rs index 10cae90336a66..55035b7ad8d57 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/write_op_converter.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/write_op_converter.rs @@ -86,7 +86,8 @@ impl<'r> WriteOpConverter<'r> { let name = module_id.name(); // If state value metadata exists, this is a modification. - let state_value_metadata = module_storage.fetch_state_value_metadata(addr, name)?; + let state_value_metadata = + module_storage.get_module_state_value_metadata(addr, name)?; let op = if state_value_metadata.is_some() { Op::Modify(bytes) } else { diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs b/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs index 16833d1e48555..51596f3640369 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs @@ -139,7 +139,8 @@ impl ShardedExecutorService { ); }); s.spawn(move |_| { - let txn_provider = DefaultTxnProvider::new(signature_verified_transactions); + let txn_provider = + DefaultTxnProvider::new_without_info(signature_verified_transactions); let ret = AptosVMBlockExecutorWrapper::execute_block_on_thread_pool( executor_thread_pool, &txn_provider, diff --git a/aptos-move/aptos-vm/src/system_module_names.rs b/aptos-move/aptos-vm/src/system_module_names.rs index 1124da643abfc..d8fc8720ec059 100644 --- a/aptos-move/aptos-vm/src/system_module_names.rs +++ b/aptos-move/aptos-vm/src/system_module_names.rs @@ -38,6 +38,7 @@ pub static BLOCK_MODULE: Lazy = Lazy::new(|| { pub const BLOCK_PROLOGUE: &IdentStr = ident_str!("block_prologue"); pub const BLOCK_PROLOGUE_EXT: &IdentStr = ident_str!("block_prologue_ext"); +pub const BLOCK_EPILOGUE: &IdentStr = ident_str!("block_epilogue"); pub static RECONFIGURATION_WITH_DKG_MODULE: Lazy = Lazy::new(|| { ModuleId::new( diff --git a/aptos-move/aptos-vm/tests/sharded_block_executor.rs b/aptos-move/aptos-vm/tests/sharded_block_executor.rs index 4a0bd2f426102..477b5af1a98a7 100644 --- a/aptos-move/aptos-vm/tests/sharded_block_executor.rs +++ b/aptos-move/aptos-vm/tests/sharded_block_executor.rs @@ -326,7 +326,7 @@ mod test_utils { .into_iter() .map(|t| t.into_txn()) .collect(); - let txn_provider = DefaultTxnProvider::new(ordered_txns); + let txn_provider = DefaultTxnProvider::new_without_info(ordered_txns); let unsharded_txn_output = AptosVMBlockExecutor::new() 
.execute_block_no_limit(&txn_provider, &state_store) .unwrap(); @@ -377,7 +377,7 @@ mod test_utils { ) .unwrap(); - let txn_provider = DefaultTxnProvider::new(execution_ordered_txns); + let txn_provider = DefaultTxnProvider::new_without_info(execution_ordered_txns); let unsharded_txn_output = AptosVMBlockExecutor::new() .execute_block_no_limit(&txn_provider, &state_store) .unwrap(); @@ -432,7 +432,7 @@ mod test_utils { ) .unwrap(); - let txn_provider = DefaultTxnProvider::new(execution_ordered_txns); + let txn_provider = DefaultTxnProvider::new_without_info(execution_ordered_txns); let unsharded_txn_output = AptosVMBlockExecutor::new() .execute_block_no_limit(&txn_provider, &state_store) .unwrap(); diff --git a/aptos-move/block-executor/src/captured_reads.rs b/aptos-move/block-executor/src/captured_reads.rs index 90a5b94e5744e..9bbdfa9d5ab27 100644 --- a/aptos-move/block-executor/src/captured_reads.rs +++ b/aptos-move/block-executor/src/captured_reads.rs @@ -14,8 +14,8 @@ use aptos_aggregator::{ }; use aptos_mvhashmap::{ types::{ - MVDataError, MVDataOutput, MVDelayedFieldsError, MVGroupError, StorageVersion, TxnIndex, - ValueWithLayout, Version, + Incarnation, MVDataError, MVDataOutput, MVDelayedFieldsError, MVGroupError, StorageVersion, + TxnIndex, ValueWithLayout, Version, }, versioned_data::VersionedData, versioned_delayed_fields::TVersionedDelayedFieldView, @@ -87,39 +87,11 @@ pub(crate) enum DataRead { /// Read resolved an aggregatorV1 delta to a value. /// TODO[agg_v1](cleanup): deprecate. Resolved(u128), + // CAUTION: when adding a new variant here, it must be ensured that compare + // data reads implements a comparison (o.w. unreachable arm will be hit). } -// Represents the result of comparing DataReads ('self' and 'other'). -#[derive(Debug)] -enum DataReadComparison { - // Information in 'self' DataRead contains information about the kind of the - // 'other' DataRead, and is consistent with 'other'. - Contains, - // Information in 'self' DataRead contains information about the kind of the - // 'other' DataRead, but is inconsistent with 'other'. - Inconsistent, - // All information about the kind of 'other' is not contained in 'self' kind. - // For example, exists does not provide enough information about metadata. - Insufficient, -} - -impl DataRead { - fn merge_metadata_and_size(&mut self, other: &DataRead) -> bool { - if let DataRead::ResourceSize(size) = other { - if let DataRead::Metadata(ref mut metadata) = self { - *self = DataRead::MetadataAndResourceSize(std::mem::take(metadata), *size); - return true; - } - } - if let DataRead::Metadata(metadata) = other { - if let DataRead::ResourceSize(ref mut size) = self { - *self = DataRead::MetadataAndResourceSize(metadata.clone(), std::mem::take(size)); - return true; - } - } - false - } - +impl DataRead { // Assigns highest rank to Versioned / Resolved, then Metadata, then Exists. // (e.g. versioned read implies metadata and existence information, and // metadata information implies existence information). @@ -133,34 +105,23 @@ impl DataRead { Exists(_) => ReadKind::Exists, } } +} - // A convenience method, since the same key can be read in different modes, producing - // different DataRead / ReadKinds. Returns true if self has >= kind than other, i.e. - // contains more or equal information, and is consistent with the information in other. 
- fn contains(&self, other: &DataRead) -> DataReadComparison { - let self_kind = self.get_kind(); - let other_kind = other.get_kind(); - - if self_kind == other_kind { - // Optimization to avoid unnecessary clone in convert_to. Has been optimized - // because contains is called during validation. - if self == other { - DataReadComparison::Contains - } else { - DataReadComparison::Inconsistent +impl DataRead { + fn merge_metadata_and_size(&mut self, other: &DataRead) -> bool { + if let DataRead::ResourceSize(size) = other { + if let DataRead::Metadata(ref mut metadata) = self { + *self = DataRead::MetadataAndResourceSize(std::mem::take(metadata), *size); + return true; } - } else { - match self.convert_to(&other_kind) { - Some(value) => { - if &value == other { - DataReadComparison::Contains - } else { - DataReadComparison::Inconsistent - } - }, - None => DataReadComparison::Insufficient, + } + if let DataRead::Metadata(metadata) = other { + if let DataRead::ResourceSize(ref mut size) = self { + *self = DataRead::MetadataAndResourceSize(metadata.clone(), std::mem::take(size)); + return true; } } + false } fn value_size(v: &Arc) -> Option { @@ -287,6 +248,149 @@ impl DataRead { } } +// Represents the result of comparing DataReads ('self' and 'other'). +#[derive(Debug)] +enum DataReadComparison { + // Information in 'self' DataRead contains information about the kind of the + // 'other' DataRead, and is consistent with 'other'. + Contains, + // Information in 'self' DataRead contains information about the kind of the + // 'other' DataRead, but is inconsistent with 'other'. + Inconsistent, + // All information about the kind of 'other' is not contained in 'self' kind. + // For example, exists does not provide enough information about metadata. + Insufficient, +} + +struct DataReadComparator { + // If set, BlockSTM V2 is enabled. + blockstm_v2_incarnation: Option, +} + +impl DataReadComparator { + fn new(blockstm_v2_incarnation: Option) -> Self { + Self { + blockstm_v2_incarnation, + } + } + + fn data_read_equals(&self, v1: &DataRead, v2: &DataRead) -> bool { + match (v1, v2) { + ( + DataRead::Versioned(v1_version, v1_value, v1_layout), + DataRead::Versioned(v2_version, v2_value, v2_layout), + ) => { + if v1_version == v2_version { + true + } else { + // TODO(BlockSTMv2): Like in MVDataMap, we assume data reads are not equal if both layouts + // are set, in order to avoid expensive equality checks. This should be compensated here + // by the above early return if versions are equal (for both V1 and V2 BlockSTM). 
+ self.blockstm_v2_incarnation.is_some() + && v1_layout.is_none() + && v2_layout.is_none() + && v1_value == v2_value + } + }, + (DataRead::Metadata(v1_metadata), DataRead::Metadata(v2_metadata)) => { + v1_metadata == v2_metadata + }, + (DataRead::Exists(v1_exists), DataRead::Exists(v2_exists)) => v1_exists == v2_exists, + (DataRead::Resolved(v1_resolved), DataRead::Resolved(v2_resolved)) => { + v1_resolved == v2_resolved + }, + ( + DataRead::MetadataAndResourceSize(v1_metadata, v1_size), + DataRead::MetadataAndResourceSize(v2_metadata, v2_size), + ) => v1_metadata == v2_metadata && v1_size == v2_size, + (DataRead::ResourceSize(v1_size), DataRead::ResourceSize(v2_size)) => { + v1_size == v2_size + }, + ( + DataRead::Versioned(_, _, _), + DataRead::Resolved(_) + | DataRead::MetadataAndResourceSize(_, _) + | DataRead::Metadata(_) + | DataRead::ResourceSize(_) + | DataRead::Exists(_), + ) + | ( + DataRead::Resolved(_), + DataRead::Versioned(_, _, _) + | DataRead::MetadataAndResourceSize(_, _) + | DataRead::Metadata(_) + | DataRead::ResourceSize(_) + | DataRead::Exists(_), + ) + | ( + DataRead::MetadataAndResourceSize(_, _), + DataRead::Versioned(_, _, _) + | DataRead::Resolved(_) + | DataRead::Metadata(_) + | DataRead::ResourceSize(_) + | DataRead::Exists(_), + ) + | ( + DataRead::Metadata(_), + DataRead::Versioned(_, _, _) + | DataRead::Resolved(_) + | DataRead::MetadataAndResourceSize(_, _) + | DataRead::ResourceSize(_) + | DataRead::Exists(_), + ) + | ( + DataRead::ResourceSize(_), + DataRead::Versioned(_, _, _) + | DataRead::Resolved(_) + | DataRead::MetadataAndResourceSize(_, _) + | DataRead::Metadata(_) + | DataRead::Exists(_), + ) + | ( + DataRead::Exists(_), + DataRead::Versioned(_, _, _) + | DataRead::Resolved(_) + | DataRead::MetadataAndResourceSize(_, _) + | DataRead::Metadata(_) + | DataRead::ResourceSize(_), + ) => false, + } + } + + // A convenience method, since the same key can be read in different modes, producing + // different DataRead / ReadKinds. Returns true if self has >= kind than other, i.e. + // contains more or equal information, and is consistent with the information in other. + fn compare_data_reads( + &self, + self_read: &DataRead, + other_read: &DataRead, + ) -> DataReadComparison { + let self_kind = self_read.get_kind(); + let other_kind = other_read.get_kind(); + + if self_kind == other_kind { + // Optimization to avoid unnecessary clone in convert_to (because contains + // method is called during validation). + if self.data_read_equals(self_read, other_read) { + DataReadComparison::Contains + } else { + DataReadComparison::Inconsistent + } + } else { + match self_read.convert_to(&other_kind) { + Some(value) => { + if self.data_read_equals(&value, other_read) { + DataReadComparison::Contains + } else { + DataReadComparison::Inconsistent + } + }, + None => DataReadComparison::Insufficient, + } + } + } +} + /// Additional state regarding groups that may be provided to the VM during transaction /// execution and is captured. There may be a DataRead per tag within the group, and also /// the group size, computed based on speculative information in MVHashMap, by "collecting" @@ -441,8 +545,6 @@ pub enum CacheRead { /// resolution from MVHashMap/storage should be captured. This enforces an invariant that /// 'capture_read' will never be called with a read that can be resolved from the already /// captured variant (e.g. Size, Metadata, or exists if SizeAndMetadata is already captured). 
-#[derive(Derivative)] -#[derivative(Default(bound = "", new = "true"))] pub(crate) struct CapturedReads { data_reads: HashMap>, group_reads: HashMap>, @@ -462,6 +564,31 @@ pub(crate) struct CapturedReads { /// Set if the invariant on CapturedReads intended use is violated. Leads to an alert /// and sequential execution fallback. incorrect_use: bool, + + data_read_comparator: DataReadComparator, +} + +impl Default for CapturedReads { + fn default() -> Self { + Self::new(None) + } +} + +impl CapturedReads { + #[allow(deprecated)] + pub(crate) fn new(blockstm_v2_incarnation: Option) -> Self { + Self { + data_reads: HashMap::new(), + group_reads: HashMap::new(), + delayed_field_reads: HashMap::new(), + deprecated_module_reads: Vec::new(), + module_reads: hashbrown::HashMap::new(), + delayed_field_speculative_failure: false, + non_delayed_field_speculative_failure: false, + incorrect_use: false, + data_read_comparator: DataReadComparator::new(blockstm_v2_incarnation), + } + } } #[derive(Debug)] @@ -524,9 +651,10 @@ where // Required usage pattern: if existing entry contains enough information to // deduce the read, update_entry should not be called by the caller (i.e. // the need to cache the read must already be established). - fn update_entry( + fn update_entry( entry: Entry>, read: DataRead, + data_read_comparator: &DataReadComparator, ) -> UpdateResult { match entry { Vacant(e) => { @@ -550,7 +678,7 @@ where } // In all other cases, new read must have more information. - match read.contains(existing_read) { + match data_read_comparator.compare_data_reads(&read, existing_read) { DataReadComparison::Contains => { *existing_read = read; UpdateResult::Updated @@ -653,9 +781,17 @@ where let ret = match maybe_tag { Some(tag) => { let group = self.group_reads.entry(state_key).or_default(); - Self::update_entry(group.inner_reads.entry(tag), read) + Self::update_entry( + group.inner_reads.entry(tag), + read, + &self.data_read_comparator, + ) }, - None => Self::update_entry(self.data_reads.entry(state_key), read), + None => Self::update_entry( + self.data_reads.entry(state_key), + read, + &self.data_read_comparator, + ), }; match ret { @@ -779,15 +915,18 @@ where use MVDataError::*; use MVDataOutput::*; self.data_reads.iter().all(|(k, r)| { + // We use fetch_data even with BlockSTMv2, because we don't want to record reads. 
match data_map.fetch_data(k, idx_to_validate) { Ok(Versioned(version, v)) => { matches!( - DataRead::from_value_with_layout(version, v).contains(r), + self.data_read_comparator + .compare_data_reads(&DataRead::from_value_with_layout(version, v), r), DataReadComparison::Contains ) }, Ok(Resolved(value)) => matches!( - DataRead::Resolved(value).contains(r), + self.data_read_comparator + .compare_data_reads(&DataRead::Resolved(value), r), DataReadComparison::Contains ), // Dependency implies a validation failure, and if the original read were to @@ -875,7 +1014,10 @@ where match group_map.fetch_tagged_data(key, tag, idx_to_validate) { Ok((version, v)) => { matches!( - DataRead::from_value_with_layout(version, v).contains(r), + self.data_read_comparator.compare_data_reads( + &DataRead::from_value_with_layout(version, v), + r, + ), DataReadComparison::Contains ) }, @@ -884,8 +1026,10 @@ where Arc::::new(TransactionWrite::from_state_value(None)); assert!(sentinel_deletion.is_deletion()); matches!( - DataRead::Versioned(Err(StorageVersion), sentinel_deletion, None) - .contains(r), + self.data_read_comparator.compare_data_reads( + &DataRead::Versioned(Err(StorageVersion), sentinel_deletion, None), + r, + ), DataReadComparison::Contains ) }, @@ -1049,7 +1193,10 @@ mod test { use super::*; use crate::{ code_cache_global::GlobalModuleCache, - proptest_types::types::{raw_metadata, KeyType, MockEvent, ValueType}, + proptest_types::{ + mock_executor::MockEvent, + types::{raw_metadata, KeyType, ValueType}, + }, }; use aptos_mvhashmap::{types::StorageVersion, MVHashMap}; use claims::{ @@ -1065,6 +1212,7 @@ mod test { use test_case::test_case; // Macro to reduce code duplication for CapturedReads type parameters + // TODO(BlockSTMv2): Test w. BlockSTMv2 data read comparator. macro_rules! test_captured_reads { (update_entry, $entry:expr, $read:expr) => { CapturedReads::< @@ -1073,7 +1221,7 @@ mod test { MockDeserializedCode, MockVerifiedCode, MockExtension, - >::update_entry($entry, $read) + >::update_entry($entry, $read, &DataReadComparator::new(None)) }; (new) => { CapturedReads::< @@ -1082,7 +1230,7 @@ mod test { MockDeserializedCode, MockVerifiedCode, MockExtension, - >::new() + >::new(None) }; } @@ -1209,33 +1357,55 @@ mod test { macro_rules! assert_inconsistent_same_kind { ($x:expr, $y:expr) => {{ + let data_read_comparator = DataReadComparator::new(None); assert_ne!($x, $y); assert_ne!($y, $x); - assert_matches!($x.contains(&$y), DataReadComparison::Inconsistent); - assert_matches!($y.contains(&$x), DataReadComparison::Inconsistent); + assert_matches!( + data_read_comparator.compare_data_reads(&$x, &$y), + DataReadComparison::Inconsistent + ); + assert_matches!( + data_read_comparator.compare_data_reads(&$y, &$x), + DataReadComparison::Inconsistent + ); }}; } macro_rules! assert_inconsistent_downcast { ($x:expr, $y:expr) => {{ + let data_read_comparator = DataReadComparator::new(None); assert_ne!($x, $y); assert_ne!($y, $x); - assert_matches!($x.contains(&$y), DataReadComparison::Inconsistent); - assert_matches!($y.contains(&$x), DataReadComparison::Insufficient); + assert_matches!( + data_read_comparator.compare_data_reads(&$x, &$y), + DataReadComparison::Inconsistent + ); + assert_matches!( + data_read_comparator.compare_data_reads(&$y, &$x), + DataReadComparison::Insufficient + ); }}; } macro_rules! 
assert_contains { ($x:expr, $y:expr) => {{ + let data_read_comparator = DataReadComparator::new(None); assert_some_eq!($x.convert_to(&$y.get_kind()), $y); - assert_matches!($x.contains(&$y), DataReadComparison::Contains); + assert_matches!( + data_read_comparator.compare_data_reads(&$x, &$y), + DataReadComparison::Contains + ); }}; } macro_rules! assert_insufficient { ($x:expr, $y:expr) => {{ + let data_read_comparator = DataReadComparator::new(None); assert_none!($x.convert_to(&$y.get_kind())); - assert_matches!($x.contains(&$y), DataReadComparison::Insufficient); + assert_matches!( + data_read_comparator.compare_data_reads(&$x, &$y), + DataReadComparison::Insufficient + ); }}; } @@ -1632,19 +1802,19 @@ mod test { assert_capture_get!( captured_reads, - KeyType::(10, false), + KeyType::(10), use_tag.then_some(30), legacy_reads ); assert_capture_get!( captured_reads, - KeyType::(11, false), + KeyType::(11), use_tag.then_some(30), deletion_reads ); assert_capture_get!( captured_reads, - KeyType::(15, false), + KeyType::(15), use_tag.then_some(30), with_metadata_reads ); @@ -1654,7 +1824,7 @@ mod test { #[test] fn metadata_for_group_member() { let captured_reads = test_captured_reads!(new); - captured_reads.get_by_kind(&KeyType::(21, false), Some(&10), ReadKind::Metadata); + captured_reads.get_by_kind(&KeyType::(21), Some(&10), ReadKind::Metadata); } macro_rules! assert_incorrect_use { @@ -1689,19 +1859,19 @@ mod test { assert_incorrect_use!( captured_reads, - KeyType::(10, false), + KeyType::(10), use_tag.then_some(30), legacy_reads ); assert_incorrect_use!( captured_reads, - KeyType::(11, false), + KeyType::(11), use_tag.then_some(30), deletion_reads ); assert_incorrect_use!( captured_reads, - KeyType::(15, false), + KeyType::(15), use_tag.then_some(30), with_metadata_reads ); @@ -1710,7 +1880,7 @@ mod test { assert!(!captured_reads.incorrect_use); for i in 0..3 { - let key = KeyType::(20 + i, false); + let key = KeyType::(20 + i); assert_ok!(captured_reads.capture_read( key, use_tag.then_some(30), @@ -1747,7 +1917,7 @@ mod test { assert!(!captured_reads.non_delayed_field_speculative_failure); assert!(!captured_reads.delayed_field_speculative_failure); - let key = KeyType::(20, false); + let key = KeyType::(20); assert_ok!(captured_reads.capture_read(key, use_tag.then_some(30), exists)); assert_err!(captured_reads.capture_read( key, @@ -1762,7 +1932,7 @@ mod test { captured_reads.incorrect_use = false; captured_reads.non_delayed_field_speculative_failure = false; captured_reads.delayed_field_speculative_failure = false; - let key = KeyType::(21, false); + let key = KeyType::(21); assert_ok!(captured_reads.capture_read(key, use_tag.then_some(30), deletion_metadata)); assert_err!(captured_reads.capture_read(key, use_tag.then_some(30), resolved)); assert!(captured_reads.non_delayed_field_speculative_failure); @@ -1776,7 +1946,7 @@ mod test { captured_reads.non_delayed_field_speculative_failure = false; captured_reads.delayed_field_speculative_failure = false; - let key = KeyType::(22, false); + let key = KeyType::(22); assert_ok!(captured_reads.capture_read(key, use_tag.then_some(30), metadata)); assert_err!(captured_reads.capture_read(key, use_tag.then_some(30), versioned_legacy)); assert!(captured_reads.non_delayed_field_speculative_failure); diff --git a/aptos-move/block-executor/src/code_cache.rs b/aptos-move/block-executor/src/code_cache.rs index 746e32f559acd..f06d6d5625213 100644 --- a/aptos-move/block-executor/src/code_cache.rs +++ b/aptos-move/block-executor/src/code_cache.rs @@ -8,6 
+8,8 @@ use crate::{ }; use ambassador::delegate_to_methods; use aptos_mvhashmap::types::TxnIndex; +#[cfg(test)] +use aptos_types::on_chain_config::CurrentTimeMicroseconds; use aptos_types::{ executable::ModulePath, state_store::{state_value::StateValueMetadata, TStateView}, @@ -15,6 +17,8 @@ use aptos_types::{ vm::modules::AptosModuleExtension, }; use aptos_vm_types::module_and_script_storage::module_storage::AptosModuleStorage; +#[cfg(test)] +use fail::fail_point; use move_binary_format::{ errors::{Location, PartialVMResult, VMResult}, file_format::CompiledScript, @@ -182,17 +186,28 @@ impl> ModuleCache for LatestView<'_, } impl> AptosModuleStorage for LatestView<'_, T, S> { - fn fetch_state_value_metadata( + fn get_module_state_value_metadata( &self, address: &AccountAddress, module_name: &IdentStr, ) -> PartialVMResult> { let id = ModuleId::new(*address, module_name.to_owned()); - let state_value_metadata = self + let result = self .get_module_or_build_with(&id, self) - .map_err(|err| err.to_partial())? - .map(|(module, _)| module.extension().state_value_metadata().clone()); - Ok(state_value_metadata) + .map_err(|err| err.to_partial())?; + + // In order to test the module cache with combinatorial tests, we embed the version + // information into the state value metadata (execute_transaction has access via + // AptosModuleStorage trait only). + #[cfg(test)] + fail_point!("module_test", |_| { + Ok(result.clone().map(|(_, version)| { + let v = version.unwrap_or(u32::MAX) as u64; + StateValueMetadata::legacy(v, &CurrentTimeMicroseconds { microseconds: v }) + })) + }); + + Ok(result.map(|(module, _)| module.extension().state_value_metadata().clone())) } } diff --git a/aptos-move/block-executor/src/executor.rs b/aptos-move/block-executor/src/executor.rs index 57dc252c66c66..d71882f28cc81 100644 --- a/aptos-move/block-executor/src/executor.rs +++ b/aptos-move/block-executor/src/executor.rs @@ -16,10 +16,11 @@ use crate::{ explicit_sync_wrapper::ExplicitSyncWrapper, limit_processor::BlockGasLimitProcessor, scheduler::{DependencyStatus, ExecutionTaskType, Scheduler, SchedulerTask, Wave}, + scheduler_v2::{AbortManager, SchedulerV2, TaskKind}, scheduler_wrapper::SchedulerWrapper, task::{ExecutionStatus, ExecutorTask, TransactionOutput}, txn_commit_hook::TransactionCommitHook, - txn_last_input_output::{KeyKind, TxnLastInputOutput}, + txn_last_input_output::TxnLastInputOutput, txn_provider::TxnProvider, types::ReadWriteSummary, view::{LatestView, ParallelState, SequentialState, ViewState}, @@ -42,10 +43,11 @@ use aptos_types::{ config::BlockExecutorConfig, transaction_slice_metadata::TransactionSliceMetadata, }, error::{code_invariant_error, expect_ok, PanicError, PanicOr}, - on_chain_config::BlockGasLimitType, + on_chain_config::{BlockGasLimitType, Features}, state_store::{state_value::StateValue, TStateView}, transaction::{ - block_epilogue::TBlockEndInfoExt, BlockExecutableTransaction, BlockOutput, Transaction, + block_epilogue::TBlockEndInfoExt, BlockExecutableTransaction, BlockOutput, FeeDistribution, + Transaction, }, vm::modules::AptosModuleExtension, write_set::{TransactionWrite, WriteOp}, @@ -76,13 +78,31 @@ use std::{ }, }; +struct SharedSyncParams<'a, T, E, S> +where + T: BlockExecutableTransaction, + E: ExecutorTask, + S: TStateView + Sync, +{ + // TODO: should not need to pass base view. 
+ base_view: &'a S, + scheduler: &'a SchedulerV2, + versioned_cache: &'a MVHashMap, + global_module_cache: + &'a GlobalModuleCache, + last_input_output: &'a TxnLastInputOutput, + delayed_field_id_counter: &'a AtomicU32, + block_limit_processor: &'a ExplicitSyncWrapper>, + final_results: &'a ExplicitSyncWrapper>, +} + pub struct BlockExecutor { // Number of active concurrent tasks, corresponding to the maximum number of rayon // threads that may be concurrently participating in parallel execution. config: BlockExecutorConfig, executor_thread_pool: Arc, transaction_commit_hook: Option, - phantom: PhantomData<(T, E, S, L, TP)>, + phantom: PhantomData (T, E, S, L, TP)>, } impl BlockExecutor @@ -142,7 +162,7 @@ where } } - fn execute( + fn execute_v2( idx_to_execute: TxnIndex, incarnation: Incarnation, signature_verified_block: &TP, @@ -158,11 +178,14 @@ where >, runtime_environment: &RuntimeEnvironment, parallel_state: ParallelState, - ) -> Result> { + scheduler: &SchedulerV2, + ) -> Result<(), PanicError> { let _timer = TASK_EXECUTE_SECONDS.start_timer(); + + // TODO(BlockSTMv2): proper integration w. execution pooling for performance. let txn = signature_verified_block.get_txn(idx_to_execute); - // VM execution. + let mut abort_manager = AbortManager::new(idx_to_execute, incarnation, scheduler); let sync_view = LatestView::new( base_view, global_module_cache, @@ -172,10 +195,94 @@ where ); let execution_result = executor.execute_transaction(&sync_view, txn, idx_to_execute); - let mut prev_modified_keys = last_input_output - .modified_keys(idx_to_execute, true) - .map_or_else(HashMap::new, |keys| keys.collect()); + let mut prev_modified_resource_keys = last_input_output + .modified_resource_keys(idx_to_execute) + .map_or_else(HashSet::new, |keys| keys.map(|(k, _)| k).collect()); + let mut read_set = sync_view.take_parallel_reads(); + if read_set.is_incorrect_use() { + return Err(code_invariant_error(format!( + "Incorrect use detected in CapturedReads after executing txn = {idx_to_execute} incarnation = {incarnation}" + ))); + } + + let maybe_output = + Self::process_execution_result(&execution_result, &mut read_set, idx_to_execute)?; + + // TODO: BlockSTMv2: use estimates for delayed field reads? (see V1 update on abort). + let mut resource_write_set = Vec::new(); + if let Some(output) = maybe_output { + resource_write_set = output.resource_write_set(); + for (key, value, maybe_layout) in resource_write_set.clone().into_iter() { + prev_modified_resource_keys.remove(&key); + abort_manager.invalidate_dependencies(versioned_cache.data().write_v2::( + key, + idx_to_execute, + incarnation, + value, + maybe_layout, + ))?; + } + // TODO(BlockSTMv2): handle groups, delayed fields and aggregator v1. + } + + // Remove entries from previous write/delta set that were not overwritten. + for key in prev_modified_resource_keys { + abort_manager.invalidate_dependencies( + versioned_cache + .data() + .remove_v2::<_, false>(&key, idx_to_execute), + )?; + } + + last_input_output.record( + idx_to_execute, + read_set, + execution_result, + resource_write_set, + // TODO(BlockSTMv2): handle groups. 
+ vec![], + ); + + scheduler.finish_execution(abort_manager)?; + Ok(()) + } + + fn execute( + idx_to_execute: TxnIndex, + incarnation: Incarnation, + txn: &T, + last_input_output: &TxnLastInputOutput, + versioned_cache: &MVHashMap, + executor: &E, + base_view: &S, + global_module_cache: &GlobalModuleCache< + ModuleId, + CompiledModule, + Module, + AptosModuleExtension, + >, + runtime_environment: &RuntimeEnvironment, + parallel_state: ParallelState, + ) -> Result> { + let _timer = TASK_EXECUTE_SECONDS.start_timer(); + // VM execution. + let sync_view = LatestView::new( + base_view, + global_module_cache, + runtime_environment, + ViewState::Sync(parallel_state), + idx_to_execute, + ); + let execution_result = executor.execute_transaction(&sync_view, txn, idx_to_execute); + + let mut prev_modified_resource_keys = last_input_output + .modified_resource_keys(idx_to_execute) + .map_or_else(HashSet::new, |keys| keys.map(|(k, _)| k).collect()); + let mut prev_modified_group_keys: HashMap> = last_input_output + .modified_group_keys(idx_to_execute) + .into_iter() + .collect(); let mut prev_modified_delayed_fields = last_input_output .delayed_field_keys(idx_to_execute) .map_or_else(HashSet::new, |keys| keys.collect()); @@ -210,26 +317,13 @@ where }) .collect(); for (group_key, group_metadata_op, group_size, group_ops) in group_output.into_iter() { - let prev_tags = match prev_modified_keys.remove(&group_key) { - Some(KeyKind::Group(tags)) => tags, - Some(KeyKind::Resource) => { - return Err(code_invariant_error(format!( - "Group key {:?} recorded as a Resource KeyKind", - group_key - ))); - }, - Some(KeyKind::AggregatorV1) => { - return Err(code_invariant_error(format!( - "Group key {:?} recorded as an AggregatorV1 KeyKind", - group_key, - ))); - }, - None => { + let prev_tags = prev_modified_group_keys + .remove(&group_key) + .unwrap_or_else(|| { // Previously no write to the group at all. needs_suffix_validation = true; HashSet::new() - }, - }; + }); if versioned_cache.data().write_metadata( group_key.clone(), @@ -261,7 +355,7 @@ where .into_iter() .map(|(state_key, write_op)| (state_key, Arc::new(write_op), None)), ) { - if prev_modified_keys.remove(&k).is_none() { + if !prev_modified_resource_keys.remove(&k) { needs_suffix_validation = true; } versioned_cache @@ -271,7 +365,7 @@ where // Then, apply deltas. for (k, d) in output.aggregator_v1_delta_set().into_iter() { - if prev_modified_keys.remove(&k).is_none() { + if !prev_modified_resource_keys.remove(&k) { needs_suffix_validation = true; } versioned_cache.data().add_delta(k, idx_to_execute, d); @@ -326,32 +420,28 @@ where }; // Remove entries from previous write/delta set that were not overwritten. - for (k, kind) in prev_modified_keys { - use KeyKind::*; - match kind { - Resource | AggregatorV1 => versioned_cache.data().remove(&k, idx_to_execute), - Group(tags) => { - // A change in state observable during speculative execution - // (which includes group metadata and size) changes, suffix - // re-validation is needed. For resources where speculative - // execution waits on estimates, having a write that was there - // but not anymore does not qualify, as it can only cause - // additional waiting but not an incorrect speculation result. - // However, a group size or metadata might be read, and then - // speculative group update might be removed below. 
Without - // triggering suffix re-validation, a later transaction might - // end up with the incorrect read result (corresponding to the - // removed group information from an incorrect speculative state). - needs_suffix_validation = true; - - versioned_cache.data().remove(&k, idx_to_execute); - versioned_cache - .group_data() - .remove(&k, idx_to_execute, tags); - }, - }; + for k in prev_modified_resource_keys { + versioned_cache.data().remove(&k, idx_to_execute); + } + for (k, tags) in prev_modified_group_keys { + // A change in state observable during speculative execution + // (which includes group metadata and size) changes, suffix + // re-validation is needed. For resources where speculative + // execution waits on estimates, having a write that was there + // but not anymore does not qualify, as it can only cause + // additional waiting but not an incorrect speculation result. + // However, a group size or metadata might be read, and then + // speculative group update might be removed below. Without + // triggering suffix re-validation, a later transaction might + // end up with the incorrect read result (corresponding to the + // removed group information from an incorrect speculative state). + needs_suffix_validation = true; + + versioned_cache.data().remove(&k, idx_to_execute); + versioned_cache + .group_data() + .remove(&k, idx_to_execute, tags); } - for id in prev_modified_delayed_fields { versioned_cache.delayed_fields().remove(&id, idx_to_execute); } @@ -413,28 +503,18 @@ where clear_speculative_txn_logs(txn_idx as usize); // Not valid and successfully aborted, mark the latest write/delta sets as estimates. - if let Some(keys) = last_input_output.modified_keys(txn_idx, false) { - for (k, kind) in keys { - use KeyKind::*; - match kind { - Resource | AggregatorV1 => versioned_cache.data().mark_estimate(&k, txn_idx), - Group(tags) => { - // Validation for both group size and metadata is based on values. - // Execution may wait for estimates. - versioned_cache - .group_data() - .mark_estimate(&k, txn_idx, tags); - - // Group metadata lives in same versioned cache as data / resources. - // We are not marking metadata change as estimate, but after - // a transaction execution changes metadata, suffix validation - // is guaranteed to be triggered. Estimation affecting execution - // behavior is left to size, which uses a heuristic approach. - }, - }; + if let Some(keys) = last_input_output.modified_resource_keys(txn_idx) { + for (k, _) in keys { + versioned_cache.data().mark_estimate(&k, txn_idx); } } + // Group metadata lives in same versioned cache as data / resources. + // We are not marking metadata change as estimate, but after a transaction execution + // changes metadata, suffix validation is guaranteed to be triggered. Estimation affecting + // execution behavior is left to size, which uses a heuristic approach. 
+ last_input_output.mark_estimate_group_keys_and_tags(versioned_cache, txn_idx); + if let Some(keys) = last_input_output.delayed_field_keys(txn_idx) { for k in keys { versioned_cache.delayed_fields().mark_estimate(&k, txn_idx); @@ -474,6 +554,7 @@ where txn_idx: TxnIndex, versioned_cache: &MVHashMap, last_input_output: &TxnLastInputOutput, + is_appended_epilogue: bool, ) -> Result { let read_set = last_input_output .read_set(txn_idx) @@ -484,10 +565,11 @@ where if execution_still_valid { if let Some(delayed_field_ids) = last_input_output.delayed_field_keys(txn_idx) { - if let Err(e) = versioned_cache - .delayed_fields() - .try_commit(txn_idx, delayed_field_ids.collect()) - { + if let Err(e) = versioned_cache.delayed_fields().try_commit( + txn_idx, + delayed_field_ids.collect(), + is_appended_epilogue, + ) { match e { CommitError::ReExecutionNeeded(_) => { execution_still_valid = false; @@ -520,7 +602,7 @@ where scheduler: SchedulerWrapper, versioned_cache: &MVHashMap, last_input_output: &TxnLastInputOutput, - shared_commit_state: &ExplicitSyncWrapper>, + block_limit_processor: &ExplicitSyncWrapper>, base_view: &S, global_module_cache: &GlobalModuleCache< ModuleId, @@ -535,10 +617,15 @@ where block: &TP, num_workers: usize, ) -> Result<(), PanicOr> { - let block_limit_processor = &mut shared_commit_state.acquire(); + let block_limit_processor = &mut block_limit_processor.acquire(); let mut side_effect_at_commit = false; - if !Self::validate_and_commit_delayed_fields(txn_idx, versioned_cache, last_input_output)? { + if !Self::validate_and_commit_delayed_fields( + txn_idx, + versioned_cache, + last_input_output, + false, + )? { // Transaction needs to be re-executed, one final time. side_effect_at_commit = true; @@ -547,6 +634,7 @@ where scheduler, start_shared_counter, shared_counter, + incarnation + 1, ); Self::update_transaction_on_abort(txn_idx, last_input_output, versioned_cache); @@ -557,7 +645,7 @@ where let _needs_suffix_validation = Self::execute( txn_idx, incarnation + 1, - block, + block.get_txn(txn_idx), last_input_output, versioned_cache, executor, @@ -571,6 +659,7 @@ where txn_idx, versioned_cache, last_input_output, + false, ) .unwrap_or(false) { @@ -670,7 +759,7 @@ where fn publish_module_writes( txn_idx: TxnIndex, - module_write_set: BTreeMap>, + module_write_set: Vec>, global_module_cache: &GlobalModuleCache< ModuleId, CompiledModule, @@ -680,7 +769,7 @@ where versioned_cache: &MVHashMap, runtime_environment: &RuntimeEnvironment, ) -> Result<(), PanicError> { - for (_, write) in module_write_set { + for write in module_write_set { Self::add_module_write_to_module_cache( write, txn_idx, @@ -760,6 +849,7 @@ where AptosModuleExtension, >, runtime_environment: &RuntimeEnvironment, + total_txns_to_materialize: &AtomicU32, final_results: &ExplicitSyncWrapper>, ) -> Result<(), PanicError> { // Do a final validation for safety as a part of (parallel) post-processing. @@ -788,6 +878,9 @@ where scheduler, start_shared_counter, shared_counter, + 0, + // Incarnation does not matter here (no re-execution & interrupts) + // TODO(BlockSTMv2): we could still provide the latest incarnation. ); let latest_view = LatestView::new( base_view, @@ -824,7 +917,7 @@ where let resource_writes_to_materialize = resource_writes_to_materialize!( resource_write_set, last_input_output, - versioned_cache.data(), + last_input_output, txn_idx )?; let materialized_resource_write_set = @@ -864,9 +957,14 @@ where } let mut final_results = final_results.acquire(); + match last_input_output.take_output(txn_idx)? 
{ - ExecutionStatus::Success(t) | ExecutionStatus::SkipRest(t) => { + ExecutionStatus::Success(t) => { + final_results[txn_idx as usize] = t; + }, + ExecutionStatus::SkipRest(t) => { final_results[txn_idx as usize] = t; + total_txns_to_materialize.store(txn_idx + 1, Ordering::SeqCst); }, ExecutionStatus::Abort(_) => (), ExecutionStatus::SpeculativeExecutionAbortError(msg) @@ -877,10 +975,12 @@ where Ok(()) } + #[allow(clippy::too_many_arguments)] fn worker_loop( &self, environment: &AptosEnvironment, block: &TP, + transaction_slice_metadata: &TransactionSliceMetadata, last_input_output: &TxnLastInputOutput, versioned_cache: &MVHashMap, scheduler: &Scheduler, @@ -895,8 +995,11 @@ where skip_module_reads_validation: &AtomicBool, start_shared_counter: u32, shared_counter: &AtomicU32, - shared_commit_state: &ExplicitSyncWrapper>, + block_limit_processor: &ExplicitSyncWrapper>, final_results: &ExplicitSyncWrapper>, + block_epilogue_txn: &ExplicitSyncWrapper>, + num_txns_materialized: &AtomicU32, + total_txns_to_materialize: &AtomicU32, num_workers: usize, ) -> Result<(), PanicOr> { let num_txns = block.num_txns(); @@ -923,15 +1026,96 @@ where base_view, global_module_cache, runtime_environment, + total_txns_to_materialize, final_results, )?; + + if txn_idx == num_txns as u32 { + break; + } + + let num_txns_materialized = + num_txns_materialized.fetch_add(1, Ordering::SeqCst) + 1; + let total_txns_to_materialize = total_txns_to_materialize.load(Ordering::SeqCst); + + if num_txns_materialized < total_txns_to_materialize { + continue; + } else if num_txns_materialized != total_txns_to_materialize { + return Err(code_invariant_error( + format!("num_txns_materialized {num_txns_materialized} should never be larger than total_txns_to_materialize {total_txns_to_materialize}."), + )); + } + + let mut outputs = final_results.acquire(); + let has_reconfig = outputs + .iter() + .rposition(|t| !t.is_retry()) + .map_or(false, |idx| outputs[idx].has_new_epoch_event()); + + // We don't have a BlockEpilogue txn for epoch-ending blocks, due to several + // historical reasons. + if !has_reconfig { + // We only do this for blocks (i.e., when a block_id is returned). For other cases + // like state sync or replay, the BlockEpilogue txn should already be in the input + // and we don't need to add one here.
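The two atomics above implement a small handshake: every worker that finishes a post-commit materialization bumps a counter, and the one worker that observes the counter reach the target becomes responsible for appending the BlockEpilogue. A reduced sketch, with the two atomic names mirroring the diff and everything else illustrative:

use std::sync::atomic::{AtomicU32, Ordering};

// In the happy path, returns true for exactly one caller: the worker that
// materializes the last transaction. A SkipRest output shrinks the target by
// storing txn_idx + 1 into `total_txns_to_materialize` beforehand.
fn is_last_materializer(
    num_txns_materialized: &AtomicU32,
    total_txns_to_materialize: &AtomicU32,
) -> bool {
    // fetch_add returns the previous value, hence the + 1.
    let done = num_txns_materialized.fetch_add(1, Ordering::SeqCst) + 1;
    done == total_txns_to_materialize.load(Ordering::SeqCst)
}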
+ if let Some(block_id) = + transaction_slice_metadata.append_state_checkpoint_to_block() + { + let txn = self.gen_block_epilogue( + block_id, + block, + outputs.dereference(), + block_limit_processor.acquire().get_block_end_info(), + environment.features(), + ); + outputs.dereference_mut().push(E::Output::skip_output()); // placeholder + if Self::execute( + num_txns as u32, + 0, + &T::from_txn(txn.clone()), + last_input_output, + versioned_cache, + &executor, + base_view, + global_module_cache, + runtime_environment, + ParallelState::new( + versioned_cache, + scheduler_wrapper, + start_shared_counter, + shared_counter, + 0, + ), + ) != Ok(false) + { + return Err(code_invariant_error( + "BlockEpilogue txn should not fail or need validation.", + )); + } + + if Self::validate_and_commit_delayed_fields( + num_txns as u32, + versioned_cache, + last_input_output, + true, + ) != Ok(true) + { + return Err(code_invariant_error( + "BlockEpilogue txn should not need re-execution for delayed fields.", + )); + }; + *block_epilogue_txn.acquire().dereference_mut() = Some(txn); + + scheduler.add_to_commit_queue(num_txns as u32); + } + } } Ok(()) }; loop { if let SchedulerTask::ValidationTask(txn_idx, incarnation, _) = &scheduler_task { - if *incarnation as usize > num_workers.pow(2) + num_txns + 10 { + if *incarnation as usize > num_workers.pow(2) + num_txns + 30 { // Something is wrong if we observe high incarnations (e.g. a bug // might manifest as an execution-invalidation cycle). Break out // to fallback to sequential execution. @@ -961,7 +1145,7 @@ where scheduler_wrapper, versioned_cache, last_input_output, - shared_commit_state, + block_limit_processor, base_view, global_module_cache, runtime_environment, @@ -1004,7 +1188,7 @@ where let needs_suffix_validation = Self::execute( txn_idx, incarnation, - block, + block.get_txn(txn_idx), last_input_output, versioned_cache, &executor, @@ -1016,6 +1200,7 @@ where scheduler_wrapper, start_shared_counter, shared_counter, + incarnation, ), )?; scheduler.finish_execution(txn_idx, incarnation, needs_suffix_validation)? @@ -1042,6 +1227,234 @@ where } } + fn worker_loop_v2( + &self, + block: &TP, + environment: &AptosEnvironment, + // TODO: use worker id. + _worker_id: u32, + num_workers: u32, + shared_sync_params: &SharedSyncParams<'_, T, E, S>, + start_delayed_field_id_counter: u32, + ) -> Result<(), PanicOr> { + let num_txns = block.num_txns() as u32; + let executor = { + let _init_timer = VM_INIT_SECONDS.start_timer(); + E::init(&environment.clone(), shared_sync_params.base_view) + }; + + let _work_with_task_timer = WORK_WITH_TASK_SECONDS.start_timer(); + + // Shared environment used by each executor. + let runtime_environment = environment.runtime_environment(); + + let scheduler = shared_sync_params.scheduler; + let base_view = shared_sync_params.base_view; + let last_input_output = shared_sync_params.last_input_output; + let versioned_cache = shared_sync_params.versioned_cache; + let global_module_cache = shared_sync_params.global_module_cache; + let delayed_field_id_counter = shared_sync_params.delayed_field_id_counter; + let scheduler_wrapper = SchedulerWrapper::V2(scheduler); + + loop { + while scheduler.commit_hooks_try_lock() { + // Perform sequential commit hooks. + while let Some((txn_idx, incarnation)) = scheduler.start_commit()? 
{ + self.prepare_and_queue_commit_ready_txn( + txn_idx, + incarnation, + num_txns, + &self.config.onchain.block_gas_limit_type, + scheduler_wrapper, + versioned_cache, + last_input_output, + shared_sync_params.block_limit_processor, + base_view, + global_module_cache, + runtime_environment, + start_delayed_field_id_counter, + delayed_field_id_counter, + &executor, + block, + num_workers as usize, + )?; + } + + scheduler.commit_hooks_unlock(); + } + + // TODO(BlockSTMv2): pass worker_id to next_task. + match scheduler.next_task()? { + TaskKind::Execute(txn_idx, incarnation) => { + if incarnation > num_workers.pow(2) + num_txns + 30 { + // Something is wrong if we observe high incarnations (e.g. a bug + // might manifest as an execution-invalidation cycle). Break out + // to fall back to sequential execution. + error!("Observed incarnation {} of txn {txn_idx}", incarnation); + return Err(PanicOr::Or(ParallelBlockExecutionError::IncarnationTooHigh)); + } + + Self::execute_v2( + txn_idx, + incarnation, + block, + last_input_output, + versioned_cache, + &executor, + base_view, + shared_sync_params.global_module_cache, + runtime_environment, + ParallelState::new( + versioned_cache, + scheduler_wrapper, + start_delayed_field_id_counter, + delayed_field_id_counter, + incarnation, + ), + scheduler, + )?; + }, + TaskKind::PostCommitProcessing(txn_idx) => { + self.materialize_txn_commit( + txn_idx, + versioned_cache, + scheduler_wrapper, + start_delayed_field_id_counter, + delayed_field_id_counter, + last_input_output, + base_view, + shared_sync_params.global_module_cache, + runtime_environment, + // TODO(BlockSTMv2): fix with block epilogue support + &AtomicU32::new(0), + shared_sync_params.final_results, + )?; + }, + TaskKind::NextTask => { + // TODO: Anything intelligent to do here? + }, + TaskKind::Done => { + break; + }, + } + } + + Ok(()) + } + + #[allow(dead_code)] + pub(crate) fn execute_transactions_parallel_v2( + &self, + signature_verified_block: &TP, + base_view: &S, + module_cache_manager_guard: &mut AptosModuleCacheManagerGuard, + ) -> Result, ()> { + let _timer = PARALLEL_EXECUTION_SECONDS.start_timer(); + // BlockSTMv2 should have fewer restrictions on the number of workers, but we + // still sanity check that it is not instantiated with concurrency level 1 + // (since it makes sense to use sequential execution in this case).
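The sanity check below pairs with the worker-count clamp applied a few lines further down; the formula keeps at least two workers (concurrency level 1 should take the sequential path instead) and otherwise caps workers at half the number of transactions. A small illustration of the clamp:

// Mirrors concurrency_level.min(num_txns / 2).max(2) from the code below.
fn num_workers(concurrency_level: usize, num_txns: usize) -> u32 {
    concurrency_level.min(num_txns / 2).max(2) as u32
}

// num_workers(32, 10) == 5, num_workers(32, 3) == 2 (never below two),
// num_workers(2, 1000) == 2 (never above the configured level).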
+ assert!( + self.config.local.concurrency_level > 1, + "Must use sequential execution" + ); + + let num_txns = signature_verified_block.num_txns(); + if num_txns == 0 { + return Ok(BlockOutput::new(vec![], None)); + } + let num_workers = self.config.local.concurrency_level.min(num_txns / 2).max(2) as u32; + let final_results = ExplicitSyncWrapper::new(Vec::with_capacity(num_txns)); + { + final_results + .acquire() + .resize_with(num_txns, E::Output::skip_output); + } + let block_limit_processor = ExplicitSyncWrapper::new(BlockGasLimitProcessor::new( + base_view, + self.config.onchain.block_gas_limit_type.clone(), + self.config.onchain.block_gas_limit_override(), + num_txns, + )); + let num_txns = num_txns as u32; + + let start_delayed_field_id_counter = gen_id_start_value(false); + let delayed_field_id_counter = AtomicU32::new(start_delayed_field_id_counter); + + let shared_maybe_error = AtomicBool::new(false); + let last_input_output = TxnLastInputOutput::new(num_txns); + let mut versioned_cache = MVHashMap::new(); + let scheduler = SchedulerV2::new(num_txns, num_workers); + + let shared_sync_params: SharedSyncParams<'_, T, E, S> = SharedSyncParams { + base_view, + scheduler: &scheduler, + versioned_cache: &versioned_cache, + global_module_cache: module_cache_manager_guard.module_cache(), + last_input_output: &last_input_output, + delayed_field_id_counter: &delayed_field_id_counter, + block_limit_processor: &block_limit_processor, + final_results: &final_results, + }; + let worker_ids: Vec = (0..num_workers).collect(); + + let timer = RAYON_EXECUTION_SECONDS.start_timer(); + self.executor_thread_pool.scope(|s| { + for worker_id in &worker_ids { + s.spawn(|_| { + if let Err(err) = self.worker_loop_v2( + signature_verified_block, + module_cache_manager_guard.environment(), + *worker_id, + num_workers, + &shared_sync_params, + start_delayed_field_id_counter, + ) { + // If there are multiple errors, they all get logged: FatalVMError is + // logged at construction, below we log CodeInvariantErrors. + if let PanicOr::CodeInvariantError(err_msg) = err { + alert!( + "[BlockSTMv2] worker loop: CodeInvariantError({:?})", + err_msg + ); + } + shared_maybe_error.store(true, Ordering::SeqCst); + + // Make sure to halt the scheduler if it hasn't already been halted. + scheduler.halt(); + } + }); + } + }); + drop(timer); + + if !shared_maybe_error.load(Ordering::SeqCst) + && !scheduler.post_commit_processing_queue_is_empty() + { + // No error is recorded, parallel execution workers are done, but there is still + // a post commit processing task remaining. Commit tasks must be drained before workers + // exit, hence we log an error and fallback to sequential execution. + alert!("[BlockSTMv2] error: commit tasks not drained after parallel execution"); + + shared_maybe_error.store(true, Ordering::Relaxed); + } + + counters::update_state_counters(versioned_cache.stats(), true); + module_cache_manager_guard + .module_cache_mut() + .insert_verified(versioned_cache.take_modules_iter()) + .map_err(|err| { + alert!("[BlockSTM] Encountered panic error: {:?}", err); + })?; + + // Explicit async drops. + DEFAULT_DROPPER.schedule_drop((last_input_output, scheduler, versioned_cache)); + + // TODO(BlockSTMv2): handle block epilogue txn and add block_end_info. 
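Error handling in the scope above follows a flag-and-halt pattern: a failing worker records the failure in a shared flag and halts the scheduler so the remaining workers exit promptly, and the caller then maps the flag to Err(()), which triggers the sequential fallback. A minimal sketch, with assumed, illustrative signatures:

use std::sync::atomic::{AtomicBool, Ordering};

fn run_worker<E: std::fmt::Debug>(
    work: impl FnOnce() -> Result<(), E>,
    shared_maybe_error: &AtomicBool,
    halt_scheduler: impl FnOnce(),
) {
    if let Err(err) = work() {
        eprintln!("worker loop error: {err:?}");
        shared_maybe_error.store(true, Ordering::SeqCst);
        // Halting must be idempotent: several workers may fail concurrently.
        halt_scheduler();
    }
}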
+ (!shared_maybe_error.load(Ordering::SeqCst)) + .then(|| BlockOutput::new(final_results.into_inner(), None)) + .ok_or(()) + } + pub(crate) fn execute_transactions_parallel( &self, signature_verified_block: &TP, @@ -1070,15 +1483,15 @@ where let num_workers = self.config.local.concurrency_level.min(num_txns / 2).max(2); - let shared_commit_state = ExplicitSyncWrapper::new(BlockGasLimitProcessor::new( + let block_limit_processor = ExplicitSyncWrapper::new(BlockGasLimitProcessor::new( base_view, self.config.onchain.block_gas_limit_type.clone(), self.config.onchain.block_gas_limit_override(), - num_txns, + num_txns + 1, )); let shared_maybe_error = AtomicBool::new(false); - let final_results = ExplicitSyncWrapper::new(Vec::with_capacity(num_txns)); + let final_results = ExplicitSyncWrapper::new(Vec::with_capacity(num_txns + 1)); { final_results @@ -1086,11 +1499,16 @@ where .resize_with(num_txns, E::Output::skip_output); } + let block_epilogue_txn = ExplicitSyncWrapper::new(None); + let num_txns = num_txns as u32; let skip_module_reads_validation = AtomicBool::new(true); - let last_input_output = TxnLastInputOutput::new(num_txns); + // +1 for potential BlockEpilogue txn. + let last_input_output = TxnLastInputOutput::new(num_txns + 1); let scheduler = Scheduler::new(num_txns); + let num_txns_materialized = AtomicU32::new(0); + let total_txns_to_materialize = AtomicU32::new(num_txns); let timer = RAYON_EXECUTION_SECONDS.start_timer(); self.executor_thread_pool.scope(|s| { @@ -1099,6 +1517,7 @@ where if let Err(err) = self.worker_loop( module_cache_manager_guard.environment(), signature_verified_block, + transaction_slice_metadata, &last_input_output, &versioned_cache, &scheduler, @@ -1107,8 +1526,11 @@ where &skip_module_reads_validation, start_shared_counter, &shared_counter, - &shared_commit_state, + &block_limit_processor, &final_results, + &block_epilogue_txn, + &num_txns_materialized, + &total_txns_to_materialize, num_workers, ) { // If there are multiple errors, they all get logged: @@ -1144,35 +1566,97 @@ where alert!("[BlockSTM] Encountered panic error: {:?}", err); })?; + if shared_maybe_error.load(Ordering::SeqCst) { + return Err(()); + } + // Explicit async drops. DEFAULT_DROPPER.schedule_drop((last_input_output, scheduler, versioned_cache)); - let block_end_info = shared_commit_state.into_inner().get_block_end_info(); - let mut block_epilogue_txn = None; - - let outputs = final_results.into_inner(); - - let has_reconfig = outputs - .iter() - .rposition(|t| !t.is_retry()) - .map_or(false, |idx| outputs[idx].has_new_epoch_event()); - if !has_reconfig { - if let Some(block_id) = transaction_slice_metadata.append_state_checkpoint_to_block() { - block_epilogue_txn = Some(Self::gen_block_epilogue(block_id, block_end_info)); - // TODO(grao): Call VM for block_epilogue_txn. - } - } - - (!shared_maybe_error.load(Ordering::SeqCst)) - .then(|| BlockOutput::new(outputs, block_epilogue_txn)) - .ok_or(()) + Ok(BlockOutput::new( + final_results.into_inner(), + block_epilogue_txn.into_inner(), + )) } fn gen_block_epilogue( + &self, block_id: HashValue, + signature_verified_block: &TP, + outputs: &[E::Output], block_end_info: TBlockEndInfoExt, + features: &Features, ) -> Transaction { - Transaction::block_epilogue(block_id, block_end_info.to_persistent()) + // TODO(grao): Remove this check once AIP-88 is fully enabled. 
+ if !self + .config + .onchain + .block_gas_limit_type + .add_block_limit_outcome_onchain() + { + return Transaction::StateCheckpoint(block_id); + } + if !features.is_calculate_transaction_fee_for_distribution_enabled() { + return Transaction::block_epilogue_v0(block_id, block_end_info.to_persistent()); + } + + let mut amount = BTreeMap::new(); + + // TODO(HotState): there are three possible paths where the block epilogue + // output is passed to the DB: + // 1. a block from consensus is executed: the VM outputs the block end info + // and the block epilogue transaction and output are generated here. + // 2. a chunk is re-executed: the VM will see the block epilogue transaction and + // should output the transaction output by looking at the block end info + // embedded in the epilogue transaction (and maybe the state view). + // 3. a chunk is replayed by transaction output: we get the transaction output + // directly. + assert!( + outputs.len() == signature_verified_block.num_txns(), + "Output must have same size as input." + ); + + for (i, output) in outputs.iter().enumerate() { + // TODO(grao): Also include other transactions that are "Keep" if we are confident + // that we successfully charge the gas amount as it appears in the FeeStatement + // in every corner case. + if !output.is_success() { + continue; + } + let txn = signature_verified_block.get_txn(i as TxnIndex); + if let Some(user_txn) = txn.try_as_signed_user_txn() { + let auxiliary_info = signature_verified_block.get_auxiliary_info(i as TxnIndex); + if let Some(ephemeral_info) = auxiliary_info.ephemeral_info() { + let gas_price = user_txn.gas_unit_price(); + let proposer_index = ephemeral_info.proposer_index; + let fee_statement = output.fee_statement(); + let total_gas_unit = fee_statement.gas_used(); + // The total gas unit here includes the storage fee (deposit), which is not + // available for distribution. Only the execution gas and IO gas are available + // to distribute. Note that we deliberately do NOT use the execution gas and IO + // gas values from the fee statement, because they might be rounded up during the + // calculation and their sum could be larger than the actual value we + // burn. Instead we use the total amount (which is the total we've burnt) + // minus the storage deposit (rounded up), to avoid over-distribution. + // We burn a fixed amount per gas unit. + let gas_price_to_burn = self.config.onchain.gas_price_to_burn(); + if gas_price > gas_price_to_burn { + let gas_unit_available_to_distribute = total_gas_unit + .saturating_sub(fee_statement.storage_fee_used().div_ceil(gas_price)); + if gas_unit_available_to_distribute > 0 { + let fee_to_distribute = + gas_unit_available_to_distribute * (gas_price - gas_price_to_burn); + *amount.entry(proposer_index).or_insert(0) += fee_to_distribute; + } + } + } + } + } + Transaction::block_epilogue_v1( + block_id, + block_end_info.to_persistent(), + FeeDistribution::new(amount), + ) } /// Converts module write into cached module representation, and adds it to the module cache.
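A worked example of the distribution rule implemented in gen_block_epilogue above (the helper below is illustrative, not part of the diff): only gas units net of the storage deposit are distributable, and only the portion of the unit price above gas_price_to_burn is paid to the proposer.

fn distributable_fee(
    gas_price: u64,         // user_txn.gas_unit_price()
    gas_price_to_burn: u64, // config.onchain.gas_price_to_burn()
    total_gas_used: u64,    // fee_statement.gas_used(), in gas units
    storage_fee_used: u64,  // fee_statement.storage_fee_used(), in octas
) -> u64 {
    if gas_price <= gas_price_to_burn {
        return 0;
    }
    // The storage deposit is rounded up to whole gas units to avoid over-distribution.
    let units = total_gas_used.saturating_sub(storage_fee_used.div_ceil(gas_price));
    units * (gas_price - gas_price_to_burn)
}

// E.g. gas_price = 150, gas_price_to_burn = 100, total_gas_used = 500 units,
// storage_fee_used = 30_000 octas: 30_000.div_ceil(150) = 200 units cover the
// deposit, leaving 300 units, so 300 * (150 - 100) = 15_000 octas go to the proposer.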
@@ -1210,7 +1694,6 @@ where })?; let extension = Arc::new(AptosModuleExtension::new(state_value)); - global_module_cache.mark_overridden(&id); per_block_module_cache .insert_deserialized_module(id.clone(), compiled_module, extension, Some(txn_idx)) .map_err(|err| { @@ -1223,6 +1706,7 @@ where ); PanicError::CodeInvariantError(msg) })?; + global_module_cache.mark_overridden(&id); Ok(()) } @@ -1254,7 +1738,7 @@ where unsync_map.write(key, Arc::new(write_op), None); } - for (_, write) in output.module_write_set().into_iter() { + for write in output.module_write_set().into_iter() { Self::add_module_write_to_module_cache( write, txn_idx, @@ -1346,8 +1830,17 @@ where ); let mut block_epilogue_txn = None; - for idx in 0..num_txns { - let txn = signature_verified_block.get_txn(idx as TxnIndex); + let mut block_epilogue_txn_to_execute; + let mut idx = 0; + while idx <= num_txns { + let txn = if idx != num_txns { + signature_verified_block.get_txn(idx as TxnIndex) + } else if block_epilogue_txn.is_some() { + block_epilogue_txn_to_execute = T::from_txn(block_epilogue_txn.clone().unwrap()); + &block_epilogue_txn_to_execute + } else { + break; + }; let latest_view = LatestView::::new( base_view, module_cache_manager_guard.module_cache(), @@ -1522,6 +2015,7 @@ where ret.push(E::Output::discard_output( StatusCode::DELAYED_FIELD_OR_BLOCKSTM_CODE_INVARIANT_ERROR, )); + idx += 1; continue; } }; @@ -1604,32 +2098,39 @@ where }, }; - if must_skip - || block_limit_processor.should_end_block_sequential() - || idx + 1 == num_txns - { + if idx == num_txns { + break; + } + + idx += 1; + + if must_skip || block_limit_processor.should_end_block_sequential() || idx == num_txns { + let mut has_reconfig = false; + if let Some(last_output) = ret.last() { + if last_output.has_new_epoch_event() { + has_reconfig = true; + } + } + ret.resize_with(num_txns, E::Output::skip_output); if let Some(block_id) = transaction_slice_metadata.append_state_checkpoint_to_block() { - let mut has_reconfig = false; - if let Some(last_output) = ret.last() { - if last_output.has_new_epoch_event() { - has_reconfig = true; - } - } if !has_reconfig { - block_epilogue_txn = Some(Self::gen_block_epilogue( + block_epilogue_txn = Some(self.gen_block_epilogue( block_id, + signature_verified_block, + &ret, block_limit_processor.get_block_end_info(), + module_cache_manager_guard.environment().features(), )); + } else { + info!("Reach epoch ending, do not append BlockEpilogue txn, block_id: {block_id:?}."); } } - break; + idx = num_txns; } } - ret.resize_with(num_txns, E::Output::skip_output); - block_limit_processor .finish_sequential_update_counters_and_log_info(ret.len() as u32, num_txns as u32); @@ -1669,7 +2170,7 @@ where // All logs from the parallel execution should be cleared and not reported. // Clear by re-initializing the speculative logs. - init_speculative_logs(signature_verified_block.num_txns()); + init_speculative_logs(signature_verified_block.num_txns() + 1); // Flush all caches to re-run from the "clean" state. module_cache_manager_guard diff --git a/aptos-move/block-executor/src/executor_utilities.rs b/aptos-move/block-executor/src/executor_utilities.rs index ead2b338bc89d..6f34b20d91334 100644 --- a/aptos-move/block-executor/src/executor_utilities.rs +++ b/aptos-move/block-executor/src/executor_utilities.rs @@ -49,37 +49,27 @@ macro_rules! groups_to_finalize { macro_rules! 
resource_writes_to_materialize { ($writes:expr, $outputs:expr, $data_source:expr, $($txn_idx:expr),*) => {{ $outputs - .reads_needing_delayed_field_exchange($($txn_idx),*) - .into_iter() - .map(|(key, metadata, layout)| { - match $data_source.fetch_exchanged_data(&key, $($txn_idx),*) { - Some((value, existing_layout)) => { - randomly_check_layout_matches( - Some(&existing_layout), - Some(layout.as_ref()), - )?; - let new_value = Arc::new(TransactionWrite::from_state_value(Some( - StateValue::new_with_metadata( - value.bytes().cloned().unwrap_or_else(Bytes::new), metadata) - ))); - Ok((key, new_value, layout)) - }, - None => { - Err(code_invariant_error( - "Read value needing exchange not in Exchanged format".to_string() - )) - } - }}).chain( - $writes.into_iter().filter_map(|(key, value, maybe_layout)| { - // layout is Some(_) if it contains a delayed field - if let Some(layout) = maybe_layout { - // No need to exchange anything if a resource with delayed field is deleted. - if !value.is_deletion() { - return Some(Ok((key, value, layout))) - } - } - None - })).collect::, _>>() + .reads_needing_delayed_field_exchange($($txn_idx),*) + .into_iter() + .map(|(key, metadata, layout)| -> Result<_, PanicError> { + let (value, existing_layout) = $data_source.fetch_exchanged_data(&key, $($txn_idx),*)?; + randomly_check_layout_matches(Some(&existing_layout), Some(layout.as_ref()))?; + let new_value = Arc::new(TransactionWrite::from_state_value(Some( + StateValue::new_with_metadata( + value.bytes().cloned().unwrap_or_else(Bytes::new), + metadata, + )) + )); + Ok((key, new_value, layout)) + }) + .chain( + $writes.into_iter().filter_map(|(key, value, maybe_layout)| { + maybe_layout.map(|layout| { + (!value.is_deletion()).then_some(Ok((key, value, layout))) + }).flatten() + }) + ) + .collect::, _>>() }}; } diff --git a/aptos-move/block-executor/src/limit_processor.rs b/aptos-move/block-executor/src/limit_processor.rs index b65d474d096cf..9ff837ed48dde 100644 --- a/aptos-move/block-executor/src/limit_processor.rs +++ b/aptos-move/block-executor/src/limit_processor.rs @@ -306,7 +306,10 @@ impl<'s, T: Transaction, S: TStateView> BlockGasLimitProcessor<'s, mod test { use super::*; use crate::{ - proptest_types::types::{KeyType, MockEvent, MockTransaction}, + proptest_types::{ + mock_executor::MockEvent, + types::{KeyType, MockTransaction}, + }, types::InputOutputKey, }; use aptos_types::state_store::{ @@ -413,8 +416,8 @@ mod test { reads .iter() .map(|key| match key { - InputOutputKey::Resource(k) => InputOutputKey::Resource(KeyType(*k, false)), - InputOutputKey::Group(k, t) => InputOutputKey::Group(KeyType(*k, false), *t), + InputOutputKey::Resource(k) => InputOutputKey::Resource(KeyType(*k)), + InputOutputKey::Group(k, t) => InputOutputKey::Group(KeyType(*k), *t), InputOutputKey::DelayedField(i) => InputOutputKey::DelayedField(*i), }) .collect() diff --git a/aptos-move/block-executor/src/proptest_types/baseline.rs b/aptos-move/block-executor/src/proptest_types/baseline.rs index 890f8b9ad0a8e..575f1f63cfbf5 100644 --- a/aptos-move/block-executor/src/proptest_types/baseline.rs +++ b/aptos-move/block-executor/src/proptest_types/baseline.rs @@ -2,70 +2,75 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -/// This file implements the baseline evaluation, performed sequentially, the output -/// of which is used to test the results of the block executor. 
The baseline must be -/// evaluated after the block executor has completed, as the transaction type used -/// for testing tracks the incarnation number, which is used to emulate dynamic behavior. -/// Dynamic behavior means that when a transaction is re-executed, it might read -/// different values and end up with a completely different behavior (be it read set, -/// write set, or executed code). In the tests, behavior changes based on the incarnation -/// number, and hence it is crucial for the baseline to know the final incarnation number -/// of each transaction of the tested block executor execution. use crate::{ errors::{BlockExecutionError, BlockExecutionResult}, - proptest_types::types::{ - raw_metadata, GroupSizeOrMetadata, MockOutput, MockTransaction, ValueType, RESERVED_TAG, - STORAGE_AGGREGATOR_VALUE, + proptest_types::{ + mock_executor::MockOutput, + types::{ + default_group_map, deserialize_to_delayed_field_id, deserialize_to_delayed_field_u128, + raw_metadata, serialize_from_delayed_field_u128, DeltaTestKind, GroupSizeOrMetadata, + MockIncarnation, MockTransaction, ValueType, RESERVED_TAG, STORAGE_AGGREGATOR_VALUE, + }, }, }; -use aptos_aggregator::delta_change_set::serialize; +use aptos_aggregator::delta_change_set::{serialize, DeltaOp}; +use aptos_mvhashmap::types::TxnIndex; use aptos_types::{ contract_event::TransactionEvent, state_store::state_value::StateValueMetadata, transaction::BlockOutput, write_set::TransactionWrite, }; -use aptos_vm_types::resource_group_adapter::group_size_as_sum; +use aptos_vm_types::{ + module_write_set::ModuleWrite, resolver::ResourceGroupSize, + resource_group_adapter::group_size_as_sum, +}; use bytes::Bytes; -use claims::{assert_matches, assert_none, assert_some, assert_some_eq}; -use itertools::izip; -use std::{collections::HashMap, fmt::Debug, hash::Hash, result::Result, sync::atomic::Ordering}; +use claims::{assert_gt, assert_matches, assert_none, assert_some, assert_some_eq}; +use move_core_types::language_storage::ModuleId; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; +use std::{ + cell::RefCell, + collections::{BTreeMap, HashMap}, + fmt::Debug, + hash::Hash, + result::Result, + sync::atomic::Ordering, +}; + +/// This file implements the baseline evaluation, performed sequentially, the output +/// of which is used to test the results of the block executor. The baseline must be +/// evaluated after the block executor has completed, as the transaction type used +/// for testing tracks the incarnation number, which is used to emulate dynamic behavior. +/// Dynamic behavior means that when a transaction is re-executed, it might read +/// different values and end up with a completely different behavior (be it read set, +/// write set, or executed code). In the tests, behavior changes based on the incarnation +/// number, and hence it is crucial for the baseline to know the final incarnation number +/// of each transaction of the tested block executor execution. +/// +/// The BaselineOutputBuilder is used to build the BaselineOutput. One difference between +/// resources and groups is that the resources are processed while building BaselineOutput, +/// while groups are processed while asserting the output. We may want to reconsider this +/// in the future, but for now it provides two different ways of testing similar invariants, +/// such as the handling of delayed fields and their IDs / values. +/// +/// TODO: Not yet tested or supported cases in the testing framework: +/// - Delayed field deletions. 
+/// - Writes & delta for the same resource. +/// - Multiple delta applications, including failures. +/// - Empty groups and group deletions. +/// - Gas limit with sequential execution. -// TODO: extend to derived values, and code. #[derive(Clone)] enum BaselineValue { GenericWrite(ValueType), Aggregator(u128), - Empty, -} - -// TODO: instead of the same Error on aggregator overflow / underflow, add a way to tell -// from the error and test it. -// enum AggregatorError { -// Overflow, -// Underflow, -// } - -impl BaselineValue { - // Compare to the read results during block execution. - pub(crate) fn assert_read_result(&self, bytes_read: &Option>) { - match (self, bytes_read) { - (BaselineValue::GenericWrite(v), Some(bytes)) => { - assert_some_eq!(v.extract_raw_bytes(), *bytes); - }, - (BaselineValue::GenericWrite(v), None) => { - assert_none!(v.extract_raw_bytes()); - }, - (BaselineValue::Aggregator(aggr_value), Some(bytes)) => { - assert_eq!(serialize(aggr_value), *bytes); - }, - (BaselineValue::Aggregator(_), None) => unreachable!( - "Deleted or non-existent value from storage can't match aggregator value" - ), - (BaselineValue::Empty, Some(bytes)) => { - assert_eq!(serialize(&STORAGE_AGGREGATOR_VALUE), *bytes); - }, - (BaselineValue::Empty, None) => (), - } - } + // Expected value and expected version of the delayed field. + DelayedField(u128, u32), + // If true, then baseline value (when non-empty), includes txn_idx + // serialized after STORAGE_AGGREGATOR_VALUE. This is used to test + // the delayed fields, as unlike AggregatorV1, the delayed fields + // exist within a larger resource, and the writer's index (for storage + // version max u32) is stored for testing in the same mock resource. + Empty(DeltaTestKind), } // The status of the baseline execution. @@ -77,6 +82,50 @@ enum BaselineStatus { GasLimitExceeded, } +// TODO: Update the GroupReadInfo struct to always set baseline value +// and simplify the comparison logic. +#[derive(Debug)] +struct GroupReadInfo { + group_key: K, + baseline_bytes: Option, + // Set when delayed field is contained. + maybe_delayed_field: Option<(u128, u32)>, +} + +impl GroupReadInfo { + // Compute group read results from group_reads and group_world + fn compute_from_group_reads( + group_reads: &Result, ()>, + group_world: &mut HashMap>, + ) -> Vec> { + group_reads + .as_ref() + .unwrap() + .iter() + .map(|(group_key, resource_tag, has_delayed_field)| { + if *has_delayed_field { + // Currently delayed fields are tested only with RESERVED_TAG. + assert_eq!(*resource_tag, RESERVED_TAG); + } + + let group = group_world + .entry(group_key.clone()) + .or_insert_with(default_group_map); + let baseline_bytes = group.get(resource_tag).cloned(); + let maybe_delayed_field = has_delayed_field.then(|| { + deserialize_to_delayed_field_u128(baseline_bytes.as_ref().unwrap()).unwrap() + }); + + GroupReadInfo { + group_key: group_key.clone(), + baseline_bytes, + maybe_delayed_field, + } + }) + .collect() + } +} + /// Sequential baseline of execution result for dummy transaction, containing a vector /// of BaselineValues for the reads of the (latest incarnation of the dummy) transaction. /// The size of the vector should be equal to the size of the block if the block execution @@ -86,11 +135,392 @@ enum BaselineStatus { /// /// For both read_values and resolved_deltas the keys are not included because they are /// in the same order as the reads and deltas in the Transaction::Write. 
-pub(crate) struct BaselineOutput { +pub(crate) struct BaselineOutput { + status: BaselineStatus, + read_values: Vec, ()>>, + resolved_deltas: Vec, ()>>, + group_reads: Vec, ()>>, + group_deltas: Vec, ()>>, + module_reads: Vec>, ()>>, + delayed_field_key_to_id_map: RefCell>, +} + +/// Builder for BaselineOutput to simplify construction +pub(crate) struct BaselineOutputBuilder { status: BaselineStatus, - read_values: Vec, ()>>, + read_values: Vec, ()>>, resolved_deltas: Vec, ()>>, - group_reads: Vec, ()>>, + group_reads: Vec, ()>>, + group_deltas: Vec, ()>>, + module_reads: Vec>, ()>>, + current_world: HashMap, + module_world: HashMap, + txn_read_write_resolved_deltas: HashMap, +} + +impl BaselineOutputBuilder { + /// Create a new builder + pub(crate) fn new() -> Self { + Self { + status: BaselineStatus::Success, + read_values: vec![], + resolved_deltas: vec![], + group_reads: vec![], + group_deltas: vec![], + module_reads: vec![], + current_world: HashMap::new(), + module_world: HashMap::new(), + txn_read_write_resolved_deltas: HashMap::new(), + } + } + + /// Build the final BaselineOutput + pub(crate) fn build(self) -> BaselineOutput { + BaselineOutput { + status: self.status, + read_values: self.read_values, + resolved_deltas: self.resolved_deltas, + group_reads: self.group_reads, + group_deltas: self.group_deltas, + module_reads: self.module_reads, + delayed_field_key_to_id_map: RefCell::new(HashMap::new()), + } + } + + /// Set the execution status + fn with_status(&mut self, status: BaselineStatus) -> &mut Self { + self.status = status; + self + } + + /// Add an empty successful transaction (for SkipRest) + fn with_empty_successful_transaction(&mut self) -> &mut Self { + self.read_values.push(Ok(vec![])); + self.resolved_deltas.push(Ok(HashMap::new())); + self + } + + /// Mark the transaction as failed by pushing errors to all result vectors + fn with_transaction_failed(&mut self) -> &mut Self { + self.read_values.push(Err(())); + self.resolved_deltas.push(Err(())); + self.group_reads.push(Err(())); + self.group_deltas.push(Err(())); + self.module_reads.push(Err(())); + self + } + + fn with_group_deltas(&mut self, deltas: Vec<(K, DeltaOp)>) -> &mut Self { + self.group_deltas.push(Ok(deltas)); + self + } + + fn with_module_reads(&mut self, module_ids: &[ModuleId]) -> &mut Self { + let result = Ok(module_ids + .iter() + .map(|module_id| self.module_world.get(module_id).cloned()) + .collect()); + self.module_reads.push(result); + self + } + + fn with_resource_reads( + &mut self, + reads: &[(K, bool)], + delta_test_kind: DeltaTestKind, + ) -> &mut Self { + let base_value = BaselineValue::Empty(delta_test_kind); + + let result = Ok(reads + .iter() + .map(|(k, has_deltas)| { + let baseline_value = self + .current_world + .entry(k.clone()) + .or_insert(base_value.clone()); + + let value = if delta_test_kind == DeltaTestKind::DelayedFields && *has_deltas { + match baseline_value { + BaselineValue::DelayedField(v, _) => { + self.txn_read_write_resolved_deltas.insert(k.clone(), *v); + baseline_value.clone() + }, + BaselineValue::Empty(delta_test_kind) => { + assert_eq!(*delta_test_kind, DeltaTestKind::DelayedFields); + self.txn_read_write_resolved_deltas + .insert(k.clone(), STORAGE_AGGREGATOR_VALUE); + BaselineValue::DelayedField(STORAGE_AGGREGATOR_VALUE, u32::MAX) + }, + BaselineValue::GenericWrite(_) => { + unreachable!("Delayed field testing should not have generic writes") + }, + BaselineValue::Aggregator(_) => { + unreachable!("Delayed field testing should not have aggregators") + }, + } + 
} else { + baseline_value.clone() + }; + (k.clone(), value) + }) + .collect()); + + self.read_values.push(result); + self + } + + fn with_resource_deltas( + &mut self, + resolved_deltas: Vec<(K, u128, Option)>, + delta_test_kind: DeltaTestKind, + ) -> &mut Self { + let mut result: HashMap = resolved_deltas + .into_iter() + .map(|(k, v, delayed_field_last_write_version)| { + match delta_test_kind { + DeltaTestKind::DelayedFields => { + self.current_world.insert( + k.clone(), + BaselineValue::DelayedField( + v, + delayed_field_last_write_version + .expect("Must be set by delta pre-processing"), + ), + ); + }, + DeltaTestKind::AggregatorV1 => { + // In this case transaction did not fail due to delta application + // errors, and thus we should update written_ and resolved_ worlds. + self.current_world + .insert(k.clone(), BaselineValue::Aggregator(v)); + }, + DeltaTestKind::None => { + unreachable!("None delta test kind should not be used for resource deltas"); + }, + } + (k, v) + }) + .collect(); + + for (k, v) in std::mem::take(&mut self.txn_read_write_resolved_deltas) { + result.entry(k).or_insert(v); + } + + self.resolved_deltas.push(Ok(result)); + self + } + + fn with_group_reads( + &mut self, + group_reads: &[(K, u32, bool)], + delta_test_kind: DeltaTestKind, + ) -> &mut Self { + let result = Ok(group_reads + .iter() + .map(|(k, tag, has_delayed_field)| { + if *has_delayed_field { + assert_eq!(*tag, RESERVED_TAG); + assert_eq!(delta_test_kind, DeltaTestKind::DelayedFields); + } + + (k.clone(), *tag, *has_delayed_field) + }) + .collect()); + self.group_reads.push(result); + self + } + + fn with_module_writes( + &mut self, + module_writes: &[ModuleWrite], + txn_idx: TxnIndex, + ) -> &mut Self { + for module_write in module_writes { + self.module_world + .insert(module_write.module_id().clone(), txn_idx); + } + self + } + + fn with_resource_writes( + &mut self, + writes: &[(K, ValueType, bool)], + delta_test_kind: DeltaTestKind, + txn_idx: usize, + ) -> &mut Self { + for (k, v, has_delta) in writes { + // Here we don't know IDs but we know values, so use the GenericWrite to store the + // expected value, and compare that against the actual read on delayed field that was + // performed during committed execution. + self.current_world.insert( + k.clone(), + if delta_test_kind == DeltaTestKind::DelayedFields && *has_delta { + BaselineValue::DelayedField( + match self.current_world.get(k) { + Some(BaselineValue::DelayedField(value, _)) => { + self.txn_read_write_resolved_deltas + .insert(k.clone(), *value); + *value + }, + Some(BaselineValue::GenericWrite(_)) => { + unreachable!("Delayed field testing should not have generic writes") + }, + Some(BaselineValue::Aggregator(_)) => { + unreachable!("Delayed field testing should not have aggregators") + }, + None | Some(BaselineValue::Empty(_)) => { + self.txn_read_write_resolved_deltas + .insert(k.clone(), STORAGE_AGGREGATOR_VALUE); + STORAGE_AGGREGATOR_VALUE + }, + }, + txn_idx as u32, + ) + } else { + BaselineValue::GenericWrite(v.clone()) + }, + ); + } + self + } + + /// Process a single delta and return the appropriate result. + /// + /// Returns an optional resource delta, if None, the caller should + /// mark the transaction as failed. 
+ fn process_delta( + &mut self, + key: &K, + delta: &DeltaOp, + delta_test_kind: DeltaTestKind, + ) -> Option<(K, u128, Option)> { + let base_value = BaselineValue::Empty(delta_test_kind); + + // Delayed field last write version is used for delayed field testing only, making + // sure the writer index in the read results are compared against the correct write. + let (base, delayed_field_last_write_version) = + match self.current_world.entry(key.clone()).or_insert(base_value) { + BaselineValue::DelayedField(value, last_write_version) => { + (*value, Some(*last_write_version)) + }, + // Get base value from the latest write. + BaselineValue::GenericWrite(w_value) => { + if delta_test_kind == DeltaTestKind::DelayedFields { + let (value, last_write_version) = deserialize_to_delayed_field_u128( + &w_value + .extract_raw_bytes() + .expect("Deleted delayed fields not supported"), + ) + .expect("Must deserialize the delayed field base value"); + (value, Some(last_write_version)) + } else { + ( + w_value + .as_u128() + .expect("Delta to a non-existent aggregator") + .expect("Must deserialize the aggregator base value"), + None, + ) + } + }, + // Get base value from latest resolved aggregator value. + BaselineValue::Aggregator(value) => (*value, None), + // Storage always gets resolved to a default constant. + BaselineValue::Empty(delta_test_kind) => ( + STORAGE_AGGREGATOR_VALUE, + (*delta_test_kind == DeltaTestKind::DelayedFields).then_some(u32::MAX), + ), + }; + + match delta.apply_to(base) { + Err(_) => { + // Transaction does not take effect and we record delta application failure. + None + }, + Ok(resolved_value) => { + // Transaction succeeded, return the resolved delta + Some(( + key.clone(), + resolved_value, + delayed_field_last_write_version, + )) + }, + } + } + + /// Process all deltas for a transaction and handle failures internally + /// + /// Returns (success, group_deltas, resource_deltas) + /// If success is false, the transaction failed and the deltas should not be used + fn process_transaction_deltas( + &mut self, + deltas: &[(K, DeltaOp, Option)], + delta_test_kind: DeltaTestKind, + ) -> (bool, Vec<(K, DeltaOp)>, Vec<(K, u128, Option)>) { + let mut group_deltas = Vec::new(); + let mut resource_deltas = Vec::new(); + + for (k, delta, maybe_tag) in deltas { + if let Some(tag) = maybe_tag { + assert_eq!(*tag, RESERVED_TAG); + // This is a group delta + group_deltas.push((k.clone(), *delta)); + } else { + match self.process_delta(k, delta, delta_test_kind) { + Some(rd) => resource_deltas.push(rd), + None => { + self.with_transaction_failed(); + return (false, Vec::new(), Vec::new()); + }, + } + } + } + + (true, group_deltas, resource_deltas) + } + + /// Process a complete transaction + /// + /// Returns whether the gas limit was exceeded + fn process_transaction( + &mut self, + behavior: &MockIncarnation, + delta_test_kind: DeltaTestKind, + txn_idx: usize, + accumulated_gas: &mut u64, + maybe_block_gas_limit: Option, + ) -> bool { + // Process all deltas first + let (success, group_deltas, resource_deltas) = + self.process_transaction_deltas(&behavior.deltas, delta_test_kind); + + if !success { + return false; // Gas limit not exceeded, transaction failed + } + + // All remaining operations can be chained since the transaction is known to succeed + self.with_resource_reads(&behavior.resource_reads, delta_test_kind) + .with_module_reads(&behavior.module_reads) + .with_group_reads(&behavior.group_reads, delta_test_kind) + .with_group_deltas(group_deltas) + 
.with_resource_writes(&behavior.resource_writes, delta_test_kind, txn_idx) + .with_resource_deltas(resource_deltas, delta_test_kind) + .with_module_writes(&behavior.module_writes, txn_idx as TxnIndex); + + // Apply gas + *accumulated_gas += behavior.gas; + + // Check if gas limit exceeded + let gas_limit_exceeded = maybe_block_gas_limit + .map(|limit| *accumulated_gas >= limit) + .unwrap_or(false); + + if gas_limit_exceeded { + self.with_status(BaselineStatus::GasLimitExceeded); + } + + gas_limit_exceeded + } } impl BaselineOutput { @@ -100,285 +530,500 @@ impl BaselineOutput { txns: &[MockTransaction], maybe_block_gas_limit: Option, ) -> Self { - let mut current_world = HashMap::::new(); + let mut builder = BaselineOutputBuilder::new(); let mut accumulated_gas = 0; - let mut status = BaselineStatus::Success; - let mut read_values = vec![]; - let mut resolved_deltas = vec![]; - let mut group_reads = vec![]; - - for txn in txns.iter() { + for (txn_idx, txn) in txns.iter().enumerate() { match txn { MockTransaction::Abort => { - status = BaselineStatus::Aborted; + builder.with_status(BaselineStatus::Aborted); break; }, MockTransaction::SkipRest(gas) => { // In executor, SkipRest skips from the next index. Test assumes it's an empty // transaction, so create a successful empty reads and deltas. - read_values.push(Ok(vec![])); - resolved_deltas.push(Ok(HashMap::new())); + builder.with_empty_successful_transaction(); // gas in SkipRest is used for unit tests for now (can generalize when needed). assert_eq!(*gas, 0); - status = BaselineStatus::SkipRest; + builder.with_status(BaselineStatus::SkipRest); break; }, MockTransaction::Write { incarnation_counter, incarnation_behaviors, + delta_test_kind, } => { // Determine the behavior of the latest incarnation of the transaction. The index // is based on the value of the incarnation counter prior to the fetch_add during // the last mock execution, and is >= 1 because there is at least one execution. - let last_incarnation = (incarnation_counter.load(Ordering::SeqCst) - 1) - % incarnation_behaviors.len(); - - match incarnation_behaviors[last_incarnation] - .deltas - .iter() - .map(|(k, delta)| { - let base = match current_world - .entry(k.clone()) - .or_insert(BaselineValue::Empty) - { - // Get base value from the latest write. - BaselineValue::GenericWrite(w_value) => w_value - .as_u128() - .expect("Delta to a non-existent aggregator") - .expect("Must deserialize the aggregator base value"), - // Get base value from latest resolved aggregator value. - BaselineValue::Aggregator(value) => *value, - // Storage always gets resolved to a default constant. - BaselineValue::Empty => STORAGE_AGGREGATOR_VALUE, - }; - - delta - .apply_to(base) - .map(|resolved_value| (k.clone(), resolved_value)) - }) - .collect::, _>>() - { - Ok(txn_resolved_deltas) => { - // Update the read_values and resolved_deltas. Performing reads here is - // correct because written_ and resolved_ worlds have not been updated. - read_values.push(Ok(incarnation_behaviors[last_incarnation] - .reads - .iter() - .map(|k| { - current_world - .entry(k.clone()) - .or_insert(BaselineValue::Empty) - .clone() - }) - .collect())); - - resolved_deltas.push(Ok(txn_resolved_deltas - .into_iter() - .map(|(k, v)| { - // In this case transaction did not fail due to delta application - // errors, and thus we should update written_ and resolved_ worlds. 
- current_world.insert(k.clone(), BaselineValue::Aggregator(v)); - (k, v) - }) - .collect())); - - // We ensure that the latest state is always reflected in exactly one of - // the hashmaps, by possibly removing an element from the other Hashmap. - for (k, v) in incarnation_behaviors[last_incarnation].writes.iter() { - current_world - .insert(k.clone(), BaselineValue::GenericWrite(v.clone())); - } - - group_reads.push(Ok(incarnation_behaviors[last_incarnation] - .group_reads - .clone())); - - // Apply gas. - accumulated_gas += incarnation_behaviors[last_incarnation].gas; - if let Some(block_gas_limit) = maybe_block_gas_limit { - if accumulated_gas >= block_gas_limit { - status = BaselineStatus::GasLimitExceeded; - break; - } - } - }, - Err(_) => { - // Transaction does not take effect and we record delta application failure. - read_values.push(Err(())); - resolved_deltas.push(Err(())); - group_reads.push(Err(())); - }, + let incarnation_counter = incarnation_counter.swap(0, Ordering::SeqCst); + // Mock execute_transaction call always increments the incarnation counter. We + // perform a swap to 0 so later re-executions with the same transactions will + // also have a chance to start from scratch and e.g. assert below that at least + // one incarnation has been executed. + assert_gt!( + incarnation_counter, + 0, + "Mock execution of txn {txn_idx} never incremented incarnation" + ); + let last_incarnation = (incarnation_counter - 1) % incarnation_behaviors.len(); + + // Process the transaction + let gas_limit_exceeded = builder.process_transaction( + &incarnation_behaviors[last_incarnation], + *delta_test_kind, + txn_idx, + &mut accumulated_gas, + maybe_block_gas_limit, + ); + + // Break if gas limit exceeded + if gas_limit_exceeded { + break; } }, MockTransaction::InterruptRequested => unreachable!("Not tested with outputs"), } } - Self { - status, - read_values, - resolved_deltas, - group_reads, - } + // Initialize with empty delayed_field_key_to_id_map + let mut result = builder.build(); + result.delayed_field_key_to_id_map = RefCell::new(HashMap::new()); + result + } + + // Helper method to insert and validate delayed field IDs + fn insert_or_verify_delayed_field_id(&self, key: K, id: DelayedFieldID) { + let mut map = self.delayed_field_key_to_id_map.borrow_mut(); + assert!( + map.insert(key, id) + .map_or(true, |existing_id| existing_id == id), + "Inconsistent delayed field ID mapping" + ); + } + + // Verify the delayed field by checking ID, version, and value + fn verify_delayed_field( + &self, + bytes: &[u8], + baseline_key: &K, + expected_version: u32, + expected_value: u128, + delayed_field_reads: &mut impl Iterator, + ) { + // Deserialize the ID and version from bytes + let (id, version) = + deserialize_to_delayed_field_id(bytes).expect("Must deserialize delayed field tuple"); + + // Verify the version matches + assert_eq!( + expected_version, version, + "Version mismatch for delayed field" + ); + + // Get the corresponding delayed field read + let (delayed_id, value, key) = delayed_field_reads + .next() + .expect("Must have a delayed field read"); + + // Verify the ID, key, and value match + assert_eq!(id, delayed_id, "Delayed field ID mismatch"); + assert_eq!(*baseline_key, key, "Delayed field key mismatch"); + assert_eq!(expected_value, value, "Value mismatch for delayed field"); + + // Add ID to key map and assert consistency if already present + self.insert_or_verify_delayed_field_id(baseline_key.clone(), id); } fn assert_success(&self, block_output: &BlockOutput>) { - let 
base_map: HashMap = HashMap::from([(RESERVED_TAG, vec![0].into())]); let mut group_world = HashMap::new(); let mut group_metadata: HashMap> = HashMap::new(); - let results = block_output.get_transaction_outputs_forced(); - let committed = self.read_values.len(); - assert_eq!(self.resolved_deltas.len(), committed); - - // Check read values & delta writes. - izip!( - (0..committed), - results.iter().take(committed), - self.read_values.iter(), - self.resolved_deltas.iter(), - self.group_reads.iter(), - ) - .for_each(|(idx, output, reads, resolved_deltas, group_reads)| { - // Compute group read results. - let group_read_results: Vec> = group_reads - .as_ref() - .unwrap() - .iter() - .map(|(group_key, resource_tag)| { - let group_map = group_world.entry(group_key).or_insert(base_map.clone()); + let txn_outputs = block_output.get_transaction_outputs_forced(); - group_map.get(resource_tag).cloned() - }) - .collect(); - // Test group read results. - let read_len = reads.as_ref().unwrap().len(); + // Calculate the minimum number of valid iterations across all collections + let valid_txn_count = [ + txn_outputs.len(), + self.read_values.len(), + self.resolved_deltas.len(), + self.group_reads.len(), + self.group_deltas.len(), + self.module_reads.len(), + ] + .iter() + .min() + .copied() + .unwrap_or(0); - assert_eq!( - group_read_results.len(), - output.read_results.len() - read_len + // Process transactions up to the minimum valid count + for (txn_idx, txn_output) in txn_outputs.iter().enumerate().take(valid_txn_count) { + // Verify transaction wasn't skipped + assert!( + !txn_output.skipped, + "Error at txn {}: {:?}", + txn_idx, txn_output.maybe_error_msg ); - izip!( - output.read_results.iter().skip(read_len), - group_read_results.into_iter() - ) - .for_each(|(result_group_read, baseline_group_read)| { - assert!(result_group_read.clone().map(Into::::into) == baseline_group_read); - }); - for (group_key, size_or_metadata) in output.read_group_size_or_metadata.iter() { - let group_map = group_world.entry(group_key).or_insert(base_map.clone()); + // Compute group read information directly + let group_read_infos = GroupReadInfo::compute_from_group_reads( + &self.group_reads[txn_idx], + &mut group_world, + ); + + // Process resource and group read results and delayed field reads + let mut delayed_field_reads = txn_output.delayed_field_reads.clone().into_iter(); + let read_len = self.read_values[txn_idx].as_ref().unwrap().len(); + self.verify_resource_reads( + &self.read_values[txn_idx], + &txn_output.read_results[..read_len], + &mut delayed_field_reads, + ); + self.verify_group_reads( + &group_read_infos, + &txn_output.read_results[read_len..], + &mut delayed_field_reads, + ); + // Ensure all delayed field reads have been processed + assert_none!(delayed_field_reads.next()); - match size_or_metadata { - GroupSizeOrMetadata::Size(size) => { - let baseline_size = - group_size_as_sum(group_map.iter().map(|(t, v)| (t, v.len()))) - .unwrap() - .get(); + self.verify_module_reads( + &self.module_reads[txn_idx], + &txn_output.module_read_results, + txn_idx, + ); + self.verify_group_size_metadata(txn_output, &mut group_world, &group_metadata); + + // Process writes and deltas and update the group world. 
+ self.process_group_writes(txn_output, &mut group_world, &mut group_metadata, txn_idx); + + let group_deltas = self.group_deltas[txn_idx].as_ref().unwrap(); + self.process_group_deltas(group_deltas, &mut group_world); + self.verify_groups_patched_write_set(txn_output, &group_world, group_deltas); + self.verify_materialized_deltas(txn_output, &self.resolved_deltas[txn_idx]); + } + // Check that remaining transactions are properly marked as skipped. + let mut write_summary_flag = true; + for txn_output in txn_outputs.iter().skip(valid_txn_count) { + // Ensure the transaction is skipped based on the output + assert!(txn_output.skipped); + + // materialized delta writes is only set by a callback for + // committed transactions, which requires getting write summary. + // However, the very first transaction that is not committed might + // be an exception, which is why we use a boolean flag. + if txn_output.materialized_delta_writes.get().is_some() { + let called_write_summary = txn_output.called_write_summary.get().is_some(); + assert!(write_summary_flag || called_write_summary); + write_summary_flag &= called_write_summary; + } + } + } + + fn verify_resource_reads( + &self, + reads: &Result, ()>, + read_results: &[Option>], + delayed_field_reads: &mut impl Iterator, + ) { + for ((baseline_key, baseline_read), result_read) in reads + .as_ref() + .expect("Aggregator failures not yet tested") + .iter() + .zip(read_results) + { + match (baseline_read, result_read) { + (BaselineValue::DelayedField(expected_value, expected_version), Some(bytes)) => { + self.verify_delayed_field( + bytes, + baseline_key, + *expected_version, + *expected_value, + delayed_field_reads, + ); + }, + (BaselineValue::DelayedField(_, _), None) => { + unreachable!("Deletes on delayed fields not yet tested"); + }, + (BaselineValue::GenericWrite(v), Some(bytes)) => { + assert_some_eq!(v.extract_raw_bytes(), *bytes); + }, + (BaselineValue::GenericWrite(v), None) => { + assert_none!(v.extract_raw_bytes()); + }, + (BaselineValue::Aggregator(aggr_value), Some(bytes)) => { + assert_eq!(serialize(aggr_value), *bytes); + }, + (BaselineValue::Aggregator(_), None) => { + unreachable!( + "Deleted or non-existent value from storage can't match aggregator value" + ); + }, + (BaselineValue::Empty(delta_test_kind), maybe_bytes) => match delta_test_kind { + DeltaTestKind::DelayedFields => { assert_eq!( - baseline_size, *size, - "ERR: idx = {} group_key {:?}, baseline size {} != output_size {}", - idx, group_key, baseline_size, size + maybe_bytes.as_ref().unwrap(), + &serialize_from_delayed_field_u128(STORAGE_AGGREGATOR_VALUE, u32::MAX) ); }, - GroupSizeOrMetadata::Metadata(metadata) => { - if !group_metadata.contains_key(group_key) { - assert_eq!( - *metadata, - Some(raw_metadata(5)) /* default metadata */ - ); - } else { - let baseline_metadata = - group_metadata.get(group_key).cloned().flatten(); - assert_eq!(*metadata, baseline_metadata); - } + DeltaTestKind::AggregatorV1 => { + assert_eq!(*maybe_bytes, Some(serialize(&STORAGE_AGGREGATOR_VALUE))); }, - } + DeltaTestKind::None => { + assert_none!(maybe_bytes); + }, + }, } + } + } + + fn verify_group_reads( + &self, + group_infos: &[GroupReadInfo], + read_results: &[Option>], + delayed_field_reads: &mut impl Iterator, + ) { + assert_eq!(group_infos.len(), read_results.len()); + + for (group_info, result_group_read) in group_infos.iter().zip(read_results) { + let result_bytes = result_group_read.clone().map(Into::::into); - // Test normal reads. 
- izip!( - reads + // Size check for all cases + if let (Some(result), Some(baseline)) = ( + result_group_read.as_ref(), + group_info.baseline_bytes.as_ref(), + ) { + assert_eq!(result.len(), baseline.len(), "Length mismatch for value"); + } + + match &group_info.maybe_delayed_field { + Some((expected_value, expected_version)) => { + // Extract bytes from the result and verify delayed field invariants. + let result_bytes = result_bytes.expect("Must have a result for verification"); + // Verify delayed field with all required parameters + self.verify_delayed_field( + result_bytes.as_ref(), + &group_info.group_key, + *expected_version, + *expected_value, + delayed_field_reads, + ); + }, + None => { + // Case 2: This is a regular value - just compare bytes directly + assert_eq!( + result_bytes, group_info.baseline_bytes, + "Result bytes don't match baseline value for regular field" + ); + }, + } + } + } + + fn verify_module_reads( + &self, + module_reads: &Result>, ()>, + module_read_results: &[Option], + txn_idx: usize, + ) { + for (module_read, baseline_module_read) in module_read_results + .iter() + .zip(module_reads.as_ref().expect("No delta failures").iter()) + { + assert_eq!( + module_read .as_ref() - .expect("Aggregator failures not yet tested") - .iter(), - output.read_results.iter().take(read_len) - ) - .for_each(|(baseline_read, result_read)| baseline_read.assert_read_result(result_read)); - - // Update group world. - for (group_key, v, group_size, updates) in output.group_writes.iter() { - group_metadata.insert(group_key.clone(), v.as_state_value_metadata()); - - let group_map = group_world.entry(group_key).or_insert(base_map.clone()); - for (tag, v) in updates { - if v.is_deletion() { - assert_some!(group_map.remove(tag)); + .map(|m| m.creation_time_usecs()) + .unwrap(), + baseline_module_read + .map(|i| i as u64) + .unwrap_or(u32::MAX as u64), + "for txn_idx = {}", + txn_idx + ); + } + } + + fn verify_group_size_metadata( + &self, + output: &MockOutput, + group_world: &mut HashMap>, + group_metadata: &HashMap>, + ) { + for (group_key, size_or_metadata) in output.read_group_size_or_metadata.iter() { + let group_map = group_world + .entry(group_key.clone()) + .or_insert_with(default_group_map); + + match size_or_metadata { + GroupSizeOrMetadata::Size(size) => { + let baseline_size = + group_size_as_sum(group_map.iter().map(|(t, v)| (t, v.len()))) + .unwrap() + .get(); + + assert_eq!( + baseline_size, *size, + "ERR: group_key {:?}, baseline size {} != output_size {}", + group_key, baseline_size, size + ); + }, + GroupSizeOrMetadata::Metadata(metadata) => { + if !group_metadata.contains_key(group_key) { + assert_eq!(*metadata, Some(raw_metadata(5)) /* default metadata */); } else { - let existed = group_map - .insert(*tag, v.extract_raw_bytes().unwrap()) - .is_some(); - assert_eq!(existed, v.is_modification()); + let baseline_metadata = group_metadata.get(group_key).cloned().flatten(); + assert_eq!(*metadata, baseline_metadata); + } + }, + } + } + } + + fn process_group_writes( + &self, + output: &MockOutput, + group_world: &mut HashMap>, + group_metadata: &mut HashMap>, + idx: usize, + ) { + for (group_key, v, group_size, updates) in output.group_writes.iter() { + group_metadata.insert(group_key.clone(), v.as_state_value_metadata()); + + let group_map = group_world + .entry(group_key.clone()) + .or_insert_with(default_group_map); + + for (tag, (v, maybe_layout)) in updates { + if v.is_deletion() { + assert_some!(group_map.remove(tag)); + } else { + let mut bytes = 
v.extract_raw_bytes().unwrap(); + + if maybe_layout.is_some() { + assert_eq!(*tag, RESERVED_TAG); + let (written_id, written_idx) = + deserialize_to_delayed_field_id(&bytes).unwrap(); + let (current_value, _) = deserialize_to_delayed_field_u128( + group_map.get(&RESERVED_TAG).unwrap(), + ) + .unwrap(); + assert_eq!(written_idx, idx as u32); + + // Use the helper method + self.insert_or_verify_delayed_field_id(group_key.clone(), written_id); + + bytes = serialize_from_delayed_field_u128(current_value, written_idx); } + + let existed = group_map.insert(*tag, bytes).is_some(); + assert_eq!(existed, v.is_modification()); } - let computed_size = - group_size_as_sum(group_map.iter().map(|(t, v)| (t, v.len()))).unwrap(); - assert_eq!(computed_size, *group_size); } - // Test recorded finalized group writes: it should contain the whole group, and - // as such, correspond to the contents of the group_world. - // TODO: figure out what can still be tested here, e.g. RESERVED_TAG - // let groups_tested = - // (output.group_writes.len() + group_reads.as_ref().unwrap().len()) > 0; - // for (group_key, _, finalized_updates) in output.recorded_groups.get().unwrap() { - // let baseline_group_map = - // group_world.entry(group_key).or_insert(base_map.clone()); - // assert_eq!(finalized_updates.len(), baseline_group_map.len()); - // if groups_tested { - // // RESERVED_TAG should always be contained. - // assert_ge!(finalized_updates.len(), 1); - - // for (tag, v) in finalized_updates.iter() { - // assert_eq!( - // v.bytes().unwrap(), - // baseline_group_map.get(tag).unwrap(), - // ); - // } - // } - // } - - let baseline_deltas = resolved_deltas - .as_ref() - .expect("Aggregator failures not yet tested"); - output - .materialized_delta_writes - .get() - .expect("Delta writes must be set") - .iter() - .for_each(|(k, result_delta_write)| { - assert_eq!( - *baseline_deltas.get(k).expect("Baseline must contain delta"), - result_delta_write - .as_u128() - .expect("Baseline must contain delta") - .expect("Must deserialize aggregator write value") - ); - }); - }); + let computed_size = + group_size_as_sum(group_map.iter().map(|(t, v)| (t, v.len()))).unwrap(); + assert_eq!(computed_size, *group_size); + } + } + + fn process_group_deltas( + &self, + group_deltas: &[(K, DeltaOp)], + group_world: &mut HashMap>, + ) { + for (key, delta) in group_deltas.iter() { + // Apply the delta and compute the new written value (retains txn_idx from the + // previous write but updates the value). + let value_with_delayed_field = group_world + .entry(key.clone()) + .or_insert_with(default_group_map) + .get_mut(&RESERVED_TAG) + .expect("RESERVED_TAG must exist"); + + let (value, version) = + deserialize_to_delayed_field_u128(value_with_delayed_field).unwrap(); + + let updated_value = delta + .apply_to(value) + .expect("Delta application failures not tested"); - results.iter().skip(committed).for_each(|output| { - // Ensure the transaction is skipped based on the output. - assert!(output.skipped); + *value_with_delayed_field = serialize_from_delayed_field_u128(updated_value, version); + } + } - // Implies that materialize_delta_writes was never called, as should - // be for skipped transactions. - assert_none!(output.materialized_delta_writes.get()); - }); + fn verify_groups_patched_write_set( + &self, + output: &MockOutput, + group_world: &HashMap>, + group_deltas: &[(K, DeltaOp)], + ) { + // TODO(BlockSTMv2: Do delta keys, as well as replaced_reads. 
+ let patched_resource_write_set = output + .patched_resource_write_set + .get() + .expect("Patched resource write set must be set"); + + for (key, maybe_size) in output + .group_writes + .iter() + .map(|(k, _, size, _)| (k, Some(size))) + .chain(group_deltas.iter().map(|(k, _)| (k, None))) + { + let patched_group_bytes = patched_resource_write_set.get(key).unwrap(); + let expected_group_map = group_world.get(key).unwrap(); + + if patched_group_bytes.is_deletion() { + assert!(maybe_size.map_or(true, |size| *size == ResourceGroupSize::zero_combined())); + } else { + let bytes = patched_group_bytes.extract_raw_bytes().unwrap(); + assert!(maybe_size.map_or(true, |size| size.get() == bytes.len() as u64)); + let patched_group_map: BTreeMap = bcs::from_bytes(&bytes).unwrap(); + assert_eq!(patched_group_map, *expected_group_map); + } + } + } + + fn verify_materialized_deltas( + &self, + output: &MockOutput, + resolved_deltas: &Result, ()>, + ) { + let baseline_deltas = resolved_deltas + .as_ref() + .expect("Aggregator failures not yet tested"); + + output + .materialized_delta_writes + .get() + .expect("Delta writes must be set") + .iter() + .for_each(|(k, result_delta_write)| { + assert_eq!( + *baseline_deltas.get(k).expect("Baseline must contain delta"), + result_delta_write + .as_u128() + .expect("Baseline must contain delta") + .expect("Must deserialize aggregator write value") + ); + }); + + for (k, (_, _)) in output.reads_needing_exchange.iter() { + let patched_resource = output + .patched_resource_write_set + .get() + .unwrap() + .get(k) + .unwrap(); + + let baseline_value = *baseline_deltas.get(k).expect("Baseline must contain delta"); + let (patched_value, _) = + deserialize_to_delayed_field_u128(&patched_resource.extract_raw_bytes().unwrap()) + .unwrap(); + assert_eq!(patched_value, baseline_value); + } } // Used for testing, hence the function asserts the correctness conditions within diff --git a/aptos-move/block-executor/src/proptest_types/bencher.rs b/aptos-move/block-executor/src/proptest_types/bencher.rs index ceff511e14553..de84f87661b8d 100644 --- a/aptos-move/block-executor/src/proptest_types/bencher.rs +++ b/aptos-move/block-executor/src/proptest_types/bencher.rs @@ -7,9 +7,8 @@ use crate::{ executor::BlockExecutor, proptest_types::{ baseline::BaselineOutput, - types::{ - KeyType, MockOutput, MockTask, MockTransaction, TransactionGen, TransactionGenParams, - }, + mock_executor::{MockOutput, MockTask}, + types::{KeyType, MockTransaction, TransactionGen, TransactionGenParams}, }, txn_commit_hook::NoOpTransactionCommitHook, txn_provider::default::DefaultTxnProvider, @@ -109,14 +108,14 @@ where let transactions: Vec<_> = transaction_gens .into_iter() - .map(|txn_gen| txn_gen.materialize(&key_universe, (false, false))) + .map(|txn_gen| txn_gen.materialize(&key_universe)) .collect(); - let txns_provider = DefaultTxnProvider::new(transactions.clone()); + let txns_provider = DefaultTxnProvider::new_without_info(transactions.clone()); let baseline_output = BaselineOutput::generate(txns_provider.get_txns(), None); Self { - txns_provider: DefaultTxnProvider::new(transactions), + txns_provider: DefaultTxnProvider::new_without_info(transactions), baseline_output, } } diff --git a/aptos-move/block-executor/src/proptest_types/delta_tests.rs b/aptos-move/block-executor/src/proptest_types/delta_tests.rs new file mode 100644 index 0000000000000..9f5142e10d7af --- /dev/null +++ b/aptos-move/block-executor/src/proptest_types/delta_tests.rs @@ -0,0 +1,101 @@ +// Copyright © Aptos Foundation +// 
Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + proptest_types::{ + baseline::BaselineOutput, + mock_executor::{MockEvent, MockTask}, + resource_tests::{ + create_executor_thread_pool, execute_block_parallel, + generate_universe_and_transactions, get_gas_limit_variants, + }, + types::{DeltaDataView, KeyType, MockTransaction}, + }, + task::ExecutorTask, + txn_provider::default::DefaultTxnProvider, +}; +use proptest::test_runner::TestRunner; +use std::marker::PhantomData; +use test_case::test_case; + +fn run_transactions_deltas( + universe_size: usize, + transaction_count: usize, + use_gas_limit: bool, + num_executions: usize, + num_random_generations: usize, +) { + let executor_thread_pool = create_executor_thread_pool(); + + // The delta threshold controls how many keys / paths are guaranteed r/w resources even + // in the presence of deltas. + let delta_threshold = std::cmp::min(15, universe_size / 2); + + for _ in 0..num_random_generations { + let mut local_runner = TestRunner::default(); + + let (universe, transaction_gen) = generate_universe_and_transactions( + &mut local_runner, + universe_size, + transaction_count, + true, + ); + + // Do not allow deletions as resolver can't apply delta to a deleted aggregator. + let transactions: Vec, MockEvent>> = transaction_gen + .into_iter() + .map(|txn_gen| txn_gen.materialize_with_deltas(&universe, delta_threshold, false)) + .collect(); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); + + let data_view = DeltaDataView::> { + phantom: PhantomData, + }; + + let gas_limits = get_gas_limit_variants(use_gas_limit, transaction_count); + + for maybe_block_gas_limit in gas_limits { + for _ in 0..num_executions { + let output = execute_block_parallel::< + MockTransaction, MockEvent>, + DeltaDataView>, + DefaultTxnProvider, MockEvent>>, + >( + executor_thread_pool.clone(), + maybe_block_gas_limit, + &txn_provider, + &data_view, + None, + false, + ); + + BaselineOutput::generate(txn_provider.get_txns(), maybe_block_gas_limit) + .assert_parallel_output(&output); + } + } + } +} + +#[test_case(50, 1000, false, 10, 2 ; "deltas and writes")] +#[test_case(10, 1000, false, 10, 2 ; "deltas with small universe")] +#[test_case(50, 1000, true, 10, 2 ; "deltas and writes with gas limit")] +#[test_case(10, 1000, true, 10, 2 ; "deltas with small universe with gas limit")] +fn deltas_transaction_tests( + universe_size: usize, + transaction_count: usize, + use_gas_limit: bool, + num_executions: usize, + num_random_generations: usize, +) where + MockTask, MockEvent>: + ExecutorTask, MockEvent>>, +{ + run_transactions_deltas( + universe_size, + transaction_count, + use_gas_limit, + num_executions, + num_random_generations, + ); +} diff --git a/aptos-move/block-executor/src/proptest_types/group_tests.rs b/aptos-move/block-executor/src/proptest_types/group_tests.rs new file mode 100644 index 0000000000000..dc49160ed5c0d --- /dev/null +++ b/aptos-move/block-executor/src/proptest_types/group_tests.rs @@ -0,0 +1,171 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + code_cache_global_manager::AptosModuleCacheManagerGuard, + errors::SequentialBlockExecutionError, + executor::BlockExecutor, + proptest_types::{ + baseline::BaselineOutput, + mock_executor::{MockEvent, MockOutput, MockTask}, + resource_tests::{ + create_executor_thread_pool, execute_block_parallel, get_gas_limit_variants, + }, + types::{ + KeyType, MockTransaction, NonEmptyGroupDataView, TransactionGen, TransactionGenParams, + }, + }, + task::ExecutorTask, + txn_commit_hook::NoOpTransactionCommitHook, + txn_provider::default::DefaultTxnProvider, +}; +use aptos_types::block_executor::{ + config::BlockExecutorConfig, transaction_slice_metadata::TransactionSliceMetadata, +}; +use num_cpus; +use proptest::{collection::vec, prelude::*, strategy::ValueTree, test_runner::TestRunner}; +use std::sync::Arc; +use test_case::test_case; + +/// Create a data view for testing with non-empty groups +pub(crate) fn create_non_empty_group_data_view( + key_universe: &[[u8; 32]], + universe_size: usize, + delayed_field_testing: bool, +) -> NonEmptyGroupDataView> { + NonEmptyGroupDataView::> { + group_keys: key_universe[(universe_size - 3)..universe_size] + .iter() + .map(|k| KeyType(*k)) + .collect(), + delayed_field_testing, + } +} + +/// Run both parallel and sequential execution tests for a transaction provider +pub(crate) fn run_tests_with_groups( + executor_thread_pool: Arc, + gas_limits: Vec>, + transactions: Vec, MockEvent>>, + data_view: &NonEmptyGroupDataView>, + num_executions_parallel: usize, + num_executions_sequential: usize, +) { + let txn_provider = DefaultTxnProvider::new_without_info(transactions); + + // Run parallel execution tests + for maybe_block_gas_limit in gas_limits { + for _ in 0..num_executions_parallel { + let output = execute_block_parallel::< + MockTransaction, MockEvent>, + NonEmptyGroupDataView>, + DefaultTxnProvider, MockEvent>>, + >( + executor_thread_pool.clone(), + maybe_block_gas_limit, + &txn_provider, + data_view, + None, + false, + ); + + BaselineOutput::generate(txn_provider.get_txns(), maybe_block_gas_limit) + .assert_parallel_output(&output); + } + } + + // Run sequential execution tests + for _ in 0..num_executions_sequential { + let mut guard = AptosModuleCacheManagerGuard::none(); + + let output = BlockExecutor::< + MockTransaction, MockEvent>, + MockTask, MockEvent>, + NonEmptyGroupDataView>, + NoOpTransactionCommitHook, MockEvent>, usize>, + DefaultTxnProvider, MockEvent>>, + >::new( + BlockExecutorConfig::new_no_block_limit(num_cpus::get()), + executor_thread_pool.clone(), + None, + ) + .execute_transactions_sequential( + &txn_provider, + data_view, + &TransactionSliceMetadata::unknown(), + &mut guard, + false, + ); + + BaselineOutput::generate(txn_provider.get_txns(), None).assert_output(&output.map_err( + |e| match e { + SequentialBlockExecutionError::ResourceGroupSerializationError => { + panic!("Unexpected error") + }, + SequentialBlockExecutionError::ErrorToReturn(err) => err, + }, + )); + } +} + +// TODO: Change some tests (e.g. second and fifth) to use gas limit: needs to handle error in mock executor. 
+#[test_case(50, 100, None, None, None, false, 30, 15 ; "basic group test")] +#[test_case(50, 1000, None, None, None, false, 20, 10 ; "basic group test 2")] +#[test_case(50, 1000, None, None, None, true, 20, 10 ; "basic group test 2 with gas limit")] +#[test_case(15, 1000, None, None, None, false, 5, 5 ; "small universe group test")] +#[test_case(20, 1000, Some(30), None, None, false, 10, 5 ; "group size pct1=30%")] +#[test_case(20, 1000, Some(80), None, None, false, 10, 5 ; "group size pct1=80%")] +#[test_case(20, 1000, Some(80), None, None, true, 10, 5 ; "group size pct1=80% with gas limit")] +#[test_case(20, 1000, Some(30), Some(80), None, false, 10, 5 ; "group size pct1=30%, pct2=80%")] +#[test_case(20, 1000, Some(30), Some(50), Some(70), false, 10, 5 ; "group size pct1=30%, pct2=50%, pct3=70%")] +fn non_empty_group_transaction_tests( + universe_size: usize, + transaction_count: usize, + group_size_pct1: Option, + group_size_pct2: Option, + group_size_pct3: Option, + use_gas_limit: bool, + num_executions_parallel: usize, + num_executions_sequential: usize, +) where + MockTask, MockEvent>: + ExecutorTask, MockEvent>>, +{ + let mut local_runner = TestRunner::default(); + + let key_universe = vec(any::<[u8; 32]>(), universe_size) + .new_tree(&mut local_runner) + .expect("creating a new value should succeed") + .current(); + + let transaction_gen = vec( + any_with::>(TransactionGenParams::new_dynamic()), + transaction_count, + ) + .new_tree(&mut local_runner) + .expect("creating a new value should succeed") + .current(); + + // Group size percentages for 3 groups + let group_size_pcts = [group_size_pct1, group_size_pct2, group_size_pct3]; + let transactions = transaction_gen + .into_iter() + .map(|txn_gen| { + txn_gen.materialize_groups::<[u8; 32], MockEvent>(&key_universe, group_size_pcts, None) + }) + .collect(); + + let data_view = create_non_empty_group_data_view(&key_universe, universe_size, false); + let executor_thread_pool = create_executor_thread_pool(); + let gas_limits = get_gas_limit_variants(use_gas_limit, transaction_count); + + run_tests_with_groups( + executor_thread_pool, + gas_limits, + transactions, + &data_view, + num_executions_parallel, + num_executions_sequential, + ); +} diff --git a/aptos-move/block-executor/src/proptest_types/mock_executor.rs b/aptos-move/block-executor/src/proptest_types/mock_executor.rs new file mode 100644 index 0000000000000..3f2fa1b8911fe --- /dev/null +++ b/aptos-move/block-executor/src/proptest_types/mock_executor.rs @@ -0,0 +1,1016 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + proptest_types::types::{ + deserialize_to_delayed_field_id, serialize_from_delayed_field_id, DeltaTestKind, + GroupSizeOrMetadata, MockIncarnation, MockTransaction, ValueType, RESERVED_TAG, + }, + task::{ExecutionStatus, ExecutorTask, TransactionOutput}, +}; +use aptos_aggregator::{ + bounded_math::SignedU128, + delayed_change::{DelayedApplyChange, DelayedChange}, + delta_change_set::{DeltaOp, DeltaWithMax}, + resolver::TAggregatorV1View, +}; +use aptos_mvhashmap::types::TxnIndex; +use aptos_types::{ + contract_event::TransactionEvent, + error::PanicError, + executable::ModulePath, + fee_statement::FeeStatement, + state_store::{state_value::StateValueMetadata, TStateView}, + transaction::BlockExecutableTransaction as Transaction, + write_set::{TransactionWrite, WriteOp, WriteOpKind}, +}; +use aptos_vm_environment::environment::AptosEnvironment; +use aptos_vm_types::{ + module_and_script_storage::code_storage::AptosCodeStorage, + module_write_set::ModuleWrite, + resolver::{ + BlockSynchronizationKillSwitch, ResourceGroupSize, TExecutorView, TResourceGroupView, + }, + resource_group_adapter::{ + decrement_size_for_remove_tag, group_tagged_resource_size, increment_size_for_add_tag, + }, +}; +use bytes::Bytes; +use claims::{assert_none, assert_ok}; +use move_core_types::{ + language_storage::ModuleId, + value::{MoveStructLayout, MoveTypeLayout}, + vm_status::StatusCode, +}; +use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; +use once_cell::sync::OnceCell; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + fmt::Debug, + hash::Hash, + marker::PhantomData, + sync::{atomic::Ordering, Arc}, +}; + +/// A lazily initialized empty struct layout used throughout tests +/// +/// This is an empty struct layout used specifically for testing delayed fields. +/// It's used when performing reads for resources that might contain delayed fields +/// to ensure consistent behavior across all test cases. +pub(crate) static MOCK_LAYOUT: once_cell::sync::Lazy = + once_cell::sync::Lazy::new(|| MoveTypeLayout::Struct(MoveStructLayout::new(vec![]))); + +/// Macro for returning an error directly when Result is an error +/// +/// This macro unwraps a Result or returns the error directly. +/// Used when the function returns the same error type as the Result. +/// +/// Usage: +/// try_with_direct!(result_expr) +#[macro_export] +macro_rules! try_with_direct { + ($expr:expr) => { + match $expr { + Ok(val) => val, + Err(e) => return e, + } + }; +} + +/// Macro for returning Err(e) when Result is an error +/// +/// This macro unwraps a Result or returns Err(e). +/// Used when the function returns Result. +/// +/// Usage: +/// try_with_error!(result_expr) +#[macro_export] +macro_rules! try_with_error { + ($expr:expr) => { + match $expr { + Ok(val) => val, + Err(e) => return Err(e), + } + }; +} + +/// Macro for returning an ExecutionStatus with error message +/// +/// This macro unwraps a Result or returns an error wrapped in +/// ExecutionStatus::Success(MockOutput::with_error(...)). +/// +/// Usage: +/// try_with_status!(result_expr, "error message") +#[macro_export] +macro_rules! 
try_with_status { + ($expr:expr, $msg:expr) => { + match $expr { + Ok(val) => val, + Err(e) => { + return Err(ExecutionStatus::Success(MockOutput::with_error(&format!( + "{}: {:?}", + $msg, e + )))) + }, + } + }; +} + +#[derive(Debug)] +pub(crate) struct MockOutput { + pub(crate) writes: Vec<(K, ValueType, Option>)>, + pub(crate) aggregator_v1_writes: Vec<(K, ValueType)>, + // Key, metadata_op, inner_ops + pub(crate) group_writes: Vec<( + K, + ValueType, + ResourceGroupSize, + BTreeMap>)>, + )>, + pub(crate) module_writes: Vec>, + pub(crate) deltas: Vec<(K, DeltaOp, Option<(DelayedFieldID, bool)>)>, + pub(crate) events: Vec, + pub(crate) read_results: Vec>>, + pub(crate) delayed_field_reads: Vec<(DelayedFieldID, u128, K)>, + pub(crate) module_read_results: Vec>, + pub(crate) read_group_size_or_metadata: Vec<(K, GroupSizeOrMetadata)>, + pub(crate) materialized_delta_writes: OnceCell>, + pub(crate) patched_resource_write_set: OnceCell>, + pub(crate) total_gas: u64, + pub(crate) called_write_summary: OnceCell<()>, + pub(crate) skipped: bool, + pub(crate) maybe_error_msg: Option, + pub(crate) reads_needing_exchange: HashMap)>, + pub(crate) group_reads_needing_exchange: HashMap, +} + +/// A builder for incrementally constructing MockOutput instances for cleaner code. +pub(crate) struct MockOutputBuilder { + pub(crate) output: MockOutput, +} + +impl MockOutputBuilder { + /// Create a new builder from mock incarnation. + pub(crate) fn from_mock_incarnation( + mock_incarnation: &MockIncarnation, + delta_test_kind: DeltaTestKind, + ) -> Self { + let output = MockOutput { + writes: Vec::with_capacity(mock_incarnation.resource_writes.len()), + aggregator_v1_writes: mock_incarnation + .resource_writes + .clone() + .into_iter() + .filter_map(|(k, v, has_delta)| { + (has_delta && delta_test_kind == DeltaTestKind::AggregatorV1).then_some((k, v)) + }) + .collect(), + group_writes: Vec::with_capacity(mock_incarnation.group_writes.len()), + module_writes: mock_incarnation.module_writes.clone(), + deltas: Vec::with_capacity(mock_incarnation.deltas.len()), + events: mock_incarnation.events.to_vec(), + read_results: Vec::with_capacity(mock_incarnation.resource_reads.len()), + delayed_field_reads: vec![], + module_read_results: Vec::with_capacity(mock_incarnation.module_reads.len()), + read_group_size_or_metadata: Vec::with_capacity(mock_incarnation.group_queries.len()), + materialized_delta_writes: OnceCell::new(), + patched_resource_write_set: OnceCell::new(), + total_gas: mock_incarnation.gas, + called_write_summary: OnceCell::new(), + skipped: false, + maybe_error_msg: None, + reads_needing_exchange: HashMap::new(), + group_reads_needing_exchange: HashMap::new(), + }; + + Self { output } + } + + /// This method reads metadata for each module ID in the provided list + /// and adds the results to the output. + /// + /// Returns self for method chaining + pub(crate) fn add_module_reads( + &mut self, + view: &S, + module_ids: &[ModuleId], + ) -> Result<&mut Self, ExecutionStatus, usize>> { + for module_id in module_ids { + let metadata = try_with_status!( + view.get_module_state_value_metadata(module_id.address(), module_id.name()), + "Failed to fetch module metadata" + ); + self.output.module_read_results.push(metadata); + } + + Ok(self) + } + + /// This method reads bytes for regular resources and handles delayed fields as needed. 
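+    ///
+    /// Sketch of the dispatch performed below (illustrative only, mirroring
+    /// the match in the body; no additional behavior is implied):
+    ///
+    /// ```ignore
+    /// match (has_deltas, delayed_fields_enabled) {
+    ///     (false, _) => { /* plain resource bytes read */ },
+    ///     (true, false) => { /* AggregatorV1 state value read */ },
+    ///     (true, true) => { /* bytes read, then delayed field value read */ },
+    /// }
+    /// ```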
+ /// + /// Returns self for method chaining + pub(crate) fn add_resource_reads( + &mut self, + view: &impl TExecutorView, + key_pairs: &[(K, bool)], + delayed_fields_enabled: bool, + ) -> Result<&mut Self, ExecutionStatus, usize>> { + for (key, has_deltas) in key_pairs { + match (has_deltas, delayed_fields_enabled) { + // Regular resource read (no delayed fields) + (false, false) | (false, true) => { + let v = try_with_status!( + view.get_resource_bytes(key, None), + "Failed to get resource bytes" + ); + self.add_read_result(v.map(Into::into)); + }, + // Aggregator V1 read + (true, false) => { + let v = try_with_status!( + view.get_aggregator_v1_state_value(key), + "Failed to get aggregator v1 state value" + ); + self.add_read_result(v.map(|state_value| state_value.bytes().clone().into())); + }, + // Delayed field read + (true, true) => { + let bytes = try_with_status!( + view.get_resource_bytes(key, Some(&*MOCK_LAYOUT)), + "Failed to get resource bytes with layout" + ) + .expect("In current tests, delayed field is always initialized"); + + // Add bytes to read_results first + self.add_read_result(Some(bytes.to_vec())); + + // Then perform delayed field read if bytes were returned + try_with_error!(self.add_delayed_field_from_read_result( + view, + key, + bytes.as_ref() + )); + }, + } + } + + Ok(self) + } + + /// This method reads resources from groups and handles delayed fields as needed. + /// + /// Returns self for method chaining + pub(crate) fn add_group_reads( + &mut self, + view: &(impl TResourceGroupView + + TExecutorView), + group_reads: &[(K, u32, bool)], + delayed_fields_enabled: bool, + ) -> Result<&mut Self, ExecutionStatus, usize>> { + for (group_key, resource_tag, has_delta) in group_reads { + let maybe_layout = + (*has_delta && delayed_fields_enabled && *resource_tag == RESERVED_TAG) + .then(|| (*MOCK_LAYOUT).clone()); + + let v = try_with_status!( + view.get_resource_from_group(group_key, resource_tag, maybe_layout.as_ref()), + "Failed to get resource from group" + ); + + self.add_read_result(v.clone().map(Into::into)); + + // Perform delayed field read if needed + if *has_delta && delayed_fields_enabled { + assert_eq!(*resource_tag, RESERVED_TAG); + try_with_error!(self.add_delayed_field_from_read_result( + view, + group_key, + v.expect("RESERVED_TAG always contains a value").as_ref(), + )); + } + } + + Ok(self) + } + + /// Add group size or metadata queries to the output + /// + /// This method queries either the size or metadata of resource groups + /// based on the query_metadata flag. 
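+    ///
+    /// For example (illustrative), an entry `(key, false)` records
+    /// `GroupSizeOrMetadata::Size(..)` for that group key, while
+    /// `(key, true)` records `GroupSizeOrMetadata::Metadata(..)`.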
+ /// + /// Returns self for method chaining + pub(crate) fn add_group_queries( + &mut self, + view: &(impl TResourceGroupView + + TExecutorView), + group_queries: &[(K, bool)], + ) -> Result<&mut Self, ExecutionStatus, usize>> { + for (group_key, query_metadata) in group_queries { + let res = if *query_metadata { + // Query metadata + let v = try_with_status!( + view.get_resource_state_value_metadata(group_key), + "Failed to get resource state value metadata" + ); + GroupSizeOrMetadata::Metadata(v) + } else { + // Query size + let v = try_with_status!( + view.resource_group_size(group_key), + "Failed to get resource group size" + ); + GroupSizeOrMetadata::Size(v.get()) + }; + + self.output + .read_group_size_or_metadata + .push((group_key.clone(), res)); + } + + Ok(self) + } + + /// This method handles the complex logic of processing group writes, including: + /// - Getting the resource group size + /// - Processing inner operations for each tag + /// - Updating group size based on deletions and creations + /// - Adding the final group write to the output + /// + /// Returns self for method chaining + pub(crate) fn add_group_writes( + &mut self, + view: &View, + group_writes: &[(K, StateValueMetadata, HashMap)], + delayed_fields_enabled: bool, + txn_idx: u32, + ) -> Result<&mut Self, ExecutionStatus, usize>> + where + View: TResourceGroupView + + TExecutorView, + { + // Group writes + for (key, metadata, inner_ops) in group_writes { + let mut new_inner_ops = BTreeMap::new(); + + let mut new_group_size = try_with_status!( + view.resource_group_size(key), + "Failed to get resource group size" + ); + let group_size = new_group_size; + + for (tag, (inner_op, has_delayed_field)) in inner_ops.iter() { + let maybe_layout = + (*has_delayed_field && delayed_fields_enabled && *tag == RESERVED_TAG) + .then(|| MOCK_LAYOUT.clone()); + let exists = try_with_status!( + view.get_resource_from_group(key, tag, maybe_layout.as_ref(),), + "Failed to get resource from group" + ) + .is_some(); + assert!( + *tag != RESERVED_TAG || exists, + "RESERVED_TAG must always be present in groups in tests" + ); + + // inner op is either deletion or creation. + assert!(!inner_op.is_modification()); + + let mut new_inner_op = inner_op.clone(); + let mut new_inner_op_layout = None; + if *has_delayed_field && delayed_fields_enabled && new_inner_op.bytes().is_some() { + // For groups, delayed_fields_enabled should always be + // true when has_delta is true & tag is RESERVED_TAG. 
+ assert!(*tag == RESERVED_TAG); + let prev_id = self.get_delayed_field_id_from_resource(view, key, Some(*tag))?; + new_inner_op.set_bytes(serialize_from_delayed_field_id(prev_id, txn_idx)); + new_inner_op_layout = Some(Arc::new(MOCK_LAYOUT.clone())); + } + + let maybe_op = if exists { + Some( + if new_inner_op.is_creation() + && (new_inner_op.bytes().unwrap()[0] % 4 < 3 || *tag == RESERVED_TAG) + { + ValueType::new( + new_inner_op.bytes().cloned(), + StateValueMetadata::none(), + WriteOpKind::Modification, + ) + } else { + ValueType::new(None, StateValueMetadata::none(), WriteOpKind::Deletion) + }, + ) + } else { + new_inner_op.is_creation().then(|| new_inner_op.clone()) + }; + + if let Some(new_inner_op) = maybe_op { + if exists { + let old_tagged_value_size = try_with_status!( + view.resource_size_in_group(key, tag), + "Failed to get resource size in group" + ); + let old_size = try_with_status!( + group_tagged_resource_size(tag, old_tagged_value_size), + "Failed to calculate group tagged resource size" + ); + + try_with_status!( + decrement_size_for_remove_tag(&mut new_group_size, old_size), + "Failed to decrement resource group size" + ); + } + if !new_inner_op.is_deletion() { + let new_size = try_with_status!( + group_tagged_resource_size( + tag, + new_inner_op.bytes().as_ref().unwrap().len(), + ), + "Failed to calculate group tagged resource size" + ); + + try_with_status!( + increment_size_for_add_tag(&mut new_group_size, new_size), + "Failed to increment resource group size" + ); + } + + new_inner_ops.insert(*tag, (new_inner_op, new_inner_op_layout)); + } + } + + if !new_inner_ops.is_empty() { + if group_size.get() > 0 && new_group_size.get() == 0 { + // Note: Even though currently the groups are never empty, speculatively the new + // size may still become zero, because atomicity is not guaranteed across + // existence queries: so even if RESERVED_TAG is present, a different tag might + // have been removed for exactly the same size. + self.output.group_writes.push(( + key.clone(), + ValueType::new(None, metadata.clone(), WriteOpKind::Deletion), + new_group_size, + new_inner_ops, + )); + } else { + let op_kind = if group_size.get() == 0 { + WriteOpKind::Creation + } else { + WriteOpKind::Modification + }; + + // Not testing metadata_op here, always modification. + self.output.group_writes.push(( + key.clone(), + ValueType::new(Some(Bytes::new()), metadata.clone(), op_kind), + new_group_size, + new_inner_ops, + )); + } + } + } + + Ok(self) + } + + /// This method handles regular resource writes and delayed fields as needed. + /// It processes writes and sets proper bytes for delayed fields. + /// + /// Returns self for method chaining + pub(crate) fn add_resource_writes( + &mut self, + view: &View, + resource_writes: &[(K, ValueType, bool)], + delayed_fields_enabled: bool, + txn_idx: u32, + ) -> Result<&mut Self, ExecutionStatus, usize>> + // Group view is because get_delayed_field_id_from_resource dispatches, but there is + // a TODO to have TExecutorView contain TResourceGroupView anyway. + where + View: TExecutorView + + TResourceGroupView, + { + for (k, new_value, has_delta) in resource_writes.iter() { + let mut value_to_add = new_value.clone(); + let mut value_to_add_layout = None; + if *has_delta && !delayed_fields_enabled { + // Already handled by aggregator_v1_writes. 
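+                // (from_mock_incarnation already copied these (key, value)
+                // pairs into aggregator_v1_writes, so emitting a plain write
+                // here as well would duplicate the output.)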
+ continue; + } + + if *has_delta && delayed_fields_enabled && value_to_add.bytes().is_some() { + let prev_id = self.get_delayed_field_id_from_resource(view, k, None)?; + value_to_add.set_bytes(serialize_from_delayed_field_id(prev_id, txn_idx)); + value_to_add_layout = Some(Arc::new(MOCK_LAYOUT.clone())); + } + + self.output + .writes + .push((k.clone(), value_to_add, value_to_add_layout)); + } + + Ok(self) + } + + /// This method processes the deltas and adds them to the output. + /// It skips this step if delayed_fields_or_aggregator_v1 is true. + /// + /// Returns self for method chaining + pub(crate) fn add_deltas( + &mut self, + view: &(impl TExecutorView + + TResourceGroupView), + deltas: &[(K, DeltaOp, Option)], + delta_test_kind: DeltaTestKind, + ) -> Result<&mut Self, ExecutionStatus, usize>> { + match delta_test_kind { + DeltaTestKind::DelayedFields => { + for (k, delta, maybe_tag) in deltas { + let id = self.get_delayed_field_id_from_resource(view, k, *maybe_tag)?; + + // Currently, we test with base delta of 0 and a max value of u128::MAX. + let base_delta = &SignedU128::Positive(0); + let (delta_op, _, max_value) = delta.into_inner(); + let success = try_with_status!( + view.delayed_field_try_add_delta_outcome( + &id, base_delta, &delta_op, max_value + ), + "Failed to apply delta to delayed field" + ); + + self.output + .deltas + .push((k.clone(), *delta, Some((id, success)))); + } + }, + DeltaTestKind::AggregatorV1 => { + self.output + .deltas + .extend(deltas.iter().map(|(k, delta, maybe_tag)| { + assert_none!(maybe_tag, "AggregatorV1 not supported in groups"); + (k.clone(), *delta, None) + })); + }, + DeltaTestKind::None => {}, + } + + Ok(self) + } + + /// Build and return the final MockOutput + pub(crate) fn build(self) -> MockOutput { + self.output + } + + /// Helper to extract a delayed field ID for a resource key (assuming value is exchanged). + fn get_delayed_field_id_from_resource( + &mut self, + view: &(impl TExecutorView + + TResourceGroupView), + key: &K, + maybe_tag: Option, + ) -> Result, usize>> { + let bytes = match maybe_tag { + None => try_with_status!( + view.get_resource_bytes(key, Some(&*MOCK_LAYOUT)), + "Failed to get resource bytes" + ), + Some(tag) => try_with_status!( + view.get_resource_from_group(key, &tag, Some(&*MOCK_LAYOUT)), + "Failed to get resource bytes from group" + ), + } + .expect("In current tests, delayed field is always initialized"); + + if maybe_tag.is_some() { + // TODO: test metadata. + self.output + .group_reads_needing_exchange + .insert(key.clone(), StateValueMetadata::none()); + } else { + self.output.reads_needing_exchange.insert( + key.clone(), + (StateValueMetadata::none(), Arc::new(MOCK_LAYOUT.clone())), + ); + } + + Ok(deserialize_to_delayed_field_id(&bytes) + .expect("Must deserialize delayed field tuple") + .0) + } + + /// Perform a delayed field read and update the output accordingly. + /// Returns an error ExecutionStatus if the read fails. 
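+    ///
+    /// Sketch of the flow (illustrative): deserialize the `DelayedFieldID`
+    /// from `bytes`, fetch its value via `view.get_delayed_field_value`, and
+    /// record `(id, value, key)` in `delayed_field_reads`.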
+ fn add_delayed_field_from_read_result( + &mut self, + view: &impl TExecutorView, + key: &K, + bytes: &[u8], + ) -> Result<(), ExecutionStatus, usize>> { + let id = deserialize_to_delayed_field_id(bytes) + .expect("Must deserialize delayed field tuple") + .0; + + let v = try_with_status!( + view.get_delayed_field_value(&id), + "Failed to get delayed field value" + ); + + let value = v.into_aggregator_value().unwrap(); + self.output + .delayed_field_reads + .push((id, value, key.clone())); + Ok(()) + } + + /// Add a normal read result + fn add_read_result(&mut self, result: Option>) { + self.output.read_results.push(result); + } +} + +impl MockOutput { + // Helper method to create an empty MockOutput with common settings + pub(crate) fn skipped_output(error_msg: Option) -> Self { + Self { + writes: vec![], + aggregator_v1_writes: vec![], + group_writes: vec![], + module_writes: vec![], + deltas: vec![], + events: vec![], + read_results: vec![], + delayed_field_reads: vec![], + module_read_results: vec![], + read_group_size_or_metadata: vec![], + materialized_delta_writes: OnceCell::new(), + patched_resource_write_set: OnceCell::new(), + total_gas: 0, + called_write_summary: OnceCell::new(), + skipped: true, + maybe_error_msg: error_msg, + reads_needing_exchange: HashMap::new(), + group_reads_needing_exchange: HashMap::new(), + } + } + + // Helper method to create a MockOutput with an error message + pub(crate) fn with_error(error: impl std::fmt::Display) -> Self { + Self::skipped_output(Some(format!("{}", error))) + } + + // Helper method to create a MockOutput with a discard code + pub(crate) fn with_discard_code(code: StatusCode) -> Self { + Self::skipped_output(Some(format!("Discarded with code: {:?}", code))) + } +} + +impl TransactionOutput for MockOutput +where + K: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + Debug + 'static, + E: Send + Sync + Debug + Clone + TransactionEvent + 'static, +{ + type Txn = MockTransaction; + + // TODO[agg_v2](tests): Assigning MoveTypeLayout as None for all the writes for now. + // That means, the resources do not have any DelayedFields embedded in them. + // Change it to test resources with DelayedFields as well. + fn resource_write_set(&self) -> Vec<(K, Arc, Option>)> { + self.writes + .iter() + .map(|(key, value, maybe_layout)| { + (key.clone(), Arc::new(value.clone()), maybe_layout.clone()) + }) + .collect() + } + + fn module_write_set(&self) -> Vec> { + self.module_writes.clone() + } + + // Aggregator v1 writes are included in resource_write_set for tests (writes are produced + // for all keys including ones for v1_aggregators without distinguishing). + fn aggregator_v1_write_set(&self) -> BTreeMap { + self.aggregator_v1_writes.clone().into_iter().collect() + } + + fn aggregator_v1_delta_set(&self) -> Vec<(K, DeltaOp)> { + if !self.deltas.is_empty() && self.deltas[0].2.is_none() { + // When testing with delayed fields the Option is Some(id, success). + self.deltas + .iter() + .map(|(k, delta, _)| (k.clone(), *delta)) + .collect() + } else { + vec![] + } + } + + fn delayed_field_change_set(&self) -> BTreeMap> { + // TODO: also test creation of delayed fields. 
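+        // The Option in each delta entry doubles as a mode marker: Some((id,
+        // success)) means delayed-field testing (handled here), while None
+        // means AggregatorV1 (handled in aggregator_v1_delta_set above).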
+ if !self.deltas.is_empty() && self.deltas[0].2.is_some() { + self.deltas + .iter() + .filter_map(|(_, delta, maybe_id)| { + let (id, success) = maybe_id.unwrap(); + let (delta, _, _) = delta.into_inner(); + success.then(|| { + ( + id, + DelayedChange::Apply(DelayedApplyChange::AggregatorDelta { + delta: DeltaWithMax::new(delta, u128::MAX), + }), + ) + }) + }) + .collect() + } else { + BTreeMap::new() + } + } + + fn reads_needing_delayed_field_exchange( + &self, + ) -> Vec<( + ::Key, + StateValueMetadata, + Arc, + )> { + self.reads_needing_exchange + .iter() + .map(|(key, (metadata, layout))| (key.clone(), metadata.clone(), layout.clone())) + .collect() + } + + fn group_reads_needing_delayed_field_exchange( + &self, + ) -> Vec<(::Key, StateValueMetadata)> { + self.group_reads_needing_exchange + .iter() + .map(|(key, metadata)| (key.clone(), metadata.clone())) + .collect() + } + + fn resource_group_write_set( + &self, + ) -> Vec<( + K, + ValueType, + ResourceGroupSize, + BTreeMap>)>, + )> { + self.group_writes.clone() + } + + fn skip_output() -> Self { + Self::skipped_output(None) + } + + fn discard_output(discard_code: StatusCode) -> Self { + Self::with_discard_code(discard_code) + } + + fn output_approx_size(&self) -> u64 { + // TODO add block output limit testing + 0 + } + + fn get_write_summary( + &self, + ) -> HashSet< + crate::types::InputOutputKey< + ::Key, + ::Tag, + >, + > { + _ = self.called_write_summary.set(()); + HashSet::new() + } + + fn materialize_agg_v1( + &self, + _view: &impl TAggregatorV1View::Key>, + ) { + // TODO[agg_v2](tests): implement this method and compare + // against sequential execution results v. aggregator v1. + } + + // TODO[agg_v2](tests): Currently, appending None to all events, which means none of the + // events have aggregators. Test it with aggregators as well. + fn get_events(&self) -> Vec<(E, Option)> { + self.events.iter().map(|e| (e.clone(), None)).collect() + } + + fn incorporate_materialized_txn_output( + &self, + aggregator_v1_writes: Vec<(::Key, WriteOp)>, + patched_resource_write_set: Vec<( + ::Key, + ::Value, + )>, + _patched_events: Vec<::Event>, + ) -> Result<(), PanicError> { + assert_ok!(self + .patched_resource_write_set + .set(patched_resource_write_set.clone().into_iter().collect())); + assert_ok!(self.materialized_delta_writes.set(aggregator_v1_writes)); + // TODO: Also test patched events. + Ok(()) + } + + fn set_txn_output_for_non_dynamic_change_set(&self) { + // No compatibility issues here since the move-vm doesn't use the dynamic flag. + } + + fn fee_statement(&self) -> FeeStatement { + // First argument is supposed to be total (not important for the test though). + // Next two arguments are different kinds of execution gas that are counted + // towards the block limit. We split the total into two pieces for these arguments. + // TODO: add variety to generating fee statement based on total gas. 
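+        // (Illustrative check: total_gas = 7 splits into 3 and 4; since
+        // n / 2 + (n + 1) / 2 == n for any u64 n, the two execution-gas
+        // components always sum back to the total.)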
+ FeeStatement::new( + self.total_gas, + self.total_gas / 2, + (self.total_gas + 1) / 2, + 0, + 0, + ) + } + + fn is_retry(&self) -> bool { + self.skipped + } + + fn has_new_epoch_event(&self) -> bool { + false + } + + fn is_success(&self) -> bool { + !self.skipped + } +} + +#[derive(Clone, Debug)] +pub(crate) struct MockEvent { + event_data: Vec, +} + +impl TransactionEvent for MockEvent { + fn get_event_data(&self) -> &[u8] { + &self.event_data + } + + fn set_event_data(&mut self, event_data: Vec) { + self.event_data = event_data; + } +} + +pub(crate) struct MockTask { + phantom_data: PhantomData<(K, E)>, +} + +impl MockTask { + pub fn new() -> Self { + Self { + phantom_data: PhantomData, + } + } +} + +impl ExecutorTask for MockTask +where + K: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + Debug + 'static, + E: Send + Sync + Debug + Clone + TransactionEvent + 'static, +{ + type Error = usize; + type Output = MockOutput; + type Txn = MockTransaction; + + fn init(_environment: &AptosEnvironment, _state_view: &impl TStateView) -> Self { + Self::new() + } + + fn execute_transaction( + &self, + view: &(impl TExecutorView + + TResourceGroupView + + AptosCodeStorage + + BlockSynchronizationKillSwitch), + txn: &Self::Txn, + txn_idx: TxnIndex, + ) -> ExecutionStatus { + match txn { + MockTransaction::Write { + incarnation_counter, + incarnation_behaviors, + delta_test_kind, + } => { + // Use incarnation counter value as an index to determine the read- + // and write-sets of the execution. Increment incarnation counter to + // simulate dynamic behavior when there are multiple possible read- + // and write-sets (i.e. each are selected round-robin). + let idx = incarnation_counter.fetch_add(1, Ordering::SeqCst); + let behavior = &incarnation_behaviors[idx % incarnation_behaviors.len()]; + + // Initialize the builder and use the railway pattern to execute builder operations. 
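+                // (Each and_then below runs only while no earlier step has
+                // failed; the first error status short-circuits the rest,
+                // analogous to Result::and_then. See BuilderOperation at the
+                // bottom of this file.)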
+ let mut builder = + MockOutputBuilder::from_mock_incarnation(behavior, *delta_test_kind); + let builder_result = BuilderOperation::new(&mut builder) + .and_then(|b| b.add_module_reads(view, &behavior.module_reads)) + .and_then(|b| { + b.add_resource_reads( + view, + &behavior.resource_reads, + *delta_test_kind == DeltaTestKind::DelayedFields, + ) + }) + .and_then(|b| { + b.add_group_reads( + view, + &behavior.group_reads, + *delta_test_kind == DeltaTestKind::DelayedFields, + ) + }) + .and_then(|b| b.add_group_queries(view, &behavior.group_queries)) + .and_then(|b| { + b.add_group_writes( + view, + &behavior.group_writes, + *delta_test_kind == DeltaTestKind::DelayedFields, + txn_idx, + ) + }) + .and_then(|b| { + b.add_resource_writes( + view, + &behavior.resource_writes, + *delta_test_kind == DeltaTestKind::DelayedFields, + txn_idx, + ) + }) + .and_then(|b| b.add_deltas(view, &behavior.deltas, *delta_test_kind)) + .finish(); + + // Use the direct return variant for ExecutionStatus functions + try_with_direct!(builder_result); + + ExecutionStatus::Success(builder.build()) + }, + MockTransaction::SkipRest(gas) => { + let mut mock_output = MockOutput::skip_output(); + mock_output.total_gas = *gas; + ExecutionStatus::SkipRest(mock_output) + }, + MockTransaction::Abort => ExecutionStatus::Abort(txn_idx as usize), + MockTransaction::InterruptRequested => { + while !view.interrupt_requested() {} + ExecutionStatus::SkipRest(MockOutput::skip_output()) + }, + } + } + + fn is_transaction_dynamic_change_set_capable(_txn: &Self::Txn) -> bool { + true + } +} + +/// Railway-oriented pattern wrapper for builder operations +/// +/// This implements a simple railway-oriented pattern for chaining operations +/// that might fail, allowing for a cleaner code flow. +struct BuilderOperation<'a, K: Clone + Debug, E: Clone> { + builder: &'a mut MockOutputBuilder, + status: Option, usize>>, +} + +impl<'a, K: Clone + Debug, E: Clone> BuilderOperation<'a, K, E> { + fn new(builder: &'a mut MockOutputBuilder) -> Self { + Self { + builder, + status: None, + } + } + + fn and_then(mut self, op: F) -> Self + where + F: FnOnce( + &mut MockOutputBuilder, + ) + -> Result<&mut MockOutputBuilder, ExecutionStatus, usize>>, + { + if self.status.is_none() { + if let Err(status) = op(self.builder) { + self.status = Some(status); + } + } + self + } + + fn finish( + self, + ) -> Result<&'a mut MockOutputBuilder, ExecutionStatus, usize>> { + match self.status { + None => Ok(self.builder), + Some(status) => Err(status), + } + } +} diff --git a/aptos-move/block-executor/src/proptest_types/mod.rs b/aptos-move/block-executor/src/proptest_types/mod.rs index b90ad8dae83fa..17f3f0694e17a 100644 --- a/aptos-move/block-executor/src/proptest_types/mod.rs +++ b/aptos-move/block-executor/src/proptest_types/mod.rs @@ -5,5 +5,12 @@ pub(crate) mod baseline; pub mod bencher; #[cfg(test)] -mod tests; +mod delta_tests; +#[cfg(test)] +mod group_tests; +pub(crate) mod mock_executor; +#[cfg(test)] +mod module_tests; +#[cfg(test)] +mod resource_tests; pub(crate) mod types; diff --git a/aptos-move/block-executor/src/proptest_types/module_tests.rs b/aptos-move/block-executor/src/proptest_types/module_tests.rs new file mode 100644 index 0000000000000..825178221552d --- /dev/null +++ b/aptos-move/block-executor/src/proptest_types/module_tests.rs @@ -0,0 +1,165 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. 
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{
+    proptest_types::{
+        baseline::BaselineOutput,
+        mock_executor::{MockEvent, MockTask},
+        resource_tests::{
+            create_executor_thread_pool, execute_block_parallel, get_gas_limit_variants,
+        },
+        types::{
+            key_to_mock_module_id, KeyType, MockTransaction, TransactionGen, TransactionGenParams,
+        },
+    },
+    task::ExecutorTask,
+    txn_provider::default::DefaultTxnProvider,
+};
+use aptos_types::state_store::MockStateView;
+use fail::FailScenario;
+use move_core_types::language_storage::ModuleId;
+use proptest::{collection::vec, prelude::*, strategy::ValueTree, test_runner::TestRunner};
+use test_case::test_case;
+
+enum ModuleTestType {
+    // All transactions publish modules, and all accesses are module reads.
+    AllTransactionsAndAccesses,
+    // All transactions publish modules, but some accesses are not module reads.
+    AllTransactionsMixedAccesses,
+    // Some transactions publish modules and contain module reads. Other
+    // transactions do not publish modules and do not contain module reads.
+    MixedTransactionsMixedAccesses,
+}
+
+fn execute_module_tests(
+    universe_size: usize,
+    transaction_count: usize,
+    use_gas_limit: bool,
+    modules_test_type: ModuleTestType,
+    num_executions: usize,
+    num_random_generations: usize,
+) where
+    MockTask<KeyType<[u8; 32]>, MockEvent>:
+        ExecutorTask<Txn = MockTransaction<KeyType<[u8; 32]>, MockEvent>>,
+{
+    let scenario = FailScenario::setup();
+    assert!(fail::has_failpoints());
+    fail::cfg("module_test", "return").unwrap();
+
+    let executor_thread_pool = create_executor_thread_pool();
+    let mut runner = TestRunner::default();
+
+    let gas_limits = get_gas_limit_variants(use_gas_limit, transaction_count);
+    for maybe_block_gas_limit in gas_limits {
+        // Run the test cases directly
+        for _ in 0..num_random_generations {
+            // Generate universe
+            let universe = vec(any::<[u8; 32]>(), universe_size)
+                .new_tree(&mut runner)
+                .expect("creating universe should succeed")
+                .current();
+
+            // Generate transactions based on parameters
+            let transaction_strategy = match modules_test_type {
+                ModuleTestType::AllTransactionsAndAccesses => vec(
+                    any_with::<TransactionGen<[u8; 32]>>(
+                        TransactionGenParams::new_dynamic_modules_only(),
+                    ),
+                    transaction_count,
+                ),
+                ModuleTestType::AllTransactionsMixedAccesses
+                | ModuleTestType::MixedTransactionsMixedAccesses => vec(
+                    any_with::<TransactionGen<[u8; 32]>>(
+                        TransactionGenParams::new_dynamic_with_modules(),
+                    ),
+                    transaction_count,
+                ),
+            };
+
+            let transaction_gen = transaction_strategy
+                .new_tree(&mut runner)
+                .expect("creating transactions should succeed")
+                .current();
+
+            // Convert transactions to use modules. For mixed transactions, we convert every
+            // fifth transaction to use modules.
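+            // (Illustrative: with transaction_count = 10 in the mixed case,
+            // only indices 0 and 5 publish modules; the rest stay plain.)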
+ let transactions: Vec, MockEvent>> = transaction_gen + .into_iter() + .enumerate() + .map(|(i, txn_gen)| { + if i % 5 == 0 + || !matches!( + modules_test_type, + ModuleTestType::MixedTransactionsMixedAccesses + ) + { + txn_gen.materialize_modules(&universe) + } else { + txn_gen.materialize(&universe) + } + }) + .collect(); + + let txn_provider = DefaultTxnProvider::new_without_info(transactions); + let state_view = MockStateView::empty(); + + // Generate all potential module IDs that could be used in the tests + let all_module_ids = generate_all_potential_module_ids(&universe); + + // Run tests with fail point enabled to test the version metadata + for _ in 0..num_executions { + let output = execute_block_parallel::< + MockTransaction, MockEvent>, + MockStateView>, + DefaultTxnProvider, MockEvent>>, + >( + executor_thread_pool.clone(), + maybe_block_gas_limit, + &txn_provider, + &state_view, + Some(&all_module_ids), + false, + ); + + BaselineOutput::generate(txn_provider.get_txns(), maybe_block_gas_limit) + .assert_parallel_output(&output); + } + } + } + scenario.teardown(); +} + +// Generate all potential module IDs that could be used in the tests +fn generate_all_potential_module_ids(universe: &[[u8; 32]]) -> Vec { + universe + .iter() + .map(|k| key_to_mock_module_id(&KeyType(*k), universe.len())) + .collect() +} + +// Test cases with various parameters +#[test_case(50, 100, false, ModuleTestType::AllTransactionsAndAccesses, 2, 3; "basic modules only test v1")] +#[test_case(50, 100, false, ModuleTestType::MixedTransactionsMixedAccesses, 2, 3; "basic mixed txn test with modules v1")] +#[test_case(50, 100, true, ModuleTestType::AllTransactionsAndAccesses, 2, 3; "modules only with gas limit")] +#[test_case(50, 100, false, ModuleTestType::AllTransactionsMixedAccesses, 2, 3; "mixed access with modules test")] +#[test_case(50, 100, true, ModuleTestType::AllTransactionsMixedAccesses, 2, 3; "mixed access with modules with gas limit")] +#[test_case(10, 1000, false, ModuleTestType::AllTransactionsAndAccesses, 2, 2; "small universe modules only")] +#[test_case(10, 1000, false, ModuleTestType::AllTransactionsMixedAccesses, 2, 2; "small universe mixed access with modules")] +fn module_transaction_tests( + universe_size: usize, + transaction_count: usize, + use_gas_limit: bool, + modules_test_type: ModuleTestType, + num_executions: usize, + num_random_generations: usize, +) { + execute_module_tests( + universe_size, + transaction_count, + use_gas_limit, + modules_test_type, + num_executions, + num_random_generations, + ); +} diff --git a/aptos-move/block-executor/src/proptest_types/resource_tests.rs b/aptos-move/block-executor/src/proptest_types/resource_tests.rs new file mode 100644 index 0000000000000..063a6dbf17a4b --- /dev/null +++ b/aptos-move/block-executor/src/proptest_types/resource_tests.rs @@ -0,0 +1,286 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + code_cache_global_manager::AptosModuleCacheManagerGuard, + executor::BlockExecutor, + proptest_types::{ + baseline::BaselineOutput, + mock_executor::{MockEvent, MockOutput, MockTask}, + types::{KeyType, MockTransaction, TransactionGen, TransactionGenParams, MAX_GAS_PER_TXN}, + }, + task::ExecutorTask, + txn_commit_hook::NoOpTransactionCommitHook, + txn_provider::{default::DefaultTxnProvider, TxnProvider}, +}; +use aptos_types::{ + block_executor::{ + config::BlockExecutorConfig, transaction_slice_metadata::TransactionSliceMetadata, + }, + state_store::{state_value::StateValue, MockStateView, TStateView}, + transaction::{BlockExecutableTransaction as Transaction, BlockOutput}, + vm::modules::AptosModuleExtension, +}; +use move_core_types::language_storage::ModuleId; +use move_vm_runtime::Module; +use move_vm_types::code::ModuleCode; +use num_cpus; +use proptest::{ + collection::vec, + prelude::*, + sample::Index, + strategy::{Strategy, ValueTree}, + test_runner::TestRunner, +}; +use rand::Rng; +use std::{fmt::Debug, sync::Arc}; +use test_case::test_matrix; + +pub(crate) fn get_gas_limit_variants( + use_gas_limit: bool, + transaction_count: usize, +) -> Vec> { + if use_gas_limit { + vec![ + Some(rand::thread_rng().gen_range(0, (transaction_count as u64) * MAX_GAS_PER_TXN / 2)), + Some(0), + ] + } else { + vec![None] + } +} + +pub(crate) fn create_executor_thread_pool() -> Arc { + Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(num_cpus::get()) + .build() + .unwrap(), + ) +} + +/// Populates a module cache manager guard with empty modules for testing. +/// This function creates empty modules for each ModuleId in the provided list and adds them to the guard's module cache. +/// +/// # Arguments +/// * `guard` - The AptosModuleCacheManagerGuard to populate with empty modules +/// * `module_ids` - A slice of ModuleIds to create empty modules for +/// +/// # Returns +/// The number of modules successfully added to the cache +pub(crate) fn populate_guard_with_modules( + guard: &mut AptosModuleCacheManagerGuard<'_>, + module_ids: &[ModuleId], +) { + for module_id in module_ids { + // Create an empty module for testing with Module::new_for_test + let module = Module::new_for_test(module_id.clone()); + + // Serialize the module + let mut serialized_bytes = Vec::new(); + module + .serialize(&mut serialized_bytes) + .expect("Failed to serialize compiled module"); + + // Create a ModuleCode::verified instance with the module + let module_code = Arc::new(ModuleCode::from_arced_verified( + Arc::new(module), + Arc::new(AptosModuleExtension::new(StateValue::new_legacy( + serialized_bytes.into(), + ))), + )); + + // Add the module to the cache + guard + .module_cache_mut() + .insert(module_id.clone(), module_code); + } +} + +pub(crate) fn execute_block_parallel( + executor_thread_pool: Arc, + block_gas_limit: Option, + txn_provider: &Provider, + data_view: &ViewType, + all_module_ids: Option<&[ModuleId]>, + block_stm_v2: bool, +) -> Result, MockEvent>>, ()> +where + TxnType: Transaction> + Debug + Clone + Send + Sync + 'static, + ViewType: TStateView + Sync + 'static, + Provider: TxnProvider + Sync + 'static, + MockTask, MockEvent>: ExecutorTask, +{ + let mut guard = AptosModuleCacheManagerGuard::none(); + + // If all_module_ids is provided, populate the guard with empty modules + if let Some(module_ids) = all_module_ids { + populate_guard_with_modules(&mut guard, module_ids); + } + + let config = 
BlockExecutorConfig::new_maybe_block_limit(num_cpus::get(), block_gas_limit); + let block_executor = BlockExecutor::< + TxnType, + MockTask, MockEvent>, + ViewType, + NoOpTransactionCommitHook, MockEvent>, usize>, + Provider, + >::new(config, executor_thread_pool, None); + + if block_stm_v2 { + block_executor.execute_transactions_parallel_v2(txn_provider, data_view, &mut guard) + } else { + block_executor.execute_transactions_parallel( + txn_provider, + data_view, + &TransactionSliceMetadata::unknown(), + &mut guard, + ) + } +} + +pub(crate) fn generate_universe_and_transactions( + runner: &mut TestRunner, + universe_size: usize, + transaction_count: usize, + is_dynamic: bool, +) -> (Vec<[u8; 32]>, Vec>) { + let universe = vec(any::<[u8; 32]>(), universe_size) + .new_tree(runner) + .expect("creating universe should succeed") + .current(); + + let transaction_strategy = if is_dynamic { + vec( + any_with::>(TransactionGenParams::new_dynamic()), + transaction_count, + ) + } else { + vec(any::>(), transaction_count) + }; + + let transaction_gen = transaction_strategy + .new_tree(runner) + .expect("creating transactions should succeed") + .current(); + + (universe, transaction_gen) +} + +pub(crate) fn run_transactions_resources( + universe_size: usize, + transaction_count: usize, + abort_count: usize, + skip_rest_count: usize, + use_gas_limit: bool, + is_dynamic: bool, + num_executions: usize, + num_random_generations: usize, +) { + let executor_thread_pool = create_executor_thread_pool(); + let mut runner = TestRunner::default(); + + let gas_limits = get_gas_limit_variants(use_gas_limit, transaction_count); + + // Run the test cases directly + for idx_generation in 0..num_random_generations { + // Generate universe and transactions + let (universe, transaction_gen) = generate_universe_and_transactions( + &mut runner, + universe_size, + transaction_count, + is_dynamic, + ); + + // Generate abort and skip_rest transaction indices + let abort_strategy = vec(any::(), abort_count); + let skip_rest_strategy = vec(any::(), skip_rest_count); + + let abort_transactions = abort_strategy + .new_tree(&mut runner) + .expect("creating abort transactions should succeed") + .current(); + + let skip_rest_transactions = skip_rest_strategy + .new_tree(&mut runner) + .expect("creating skip_rest transactions should succeed") + .current(); + + // Create transactions + let mut transactions: Vec, MockEvent>> = transaction_gen + .into_iter() + .map(|txn_gen| txn_gen.materialize(&universe)) + .collect(); + + // Apply modifications to transactions + let length = transactions.len(); + for i in abort_transactions { + *transactions.get_mut(i.index(length)).unwrap() = MockTransaction::Abort; + } + for i in skip_rest_transactions { + *transactions.get_mut(i.index(length)).unwrap() = MockTransaction::SkipRest(0); + } + + let txn_provider = DefaultTxnProvider::new_without_info(transactions); + let state_view = MockStateView::empty(); + for idx_execution in 0..num_executions { + for maybe_block_gas_limit in &gas_limits { + if maybe_block_gas_limit.is_some_and(|v| v == 0) + && (idx_execution > 0 || idx_generation > 0) + { + // For 0 gas limit tests, run fewer configurations. 
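+                    // (With a block gas limit of 0 the executor commits almost
+                    // nothing, so repeated generations/executions would add
+                    // little extra coverage.)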
+ continue; + } + for block_stm_v2 in [false, true] { + let output = execute_block_parallel::< + MockTransaction, MockEvent>, + MockStateView>, + DefaultTxnProvider, MockEvent>>, + >( + executor_thread_pool.clone(), + *maybe_block_gas_limit, + &txn_provider, + &state_view, + None, + block_stm_v2, + ); + + BaselineOutput::generate(txn_provider.get_txns(), *maybe_block_gas_limit) + .assert_parallel_output(&output); + } + } + } + } +} + +#[test_matrix( + 100, 3000, 0, 0, [false, true], [false, true], 6, 5; "varying_incarnation_behavior_gas_limit" +)] +#[test_matrix( + 50, 500, [0, 3, 200], [0, 3, 50], [false, true], [false, true], 5, 3; "with_mixed_abort_skip_rest" +)] +#[test_matrix( + [10, 20], 1000, 0, 0, [false, true], [false, true], 10, 3; "contended" +)] +fn resource_transaction_tests( + universe_size: usize, + transaction_count: usize, + abort_count: usize, + skip_rest_count: usize, + use_gas_limit: bool, + is_dynamic: bool, + num_random_generations: usize, + num_executions: usize, +) { + run_transactions_resources( + universe_size, + transaction_count, + abort_count, + skip_rest_count, + use_gas_limit, + is_dynamic, + num_executions, + num_random_generations, + ); +} diff --git a/aptos-move/block-executor/src/proptest_types/tests.rs b/aptos-move/block-executor/src/proptest_types/tests.rs index cf38694168a9d..082b05a79afa0 100644 --- a/aptos-move/block-executor/src/proptest_types/tests.rs +++ b/aptos-move/block-executor/src/proptest_types/tests.rs @@ -72,7 +72,7 @@ fn run_transactions( .unwrap(), ); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); for _ in 0..num_repeat { let mut guard = AptosModuleCacheManagerGuard::none(); @@ -203,7 +203,7 @@ fn deltas_writes_mixed_with_block_gas_limit(num_txns: usize, maybe_block_gas_lim .into_iter() .map(|txn_gen| txn_gen.materialize_with_deltas(&universe, 15, false)) .collect(); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); let data_view = DeltaDataView::> { phantom: PhantomData, @@ -266,7 +266,7 @@ fn deltas_resolver_with_block_gas_limit(num_txns: usize, maybe_block_gas_limit: .into_iter() .map(|txn_gen| txn_gen.materialize_with_deltas(&universe, 15, false)) .collect(); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); let executor_thread_pool = Arc::new( rayon::ThreadPoolBuilder::new() @@ -391,7 +391,7 @@ fn publishing_fixed_params_with_block_gas_limit( .unwrap(), ); - let txn_provider = DefaultTxnProvider::new(transactions.clone()); + let txn_provider = DefaultTxnProvider::new_without_info(transactions.clone()); // Confirm still no intersection let mut guard = AptosModuleCacheManagerGuard::none(); let output = BlockExecutor::< @@ -440,7 +440,7 @@ fn publishing_fixed_params_with_block_gas_limit( .unwrap(), ); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); for _ in 0..200 { let mut guard = AptosModuleCacheManagerGuard::none(); @@ -515,7 +515,7 @@ fn non_empty_group( txn_gen.materialize_groups::<[u8; 32], MockEvent>(&key_universe, group_size_pcts) }) .collect(); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); let data_view = NonEmptyGroupDataView::> { group_keys: key_universe[(key_universe_len - 3)..key_universe_len] diff --git 
a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index cfed8fdfa688b..2139f583badf1 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -2,19 +2,11 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::task::{ExecutionStatus, ExecutorTask, TransactionOutput}; -use aptos_aggregator::{ - delayed_change::DelayedChange, - delta_change_set::{delta_add, delta_sub, serialize, DeltaOp}, - resolver::TAggregatorV1View, -}; -use aptos_mvhashmap::types::TxnIndex; +use aptos_aggregator::delta_change_set::{delta_add, delta_sub, serialize, DeltaOp}; use aptos_types::{ account_address::AccountAddress, contract_event::TransactionEvent, - error::PanicError, executable::ModulePath, - fee_statement::FeeStatement, on_chain_config::CurrentTimeMicroseconds, state_store::{ errors::StateViewError, @@ -23,26 +15,17 @@ use aptos_types::{ StateViewId, TStateView, }, transaction::BlockExecutableTransaction as Transaction, - write_set::{TransactionWrite, WriteOp, WriteOpKind}, -}; -use aptos_vm_environment::environment::AptosEnvironment; -use aptos_vm_types::{ - module_and_script_storage::code_storage::AptosCodeStorage, - module_write_set::ModuleWrite, - resolver::{ - BlockSynchronizationKillSwitch, ResourceGroupSize, TExecutorView, TResourceGroupView, - }, - resource_group_adapter::{ - decrement_size_for_remove_tag, group_tagged_resource_size, increment_size_for_add_tag, - }, + write_set::{TransactionWrite, WriteOpKind}, }; +use aptos_vm_types::module_write_set::ModuleWrite; use bytes::Bytes; -use claims::{assert_ge, assert_le, assert_ok}; +use claims::{assert_ge, assert_le}; use move_core_types::{ - ident_str, identifier::IdentStr, language_storage::ModuleId, value::MoveTypeLayout, + identifier::{IdentStr, Identifier}, + language_storage::ModuleId, }; -use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; -use once_cell::sync::OnceCell; +use move_vm_runtime::Module; +use move_vm_types::delayed_values::delayed_field_id::{DelayedFieldID, ExtractUniqueIndex}; use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*, proptest, sample::Index}; use proptest_derive::Arbitrary; use std::{ @@ -50,10 +33,7 @@ use std::{ fmt::Debug, hash::{Hash, Hasher}, marker::PhantomData, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, + sync::{atomic::AtomicUsize, Arc}, }; // Should not be possible to overflow or underflow, as each delta is at most 100 in the tests. @@ -92,27 +72,49 @@ where pub(crate) struct NonEmptyGroupDataView { pub(crate) group_keys: HashSet, + // When we are testing with delayed fields, currently deletion is not supported, + // so we need to return for each key that can contain a delayed field. for groups, + // the reserved tag is the only such key, and we simply return a value for all + // non-group keys to ensure the test runs. + pub(crate) delayed_field_testing: bool, +} + +pub(crate) fn default_group_map() -> BTreeMap { + let bytes: Bytes = bcs::to_bytes(&( + STORAGE_AGGREGATOR_VALUE, + // u32::MAX represents storage version. 
+ u32::MAX, + )) + .unwrap() + .into(); + + BTreeMap::from([(RESERVED_TAG, bytes)]) } impl TStateView for NonEmptyGroupDataView where - K: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + 'static, + K: Debug + PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + 'static, { type Key = K; // Contains mock storage value with a non-empty group (w. value at RESERVED_TAG). fn get_state_value(&self, key: &K) -> Result, StateViewError> { - if self.group_keys.contains(key) { - let group: BTreeMap = BTreeMap::from([(RESERVED_TAG, vec![0].into())]); - - let bytes = bcs::to_bytes(&group).unwrap(); - Ok(Some(StateValue::new_with_metadata( - bytes.into(), - raw_metadata(5), - ))) - } else { - Ok(None) - } + Ok(self + .group_keys + .contains(key) + .then(|| { + let bytes = bcs::to_bytes(&default_group_map()).unwrap(); + StateValue::new_with_metadata(bytes.into(), raw_metadata(5)) + }) + .or_else(|| { + self.delayed_field_testing.then(|| { + StateValue::new_legacy(serialize_delayed_field_tuple(&( + STORAGE_AGGREGATOR_VALUE, + // u32::MAX represents storage version. + u32::MAX, + ))) + }) + })) } fn id(&self) -> StateViewId { @@ -132,16 +134,11 @@ where pub(crate) struct KeyType( /// Wrapping the types used for testing to add ModulePath trait implementation (below). pub K, - /// The bool field determines for testing purposes, whether the key will be interpreted - /// as a module access path. In this case, if a module path is both read and written - /// during parallel execution, ModulePathReadWrite must be returned and the - /// block execution must fall back to the sequential execution. - pub bool, ); impl ModulePath for KeyType { fn is_module_path(&self) -> bool { - self.1 + false } fn from_address_and_module_name(_address: &AccountAddress, _module_name: &IdentStr) -> Self { @@ -150,7 +147,7 @@ impl ModulePath for KeyType } // TODO: this is now very similar to WriteOp, should be a wrapper and remove boilerplate below. -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub(crate) struct ValueType { /// Wrapping the types used for testing to add TransactionWrite trait implementation (below). bytes: Option, @@ -168,21 +165,6 @@ impl Clone for ValueType { } } -impl Arbitrary for ValueType { - type Parameters = (); - type Strategy = BoxedStrategy; - - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - vec(any::(), 17) - .prop_map(|mut v| { - let use_value = v[0] < 128; - v.resize(16, 0); - ValueType::from_value(v, use_value) - }) - .boxed() - } -} - impl ValueType { pub(crate) fn new( bytes: Option, @@ -267,21 +249,28 @@ impl TransactionWrite for ValueType { } fn set_bytes(&mut self, bytes: Bytes) { - self.bytes = bytes.into(); + self.bytes = Some(bytes); } } #[derive(Clone, Copy)] pub(crate) struct TransactionGenParams { - /// Each transaction's read-set consists of between 1 and read_size-1 many reads. + /// Each transaction's read-set consists of between 1 and read_size many reads. read_size: usize, - /// Each mock execution will produce between 1 and output_size-1 many writes and deltas. + /// Each mock execution will produce between 1 and output_size many writes and deltas. output_size: usize, /// The number of different incarnation behaviors that a mock execution of the transaction /// may exhibit. For instance, incarnation_alternatives = 1 corresponds to a "static" /// mock execution behavior regardless of the incarnation, while value > 1 may lead to "dynamic", /// i.e. different behavior when executing different incarnations of the transaction. 
incarnation_alternatives: usize, + // TODO(BlockSTMv2): add a parameter to control the range of possible values, which is + // necessary to better cover value validation in BlockSTMv2. Currently, certain bugs can + // only be discovered by fixed behavior (incarnation_alternatives = 1) tests, since they + // write the same value with a different version (incarnation number). This change should + // be coupled with the refactor of mock incarnation generation to make sure it does not + // interfere with other logic, such as allowing deletions or deltas (which currently are + // hackily determined by the bits of the value). } #[derive(Arbitrary, Debug, Clone)] @@ -289,13 +278,13 @@ pub(crate) struct TransactionGenParams { pub(crate) struct TransactionGen> + Arbitrary + Clone + Debug + Eq + 'static> { /// Generate keys for possible read-sets of the transaction based on the above parameters. #[proptest( - strategy = "vec(vec(any::(), 1..params.read_size), params.incarnation_alternatives)" + strategy = "vec(vec(any::(), 1..=params.read_size), params.incarnation_alternatives)" )] reads: Vec>, /// Generate keys and values for possible write-sets based on above transaction gen parameters. /// Based on how the test is configured, some of these "writes" will convert to deltas. #[proptest( - strategy = "vec(vec((any::(), any::()), 1..params.output_size), \ + strategy = "vec(vec((any::(), any::()), 1..=params.output_size), \ params.incarnation_alternatives)" )] modifications: Vec>, @@ -322,18 +311,31 @@ pub(crate) struct TransactionGen> + Arbitrary + Clone + Debug + /// first and also records the latest incarnations of each transaction (that is committed). /// Then we can generate the baseline by sequentially executing the behavior prescribed for /// those latest incarnations. +/// +/// TODO(BlockSTMv2): Mock incarnation & behavior generation should also be separated out +/// and refactored into e.g. a builder pattern. In particular, certain materialization methods +/// transform generated resource reads and writes into group or module reads and writes. +/// It would be more natural to maintain an internal builder state of the mock transaction +/// generation process and then finalize it into the desired format. Additionally, the +/// internal fields should contain structs instead of less readable tuples. #[derive(Clone, Debug)] pub(crate) struct MockIncarnation { /// A vector of keys to be read during mock incarnation execution. - pub(crate) reads: Vec, + /// bool indicates that the path contains deltas, i.e. AggregatorV1 or DelayedFields. + pub(crate) resource_reads: Vec<(K, bool)>, /// A vector of keys and corresponding values to be written during mock incarnation execution. - pub(crate) writes: Vec<(K, ValueType)>, - pub(crate) group_reads: Vec<(K, u32)>, - pub(crate) group_writes: Vec<(K, StateValueMetadata, HashMap)>, + /// bool indicates that the path contains deltas, i.e. AggregatorV1 or DelayedFields. + pub(crate) resource_writes: Vec<(K, ValueType, bool)>, + pub(crate) group_reads: Vec<(K, u32, bool)>, + pub(crate) group_writes: Vec<(K, StateValueMetadata, HashMap)>, + // For testing get_module_or_build_with and insert_verified_module interfaces. + pub(crate) module_reads: Vec, + pub(crate) module_writes: Vec>, /// Keys to query group size for - false is querying size, true is querying metadata. pub(crate) group_queries: Vec<(K, bool)>, - /// A vector of keys and corresponding deltas to be produced during mock incarnation execution. 
- pub(crate) deltas: Vec<(K, DeltaOp)>, + /// A vector of keys and corresponding deltas to be produced during mock incarnation + /// execution. For delayed fields in groups, the Option is set to Some(tag). + pub(crate) deltas: Vec<(K, DeltaOp, Option)>, /// A vector of events. pub(crate) events: Vec, metadata_seeds: [u64; 3], @@ -346,19 +348,21 @@ impl MockIncarnation { /// into another one with group_reads / group_writes / group_queries set. Hence, the constructor /// here always sets it to an empty vector. pub(crate) fn new_with_metadata_seeds( - reads: Vec, - writes: Vec<(K, ValueType)>, - deltas: Vec<(K, DeltaOp)>, + resource_reads: Vec<(K, bool)>, + resource_writes: Vec<(K, ValueType, bool)>, + deltas: Vec<(K, DeltaOp, Option)>, events: Vec, metadata_seeds: [u64; 3], gas: u64, ) -> Self { Self { - reads, - writes, + resource_reads, + resource_writes, group_reads: vec![], group_writes: vec![], group_queries: vec![], + module_reads: vec![], + module_writes: vec![], deltas, events, metadata_seeds, @@ -367,18 +371,20 @@ impl MockIncarnation { } pub(crate) fn new( - reads: Vec, - writes: Vec<(K, ValueType)>, - deltas: Vec<(K, DeltaOp)>, + resource_reads: Vec<(K, bool)>, + resource_writes: Vec<(K, ValueType, bool)>, + deltas: Vec<(K, DeltaOp, Option)>, events: Vec, gas: u64, ) -> Self { Self { - reads, - writes, + resource_reads, + resource_writes, group_reads: vec![], group_writes: vec![], group_queries: vec![], + module_reads: vec![], + module_writes: vec![], deltas, events, metadata_seeds: [0; 3], @@ -387,6 +393,13 @@ impl MockIncarnation { } } +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) enum DeltaTestKind { + DelayedFields, + AggregatorV1, + None, +} + /// A mock transaction that could be used to test the correctness and throughput of the system. /// To test transaction behavior where reads and writes might be dynamic (depend on previously /// read values), different read and writes sets are generated and used depending on the incarnation @@ -403,6 +416,8 @@ pub(crate) enum MockTransaction { /// A vector of mock behaviors prescribed for each incarnation of the transaction, chosen /// round robin depending on the incarnation counter value). incarnation_behaviors: Vec>, + /// If we are testing with deltas, are we testing delayed_fields? (or AggregatorV1). + delta_test_kind: DeltaTestKind, }, /// Skip the execution of trailing transactions. SkipRest(u64), @@ -415,6 +430,7 @@ impl MockTransaction { Self::Write { incarnation_counter: Arc::new(AtomicUsize::new(0)), incarnation_behaviors: vec![behavior], + delta_test_kind: DeltaTestKind::None, } } @@ -422,9 +438,30 @@ impl MockTransaction { Self::Write { incarnation_counter: Arc::new(AtomicUsize::new(0)), incarnation_behaviors: behaviors, + delta_test_kind: DeltaTestKind::None, } } + pub(crate) fn with_delayed_fields_testing(mut self) -> Self { + if let Self::Write { + delta_test_kind, .. + } = &mut self + { + *delta_test_kind = DeltaTestKind::DelayedFields; + } + self + } + + pub(crate) fn with_aggregator_v1_testing(mut self) -> Self { + if let Self::Write { + delta_test_kind, .. 
+        } = &mut self
+        {
+            *delta_test_kind = DeltaTestKind::AggregatorV1;
+        }
+        self
+    }
+
     pub(crate) fn into_behaviors(self) -> Vec<MockIncarnation<K, E>> {
         match self {
             Self::Write {
@@ -453,6 +490,17 @@
     fn user_txn_bytes_len(&self) -> usize {
         0
     }
+
+    fn from_txn(txn: aptos_types::transaction::Transaction) -> Self {
+        match txn {
+            aptos_types::transaction::Transaction::StateCheckpoint(_)
+            | aptos_types::transaction::Transaction::BlockEpilogue(_) => {
+                let behavior = MockIncarnation::new(vec![], vec![], vec![], vec![], 0);
+                Self::from_behavior(behavior)
+            },
+            _ => unreachable!(),
+        }
+    }
 }
 
 // TODO: try and test different strategies.
@@ -464,34 +512,104 @@ impl TransactionGenParams {
             incarnation_alternatives: 5,
         }
     }
+
+    // The read and write will be converted to a module read and write.
+    pub fn new_dynamic_modules_only() -> Self {
+        TransactionGenParams {
+            read_size: 1,
+            output_size: 1,
+            incarnation_alternatives: 5,
+        }
+    }
+
+    // Last read and write will be converted to module reads and writes.
+    pub fn new_dynamic_with_modules() -> Self {
+        TransactionGenParams {
+            read_size: 3,
+            output_size: 3,
+            incarnation_alternatives: 5,
+        }
+    }
 }
 
 impl Default for TransactionGenParams {
     fn default() -> Self {
         TransactionGenParams {
             read_size: 10,
-            output_size: 5,
+            output_size: 1,
             incarnation_alternatives: 1,
         }
     }
 }
 
-// TODO: move generation to separate file.
-// TODO: consider adding writes to reads (read-before-write). Similar behavior to the Move-VM
-// and may force more testing (since we check read results).
+/// A simple enum to represent either a write or a delta operation result
+enum WriteDeltaVariant<W, D> {
+    Write(W),
+    Delta(D),
+}
+
+fn is_delta_on(index: usize, delta_threshold: Option<usize>) -> bool {
+    delta_threshold.is_some_and(|threshold| threshold <= index)
+}
+
 impl<V: Into<Vec<u8>> + Arbitrary + Clone + Debug + Eq + Sync + Send> TransactionGen<V> {
+    /// Determines whether to generate a delta operation or a write operation.
+    ///
+    /// # Arguments
+    /// * `is_delta_path` - attempt to generate a delta value first
+    /// * `value` - the value to process
+    /// * `key` - the key that the resulting write or delta applies to
+    /// * `allow_deletes` - whether deletion operations are allowed
+    ///
+    /// # Returns
+    /// Either a delta operation or a write operation
+    fn generate_write_or_delta<K: Clone + Hash + Debug + Eq + Ord>(
+        is_delta_path: bool,
+        value: &V,
+        key: KeyType<K>,
+        allow_deletes: bool,
+    ) -> WriteDeltaVariant<(KeyType<K>, ValueType), (KeyType<K>, DeltaOp)> {
+        // First check if this should be a delta
+        if is_delta_path {
+            let val_u128 = ValueType::from_value(value.clone(), true)
+                .as_u128()
+                .unwrap()
+                .unwrap();
+
+            // Not all values become deltas - some remain as normal writes
+            if val_u128 % 10 != 0 {
+                let delta = if val_u128 % 10 < 5 {
+                    delta_sub(val_u128 % 100, u128::MAX)
+                } else {
+                    delta_add(val_u128 % 100, u128::MAX)
+                };
+                return WriteDeltaVariant::Delta((key, delta));
+            }
+        }
+
+        // Otherwise create a normal write
+        let val_u128 = ValueType::from_value(value.clone(), true)
+            .as_u128()
+            .unwrap()
+            .unwrap();
+        let is_deletion = allow_deletes && val_u128 % 23 == 0;
+        let mut write_value = ValueType::from_value(value.clone(), !is_deletion);
+        write_value.metadata = raw_metadata((val_u128 >> 64) as u64);
+
+        WriteDeltaVariant::Write((key, write_value))
+    }
+
     fn writes_and_deltas_from_gen<K: Clone + Hash + Debug + Eq + Ord>(
+        // TODO: disentangle writes and deltas.
universe: &[K], gen: Vec>, - module_write_fn: &dyn Fn(usize) -> bool, - delta_fn: &dyn Fn(usize, &V) -> Option, allow_deletes: bool, + delta_threshold: Option, ) -> Vec<( - /* writes = */ Vec<(KeyType, ValueType)>, + /* writes = */ Vec<(KeyType, ValueType, bool)>, /* deltas = */ Vec<(KeyType, DeltaOp)>, )> { - let mut ret = vec![]; + let mut ret = Vec::with_capacity(gen.len()); for write_gen in gen.into_iter() { let mut keys_modified = BTreeSet::new(); let mut incarnation_writes = vec![]; @@ -501,18 +619,19 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio let key = universe[i].clone(); if !keys_modified.contains(&key) { keys_modified.insert(key.clone()); - match delta_fn(i, &value) { - Some(delta) => incarnation_deltas.push((KeyType(key, false), delta)), - None => { - // One out of 23 writes will be a deletion - let val_u128 = ValueType::from_value(value.clone(), true) - .as_u128() - .unwrap() - .unwrap(); - let is_deletion = allow_deletes && val_u128 % 23 == 0; - let mut value = ValueType::from_value(value.clone(), !is_deletion); - value.metadata = raw_metadata((val_u128 >> 64) as u64); - incarnation_writes.push((KeyType(key, module_write_fn(i)), value)); + + let is_delta_path = is_delta_on(i, delta_threshold); + match Self::generate_write_or_delta( + is_delta_path, + &value, + KeyType(key), + allow_deletes, + ) { + WriteDeltaVariant::Write((key, value)) => { + incarnation_writes.push((key, value, is_delta_path)); + }, + WriteDeltaVariant::Delta((key, delta)) => { + incarnation_deltas.push((key, delta)); }, } } @@ -525,15 +644,15 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio fn reads_from_gen( universe: &[K], gen: Vec>, - module_read_fn: &dyn Fn(usize) -> bool, - ) -> Vec>> { + delta_threshold: Option, + ) -> Vec, bool)>> { let mut ret = vec![]; for read_gen in gen.into_iter() { - let mut incarnation_reads: Vec> = vec![]; + let mut incarnation_reads: Vec<(KeyType, bool)> = vec![]; for idx in read_gen.into_iter() { let i = idx.index(universe.len()); let key = universe[i].clone(); - incarnation_reads.push(KeyType(key, module_read_fn(i))); + incarnation_reads.push((KeyType(key), is_delta_on(i, delta_threshold))); } ret.push(incarnation_reads); } @@ -569,20 +688,17 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio >( self, universe: &[K], - module_read_fn: &dyn Fn(usize) -> bool, - module_write_fn: &dyn Fn(usize) -> bool, - delta_fn: &dyn Fn(usize, &V) -> Option, allow_deletes: bool, + delta_threshold: Option, ) -> MockTransaction, E> { - let reads = Self::reads_from_gen(universe, self.reads, &module_read_fn); + let reads = Self::reads_from_gen(universe, self.reads, delta_threshold); let gas = Self::gas_from_gen(self.gas); let behaviors = Self::writes_and_deltas_from_gen( universe, self.modifications, - &module_write_fn, - &delta_fn, allow_deletes, + delta_threshold, ) .into_iter() .zip(reads) @@ -603,7 +719,11 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio MockIncarnation::new_with_metadata_seeds( reads, writes, - deltas, + // materialize_groups sets the Option to a tag as needed. + deltas + .into_iter() + .map(|(k, delta)| (k, delta, None)) + .collect(), vec![], // events metadata_seeds, gas, @@ -620,22 +740,43 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio >( self, universe: &[K], - // Are writes and reads module access (same access path). 
- module_access: (bool, bool), ) -> MockTransaction, E> { - let is_module_read = |_| -> bool { module_access.1 }; - let is_module_write = |_| -> bool { module_access.0 }; - let is_delta = |_, _: &V| -> Option { None }; - // Module deletion isn't allowed. - let allow_deletes = !(module_access.0 || module_access.1); + self.new_mock_write_txn(universe, true, None) + } + + pub(crate) fn materialize_modules< + K: Clone + Hash + Debug + Eq + Ord, + E: Send + Sync + Debug + Clone + TransactionEvent, + >( + self, + universe: &[K], + ) -> MockTransaction, E> { + let universe_len = universe.len(); - self.new_mock_write_txn( - universe, - &is_module_read, - &is_module_write, - &is_delta, - allow_deletes, - ) + let mut behaviors = self + .new_mock_write_txn(universe, false, None) + .into_behaviors(); + + behaviors.iter_mut().for_each(|behavior| { + // Handle writes + let (key_to_convert, mut value, _) = behavior.resource_writes.pop().unwrap(); + let module_id = key_to_mock_module_id(&key_to_convert, universe_len); + + // Serialize a module and store it in bytes so deserialization can succeed. + let mut serialized_bytes = vec![]; + Module::new_for_test(module_id.clone()) + .serialize(&mut serialized_bytes) + .expect("Failed to serialize compiled module"); + value.bytes = Some(serialized_bytes.into()); + + behavior.module_writes = vec![ModuleWrite::new(module_id, value)]; + + // Handle reads. + let (key_to_convert, _) = behavior.resource_reads.pop().unwrap(); + behavior.module_reads = vec![key_to_mock_module_id(&key_to_convert, universe_len)]; + }); + + MockTransaction::from_behaviors(behaviors) } // Generates a mock txn without group reads/writes and converts it to have group @@ -647,27 +788,18 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio self, universe: &[K], group_size_query_pcts: [Option; 3], + delta_threshold: Option, ) -> MockTransaction, E> { let universe_len = universe.len(); assert_ge!(universe_len, 3, "Universe must have size >= 3"); - let is_module_read = |_| -> bool { false }; - let is_module_write = |_| -> bool { false }; - let is_delta = |_, _: &V| -> Option { None }; - let group_size_query_indicators = Self::group_size_indicator_from_gen(self.group_size_indicators.clone()); let mut behaviors = self - .new_mock_write_txn( - &universe[0..universe.len() - 3], - &is_module_read, - &is_module_write, - &is_delta, - false, - ) + .new_mock_write_txn(&universe[0..universe.len() - 3], false, delta_threshold) .into_behaviors(); - let key_to_group = |key: &KeyType| -> Option<(usize, u32)> { + let key_to_group = |key: &KeyType| -> Option<(usize, u32, bool)> { let mut hasher = DefaultHasher::new(); key.hash(&mut hasher); let bytes = hasher.finish().to_be_bytes(); @@ -676,55 +808,93 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio let group_key_idx = bytes[1] % 4; - (group_key_idx < 3).then_some((group_key_idx as usize, tag)) + // 3/4 of the time key will map to group - rest are normal resource accesses. 
+ (group_key_idx < 3).then_some((group_key_idx as usize, tag, group_key_idx > 0)) }; for (behavior_idx, behavior) in behaviors.iter_mut().enumerate() { let mut reads = vec![]; let mut group_reads = vec![]; - for read_key in behavior.reads.clone() { - assert!(read_key != KeyType(universe[universe_len - 1].clone(), false)); - assert!(read_key != KeyType(universe[universe_len - 2].clone(), false)); - assert!(read_key != KeyType(universe[universe_len - 3].clone(), false)); + for (read_key, contains_delta) in behavior.resource_reads.clone() { + assert!(read_key != KeyType(universe[universe_len - 1].clone())); + assert!(read_key != KeyType(universe[universe_len - 2].clone())); + assert!(read_key != KeyType(universe[universe_len - 3].clone())); match key_to_group(&read_key) { - Some((idx, tag)) => group_reads.push(( - KeyType(universe[universe_len - 1 - idx].clone(), false), - tag, - )), - None => reads.push(read_key), + Some((idx, tag, has_delayed_field)) => { + // Custom logic for has_delayed_fields for groups: shadowing + // the flag of the original read. + group_reads.push(( + KeyType(universe[universe_len - 1 - idx].clone()), + tag, + // Reserved tag is configured to have delayed fields. + has_delayed_field && tag == RESERVED_TAG && delta_threshold.is_some(), + )) + }, + None => reads.push((read_key, contains_delta)), } } let mut writes = vec![]; let mut group_writes = vec![]; let mut inner_ops = vec![HashMap::new(); 3]; - for (write_key, value) in behavior.writes.clone() { + for (write_key, value, has_delayed_field) in behavior.resource_writes.clone() { match key_to_group(&write_key) { - Some((key_idx, tag)) => { + Some((key_idx, tag, has_delayed_field)) => { + // Same shadowing of has_delayed_field variable and logic as above. if tag != RESERVED_TAG || !value.is_deletion() { - inner_ops[key_idx].insert(tag, value); + inner_ops[key_idx] + .insert(tag, (value, has_delayed_field && tag == RESERVED_TAG)); } }, - None => writes.push((write_key, value)), + None => { + writes.push((write_key, value, has_delayed_field)); + }, } } for (idx, inner_ops) in inner_ops.into_iter().enumerate() { if !inner_ops.is_empty() { group_writes.push(( - KeyType(universe[universe_len - 1 - idx].clone(), false), + KeyType(universe[universe_len - 1 - idx].clone()), raw_metadata(behavior.metadata_seeds[idx]), inner_ops, )); } } - // Group test does not handle deltas (different view, no default storage value). - assert!(behavior.deltas.is_empty()); - behavior.reads = reads; - behavior.writes = writes; + // Group test does not handle deltas for aggregator v1(different view, no default + // storage value). However, it does handle deltas (added below) for delayed fields. + assert!(delta_threshold.is_some() || behavior.deltas.is_empty()); + behavior.resource_reads = reads; + behavior.resource_writes = writes; behavior.group_reads = group_reads; behavior.group_writes = group_writes; + if delta_threshold.is_some() { + // TODO: We can have a threshold over which we create a delta for RESERVED_TAG, + // because currently only RESERVED_TAG in a group contains a delayed field. 
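Aside: the hash-based `key_to_group` mapping above distills to a small self-contained sketch (illustrative names and tag derivation, not the exact aptos-core code). The important property is determinism: a key always hashes to the same bucket and tag, so reads and writes of that key consistently land in the same group slot, and three of the four buckets map to groups, so roughly 3/4 of accesses become group accesses.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Returns Some((group_index, tag, has_delayed_field)) for keys that map to one
/// of the three group buckets, and None for keys that stay normal resources.
fn sketch_key_to_group<K: Hash>(key: &K) -> Option<(usize, u32, bool)> {
    let mut hasher = DefaultHasher::new();
    key.hash(&mut hasher);
    let bytes = hasher.finish().to_be_bytes();
    // Tag derivation is illustrative; the real code also derives it from the hash.
    let tag = (bytes[0] % 16) as u32;
    let group_key_idx = bytes[1] % 4;
    // Buckets 0..=2 are groups; bucket 3 remains a plain resource access.
    (group_key_idx < 3).then_some((group_key_idx as usize, tag, group_key_idx > 0))
}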
+ let mut delta_for_keys = [false; 3]; + behavior.deltas = behavior + .deltas + .iter() + .filter_map(|(key, delta, maybe_tag)| { + if let Some((idx, _, has_delayed_field)) = key_to_group(key) { + if has_delayed_field && !delta_for_keys[idx] { + delta_for_keys[idx] = true; + Some(( + KeyType(universe[universe_len - 1 - idx].clone()), + *delta, + Some(RESERVED_TAG), + )) + } else { + None + } + } else { + Some((key.clone(), *delta, *maybe_tag)) + } + }) + .collect(); + } + behavior.group_queries = group_size_query_pcts .iter() .enumerate() @@ -739,7 +909,7 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio }; (indicator < *size_query_pct).then(|| { ( - KeyType(universe[universe_len - 1 - idx].clone(), false), + KeyType(universe[universe_len - 1 - idx].clone()), // TODO: handle metadata queries more uniformly w. size. indicator % 2 == 0, ) @@ -750,7 +920,13 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio .collect(); } - MockTransaction::from_behaviors(behaviors) + // When delayed fields are not enabled, the flag is ignored, so we can always + // set with_delayed_fields here. + if delta_threshold.is_some() { + MockTransaction::from_behaviors(behaviors).with_delayed_fields_testing() + } else { + MockTransaction::from_behaviors(behaviors) + } } pub(crate) fn materialize_with_deltas< @@ -762,294 +938,9 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio delta_threshold: usize, allow_deletes: bool, ) -> MockTransaction, E> { - let is_module_read = |_| -> bool { false }; - let is_module_write = |_| -> bool { false }; - let is_delta = |i, v: &V| -> Option { - if i >= delta_threshold { - let val = ValueType::from_value(v.clone(), true) - .as_u128() - .unwrap() - .unwrap(); - if val % 10 == 0 { - None - } else if val % 10 < 5 { - Some(delta_sub(val % 100, u128::MAX)) - } else { - Some(delta_add(val % 100, u128::MAX)) - } - } else { - None - } - }; - - self.new_mock_write_txn( - universe, - &is_module_read, - &is_module_write, - &is_delta, - allow_deletes, - ) - } - - pub(crate) fn materialize_disjoint_module_rw< - K: Clone + Hash + Debug + Eq + Ord, - E: Send + Sync + Debug + Clone + TransactionEvent, - >( - self, - universe: &[K], - // keys generated with indices from read_threshold to write_threshold will be - // treated as module access only in reads. keys generated with indices from - // write threshold to universe.len() will be treated as module access only in - // writes. This way there will be module accesses but no intersection. - read_threshold: usize, - write_threshold: usize, - ) -> MockTransaction, E> { - assert!(read_threshold < universe.len()); - assert!(write_threshold > read_threshold); - assert!(write_threshold < universe.len()); - - let is_module_read = |i| -> bool { i >= read_threshold && i < write_threshold }; - let is_module_write = |i| -> bool { i >= write_threshold }; - let is_delta = |_, _: &V| -> Option { None }; - - self.new_mock_write_txn( - universe, - &is_module_read, - &is_module_write, - &is_delta, - false, // Module deletion isn't allowed - ) - } -} - -/////////////////////////////////////////////////////////////////////////// -// Mock transaction executor implementation. 
-/////////////////////////////////////////////////////////////////////////// - -pub(crate) struct MockTask { - phantom_data: PhantomData<(K, E)>, -} - -impl MockTask { - pub fn new() -> Self { - Self { - phantom_data: PhantomData, - } - } -} - -impl ExecutorTask for MockTask -where - K: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + Debug + 'static, - E: Send + Sync + Debug + Clone + TransactionEvent + 'static, -{ - type Error = usize; - type Output = MockOutput; - type Txn = MockTransaction; - - fn init(_environment: &AptosEnvironment, _state_view: &impl TStateView) -> Self { - Self::new() - } - - fn execute_transaction( - &self, - view: &(impl TExecutorView - + TResourceGroupView - + AptosCodeStorage - + BlockSynchronizationKillSwitch), - txn: &Self::Txn, - txn_idx: TxnIndex, - ) -> ExecutionStatus { - match txn { - MockTransaction::Write { - incarnation_counter, - incarnation_behaviors, - } => { - // Use incarnation counter value as an index to determine the read- - // and write-sets of the execution. Increment incarnation counter to - // simulate dynamic behavior when there are multiple possible read- - // and write-sets (i.e. each are selected round-robin). - let idx = incarnation_counter.fetch_add(1, Ordering::SeqCst); - - let behavior = &incarnation_behaviors[idx % incarnation_behaviors.len()]; - - // Reads - let mut read_results = vec![]; - for k in behavior.reads.iter() { - // TODO: later test errors as well? (by fixing state_view behavior). - // TODO: test aggregator reads. - if !k.is_module_path() { - // TODO: also prop test modules - match view.get_resource_bytes(k, None) { - Ok(v) => read_results.push(v.map(Into::into)), - Err(_) => read_results.push(None), - } - } - } - // Read from groups. - // TODO: also read group sizes (if there are any group reads). - for (group_key, resource_tag) in behavior.group_reads.iter() { - match view.get_resource_from_group(group_key, resource_tag, None) { - Ok(v) => read_results.push(v.map(Into::into)), - Err(_) => read_results.push(None), - } - } - - let read_group_size_or_metadata = behavior - .group_queries - .iter() - .map(|(group_key, query_metadata)| { - let res = if *query_metadata { - GroupSizeOrMetadata::Metadata( - view.get_resource_state_value_metadata(group_key) - .expect("Group must exist and size computation must succeed"), - ) - } else { - GroupSizeOrMetadata::Size( - view.resource_group_size(group_key) - .expect("Group must exist and size computation must succeed") - .get(), - ) - }; - - (group_key.clone(), res) - }) - .collect(); - - let mut group_writes = vec![]; - for (key, metadata, inner_ops) in behavior.group_writes.iter() { - let mut new_inner_ops = HashMap::new(); - let group_size = view.resource_group_size(key).unwrap(); - let mut new_group_size = view.resource_group_size(key).unwrap(); - for (tag, inner_op) in inner_ops.iter() { - let exists = view - .get_resource_from_group(key, tag, None) - .unwrap() - .is_some(); - assert!( - *tag != RESERVED_TAG || exists, - "RESERVED_TAG must always be present in groups in tests" - ); - - // inner op is either deletion or creation. 
- assert!(!inner_op.is_modification()); - - let maybe_op = if exists { - Some( - if inner_op.is_creation() - && (inner_op.bytes().unwrap()[0] % 4 < 3 - || *tag == RESERVED_TAG) - { - ValueType::new( - inner_op.bytes.clone(), - StateValueMetadata::none(), - WriteOpKind::Modification, - ) - } else { - ValueType::new( - None, - StateValueMetadata::none(), - WriteOpKind::Deletion, - ) - }, - ) - } else { - inner_op.is_creation().then(|| inner_op.clone()) - }; - - if let Some(new_inner_op) = maybe_op { - if exists { - let old_tagged_value_size = - view.resource_size_in_group(key, tag).unwrap(); - let old_size = - group_tagged_resource_size(tag, old_tagged_value_size).unwrap(); - // let _ = - // decrement_size_for_remove_tag(&mut new_group_size, old_size); - if decrement_size_for_remove_tag(&mut new_group_size, old_size) - .is_err() - { - // Check it only happens for speculative executions that may not - // commit by returning incorrect (empty) output. - return ExecutionStatus::Success(MockOutput::skip_output()); - } - } - if !new_inner_op.is_deletion() { - let new_size = group_tagged_resource_size( - tag, - inner_op.bytes.as_ref().unwrap().len(), - ) - .unwrap(); - if increment_size_for_add_tag(&mut new_group_size, new_size) - .is_err() - { - // Check it only happens for speculative executions that may not - // commit by returning incorrect (empty) output. - return ExecutionStatus::Success(MockOutput::skip_output()); - } - } - - new_inner_ops.insert(*tag, new_inner_op); - } - } - - if !new_inner_ops.is_empty() { - if group_size.get() > 0 - && new_group_size == ResourceGroupSize::zero_combined() - { - // TODO: reserved tag currently prevents this code from being run. - // Group got deleted. - group_writes.push(( - key.clone(), - ValueType::new(None, metadata.clone(), WriteOpKind::Deletion), - new_group_size, - new_inner_ops, - )); - } else { - let op_kind = if group_size.get() == 0 { - WriteOpKind::Creation - } else { - WriteOpKind::Modification - }; - - // Not testing metadata_op here, always modification. - group_writes.push(( - key.clone(), - ValueType::new(Some(Bytes::new()), metadata.clone(), op_kind), - new_group_size, - new_inner_ops, - )); - } - } - } - - // generate group_writes. 
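Aside: the size bookkeeping in the removed block above (this logic is relocated to the new proptest_types::mock_executor module) follows one pattern: every tagged-entry insertion or removal adjusts the group size with checked arithmetic, and a failure is treated as a speculative execution that will never commit, so the mock returns a benign empty output instead of aborting. A minimal sketch, with hypothetical helper names:

/// Applies a tagged-entry resize to a group size; None signals a speculative
/// inconsistency that the caller maps to MockOutput::skip_output().
fn sketch_checked_group_resize(
    group_size: u64,
    removed_entry: Option<u64>,
    added_entry: Option<u64>,
) -> Option<u64> {
    // Remove the old tagged entry, if one existed at this tag.
    let shrunk = match removed_entry {
        Some(old) => group_size.checked_sub(old)?,
        None => group_size,
    };
    // Add the new entry, unless the op was a deletion.
    match added_entry {
        Some(new) => shrunk.checked_add(new),
        None => Some(shrunk),
    }
}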
- ExecutionStatus::Success(MockOutput { - writes: behavior.writes.clone(), - group_writes, - deltas: behavior.deltas.clone(), - events: behavior.events.to_vec(), - read_results, - read_group_size_or_metadata, - materialized_delta_writes: OnceCell::new(), - total_gas: behavior.gas, - skipped: false, - }) - }, - MockTransaction::SkipRest(gas) => { - let mut mock_output = MockOutput::skip_output(); - mock_output.total_gas = *gas; - ExecutionStatus::SkipRest(mock_output) - }, - MockTransaction::Abort => ExecutionStatus::Abort(txn_idx as usize), - MockTransaction::InterruptRequested => { - while !view.interrupt_requested() {} - ExecutionStatus::SkipRest(MockOutput::skip_output()) - }, - } - } - - fn is_transaction_dynamic_change_set_capable(_txn: &Self::Txn) -> bool { - true + // Enable delta generation for this specific method + self.new_mock_write_txn(universe, allow_deletes, Some(delta_threshold)) + .with_aggregator_v1_testing() } } @@ -1063,232 +954,92 @@ pub(crate) enum GroupSizeOrMetadata { Metadata(Option), } -#[derive(Debug)] -pub(crate) struct MockOutput { - pub(crate) writes: Vec<(K, ValueType)>, - // Key, metadata_op, inner_ops - pub(crate) group_writes: Vec<(K, ValueType, ResourceGroupSize, HashMap)>, - pub(crate) deltas: Vec<(K, DeltaOp)>, - pub(crate) events: Vec, - pub(crate) read_results: Vec>>, - pub(crate) read_group_size_or_metadata: Vec<(K, GroupSizeOrMetadata)>, - pub(crate) materialized_delta_writes: OnceCell>, - pub(crate) total_gas: u64, - pub(crate) skipped: bool, +// Utility function to convert a key to a mock module ID. It hashes the key +// to compute a consistent mock account address, with a fixed "test" module name. +pub(crate) fn key_to_mock_module_id( + key: &KeyType, + universe_len: usize, +) -> ModuleId { + let mut hasher = DefaultHasher::new(); + key.hash(&mut hasher); + let idx = (hasher.finish() % universe_len as u64) as usize; + let mut addr = [0u8; AccountAddress::LENGTH]; + addr[AccountAddress::LENGTH - 1] = idx as u8; + addr[AccountAddress::LENGTH - 2] = (idx >> 8) as u8; + ModuleId::new(AccountAddress::new(addr), Identifier::new("test").unwrap()) } -impl TransactionOutput for MockOutput -where - K: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + Debug + 'static, - E: Send + Sync + Debug + Clone + TransactionEvent + 'static, -{ - type Txn = MockTransaction; - - // TODO[agg_v2](tests): Assigning MoveTypeLayout as None for all the writes for now. - // That means, the resources do not have any DelayedFields embedded in them. - // Change it to test resources with DelayedFields as well. - fn resource_write_set(&self) -> Vec<(K, Arc, Option>)> { - self.writes - .iter() - .filter(|(k, _)| !k.is_module_path()) - .cloned() - .map(|(k, v)| (k, Arc::new(v), None)) - .collect() - } - - fn module_write_set(&self) -> BTreeMap> { - self.writes - .iter() - .filter(|(k, _)| k.is_module_path()) - .map(|(k, v)| { - let dummy_id = ModuleId::new(AccountAddress::ONE, ident_str!("dummy").to_owned()); - let write = ModuleWrite::new(dummy_id, v.clone()); - (k.clone(), write) - }) - .collect() - } - - // Aggregator v1 writes are included in resource_write_set for tests (writes are produced - // for all keys including ones for v1_aggregators without distinguishing). - fn aggregator_v1_write_set(&self) -> BTreeMap { - BTreeMap::new() - } - - fn aggregator_v1_delta_set(&self) -> Vec<(K, DeltaOp)> { - self.deltas.clone() - } - - fn delayed_field_change_set(&self) -> BTreeMap> { - // TODO[agg_v2](tests): add aggregators V2 to the proptest? 
- BTreeMap::new() - } - - fn reads_needing_delayed_field_exchange( - &self, - ) -> Vec<( - ::Key, - StateValueMetadata, - Arc, - )> { - // TODO[agg_v2](tests): add aggregators V2 to the proptest? - Vec::new() - } - - fn group_reads_needing_delayed_field_exchange( - &self, - ) -> Vec<(::Key, StateValueMetadata)> { - // TODO[agg_v2](tests): add aggregators V2 to the proptest? - Vec::new() - } - - // TODO[agg_v2](tests): Currently, appending None to all events, which means none of the - // events have aggregators. Test it with aggregators as well. - fn get_events(&self) -> Vec<(E, Option)> { - self.events.iter().map(|e| (e.clone(), None)).collect() - } - - // TODO[agg_v2](cleanup) Using the concrete type layout here. Should we find a way to use generics? - fn resource_group_write_set( - &self, - ) -> Vec<( - K, - ValueType, - ResourceGroupSize, - BTreeMap>)>, - )> { - self.group_writes - .iter() - .cloned() - .map(|(group_key, metadata_v, group_size, inner_ops)| { - ( - group_key, - metadata_v, - group_size, - inner_ops.into_iter().map(|(k, v)| (k, (v, None))).collect(), - ) - }) - .collect() - } - - fn skip_output() -> Self { - Self { - writes: vec![], - group_writes: vec![], - deltas: vec![], - events: vec![], - read_results: vec![], - read_group_size_or_metadata: vec![], - materialized_delta_writes: OnceCell::new(), - total_gas: 0, - skipped: true, - } - } - - fn discard_output(_discard_code: move_core_types::vm_status::StatusCode) -> Self { - Self { - writes: vec![], - group_writes: vec![], - deltas: vec![], - events: vec![], - read_results: vec![], - read_group_size_or_metadata: vec![], - materialized_delta_writes: OnceCell::new(), - total_gas: 0, - skipped: true, - } - } - - fn materialize_agg_v1( - &self, - _view: &impl TAggregatorV1View::Key>, - ) { - // TODO[agg_v2](tests): implement this method and compare - // against sequential execution results v. aggregator v1. - } - - fn incorporate_materialized_txn_output( - &self, - aggregator_v1_writes: Vec<(::Key, WriteOp)>, - patched_resource_write_set: Vec<( - ::Key, - ::Value, - )>, - _patched_events: Vec<::Event>, - ) -> Result<(), PanicError> { - let resources: HashMap<::Key, ::Value> = - patched_resource_write_set.clone().into_iter().collect(); - for (key, _, size, _) in &self.group_writes { - let v = resources.get(key).unwrap(); - if v.is_deletion() { - assert_eq!(*size, ResourceGroupSize::zero_combined()); - } else { - assert_eq!( - size.get(), - resources.get(key).unwrap().bytes().map_or(0, |b| b.len()) as u64 - ); - } - } - - assert_ok!(self.materialized_delta_writes.set(aggregator_v1_writes)); - // TODO[agg_v2](tests): Set the patched resource write set and events. But that requires the function - // to take &mut self as input - Ok(()) - } - - fn set_txn_output_for_non_dynamic_change_set(&self) { - // TODO[agg_v2](tests): anything to be added here for tests? - } - - fn fee_statement(&self) -> FeeStatement { - // First argument is supposed to be total (not important for the test though). - // Next two arguments are different kinds of execution gas that are counted - // towards the block limit. We split the total into two pieces for these arguments. - // TODO: add variety to generating fee statement based on total gas. - FeeStatement::new( - self.total_gas, - self.total_gas / 2, - (self.total_gas + 1) / 2, - 0, - 0, - ) - } - - fn is_retry(&self) -> bool { - self.skipped - } +// ID is just the unique index as u128. 
+pub(crate) fn serialize_from_delayed_field_u128(value_or_id: u128, version: u32) -> Bytes { + let tuple = (value_or_id, version); + serialize_delayed_field_tuple(&tuple) +} - fn has_new_epoch_event(&self) -> bool { - false - } +pub(crate) fn serialize_from_delayed_field_id( + delayed_field_id: DelayedFieldID, + version: u32, +) -> Bytes { + let tuple = (delayed_field_id.extract_unique_index() as u128, version); + serialize_delayed_field_tuple(&tuple) +} - fn output_approx_size(&self) -> u64 { - // TODO add block output limit testing - 0 - } +fn serialize_delayed_field_tuple(value: &(u128, u32)) -> Bytes { + bcs::to_bytes(value) + .expect("Failed to serialize (u128, u32) tuple") + .into() +} - fn get_write_summary( - &self, - ) -> HashSet< - crate::types::InputOutputKey< - ::Key, - ::Tag, - >, - > { - HashSet::new() - } +/// The width of the delayed field is not used in the tests, and fixed as 8 for +/// all delayed field constructions. However, only the real ID is actually +/// serialized and deserialized (together with the version). +pub(crate) fn deserialize_to_delayed_field_u128(bytes: &[u8]) -> Result<(u128, u32), bcs::Error> { + bcs::from_bytes::<(u128, u32)>(bytes) } -#[derive(Clone, Debug)] -pub(crate) struct MockEvent { - event_data: Vec, +pub(crate) fn deserialize_to_delayed_field_id( + bytes: &[u8], +) -> Result<(DelayedFieldID, u32), bcs::Error> { + let (id, version) = bcs::from_bytes::<(u128, u32)>(bytes)?; + Ok((DelayedFieldID::from((id as u32, 8)), version)) } -impl TransactionEvent for MockEvent { - fn get_event_data(&self) -> &[u8] { - &self.event_data +#[cfg(test)] +mod tests { + use super::*; + use test_case::test_case; + + #[test_case((0u128, 0u32) ; "zero values")] + #[test_case((1u128, 42u32) ; "small values")] + #[test_case((u128::MAX, u32::MAX) ; "maximum values")] + #[test_case((12345678u128, 87654321u32) ; "large values")] + fn test_serialize_deserialize_delayed_field_tuple(tuple: (u128, u32)) { + // Serialize and then deserialize + let serialized = serialize_delayed_field_tuple(&tuple); + let deserialized = deserialize_to_delayed_field_u128(&serialized).unwrap(); + + assert_eq!( + tuple, deserialized, + "Serialization/deserialization failed for tuple ({}, {})", + tuple.0, tuple.1 + ); } - fn set_event_data(&mut self, event_data: Vec) { - self.event_data = event_data; + #[test] + fn test_deserialize_delayed_field_tuple_invalid_data() { + // Test with invalid data that's too short + let invalid_data = vec![1, 2, 3]; + let result = deserialize_to_delayed_field_u128(&invalid_data); + assert!( + result.is_err(), + "Expected deserialization to fail with too short data" + ); + + // Test with empty data + let empty_data: Vec = vec![]; + let result = deserialize_to_delayed_field_u128(&empty_data); + assert!( + result.is_err(), + "Expected deserialization to fail with empty data" + ); } } diff --git a/aptos-move/block-executor/src/scheduler.rs b/aptos-move/block-executor/src/scheduler.rs index 9ab50114d70fa..9c7fc32841902 100644 --- a/aptos-move/block-executor/src/scheduler.rs +++ b/aptos-move/block-executor/src/scheduler.rs @@ -645,6 +645,7 @@ impl Scheduler { !self.has_halted.swap(true, Ordering::SeqCst) } + #[inline] pub(crate) fn has_halted(&self) -> bool { self.has_halted.load(Ordering::Relaxed) } diff --git a/aptos-move/block-executor/src/scheduler_v2.rs b/aptos-move/block-executor/src/scheduler_v2.rs index 47f24623db383..de42ab8c30263 100644 --- a/aptos-move/block-executor/src/scheduler_v2.rs +++ b/aptos-move/block-executor/src/scheduler_v2.rs @@ -79,7 +79,7 @@ 
Interaction with Other Components: consumes the `AbortManager` to finalize these aborts and update dependencies. - **Worker Threads**: Continuously request tasks from `SchedulerV2` via `[SchedulerV2::next_task]`. They execute these tasks and report results (e.g., via `[SchedulerV2::finish_execution]`, - `[SchedulerV2::commit_hook_performed]`). + `[SchedulerV2::end_commit]`). Conceptual Execution Model: -------------------------- @@ -910,6 +910,7 @@ impl SchedulerV2 { /// is never aborted early to ensure its speculative writes are produced). /// - Otherwise (not halted, `incarnation > 0`), returns the result of /// `[ExecutionStatuses::already_started_abort]`. + #[inline] pub(crate) fn is_halted_or_aborted(&self, txn_idx: TxnIndex, incarnation: Incarnation) -> bool { if self.is_halted() { return true; diff --git a/aptos-move/block-executor/src/scheduler_wrapper.rs b/aptos-move/block-executor/src/scheduler_wrapper.rs index ad1dc8fceedc4..593b6041c4d4f 100644 --- a/aptos-move/block-executor/src/scheduler_wrapper.rs +++ b/aptos-move/block-executor/src/scheduler_wrapper.rs @@ -1,8 +1,11 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::scheduler::{DependencyResult, Scheduler, TWaitForDependency}; -use aptos_mvhashmap::types::TxnIndex; +use crate::{ + scheduler::{DependencyResult, Scheduler, TWaitForDependency}, + scheduler_v2::SchedulerV2, +}; +use aptos_mvhashmap::types::{Incarnation, TxnIndex}; use aptos_types::error::PanicError; use std::sync::atomic::{AtomicBool, Ordering}; @@ -15,10 +18,14 @@ pub(crate) enum SchedulerWrapper<'a> { // stored in SchedulerWrapper only for a write (it's never read), to simplify // the implementation in executor.rs and avoid passing atomic booleans. V1(&'a Scheduler, &'a AtomicBool), - // TODO(BlockSTMv2): connect v2. + V2(&'a SchedulerV2), } impl SchedulerWrapper<'_> { + pub(crate) fn is_v2(&self) -> bool { + matches!(self, SchedulerWrapper::V2(_)) + } + pub(crate) fn wake_dependencies_and_decrease_validation_idx( &self, txn_idx: TxnIndex, @@ -27,12 +34,14 @@ impl SchedulerWrapper<'_> { SchedulerWrapper::V1(scheduler, _) => { scheduler.wake_dependencies_and_decrease_validation_idx(txn_idx) }, + SchedulerWrapper::V2(_) => Ok(()), } } pub(crate) fn halt(&self) -> bool { match self { SchedulerWrapper::V1(scheduler, _) => scheduler.halt(), + SchedulerWrapper::V2(scheduler) => scheduler.halt(), } } @@ -42,6 +51,7 @@ impl SchedulerWrapper<'_> { scheduler.add_to_commit_queue(txn_idx); Ok(()) }, + SchedulerWrapper::V2(scheduler) => scheduler.end_commit(txn_idx), } } @@ -52,12 +62,15 @@ impl SchedulerWrapper<'_> { // setting the module read validation flag. 
skip_module_reads_validation.store(false, Ordering::Relaxed); }, + SchedulerWrapper::V2(_) => {}, } } - pub(crate) fn has_halted(&self) -> bool { + #[inline] + pub(crate) fn interrupt_requested(&self, txn_idx: TxnIndex, incarnation: Incarnation) -> bool { match self { SchedulerWrapper::V1(scheduler, _) => scheduler.has_halted(), + SchedulerWrapper::V2(scheduler) => scheduler.is_halted_or_aborted(txn_idx, incarnation), } } } @@ -72,6 +85,9 @@ impl TWaitForDependency for SchedulerWrapper<'_> { SchedulerWrapper::V1(scheduler, _) => { scheduler.wait_for_dependency(txn_idx, dep_txn_idx) }, + SchedulerWrapper::V2(_) => { + unreachable!("SchedulerV2 does not use TWaitForDependency trait") + }, } } } diff --git a/aptos-move/block-executor/src/task.rs b/aptos-move/block-executor/src/task.rs index 9af3d0c613682..bebdf7af66395 100644 --- a/aptos-move/block-executor/src/task.rs +++ b/aptos-move/block-executor/src/task.rs @@ -56,8 +56,7 @@ pub struct Accesses { } /// Trait for single threaded transaction executor. -// TODO: Sync should not be required. Sync is only introduced because this trait occurs as a phantom type of executor struct. -pub trait ExecutorTask: Sync { +pub trait ExecutorTask { /// Type of transaction and its associated key and value. type Txn: Transaction; @@ -109,9 +108,7 @@ pub trait TransactionOutput: Send + Sync + Debug { Option>, )>; - fn module_write_set( - &self, - ) -> BTreeMap<::Key, ModuleWrite<::Value>>; + fn module_write_set(&self) -> Vec::Value>>; fn aggregator_v1_write_set( &self, @@ -200,6 +197,9 @@ pub trait TransactionOutput: Send + Sync + Debug { /// Returns true iff it has a new epoch event. fn has_new_epoch_event(&self) -> bool; + /// Returns true iff the execution status is Keep(Success). + fn is_success(&self) -> bool; + /// Deterministic, but approximate size of the output, as /// before creating actual TransactionOutput, we don't know the exact size of it. /// diff --git a/aptos-move/block-executor/src/txn_last_input_output.rs b/aptos-move/block-executor/src/txn_last_input_output.rs index 37b29dfe488a6..b1e69e70e628e 100644 --- a/aptos-move/block-executor/src/txn_last_input_output.rs +++ b/aptos-move/block-executor/src/txn_last_input_output.rs @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - captured_reads::CapturedReads, + captured_reads::{CapturedReads, DataRead, ReadKind}, errors::ParallelBlockExecutionError, explicit_sync_wrapper::ExplicitSyncWrapper, task::{ExecutionStatus, TransactionOutput}, types::{InputOutputKey, ReadWriteSummary}, }; use aptos_logger::error; -use aptos_mvhashmap::types::TxnIndex; +use aptos_mvhashmap::{types::TxnIndex, MVHashMap}; use aptos_types::{ error::{code_invariant_error, PanicError}, fee_statement::FeeStatement, @@ -26,7 +26,7 @@ use move_core_types::{language_storage::ModuleId, value::MoveTypeLayout}; use move_vm_runtime::Module; use move_vm_types::delayed_values::delayed_field_id::DelayedFieldID; use std::{ - collections::{BTreeMap, HashSet}, + collections::HashSet, fmt::Debug, iter::{empty, Iterator}, sync::Arc, @@ -47,14 +47,6 @@ macro_rules! forward_on_success_or_skip_rest { }) }}; } - -pub(crate) enum KeyKind { - Resource, - // Contains the set of tags for the given group key. - Group(HashSet), - AggregatorV1, -} - pub struct TxnLastInputOutput, E: Debug> { inputs: Vec>>>, // txn_idx -> input. 
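Aside: the SchedulerWrapper changes above follow a plain enum-dispatch pattern; a minimal self-contained sketch (hypothetical types, not the real scheduler API) shows the shape. Operations that only exist for one scheduler version become explicit no-ops or unreachable arms for the other, so executor code can hold a single wrapper value instead of branching on the scheduler version at every call site.

enum SchedulerWrapperSketch<'a> {
    V1(&'a SchedV1),
    V2(&'a SchedV2),
}

struct SchedV1;
struct SchedV2;

impl SchedV1 {
    fn wake_dependencies(&self, _txn_idx: u32) -> Result<(), String> {
        Ok(())
    }
}

impl SchedulerWrapperSketch<'_> {
    fn is_v2(&self) -> bool {
        matches!(self, Self::V2(_))
    }

    fn wake_dependencies_and_decrease_validation_idx(&self, txn_idx: u32) -> Result<(), String> {
        match self {
            Self::V1(s) => s.wake_dependencies(txn_idx),
            // BlockSTMv2 tracks dependencies internally, so this is a no-op.
            Self::V2(_) => Ok(()),
        }
    }
}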
@@ -104,6 +96,31 @@ impl, E: Debug + Send + Clone> self.outputs[txn_idx as usize].store(Some(Arc::new(output))); } + pub fn fetch_exchanged_data( + &self, + key: &T::Key, + txn_idx: TxnIndex, + ) -> Result<(Arc, Arc), PanicError> { + self.inputs[txn_idx as usize].load().as_ref().map_or_else( + || { + Err(code_invariant_error( + "Read must be recorded before fetching exchanged data".to_string(), + )) + }, + |input| { + let data_read = input.get_by_kind(key, None, ReadKind::Value); + if let Some(DataRead::Versioned(_, value, Some(layout))) = data_read { + Ok((value, layout)) + } else { + Err(code_invariant_error(format!( + "Read value needing exchange {:?} not in Exchanged format", + data_read + ))) + } + }, + ) + } + pub(crate) fn read_set(&self, txn_idx: TxnIndex) -> Option>> { self.inputs[txn_idx as usize].load_full() } @@ -135,7 +152,7 @@ impl, E: Debug + Send + Clone> } } - /// Does a transaction at txn_idx have SkipRest or Abort status. + /// Does a transaction at txn_idx have SkipRest. pub(crate) fn block_skips_rest_at_idx(&self, txn_idx: TxnIndex) -> bool { matches!( self.outputs[txn_idx as usize] @@ -212,44 +229,46 @@ impl, E: Debug + Send + Clone> self.outputs[txn_idx as usize].load_full() } - // Extracts a set of resource paths (keys) written or updated during execution from transaction - // output, with corresponding KeyKind. If take_group_tags is true, the final HashSet - // of tags is moved for the group key - should be called once for each incarnation / record - // due to 'take'. if false, stored modified group resource tags in the group are cloned out. - pub(crate) fn modified_keys( + // BlockSTMv1 method, avoids cloning the tags by calling mark_estimate on reference. + pub(crate) fn mark_estimate_group_keys_and_tags( &self, + versioned_cache: &MVHashMap, txn_idx: TxnIndex, - take_group_tags: bool, - ) -> Option)>> { - let group_keys_and_tags: Vec<(T::Key, HashSet)> = if take_group_tags { - std::mem::take(&mut self.resource_group_keys_and_tags[txn_idx as usize].acquire()) - } else { - self.resource_group_keys_and_tags[txn_idx as usize] - .acquire() - .clone() - }; + ) { + for (key, tags) in self.resource_group_keys_and_tags[txn_idx as usize] + .acquire() + .dereference() + .iter() + { + versioned_cache + .group_data() + .mark_estimate(key, txn_idx, tags); + } + } + pub(crate) fn modified_group_keys(&self, txn_idx: TxnIndex) -> Vec<(T::Key, HashSet)> { + std::mem::take(&mut self.resource_group_keys_and_tags[txn_idx as usize].acquire()) + } + + // Extracts a set of resource paths (keys) written or updated during execution from + // transaction output. The group keys are not included, and the boolean indicates + // whether the resource is used as an AggregatorV1. 
+ pub(crate) fn modified_resource_keys( + &self, + txn_idx: TxnIndex, + ) -> Option> { self.outputs[txn_idx as usize] .load_full() .and_then(|txn_output| match txn_output.as_ref() { ExecutionStatus::Success(t) | ExecutionStatus::SkipRest(t) => Some( t.resource_write_set() .into_iter() - .map(|(k, _, _)| (k, KeyKind::Resource)) - .chain( - t.aggregator_v1_write_set() - .into_keys() - .map(|k| (k, KeyKind::AggregatorV1)), - ) + .map(|(k, _, _)| (k, false)) + .chain(t.aggregator_v1_write_set().into_keys().map(|k| (k, true))) .chain( t.aggregator_v1_delta_set() .into_iter() - .map(|(k, _)| (k, KeyKind::AggregatorV1)), - ) - .chain( - group_keys_and_tags - .into_iter() - .map(|(k, tags)| (k, KeyKind::Group(tags))), + .map(|(k, _)| (k, true)), ), ), ExecutionStatus::Abort(_) @@ -258,10 +277,7 @@ impl, E: Debug + Send + Clone> }) } - pub(crate) fn module_write_set( - &self, - txn_idx: TxnIndex, - ) -> BTreeMap> { + pub(crate) fn module_write_set(&self, txn_idx: TxnIndex) -> Vec> { use ExecutionStatus as E; match self.outputs[txn_idx as usize] @@ -275,7 +291,7 @@ impl, E: Debug + Send + Clone> | E::DelayedFieldsCodeInvariantError(_) | E::SpeculativeExecutionAbortError(_), ) - | None => BTreeMap::new(), + | None => Vec::new(), } } diff --git a/aptos-move/block-executor/src/txn_provider/blocking_txns_provider.rs b/aptos-move/block-executor/src/txn_provider/blocking_txns_provider.rs index c866271f41376..a2d9924047c7b 100644 --- a/aptos-move/block-executor/src/txn_provider/blocking_txns_provider.rs +++ b/aptos-move/block-executor/src/txn_provider/blocking_txns_provider.rs @@ -3,7 +3,7 @@ use crate::txn_provider::TxnProvider; use aptos_mvhashmap::types::TxnIndex; -use aptos_types::transaction::BlockExecutableTransaction as Transaction; +use aptos_types::transaction::{AuxiliaryInfo, BlockExecutableTransaction as Transaction}; use once_cell::sync::OnceCell; pub struct BlockingTxnProvider { @@ -32,4 +32,9 @@ impl TxnProvider for BlockingTxnProvider fn get_txn(&self, idx: TxnIndex) -> &T { self.txns[idx as usize].wait() } + + fn get_auxiliary_info(&self, _idx: TxnIndex) -> &AuxiliaryInfo { + // TODO: The whole struct seems to be dead code for now, implement this when necessary. 
+ unimplemented!() + } } diff --git a/aptos-move/block-executor/src/txn_provider/default.rs b/aptos-move/block-executor/src/txn_provider/default.rs index 3f7881971856b..f37c7636ea42b 100644 --- a/aptos-move/block-executor/src/txn_provider/default.rs +++ b/aptos-move/block-executor/src/txn_provider/default.rs @@ -3,20 +3,39 @@ use crate::txn_provider::TxnProvider; use aptos_mvhashmap::types::TxnIndex; -use aptos_types::transaction::BlockExecutableTransaction as Transaction; +use aptos_types::transaction::{AuxiliaryInfo, BlockExecutableTransaction as Transaction}; pub struct DefaultTxnProvider { - pub txns: Vec, + txns: Vec, + auxiliary_info: Vec, } impl DefaultTxnProvider { - pub fn new(txns: Vec) -> Self { - Self { txns } + pub fn new(txns: Vec, auxiliary_info: Vec) -> Self { + assert!(txns.len() == auxiliary_info.len()); + Self { + txns, + auxiliary_info, + } + } + + pub fn new_without_info(txns: Vec) -> Self { + let len = txns.len(); + let mut auxiliary_info = Vec::with_capacity(len); + auxiliary_info.resize(len, AuxiliaryInfo::new_empty()); + Self { + txns, + auxiliary_info, + } } pub fn get_txns(&self) -> &Vec { &self.txns } + + pub fn into_inner(self) -> (Vec, Vec) { + (self.txns, self.auxiliary_info) + } } impl TxnProvider for DefaultTxnProvider { @@ -27,12 +46,8 @@ impl TxnProvider for DefaultTxnProvider { fn get_txn(&self, idx: TxnIndex) -> &T { &self.txns[idx as usize] } -} - -impl Iterator for DefaultTxnProvider { - type Item = T; - fn next(&mut self) -> Option { - self.txns.pop() + fn get_auxiliary_info(&self, idx: TxnIndex) -> &AuxiliaryInfo { + &self.auxiliary_info[idx as usize] } } diff --git a/aptos-move/block-executor/src/txn_provider/mod.rs b/aptos-move/block-executor/src/txn_provider/mod.rs index 88a11f2478b1c..a20d71ea421ba 100644 --- a/aptos-move/block-executor/src/txn_provider/mod.rs +++ b/aptos-move/block-executor/src/txn_provider/mod.rs @@ -5,7 +5,7 @@ mod blocking_txns_provider; pub mod default; use aptos_mvhashmap::types::TxnIndex; -use aptos_types::transaction::BlockExecutableTransaction as Transaction; +use aptos_types::transaction::{AuxiliaryInfo, BlockExecutableTransaction as Transaction}; pub trait TxnProvider { /// Get total number of transactions @@ -13,4 +13,6 @@ pub trait TxnProvider { /// Get a reference of the txn object by its index. 
     fn get_txn(&self, idx: TxnIndex) -> &T;
+
+    fn get_auxiliary_info(&self, idx: TxnIndex) -> &AuxiliaryInfo;
 }
diff --git a/aptos-move/block-executor/src/unit_tests/mod.rs b/aptos-move/block-executor/src/unit_tests/mod.rs
index 3bba06ce5469d..35383b8f2fdab 100644
--- a/aptos-move/block-executor/src/unit_tests/mod.rs
+++ b/aptos-move/block-executor/src/unit_tests/mod.rs
@@ -10,9 +10,10 @@ use crate::{
     executor::BlockExecutor,
     proptest_types::{
         baseline::BaselineOutput,
+        mock_executor::{MockEvent, MockOutput, MockTask},
         types::{
-            DeltaDataView, KeyType, MockEvent, MockIncarnation, MockOutput, MockTask,
-            MockTransaction, NonEmptyGroupDataView, ValueType,
+            DeltaDataView, KeyType, MockIncarnation, MockTransaction, NonEmptyGroupDataView,
+            ValueType,
         },
     },
     scheduler::{
@@ -33,7 +34,8 @@ use aptos_types::{
     },
     contract_event::TransactionEvent,
     executable::ModulePath,
-    state_store::state_value::StateValueMetadata,
+    state_store::{state_value::StateValueMetadata, MockStateView},
+    transaction::AuxiliaryInfo,
     write_set::WriteOpKind,
 };
 use claims::{assert_matches, assert_ok};
@@ -48,23 +50,160 @@ use std::{
     sync::Arc,
 };

+#[test]
+fn test_block_epilogue_happy_path() {
+    let behavior = MockIncarnation::new(vec![], vec![], vec![], vec![], 10);
+    let t_0 = MockTransaction::from_behavior(behavior.clone());
+    let t_1 = MockTransaction::from_behavior(behavior);
+    let transactions = vec![t_0, t_1];
+
+    let executor_thread_pool = Arc::new(
+        rayon::ThreadPoolBuilder::new()
+            .num_threads(num_cpus::get())
+            .build()
+            .unwrap(),
+    );
+    let block_executor = BlockExecutor::<
+        MockTransaction, MockEvent>,
+        MockTask, MockEvent>,
+        MockStateView>,
+        NoOpTransactionCommitHook, MockEvent>, usize>,
+        DefaultTxnProvider, MockEvent>>,
+    >::new(
+        BlockExecutorConfig::new_no_block_limit(num_cpus::get()),
+        executor_thread_pool,
+        None,
+    );
+    let data_view = MockStateView::empty();
+
+    let aux_info = vec![AuxiliaryInfo::new_empty(), AuxiliaryInfo::new_empty()];
+
+    let txn_provider = DefaultTxnProvider::new(transactions, aux_info);
+    {
+        let mut guard = AptosModuleCacheManagerGuard::none();
+        let result = block_executor
+            .execute_transactions_sequential(
+                &txn_provider,
+                &data_view,
+                &TransactionSliceMetadata::block_from_u64(0, 1),
+                &mut guard,
+                false,
+            )
+            .unwrap();
+        let (output, block_epilogue_txn) = result.into_inner();
+        assert!(block_epilogue_txn.is_some());
+        assert_eq!(output.len(), 3);
+        assert!(!output[0].skipped);
+        assert!(!output[1].skipped);
+        assert!(!output[2].skipped);
+    }
+
+    {
+        let mut guard = AptosModuleCacheManagerGuard::none();
+        let result = block_executor
+            .execute_transactions_parallel(
+                &txn_provider,
+                &data_view,
+                &TransactionSliceMetadata::block_from_u64(0, 1),
+                &mut guard,
+            )
+            .unwrap();
+        let (output, block_epilogue_txn) = result.into_inner();
+        assert!(block_epilogue_txn.is_some());
+        assert_eq!(output.len(), 3);
+        assert!(!output[0].skipped);
+        assert!(!output[1].skipped);
+        assert!(!output[2].skipped);
+    }
+}
+
+#[test]
+fn test_block_epilogue_block_gas_limit_reached() {
+    let behavior = MockIncarnation::new(vec![], vec![], vec![], vec![], 10);
+    let t_0 = MockTransaction::from_behavior(behavior.clone());
+    let t_1 = MockTransaction::from_behavior(behavior);
+    let transactions = vec![t_0, t_1];
+
+    let executor_thread_pool = Arc::new(
+        rayon::ThreadPoolBuilder::new()
+            .num_threads(num_cpus::get())
+            .build()
+            .unwrap(),
+    );
+    let block_executor = BlockExecutor::<
+        MockTransaction, MockEvent>,
+        MockTask, MockEvent>,
+        MockStateView>,
+        NoOpTransactionCommitHook,
MockEvent>, usize>, + DefaultTxnProvider, MockEvent>>, + >::new( + BlockExecutorConfig::new_maybe_block_limit(num_cpus::get(), Some(1)), + executor_thread_pool, + None, + ); + let data_view = MockStateView::empty(); + + let aux_info = vec![AuxiliaryInfo::new_empty(), AuxiliaryInfo::new_empty()]; + + let txn_provider = DefaultTxnProvider::new(transactions, aux_info); + { + let mut guard = AptosModuleCacheManagerGuard::none(); + let result = block_executor + .execute_transactions_sequential( + &txn_provider, + &data_view, + &TransactionSliceMetadata::block_from_u64(0, 1), + &mut guard, + false, + ) + .unwrap(); + let (output, block_epilogue_txn) = result.into_inner(); + assert!(block_epilogue_txn.is_some()); + assert_eq!(output.len(), 3); + assert!(!output[0].skipped); + assert!(output[1].skipped); + assert!(!output[2].skipped); + } + + { + let mut guard = AptosModuleCacheManagerGuard::none(); + let result = block_executor + .execute_transactions_parallel( + &txn_provider, + &data_view, + &TransactionSliceMetadata::block_from_u64(0, 1), + &mut guard, + ) + .unwrap(); + let (output, block_epilogue_txn) = result.into_inner(); + assert!(block_epilogue_txn.is_some()); + assert_eq!(output.len(), 3); + assert!(!output[0].skipped); + assert!(output[1].skipped); + assert!(!output[2].skipped); + } +} + #[test] fn test_resource_group_deletion() { let mut group_creation: MockIncarnation, MockEvent> = - MockIncarnation::new(vec![KeyType::(1, false)], vec![], vec![], vec![], 10); + MockIncarnation::new(vec![(KeyType::(1), true)], vec![], vec![], vec![], 10); group_creation.group_writes.push(( - KeyType::(100, false), + KeyType::(100), StateValueMetadata::none(), - HashMap::from([(101, ValueType::from_value(vec![5], true))]), + HashMap::from([(101, (ValueType::from_value(vec![5], true), false))]), )); let mut group_deletion: MockIncarnation, MockEvent> = - MockIncarnation::new(vec![KeyType::(1, false)], vec![], vec![], vec![], 10); + MockIncarnation::new(vec![(KeyType::(1), true)], vec![], vec![], vec![], 10); group_deletion.group_writes.push(( - KeyType::(100, false), + KeyType::(100), StateValueMetadata::none(), HashMap::from([( 101, - ValueType::new(None, StateValueMetadata::none(), WriteOpKind::Deletion), + ( + ValueType::new(None, StateValueMetadata::none(), WriteOpKind::Deletion), + false, + ), )]), )); let t_0 = MockTransaction::from_behavior(group_creation); @@ -74,6 +213,7 @@ fn test_resource_group_deletion() { let data_view = NonEmptyGroupDataView::> { group_keys: HashSet::new(), + delayed_field_testing: false, }; let executor_thread_pool = Arc::new( rayon::ThreadPoolBuilder::new() @@ -94,7 +234,7 @@ fn test_resource_group_deletion() { ); let mut guard = AptosModuleCacheManagerGuard::none(); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); assert_ok!(block_executor.execute_transactions_sequential( &txn_provider, &data_view, @@ -115,20 +255,22 @@ fn test_resource_group_deletion() { #[test] fn resource_group_bcs_fallback() { let no_group_incarnation_1: MockIncarnation, MockEvent> = MockIncarnation::new( - vec![KeyType::(1, false)], + vec![(KeyType::(1), true)], vec![( - KeyType::(2, false), + KeyType::(2), ValueType::from_value(vec![5], true), + false, )], vec![], vec![], 10, ); let no_group_incarnation_2: MockIncarnation, MockEvent> = MockIncarnation::new( - vec![KeyType::(3, false), KeyType::(4, false)], + vec![(KeyType::(3), true), (KeyType::(4), true)], vec![( - KeyType::(1, false), + KeyType::(1), 
ValueType::from_value(vec![5], true), + false, )], vec![], vec![], @@ -138,17 +280,18 @@ fn resource_group_bcs_fallback() { let t_3 = MockTransaction::from_behavior(no_group_incarnation_2); let mut group_incarnation: MockIncarnation, MockEvent> = - MockIncarnation::new(vec![KeyType::(1, false)], vec![], vec![], vec![], 10); + MockIncarnation::new(vec![(KeyType::(1), true)], vec![], vec![], vec![], 10); group_incarnation.group_writes.push(( - KeyType::(100, false), + KeyType::(100), StateValueMetadata::none(), - HashMap::from([(101, ValueType::from_value(vec![5], true))]), + HashMap::from([(101, (ValueType::from_value(vec![5], true), false))]), )); let t_2 = MockTransaction::from_behavior(group_incarnation); let transactions = Vec::from([t_1, t_2, t_3]); let data_view = NonEmptyGroupDataView::> { group_keys: HashSet::new(), + delayed_field_testing: false, }; let executor_thread_pool = Arc::new( rayon::ThreadPoolBuilder::new() @@ -168,7 +311,7 @@ fn resource_group_bcs_fallback() { None, ); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); // Execute the block normally. let mut guard = AptosModuleCacheManagerGuard::none(); let output = block_executor.execute_transactions_parallel( @@ -261,12 +404,10 @@ fn resource_group_bcs_fallback() { #[test] fn interrupt_requested() { let transactions = Vec::from([MockTransaction::Abort, MockTransaction::InterruptRequested]); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); let mut guard = AptosModuleCacheManagerGuard::none(); - let data_view = DeltaDataView::> { - phantom: PhantomData, - }; + let data_view = MockStateView::empty(); let executor_thread_pool = Arc::new( rayon::ThreadPoolBuilder::new() .num_threads(num_cpus::get()) @@ -276,7 +417,7 @@ fn interrupt_requested() { let block_executor = BlockExecutor::< MockTransaction, MockEvent>, MockTask, MockEvent>, - DeltaDataView>, + MockStateView>, NoOpTransactionCommitHook, MockEvent>, usize>, DefaultTxnProvider, MockEvent>>, >::new( @@ -298,10 +439,11 @@ fn interrupt_requested() { #[test] fn block_output_err_precedence() { let incarnation: MockIncarnation, MockEvent> = MockIncarnation::new( - vec![KeyType::(1, false)], + vec![(KeyType::(1), false)], vec![( - KeyType::(2, false), + KeyType::(2), ValueType::from_value(vec![5], true), + false, )], vec![], vec![], @@ -309,11 +451,9 @@ fn block_output_err_precedence() { ); let txn = MockTransaction::from_behavior(incarnation); let transactions = Vec::from([txn.clone(), txn]); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); - let data_view = DeltaDataView::> { - phantom: PhantomData, - }; + let data_view = MockStateView::empty(); let executor_thread_pool = Arc::new( rayon::ThreadPoolBuilder::new() .num_threads(num_cpus::get()) @@ -323,7 +463,7 @@ fn block_output_err_precedence() { let block_executor = BlockExecutor::< MockTransaction, MockEvent>, MockTask, MockEvent>, - DeltaDataView>, + MockStateView>, NoOpTransactionCommitHook, MockEvent>, usize>, DefaultTxnProvider, MockEvent>>, >::new( @@ -354,11 +494,9 @@ fn skip_rest_gas_limit() { // The contents of the second txn does not matter, as the first should hit the gas limit and // also skip. But it ensures block is not finished at the first txn (different processing). 
let transactions = Vec::from([MockTransaction::SkipRest(10), MockTransaction::SkipRest(10)]); - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); - let data_view = DeltaDataView::> { - phantom: PhantomData, - }; + let data_view = MockStateView::empty(); let executor_thread_pool = Arc::new( rayon::ThreadPoolBuilder::new() .num_threads(num_cpus::get()) @@ -368,7 +506,7 @@ fn skip_rest_gas_limit() { let block_executor = BlockExecutor::< MockTransaction, MockEvent>, MockTask, MockEvent>, - DeltaDataView>, + MockStateView>, NoOpTransactionCommitHook, MockEvent>, usize>, DefaultTxnProvider, MockEvent>>, >::new( @@ -388,15 +526,11 @@ fn skip_rest_gas_limit() { } // TODO: add unit test for block gas limit! -fn run_and_assert(transactions: Vec>) +fn run_and_assert(transactions: Vec>, use_delta_data_view: bool) where K: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + Debug + 'static, E: Send + Sync + Debug + Clone + TransactionEvent + 'static, { - let data_view = DeltaDataView:: { - phantom: PhantomData, - }; - let executor_thread_pool = Arc::new( rayon::ThreadPoolBuilder::new() .num_threads(num_cpus::get()) @@ -405,24 +539,50 @@ where ); let mut guard = AptosModuleCacheManagerGuard::none(); - let txn_provider = DefaultTxnProvider::new(transactions); - let output = BlockExecutor::< - MockTransaction, - MockTask, - DeltaDataView, - NoOpTransactionCommitHook, usize>, - _, - >::new( - BlockExecutorConfig::new_no_block_limit(num_cpus::get()), - executor_thread_pool, - None, - ) - .execute_transactions_parallel( - &txn_provider, - &data_view, - &TransactionSliceMetadata::unknown(), - &mut guard, - ); + let txn_provider = DefaultTxnProvider::new_without_info(transactions); + + let output = if use_delta_data_view { + let data_view = DeltaDataView:: { + phantom: PhantomData, + }; + + BlockExecutor::< + MockTransaction, + MockTask, + DeltaDataView, + NoOpTransactionCommitHook, usize>, + _, + >::new( + BlockExecutorConfig::new_no_block_limit(num_cpus::get()), + executor_thread_pool, + None, + ) + .execute_transactions_parallel( + &txn_provider, + &data_view, + &TransactionSliceMetadata::unknown(), + &mut guard, + ) + } else { + let data_view = MockStateView::empty(); + BlockExecutor::< + MockTransaction, + MockTask, + MockStateView, + NoOpTransactionCommitHook, usize>, + _, + >::new( + BlockExecutorConfig::new_no_block_limit(num_cpus::get()), + executor_thread_pool, + None, + ) + .execute_transactions_parallel( + &txn_provider, + &data_view, + &TransactionSliceMetadata::unknown(), + &mut guard, + ) + }; let baseline = BaselineOutput::generate(txn_provider.get_txns(), None); baseline.assert_parallel_output(&output); @@ -439,61 +599,62 @@ fn random_value(delete_value: bool) -> ValueType { fn empty_block() { // This test checks that we do not trigger asserts due to an empty block, e.g. in the // scheduler. Instead, parallel execution should gracefully early return empty output. - run_and_assert::, MockEvent>(vec![]); + run_and_assert::, MockEvent>(vec![], false); } #[test] fn delta_counters() { - let key = KeyType(random::<[u8; 32]>(), false); + // TODO(BlockSTMv2): Adjust these tests to also use V2. 
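Aside: delta_counters below exercises AggregatorV1-style deltas. A simplified sketch of the semantics (not the real aptos_aggregator::delta_change_set API): a delta defers "+= v" or "-= v" until the base value is known, and application fails on overflow past the aggregator's configured maximum or underflow below zero.

#[derive(Clone, Copy, Debug)]
enum SketchDelta {
    Add(u128),
    Sub(u128),
}

/// Applies a deferred delta to a known base value; None models the
/// overflow/underflow failures that delta application can surface.
fn sketch_apply_delta(base: u128, delta: SketchDelta, max_value: u128) -> Option<u128> {
    match delta {
        SketchDelta::Add(v) => base.checked_add(v).filter(|r| *r <= max_value),
        SketchDelta::Sub(v) => base.checked_sub(v),
    }
}

// For example: applying Add(5) to base 10 yields Some(15), while Sub(2) applied
// to base 1 yields None (underflow), mirroring delta_add / delta_sub with bound
// u128::MAX in the transactions below.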
+ let key = KeyType(random::<[u8; 32]>()); let mut transactions = vec![MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, MockEvent, >::new( vec![], - vec![(key, random_value(false))], // writes + vec![(key, random_value(false), false)], // writes vec![], vec![], 1, // gas ))]; for _ in 0..50 { - transactions.push(MockTransaction::from_behavior(MockIncarnation::< - KeyType<[u8; 32]>, - MockEvent, - >::new( - vec![key], // reads - vec![], - vec![(key, delta_add(5, u128::MAX))], // deltas - vec![], - 1, // gas - ))); + transactions.push( + MockTransaction::from_behavior(MockIncarnation::, MockEvent>::new( + vec![(key, false)], // reads + vec![], + vec![(key, delta_add(5, u128::MAX), None)], // deltas + vec![], + 1, // gas + )) + .with_aggregator_v1_testing(), + ); } - transactions.push(MockTransaction::from_behavior(MockIncarnation::< - KeyType<[u8; 32]>, - MockEvent, - >::new( - vec![], - vec![(key, random_value(false))], // writes - vec![], - vec![], - 1, // gas - ))); - - for _ in 0..50 { - transactions.push(MockTransaction::from_behavior(MockIncarnation::< - KeyType<[u8; 32]>, - MockEvent, - >::new( - vec![key], // reads + transactions.push( + MockTransaction::from_behavior(MockIncarnation::, MockEvent>::new( + vec![], + vec![(key, random_value(false), false)], // writes vec![], - vec![(key, delta_sub(2, u128::MAX))], // deltas vec![], 1, // gas - ))); + )) + .with_aggregator_v1_testing(), + ); + + for _ in 0..50 { + transactions.push( + MockTransaction::from_behavior(MockIncarnation::, MockEvent>::new( + vec![(key, false)], // reads + vec![], + vec![(key, delta_sub(2, u128::MAX), None)], // deltas + vec![], + 1, // gas + )) + .with_aggregator_v1_testing(), + ); } - run_and_assert(transactions) + run_and_assert(transactions, true) } #[test] @@ -501,14 +662,12 @@ fn delta_chains() { let mut transactions = vec![]; // Generate a series of transactions add and subtract from an aggregator. 
- let keys: Vec> = (0..10) - .map(|_| KeyType(random::<[u8; 32]>(), false)) - .collect(); + let keys: Vec> = (0..10).map(|_| KeyType(random::<[u8; 32]>())).collect(); for i in 0..500 { transactions.push( MockTransaction::, MockEvent>::from_behavior(MockIncarnation::new( - keys.clone(), // reads + keys.clone().into_iter().map(|k| (k, true)).collect(), // reads vec![], keys.iter() .enumerate() @@ -526,17 +685,19 @@ fn delta_chains() { u128::MAX, DeltaHistory::new(), ), + None, )), false => None, }) .collect(), // deltas vec![], 1, // gas - )), + )) + .with_aggregator_v1_testing(), ); } - run_and_assert(transactions) + run_and_assert(transactions, true) } const TOTAL_KEY_NUM: u64 = 50; @@ -554,15 +715,15 @@ fn cycle_transactions() { KeyType<[u8; 32]>, MockEvent, >::new( - vec![KeyType(key, false)], // reads - vec![(KeyType(key, false), random_value(false))], // writes + vec![(KeyType(key), false)], // reads + vec![(KeyType(key), random_value(false), false)], // writes vec![], vec![], 1, // gas ))); } } - run_and_assert(transactions) + run_and_assert(transactions, false) } const NUM_BLOCKS: u64 = 10; @@ -572,7 +733,7 @@ const TXN_PER_BLOCK: u64 = 100; fn one_reads_all_barrier() { let mut transactions = vec![]; let keys: Vec> = (0..TXN_PER_BLOCK) - .map(|_| KeyType(random::<[u8; 32]>(), false)) + .map(|_| KeyType(random::<[u8; 32]>())) .collect(); for _ in 0..NUM_BLOCKS { for key in &keys { @@ -580,8 +741,8 @@ fn one_reads_all_barrier() { KeyType<[u8; 32]>, MockEvent, >::new( - vec![*key], // reads - vec![(*key, random_value(false))], // writes + vec![(*key, false)], // reads + vec![(*key, random_value(false), false)], // writes vec![], vec![], 1, // gas @@ -592,27 +753,27 @@ fn one_reads_all_barrier() { KeyType<[u8; 32]>, MockEvent, >::new( - keys.clone(), //reads + keys.clone().into_iter().map(|k| (k, false)).collect(), // reads vec![], vec![], vec![], 1, //gas ))); } - run_and_assert(transactions) + run_and_assert(transactions, false) } #[test] fn one_writes_all_barrier() { let mut transactions = vec![]; let keys: Vec> = (0..TXN_PER_BLOCK) - .map(|_| KeyType(random::<[u8; 32]>(), false)) + .map(|_| KeyType(random::<[u8; 32]>())) .collect(); for _ in 0..NUM_BLOCKS { for key in &keys { transactions.push(MockTransaction::from_behavior(MockIncarnation::new( - vec![*key], //reads - vec![(*key, random_value(false))], //writes + vec![(*key, false)], //reads + vec![(*key, random_value(false), false)], //writes vec![], vec![], 1, //gas @@ -623,23 +784,23 @@ fn one_writes_all_barrier() { KeyType<[u8; 32]>, MockEvent, >::new( - keys.clone(), // reads + keys.clone().into_iter().map(|k| (k, false)).collect(), // reads keys.iter() - .map(|key| (*key, random_value(false))) + .map(|key| (*key, random_value(false), false)) .collect::>(), //writes vec![], vec![], 1, // gas ))); } - run_and_assert(transactions) + run_and_assert(transactions, false) } #[test] fn early_aborts() { let mut transactions = vec![]; let keys: Vec<_> = (0..TXN_PER_BLOCK) - .map(|_| KeyType(random::<[u8; 32]>(), false)) + .map(|_| KeyType(random::<[u8; 32]>())) .collect(); for _ in 0..NUM_BLOCKS { @@ -648,8 +809,8 @@ fn early_aborts() { KeyType<[u8; 32]>, MockEvent, >::new( - vec![*key], // reads - vec![(*key, random_value(false))], // writes + vec![(*key, false)], // reads + vec![(*key, random_value(false), false)], // writes vec![], vec![], 1, // gas @@ -658,14 +819,14 @@ fn early_aborts() { // One transaction that triggers an abort transactions.push(MockTransaction::Abort) } - run_and_assert(transactions) + 
run_and_assert(transactions, false) } #[test] fn early_skips() { let mut transactions = vec![]; let keys: Vec<_> = (0..TXN_PER_BLOCK) - .map(|_| KeyType(random::<[u8; 32]>(), false)) + .map(|_| KeyType(random::<[u8; 32]>())) .collect(); for _ in 0..NUM_BLOCKS { @@ -674,8 +835,8 @@ fn early_skips() { KeyType<[u8; 32]>, MockEvent, >::new( - vec![*key], // reads - vec![(*key, random_value(false))], //writes + vec![(*key, false)], // reads + vec![(*key, random_value(false), false)], //writes vec![], vec![], 1, // gas @@ -684,7 +845,7 @@ fn early_skips() { // One transaction that triggers an abort transactions.push(MockTransaction::SkipRest(0)) } - run_and_assert(transactions) + run_and_assert(transactions, false) } #[test] diff --git a/aptos-move/block-executor/src/value_exchange.rs b/aptos-move/block-executor/src/value_exchange.rs index b7c3bf24453af..9d4e01667b422 100644 --- a/aptos-move/block-executor/src/value_exchange.rs +++ b/aptos-move/block-executor/src/value_exchange.rs @@ -19,7 +19,7 @@ use move_core_types::value::{IdentifierMappingKind, MoveTypeLayout}; use move_vm_runtime::AsFunctionValueExtension; use move_vm_types::{ delayed_values::delayed_field_id::{DelayedFieldID, ExtractWidth, TryFromMoveValue}, - value_serde::{ValueSerDeContext, ValueToIdentifierMapping}, + value_serde::{FunctionValueExtension, ValueSerDeContext, ValueToIdentifierMapping}, value_traversal::find_identifiers_in_value, values::Value, }; @@ -116,7 +116,7 @@ where // See if can cache identifiers in advance, or combine it with // deserialization. let function_value_extension = self.as_function_value_extension(); - let value = ValueSerDeContext::new() + let value = ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) .with_func_args_deserialization(&function_value_extension) .with_delayed_fields_serde() .deserialize(bytes, layout) diff --git a/aptos-move/block-executor/src/view.rs b/aptos-move/block-executor/src/view.rs index 0f4dcce0c9ad4..0e76ed91b4235 100644 --- a/aptos-move/block-executor/src/view.rs +++ b/aptos-move/block-executor/src/view.rs @@ -24,8 +24,8 @@ use aptos_aggregator::{ use aptos_logger::error; use aptos_mvhashmap::{ types::{ - MVDataError, MVDataOutput, MVDelayedFieldsError, MVGroupError, StorageVersion, TxnIndex, - UnknownOrLayout, UnsyncGroupError, ValueWithLayout, + Incarnation, MVDataError, MVDataOutput, MVDelayedFieldsError, MVGroupError, StorageVersion, + TxnIndex, UnknownOrLayout, UnsyncGroupError, ValueWithLayout, }, unsync_map::UnsyncMap, versioned_delayed_fields::TVersionedDelayedFieldView, @@ -59,7 +59,7 @@ use move_core_types::{language_storage::ModuleId, value::MoveTypeLayout, vm_stat use move_vm_runtime::{AsFunctionValueExtension, Module, RuntimeEnvironment}; use move_vm_types::{ delayed_values::delayed_field_id::{DelayedFieldID, ExtractUniqueIndex}, - value_serde::ValueSerDeContext, + value_serde::{FunctionValueExtension, ValueSerDeContext}, }; use std::{ cell::RefCell, @@ -227,6 +227,7 @@ pub(crate) struct ParallelState<'a, T: Transaction> { scheduler: SchedulerWrapper<'a>, start_counter: u32, counter: &'a AtomicU32, + incarnation: Incarnation, pub(crate) captured_reads: RefCell>, } @@ -516,13 +517,16 @@ impl<'a, T: Transaction> ParallelState<'a, T> { shared_scheduler: SchedulerWrapper<'a>, start_shared_counter: u32, shared_counter: &'a AtomicU32, + incarnation: Incarnation, ) -> Self { + let blockstm_v2 = shared_scheduler.is_v2(); Self { versioned_map: shared_map, scheduler: shared_scheduler, start_counter: start_shared_counter, counter: shared_counter, - 
captured_reads: RefCell::new(CapturedReads::new()), + incarnation, + captured_reads: RefCell::new(CapturedReads::new(blockstm_v2.then_some(incarnation))), } } @@ -609,7 +613,15 @@ impl ResourceState for ParallelState<'_, T> { } loop { - match self.versioned_map.data().fetch_data(key, txn_idx) { + let data = if self.scheduler.is_v2() { + self.versioned_map + .data() + .fetch_data_v2(key, txn_idx, self.incarnation) + } else { + self.versioned_map.data().fetch_data(key, txn_idx) + }; + + match data { Ok(Versioned(version, value)) => { // If we have a known layout, upgrade RawFromStorage value to Exchanged. if let UnknownOrLayout::Known(layout) = layout { @@ -1173,15 +1185,16 @@ impl<'a, T: Transaction, S: TStateView> LatestView<'a, T, S> { // values with unique identifiers with the same type layout. // The values are stored in aggregators multi-version data structure, // see the actual trait implementation for more details. - let patched_value = ValueSerDeContext::new() - .with_delayed_fields_replacement(&mapping) - .with_func_args_deserialization(&function_value_extension) - .deserialize(bytes.as_ref(), layout) - .ok_or_else(|| { - anyhow::anyhow!("Failed to deserialize resource during id replacement") - })?; - - ValueSerDeContext::new() + let patched_value = + ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) + .with_delayed_fields_replacement(&mapping) + .with_func_args_deserialization(&function_value_extension) + .deserialize(bytes.as_ref(), layout) + .ok_or_else(|| { + anyhow::anyhow!("Failed to deserialize resource during id replacement") + })?; + + ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) .with_delayed_fields_serde() .with_func_args_deserialization(&function_value_extension) .serialize(&patched_value, layout)? @@ -1206,7 +1219,7 @@ impl<'a, T: Transaction, S: TStateView> LatestView<'a, T, S> { // This call will replace all occurrences of aggregator / snapshot // identifiers with values with the same type layout. let function_value_extension = self.as_function_value_extension(); - let value = ValueSerDeContext::new() + let value = ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) .with_func_args_deserialization(&function_value_extension) .with_delayed_fields_serde() .deserialize(bytes, layout) @@ -1218,7 +1231,7 @@ impl<'a, T: Transaction, S: TStateView> LatestView<'a, T, S> { })?; let mapping = TemporaryValueToIdentifierMapping::new(self, self.txn_idx); - let patched_bytes = ValueSerDeContext::new() + let patched_bytes = ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) .with_delayed_fields_replacement(&mapping) .with_func_args_deserialization(&function_value_extension) .serialize(&value, layout)? 
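Note on the ValueSerDeContext changes in the hunks above: every construction site now passes function_value_extension.max_value_nest_depth(), so the deserialize-then-reserialize round-trip that exchanges delayed-field identifiers for concrete values runs under an explicit nesting bound. As a rough standalone sketch of that exchange pattern, with toy stand-in types rather than the Move VM's actual API:

// Toy sketch of the delayed-field exchange: rebuild a value while replacing
// every placeholder identifier with its current concrete value. The types
// and names here are illustrative stand-ins, not move-vm-types APIs.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum ToyValue {
    U64(u64),
    DelayedId(u32), // placeholder identifier, e.g. for an aggregator
}

fn exchange(value: &ToyValue, mapping: &HashMap<u32, u64>) -> Result<ToyValue, String> {
    match value {
        ToyValue::U64(x) => Ok(ToyValue::U64(*x)),
        ToyValue::DelayedId(id) => mapping
            .get(id)
            .map(|v| ToyValue::U64(*v))
            .ok_or_else(|| format!("no value recorded for delayed field {id}")),
    }
}

fn main() {
    let mapping = HashMap::from([(7u32, 100u64)]);
    assert_eq!(
        exchange(&ToyValue::DelayedId(7), &mapping).unwrap(),
        ToyValue::U64(100)
    );
}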
@@ -1516,7 +1529,9 @@ impl> BlockSynchronizationKillSwitch { fn interrupt_requested(&self) -> bool { match &self.latest_view { - ViewState::Sync(state) => state.scheduler.has_halted(), + ViewState::Sync(state) => state + .scheduler + .interrupt_requested(self.txn_idx, state.incarnation), ViewState::Unsync(_) => false, } } @@ -1675,10 +1690,7 @@ impl> TResourceGroupView for LatestV } fn is_resource_groups_split_in_change_set_capable(&self) -> bool { - match &self.latest_view { - ViewState::Sync(_) => true, - ViewState::Unsync(_) => true, - } + true } } @@ -1870,7 +1882,10 @@ mod test { use super::*; use crate::{ captured_reads::{CapturedReads, DelayedFieldRead, DelayedFieldReadKind}, - proptest_types::types::{KeyType, MockEvent, ValueType}, + proptest_types::{ + mock_executor::MockEvent, + types::{KeyType, ValueType}, + }, scheduler::{DependencyResult, Scheduler, TWaitForDependency}, view::{delayed_field_try_add_delta_outcome_impl, get_delayed_field_value_impl, ViewState}, }; @@ -1979,7 +1994,7 @@ mod test { CompiledModule, Module, AptosModuleExtension, - >::new()); + >::new(None)); let wait_for = FakeWaitForDependency(); let id = DelayedFieldID::new_for_test_for_u64(600); let max_value = 600; @@ -2124,7 +2139,7 @@ mod test { CompiledModule, Module, AptosModuleExtension, - >::new()); + >::new(None)); let wait_for = FakeWaitForDependency(); let id = DelayedFieldID::new_for_test_for_u64(600); let max_value = 600; @@ -2269,7 +2284,7 @@ mod test { CompiledModule, Module, AptosModuleExtension, - >::new()); + >::new(None)); let wait_for = FakeWaitForDependency(); let id = DelayedFieldID::new_for_test_for_u64(600); let max_value = 600; @@ -2414,7 +2429,7 @@ mod test { CompiledModule, Module, AptosModuleExtension, - >::new()); + >::new(None)); let wait_for = FakeWaitForDependency(); let id = DelayedFieldID::new_for_test_for_u64(600); let max_value = 600; @@ -2538,7 +2553,7 @@ mod test { fn create_state_value(value: &Value, layout: &MoveTypeLayout) -> StateValue { StateValue::new_legacy( - ValueSerDeContext::new() + ValueSerDeContext::new(None) .serialize(value, layout) .unwrap() .unwrap() @@ -2884,6 +2899,7 @@ mod test { SchedulerWrapper::V1(&self.scheduler, &self.holder.skip_module_validation), self.start_counter, &self.counter, + 0, )), 1, ); @@ -3003,14 +3019,14 @@ mod test { let views = holder.new_view(); assert_ok_eq!( - views.get_resource_state_value(&KeyType::(1, false), None), + views.get_resource_state_value(&KeyType::(1), None), None ); - assert_ok_eq!(views.resource_exists(&KeyType::(1, false)), false,); + assert_ok_eq!(views.resource_exists(&KeyType::(1)), false,); assert_ok_eq!( - views.get_resource_state_value_metadata(&KeyType::(1, false)), + views.get_resource_state_value_metadata(&KeyType::(1)), None, ); } @@ -3018,14 +3034,14 @@ mod test { #[test] fn test_non_value_reads_not_recorded() { let state_value = create_state_value(&Value::u64(12321), &MoveTypeLayout::U64); - let data = HashMap::from([(KeyType::(1, false), state_value.clone())]); + let data = HashMap::from([(KeyType::(1), state_value.clone())]); let holder = ComparisonHolder::new(data, 1000); let views = holder.new_view(); - assert_ok_eq!(views.resource_exists(&KeyType::(1, false)), true); + assert_ok_eq!(views.resource_exists(&KeyType::(1)), true); assert!(views - .get_resource_state_value_metadata(&KeyType::(1, false)) + .get_resource_state_value_metadata(&KeyType::(1)) .unwrap() .is_some(),); @@ -3060,21 +3076,18 @@ mod test { #[test] fn test_regular_read_operations() { let state_value = 
create_state_value(&Value::u64(12321), &MoveTypeLayout::U64); - let data = HashMap::from([(KeyType::(1, false), state_value.clone())]); + let data = HashMap::from([(KeyType::(1), state_value.clone())]); let holder = ComparisonHolder::new(data, 1000); let views = holder.new_view(); assert_ok_eq!( - views.get_resource_state_value(&KeyType::(1, false), None), + views.get_resource_state_value(&KeyType::(1), None), Some(state_value.clone()) ); assert_fetch_eq( - holder - .holder - .unsync_map - .fetch_data(&KeyType::(1, false)), + holder.holder.unsync_map.fetch_data(&KeyType::(1)), Some(TransactionWrite::from_state_value(Some(state_value))), None, ); @@ -3088,7 +3101,7 @@ mod test { create_struct_layout(create_aggregator_storage_layout(MoveTypeLayout::U64)); let value = create_struct_value(create_aggregator_value_u64(25, 30)); let state_value = create_state_value(&value, &storage_layout); - let data = HashMap::from([(KeyType::(1, false), state_value.clone())]); + let data = HashMap::from([(KeyType::(1), state_value.clone())]); let start_counter = 1000; let id = DelayedFieldID::new_with_width(start_counter, 8); @@ -3102,29 +3115,26 @@ mod test { match check_metadata { Some(true) => { views - .get_resource_state_value_metadata(&KeyType::(1, false)) + .get_resource_state_value_metadata(&KeyType::(1)) .unwrap(); }, Some(false) => { - assert_ok_eq!(views.resource_exists(&KeyType::(1, false)), true,); + assert_ok_eq!(views.resource_exists(&KeyType::(1)), true,); }, None => {}, }; let layout = create_struct_layout(create_aggregator_layout_u64()); assert_ok_eq!( - views.get_resource_state_value(&KeyType::(1, false), Some(&layout)), + views.get_resource_state_value(&KeyType::(1), Some(&layout)), Some(patched_state_value.clone()) ); assert!(views .get_reads_needing_exchange(&HashSet::from([id]), &HashSet::new()) .unwrap() - .contains_key(&KeyType(1, false))); + .contains_key(&KeyType::(1))); assert_fetch_eq( - holder - .holder - .unsync_map - .fetch_data(&KeyType::(1, false)), + holder.holder.unsync_map.fetch_data(&KeyType::(1)), Some(TransactionWrite::from_state_value(Some( patched_state_value, ))), @@ -3136,12 +3146,12 @@ mod test { fn test_read_operations() { let state_value_3 = create_state_value(&Value::u64(12321), &MoveTypeLayout::U64); let mut data = HashMap::new(); - data.insert(KeyType::(3, false), state_value_3.clone()); + data.insert(KeyType::(3), state_value_3.clone()); let storage_layout = create_struct_layout(create_aggregator_storage_layout(MoveTypeLayout::U64)); let value = create_struct_value(create_aggregator_value_u64(25, 30)); let state_value_4 = create_state_value(&value, &storage_layout); - data.insert(KeyType::(4, false), state_value_4); + data.insert(KeyType::(4), state_value_4); let start_counter = 1000; let id = DelayedFieldID::new_with_width(start_counter, 8); @@ -3150,20 +3160,20 @@ mod test { assert_eq!( views - .get_resource_state_value(&KeyType::(1, false), None) + .get_resource_state_value(&KeyType::(1), None) .unwrap(), None ); let layout = create_struct_layout(create_aggregator_layout_u64()); assert_eq!( views - .get_resource_state_value(&KeyType::(2, false), Some(&layout)) + .get_resource_state_value(&KeyType::(2), Some(&layout)) .unwrap(), None ); assert_eq!( views - .get_resource_state_value(&KeyType::(3, false), None) + .get_resource_state_value(&KeyType::(3), None) .unwrap(), Some(state_value_3.clone()) ); @@ -3175,14 +3185,14 @@ mod test { holder .versioned_map .data() - .fetch_data(&KeyType::(3, false), 1) + .fetch_data(&KeyType::(3), 1) ); let patched_value = 
create_struct_value(create_aggregator_value_u64(id.as_u64(), 30)); let state_value_4 = create_state_value(&patched_value, &storage_layout); assert_eq!( views - .get_resource_state_value(&KeyType::(4, false), Some(&layout)) + .get_resource_state_value(&KeyType::(4), Some(&layout)) .unwrap(), Some(state_value_4.clone()) ); @@ -3216,6 +3226,6 @@ mod test { // TODO[agg_v2](test): This assertion fails. // let data_read = DataRead::Versioned(Ok((1,0)), Arc::new(TransactionWrite::from_state_value(Some(state_value_4))), Some(Arc::new(layout))); - // assert!(read_set_with_delayed_fields.any(|x| x == (&KeyType::(4, false), &data_read))); + // assert!(read_set_with_delayed_fields.any(|x| x == (&KeyType::(4), &data_read))); } } diff --git a/aptos-move/e2e-benchmark/data/calibration_values.tsv b/aptos-move/e2e-benchmark/data/calibration_values.tsv index 4339752af02fc..11b0f87fa3eef 100644 --- a/aptos-move/e2e-benchmark/data/calibration_values.tsv +++ b/aptos-move/e2e-benchmark/data/calibration_values.tsv @@ -1,43 +1,43 @@ -Loop { loop_count: Some(100000), loop_type: NoOp } 56 0.948 1.108 38277.6 -Loop { loop_count: Some(10000), loop_type: Arithmetic } 56 0.944 1.077 23921.2 -CreateObjects { num_objects: 10, object_payload_size: 0 } 56 0.938 1.097 163.1 -CreateObjects { num_objects: 10, object_payload_size: 10240 } 56 0.942 1.102 8733.7 -CreateObjects { num_objects: 100, object_payload_size: 0 } 56 0.915 1.065 1476.6 -CreateObjects { num_objects: 100, object_payload_size: 10240 } 56 0.957 1.091 10568.9 -InitializeVectorPicture { length: 128 } 56 0.933 1.067 174.6 -VectorPicture { length: 128 } 56 0.916 1.233 37.5 -VectorPictureRead { length: 128 } 56 0.912 1.036 36.2 -InitializeVectorPicture { length: 30720 } 56 0.964 1.123 25548.3 -VectorPicture { length: 30720 } 56 0.940 1.112 4838.6 -VectorPictureRead { length: 30720 } 56 0.942 1.059 4784.7 -SmartTablePicture { length: 30720, num_points_per_txn: 200 } 56 0.959 1.090 33659.3 -SmartTablePicture { length: 1048576, num_points_per_txn: 300 } 56 0.964 1.093 58498.7 -ResourceGroupsSenderWriteTag { string_length: 1024 } 56 0.901 1.161 21.6 -ResourceGroupsSenderMultiChange { string_length: 1024 } 56 0.922 1.182 39.8 -TokenV1MintAndTransferFT 56 0.920 1.061 707.3 -TokenV1MintAndTransferNFTSequential 56 0.912 1.076 993.2 -TokenV2AmbassadorMint { numbered: true } 56 0.916 1.051 621.9 -LiquidityPoolSwap { is_stable: true } 56 0.922 1.056 860.7 -LiquidityPoolSwap { is_stable: false } 56 0.921 1.061 812.7 -CoinInitAndMint 56 0.919 1.055 936.3 -FungibleAssetMint 56 0.927 1.112 303.5 -IncGlobalMilestoneAggV2 { milestone_every: 1 } 56 0.907 1.167 40.6 -IncGlobalMilestoneAggV2 { milestone_every: 2 } 56 0.900 1.273 24.2 -EmitEvents { count: 1000 } 56 0.936 1.072 7961.2 -APTTransferWithPermissionedSigner 56 0.914 1.289 1236.9 -APTTransferWithMasterSigner 56 0.934 1.048 120.4 -VectorTrimAppend { vec_len: 3000, element_len: 1, index: 0, repeats: 0 } 56 0.925 1.058 5959.1 -VectorTrimAppend { vec_len: 3000, element_len: 1, index: 100, repeats: 1000 } 56 0.934 1.326 26428.9 -VectorTrimAppend { vec_len: 3000, element_len: 1, index: 2990, repeats: 1000 } 56 0.939 1.088 14490.7 -VectorRemoveInsert { vec_len: 3000, element_len: 1, index: 100, repeats: 1000 } 56 0.941 1.108 22960.6 -VectorRemoveInsert { vec_len: 3000, element_len: 1, index: 2998, repeats: 1000 } 56 0.943 1.090 15009.6 -VectorRangeMove { vec_len: 3000, element_len: 1, index: 1000, move_len: 500, repeats: 1000 } 56 0.929 1.221 24699.3 -VectorTrimAppend { vec_len: 100, element_len: 100, index: 0, repeats: 0 } 56 
0.925 1.074 265.6 -VectorTrimAppend { vec_len: 100, element_len: 100, index: 10, repeats: 1000 } 56 0.935 1.181 9551.5 -VectorRangeMove { vec_len: 100, element_len: 100, index: 50, move_len: 10, repeats: 1000 } 56 0.945 1.075 4316.2 -MapInsertRemove { len: 100, repeats: 100, map_type: OrderedMap } 56 0.955 1.072 11196.4 -MapInsertRemove { len: 100, repeats: 100, map_type: SimpleMap } 56 0.944 1.099 33925.8 -MapInsertRemove { len: 100, repeats: 100, map_type: BigOrderedMap { inner_max_degree: 4, leaf_max_degree: 4 } } 56 0.948 1.131 108596.0 -MapInsertRemove { len: 100, repeats: 100, map_type: BigOrderedMap { inner_max_degree: 1024, leaf_max_degree: 1024 } } 56 0.948 1.049 19282.3 -MapInsertRemove { len: 1000, repeats: 100, map_type: OrderedMap } 56 0.945 1.061 54788.6 -OrderBook { state: OrderBookState { order_idx: 0 }, overlap_ratio: 0.0, buy_frequency: 0.5, max_sell_size: 1, max_buy_size: 1 } 56 0.921 1.189 702.3 +Loop { loop_count: Some(100000), loop_type: NoOp } 6 0.993 1.012 38277.6 +Loop { loop_count: Some(10000), loop_type: Arithmetic } 6 0.995 1.010 23083.9 +CreateObjects { num_objects: 10, object_payload_size: 0 } 6 0.989 1.007 131.5 +CreateObjects { num_objects: 10, object_payload_size: 10240 } 6 0.995 1.017 8007.9 +CreateObjects { num_objects: 100, object_payload_size: 0 } 6 0.996 1.015 1225.5 +CreateObjects { num_objects: 100, object_payload_size: 10240 } 6 0.994 1.008 9641.2 +InitializeVectorPicture { length: 128 } 6 0.995 1.015 143.7 +VectorPicture { length: 128 } 6 0.989 1.008 32.0 +VectorPictureRead { length: 128 } 6 0.990 1.015 30.9 +InitializeVectorPicture { length: 30720 } 6 0.998 1.016 23625.0 +VectorPicture { length: 30720 } 6 0.969 1.008 4333.0 +VectorPictureRead { length: 30720 } 6 0.967 1.003 4341.5 +SmartTablePicture { length: 30720, num_points_per_txn: 200 } 6 0.996 1.007 29963.1 +SmartTablePicture { length: 1048576, num_points_per_txn: 300 } 6 0.993 1.007 52575.2 +ResourceGroupsSenderWriteTag { string_length: 1024 } 6 0.995 1.021 16.9 +ResourceGroupsSenderMultiChange { string_length: 1024 } 6 0.986 1.019 31.3 +TokenV1MintAndTransferFT 6 0.997 1.010 452.0 +TokenV1MintAndTransferNFTSequential 6 0.994 1.014 642.8 +TokenV2AmbassadorMint { numbered: true } 6 0.991 1.015 418.2 +LiquidityPoolSwap { is_stable: true } 6 0.995 1.006 626.5 +LiquidityPoolSwap { is_stable: false } 6 1.000 1.011 581.5 +CoinInitAndMint 6 0.987 1.005 642.8 +FungibleAssetMint 6 0.997 1.011 201.4 +IncGlobalMilestoneAggV2 { milestone_every: 1 } 6 0.987 1.004 30.0 +IncGlobalMilestoneAggV2 { milestone_every: 2 } 6 0.979 1.010 18.5 +EmitEvents { count: 1000 } 6 0.993 1.017 6799.5 +APTTransferWithPermissionedSigner 6 0.990 1.004 834.8 +APTTransferWithMasterSigner 6 0.988 1.010 85.0 +VectorTrimAppend { vec_len: 3000, element_len: 1, index: 0, repeats: 0 } 6 0.992 1.009 5198.7 +VectorTrimAppend { vec_len: 3000, element_len: 1, index: 100, repeats: 1000 } 6 0.992 1.062 21640.8 +VectorTrimAppend { vec_len: 3000, element_len: 1, index: 2990, repeats: 1000 } 6 0.995 1.005 12593.6 +VectorRemoveInsert { vec_len: 3000, element_len: 1, index: 100, repeats: 1000 } 6 0.997 1.010 19354.3 +VectorRemoveInsert { vec_len: 3000, element_len: 1, index: 2998, repeats: 1000 } 6 0.993 1.010 13429.1 +VectorRangeMove { vec_len: 3000, element_len: 1, index: 1000, move_len: 500, repeats: 1000 } 6 0.995 1.018 22312.6 +VectorTrimAppend { vec_len: 100, element_len: 100, index: 0, repeats: 0 } 6 0.996 1.020 243.1 +VectorTrimAppend { vec_len: 100, element_len: 100, index: 10, repeats: 1000 } 6 0.994 1.055 8298.9 +VectorRangeMove { 
vec_len: 100, element_len: 100, index: 50, move_len: 10, repeats: 1000 } 6 0.998 1.011 3874.7 +MapInsertRemove { len: 100, repeats: 100, map_type: OrderedMap } 6 0.992 1.011 9268.2 +MapInsertRemove { len: 100, repeats: 100, map_type: SimpleMap } 6 0.997 1.002 30762.7 +MapInsertRemove { len: 100, repeats: 100, map_type: BigOrderedMap { inner_max_degree: 4, leaf_max_degree: 4 } } 6 0.993 1.015 76555.3 +MapInsertRemove { len: 100, repeats: 100, map_type: BigOrderedMap { inner_max_degree: 1024, leaf_max_degree: 1024 } } 6 0.996 1.008 14124.9 +MapInsertRemove { len: 1000, repeats: 100, map_type: OrderedMap } 6 0.991 1.007 46581.4 +OrderBook { state: OrderBookState { order_idx: 0 }, overlap_ratio: 0.0, buy_frequency: 0.5, max_sell_size: 1, max_buy_size: 1 } 6 0.997 1.013 457.9 diff --git a/aptos-move/e2e-move-tests/src/tests/aggregator_v2.data/function_values/sources/capturing.move b/aptos-move/e2e-move-tests/src/tests/aggregator_v2.data/function_values/sources/capturing.move index 31ef1be7059a0..5155df845d9e0 100644 --- a/aptos-move/e2e-move-tests/src/tests/aggregator_v2.data/function_values/sources/capturing.move +++ b/aptos-move/e2e-move-tests/src/tests/aggregator_v2.data/function_values/sources/capturing.move @@ -1,12 +1,13 @@ module 0x1::capturing { use aptos_framework::aggregator_v2::create_unbounded_aggregator; - use 0x1::proxy::destroy; - public entry fun capture_aggregator(account: &signer, value: u64, expected: u64) { - let aggregator = destroy(account); + public entry fun capture_aggregator() { + let aggregator = create_unbounded_aggregator(); + aggregator.add(1000); + let apply = |x| 0x1::function_store::fetch_and_add(aggregator, x); - let result = apply(value); - assert!(result == expected, 1); + let result = apply(100); + assert!(result == 1100, 1); } public entry fun to_bytes_with_captured_aggregator() { diff --git a/aptos-move/e2e-move-tests/src/tests/aggregator_v2_function_values.rs b/aptos-move/e2e-move-tests/src/tests/aggregator_v2_function_values.rs index a9c52d8ba8db1..353793b547477 100644 --- a/aptos-move/e2e-move-tests/src/tests/aggregator_v2_function_values.rs +++ b/aptos-move/e2e-move-tests/src/tests/aggregator_v2_function_values.rs @@ -5,8 +5,7 @@ use crate::{assert_success, assert_vm_status, tests::common, MoveHarness}; use aptos_framework::BuildOptions; use aptos_language_e2e_tests::executor::FakeExecutor; use aptos_transaction_simulation::Account; -use aptos_types::{move_utils::MemberId, transaction::ExecutionStatus}; -use claims::assert_ok; +use aptos_types::move_utils::MemberId; use move_core_types::{ account_address::AccountAddress, parser::parse_struct_tag, vm_status::StatusCode, }; @@ -92,7 +91,7 @@ fn test_function_value_captures_aggregator_is_not_storable() { vec![], vec![bcs::to_bytes(&100_u64).unwrap()], ); - assert_vm_status!(status, StatusCode::VALUE_SERIALIZATION_ERROR); + assert_vm_status!(status, StatusCode::UNABLE_TO_CAPTURE_DELAYED_FIELDS); let status = h.run_entry_function( &acc, @@ -137,37 +136,16 @@ fn test_function_value_uses_aggregator_is_storable() { #[test] fn test_function_value_captures_aggregator() { - let mut h = MoveHarness::new_with_executor(FakeExecutor::from_head_genesis().set_parallel()); + let mut h = MoveHarness::new_with_executor(FakeExecutor::from_head_genesis()); let acc = h.new_account_at(AccountAddress::from_hex_literal("0x123").unwrap()); initialize(&mut h); - let mut value = 100; - let status = h.run_entry_function( - &acc, - MemberId::from_str("0x1::proxy::initialize").unwrap(), - vec![], - 
vec![bcs::to_bytes(&value).unwrap()], - ); - assert_success!(status); - assert_counter_value_eq(&h, &acc, value); - - let increment = 100; - value += increment; - let status = h.run_entry_function( - &acc, - MemberId::from_str("0x1::capturing::capture_aggregator").unwrap(), - vec![], - vec![ - bcs::to_bytes(&increment).unwrap(), - bcs::to_bytes(&value).unwrap(), - ], - ); - assert_success!(status); - for name in [ + "capture_aggregator", "to_bytes_with_captured_aggregator", "to_string_with_captured_aggregator", "emit_event_with_captured_aggregator", + "serialized_size_with_captured_aggregator", ] { let status = h.run_entry_function( &acc, @@ -175,20 +153,6 @@ fn test_function_value_captures_aggregator() { vec![], vec![], ); - assert_vm_status!(status, StatusCode::VALUE_SERIALIZATION_ERROR); + assert_vm_status!(status, StatusCode::UNABLE_TO_CAPTURE_DELAYED_FIELDS); } - - let status = h.run_entry_function( - &acc, - MemberId::from_str("0x1::capturing::serialized_size_with_captured_aggregator").unwrap(), - vec![], - vec![], - ); - - // Note: the native function remaps the error and aborts with this code. - let status = assert_ok!(status.as_kept_status()); - assert!(matches!(status, ExecutionStatus::MoveAbort { - code: 453, - .. - })); } diff --git a/aptos-move/e2e-move-tests/src/tests/any.data/pack/Move.toml b/aptos-move/e2e-move-tests/src/tests/any.data/pack/Move.toml new file mode 100644 index 0000000000000..82b9006b8f1f1 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/any.data/pack/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "any_tests" +version = "0.0.0" + +[dependencies] +AptosStdlib = { local = "../../../../../framework/aptos-stdlib" } diff --git a/aptos-move/e2e-move-tests/src/tests/any.data/pack/sources/any_with_function_values.move b/aptos-move/e2e-move-tests/src/tests/any.data/pack/sources/any_with_function_values.move new file mode 100644 index 0000000000000..f0764394febd6 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/any.data/pack/sources/any_with_function_values.move @@ -0,0 +1,65 @@ +module 0x123::any_with_function_values { + use 0x1::any::pack; + + // Should fail: we cannot gain abilities. + public entry fun roundtrip_fails_1() { + let f: || has drop + store = || dummy(); + let _g = pack(f).unpack<|| has drop + store + copy>(); + // g(); + } + + // Should fail: we cannot drop abilities. + public entry fun roundtrip_fails_2() { + let f: || has drop + store + copy = || dummy(); + let g = pack(f).unpack<|| has drop + store>(); + g(); + } + + public fun dummy() {} + + public fun dummy_with_args(x: u64): u64 { + x + } + + public fun returns_dummy(): || { + || dummy() + } + + // Should fail: cannot confuse between abilities. + public entry fun roundtrip_fails_3() { + let f: || (||) has drop + store = returns_dummy; + let g = pack(f).unpack<|| (|| has drop + store)>(); + g(); + } + + struct X { x: u64 } + struct Xu64 { x: u64 } + struct S has key { x: T } + + public fun create(): S { + S { x: X { x: 100 } } + } + + // Should fail: cannot confuse between different generic parameters - they are comma-separated. 
+ public entry fun roundtrip_fails_4() { + let f: || S has drop + store = || create(); + let g = pack(f).unpack<||S has drop + store>(); + + let S { x } = g(); + let Xu64 { x } = x; + assert!(x == 100); + } + + public entry fun roundtrip_success_1() { + let x: u64 = 1; + let f: ||u64 has drop + store = || dummy_with_args(x); + let g = pack(f).unpack<||u64 has drop + store>(); + assert!(g() == 1, 404); + } + + public entry fun roundtrip_success_2() { + let f: || has drop + store = || dummy(); + let g = pack(f).unpack<|| has drop + store>(); + g(); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/any.rs b/aptos-move/e2e-move-tests/src/tests/any.rs new file mode 100644 index 0000000000000..bedb1ad7f930d --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/any.rs @@ -0,0 +1,44 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{assert_abort, assert_success, tests::common, MoveHarness}; +use aptos_framework::BuildOptions; +use move_core_types::account_address::AccountAddress; + +#[test] +fn test_any_with_function_values() { + let mut h = MoveHarness::new(); + + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x123").unwrap()); + assert_success!(h.publish_package_with_options( + &acc, + &common::test_dir_path("any.data/pack"), + BuildOptions::move_2().set_latest_language(), + )); + + for idx in [1, 2, 3, 4] { + let result = h.run_entry_function( + &acc, + str::parse(&format!( + "0x123::any_with_function_values::roundtrip_fails_{idx}" + )) + .unwrap(), + vec![], + vec![], + ); + assert_abort!(result, 65537); + } + + for idx in [1, 2] { + let result = h.run_entry_function( + &acc, + str::parse(&format!( + "0x123::any_with_function_values::roundtrip_success_{idx}" + )) + .unwrap(), + vec![], + vec![], + ); + assert_success!(result); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/bcs.data/function-values/Move.toml b/aptos-move/e2e-move-tests/src/tests/bcs.data/function-values/Move.toml new file mode 100644 index 0000000000000..f477487f74006 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/bcs.data/function-values/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "function_values" +version = "0.0.0" + +[dependencies] +AptosStdlib = { local = "../../../../../framework/aptos-stdlib" } diff --git a/aptos-move/e2e-move-tests/src/tests/bcs.data/function-values/sources/bcs_function_values_test.move b/aptos-move/e2e-move-tests/src/tests/bcs.data/function-values/sources/bcs_function_values_test.move new file mode 100644 index 0000000000000..f7bda695c496f --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/bcs.data/function-values/sources/bcs_function_values_test.move @@ -0,0 +1,81 @@ +module 0x1::bcs_function_values_test { + use std::bcs; + + public fun public_function(x: u64): u64 { + x + } + + #[persistent] + public fun public_persistent_function(x: u64): u64 { + x + } + + public(friend) fun friend_function(x: u64): u64 { + x + } + + #[persistent] + public(friend) fun friend_persistent_function(x: u64): u64 { + x + } + + fun private_function(x: u64): u64 { + x + } + + #[persistent] + fun private_persistent_function(x: u64): u64 { + x + } + + fun check_bcs(x: &T, abort_code: u64) { + let bytes = bcs::to_bytes(x); + let size = bcs::serialized_size(x); + assert!(bytes.length() == size, abort_code); + } + + public entry fun successful_bcs_tests() { + let f1: |u64|u64 has drop = public_function; + check_bcs(&f1, 1); + + let f2: |u64|u64 has drop = public_persistent_function; + check_bcs(&f2, 2); + + let f3: |u64|u64 has drop = 
friend_persistent_function; + check_bcs(&f3, 3); + + let f4: |u64|u64 has drop = private_persistent_function; + check_bcs(&f4, 4); + } + + public entry fun failure_bcs_test_friend_function() { + let f: |u64|u64 has drop = friend_function; + check_bcs(&f, 404); + } + + public entry fun failure_bcs_test_friend_function_with_capturing() { + let f: ||u64 has drop = || friend_function(3); + check_bcs(&f, 404); + } + + public entry fun failure_bcs_test_private_function() { + let f: |u64|u64 has drop = private_function; + check_bcs(&f, 404); + } + + public entry fun failure_bcs_test_private_function_with_capturing() { + let f: ||u64 has drop = || private_function(4); + check_bcs(&f, 404); + } + + public entry fun failure_bcs_test_anonymous() { + let f: |u64|u64 has drop = |x| { x }; + check_bcs(&f, 404); + } + + public entry fun failure_bcs_test_anonymous_with_capturing() { + let y: u64 = 2; + let f: |u64|u64 has drop = |x| { x + y }; + check_bcs(&f, 404); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/bcs.rs b/aptos-move/e2e-move-tests/src/tests/bcs.rs new file mode 100644 index 0000000000000..30b3e5abb511b --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/bcs.rs @@ -0,0 +1,70 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{assert_success, tests::common, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_language_e2e_tests::executor::FakeExecutor; +use aptos_types::{move_utils::MemberId, transaction::ExecutionStatus}; +use claims::assert_ok; +use move_core_types::{ + account_address::AccountAddress, + ident_str, + language_storage::ModuleId, + vm_status::{sub_status::NFE_BCS_SERIALIZATION_FAILURE, AbortLocation}, +}; +use std::str::FromStr; + +fn initialize(h: &mut MoveHarness) { + let build_options = BuildOptions::move_2().set_latest_language(); + let path = common::test_dir_path("bcs.data/function-values"); + + let framework_account = h.aptos_framework_account(); + let status = h.publish_package_with_options(&framework_account, path.as_path(), build_options); + assert_success!(status); +} + +#[test] +fn test_function_value_serialization() { + let mut h = MoveHarness::new_with_executor(FakeExecutor::from_head_genesis()); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x123").unwrap()); + initialize(&mut h); + + let status = h.run_entry_function( + &acc, + MemberId::from_str("0x1::bcs_function_values_test::successful_bcs_tests").unwrap(), + vec![], + vec![], + ); + assert_success!(status); + + let expected_failures = [ + "failure_bcs_test_friend_function", + "failure_bcs_test_friend_function_with_capturing", + "failure_bcs_test_private_function", + "failure_bcs_test_private_function_with_capturing", + "failure_bcs_test_anonymous", + "failure_bcs_test_anonymous_with_capturing", + ]; + + let bcs_location = AbortLocation::Module(ModuleId::new( + AccountAddress::ONE, + ident_str!("bcs").to_owned(), + )); + let expected_status = ExecutionStatus::MoveAbort { + location: bcs_location.clone(), + code: NFE_BCS_SERIALIZATION_FAILURE, + info: None, + }; + + for name in expected_failures { + let status = assert_ok!(h + .run_entry_function( + &acc, + MemberId::from_str(&format!("0x1::bcs_function_values_test::{name}")).unwrap(), + vec![], + vec![], + ) + .as_kept_status()); + assert_eq!(&status, &expected_status); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/pack/Move.toml b/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/pack/Move.toml new file mode 100644 index 0000000000000..33398ec91b2cb 
--- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/pack/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "generic_cmp" +version = "0.0.0" + +[dependencies] +AptosFramework = { local = "../../../../../framework/aptos-framework" } diff --git a/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/pack/sources/test.move b/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/pack/sources/test.move new file mode 100644 index 0000000000000..498e79ae834ac --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/pack/sources/test.move @@ -0,0 +1,783 @@ + /// Module for testing non-integer primitive types + module 0x99::primitive_cmp { + //* bool group + fun test_left_lt_right_bool(x: bool, y: bool): bool { + // a and b are created to test our optimization + let a = &x; + let b = &y; + + *a < *b && + x < y + } + fun test_left_le_right_bool(x: bool, y: bool): bool { + x <= y + } + fun test_left_gt_right_bool(x: bool, y: bool): bool { + x > y + } + fun test_left_ge_right_bool(x: bool, y: bool): bool { + x >= y + } + + //* address group + fun test_left_lt_right_address(x: address, y: address): bool { + // a and b are created to test our optimization + let a = &x; + let b = &y; + + *a < *b && + x < y + } + fun test_left_le_right_address(x: address, y: address): bool { + x <= y + } + fun test_left_gt_right_address(x: address, y: address): bool { + x > y + } + fun test_left_ge_right_address(x: address, y: address): bool { + x >= y + } + + //* vector group + fun test_left_lt_right_vector(x: vector, y: vector): bool { + // a and b are created to test our optimization + let a = &x; + let b = &y; + + *a < *b && + x < y + } + fun test_left_le_right_vector(x: vector, y: vector): bool { + x <= y + } + fun test_left_gt_right_vector(x: vector, y: vector): bool { + x > y + } + fun test_left_ge_right_vector(x: vector, y: vector): bool { + x >= y + } + + //* nested vector group + fun test_left_lt_right_nested_vector(x: vector>, y: vector>): bool { + // a and b are created to test our optimization + let a = &x; + let b = &y; + + *a < *b && + x < y + } + fun test_left_le_right_nested_vector(x: vector>, y: vector>): bool { + x <= y + } + fun test_left_gt_right_nested_vector(x: vector>, y: vector>): bool { + x > y + } + fun test_left_ge_right_nested_vector(x: vector>, y: vector>): bool { + x >= y + } + + //* entry functions for testing non-integer primitive types + entry fun test_bool(){ + let x = false; + let y = true; + assert!(test_left_lt_right_bool(x, y), 0); + assert!(test_left_le_right_bool(x, y), 0); + assert!(test_left_le_right_bool(x, x), 0); + assert!(test_left_le_right_bool(y, y), 0); + + assert!(test_left_gt_right_bool(y, x), 0); + assert!(test_left_ge_right_bool(y, x), 0); + assert!(test_left_ge_right_bool(x, x), 0); + assert!(test_left_ge_right_bool(y, y), 0); + } + + entry fun test_address(){ + let x = @0x1; + let y = @0x2; + assert!(test_left_lt_right_address(x, y), 0); + assert!(test_left_le_right_address(x, y), 0); + assert!(test_left_le_right_address(x, x), 0); + assert!(test_left_le_right_address(y, y), 0); + + assert!(test_left_gt_right_address(y, x), 0); + assert!(test_left_ge_right_address(y, x), 0); + assert!(test_left_ge_right_address(x, x), 0); + assert!(test_left_ge_right_address(y, y), 0); + } + + entry fun test_vector(){ + let x = vector[0u8, 1u8, 2u8, 3u8, 4u8, 5u8]; + let y = vector[0u8, 1u8, 2u8, 3u8, 4u8, 5u8, 6u8]; + let z = vector[1u8, 2u8, 3u8, 4u8, 5u8, 6u8]; + + assert!(test_left_lt_right_vector(x, y), 0); + assert!(test_left_lt_right_vector(y, z), 
0); + assert!(test_left_lt_right_vector(x, z), 0); + + assert!(test_left_le_right_vector(x, y), 0); + assert!(test_left_le_right_vector(y, z), 0); + assert!(test_left_le_right_vector(x, z), 0); + assert!(test_left_le_right_vector(x, x), 0); + assert!(test_left_le_right_vector(y, y), 0); + assert!(test_left_le_right_vector(z, z), 0); + + assert!(test_left_gt_right_vector(y, x), 0); + assert!(test_left_gt_right_vector(z, y), 0); + assert!(test_left_gt_right_vector(z, x), 0); + + assert!(test_left_ge_right_vector(y, x), 0); + assert!(test_left_ge_right_vector(z, y), 0); + assert!(test_left_ge_right_vector(z, x), 0); + assert!(test_left_ge_right_vector(x, x), 0); + assert!(test_left_ge_right_vector(y, y), 0); + assert!(test_left_ge_right_vector(z, z), 0); + } + + entry fun test_nested_vector(){ + let x = vector[0u8, 1u8, 2u8, 3u8, 4u8, 5u8]; + let y = vector[0u8, 1u8, 2u8, 3u8, 4u8, 5u8, 6u8]; + let z = vector[1u8, 2u8, 3u8, 4u8, 5u8, 6u8]; + + let nested_1 = vector[x]; + let nested_2 = vector[x, x]; + let nested_3 = vector[x, y]; + let nested_4 = vector[x, y, z]; + let nested_5 = vector[x, z]; + let nested_6 = vector[y]; + let nested_7 = vector[y, y]; + let nested_8 = vector[y, z]; + let nested_9 = vector[z]; + let nested_10 = vector[z, z]; + + assert!(test_left_lt_right_nested_vector(nested_1, nested_2), 0); + assert!(test_left_lt_right_nested_vector(nested_2, nested_3), 0); + assert!(test_left_lt_right_nested_vector(nested_3, nested_4), 0); + assert!(test_left_lt_right_nested_vector(nested_4, nested_5), 0); + assert!(test_left_lt_right_nested_vector(nested_5, nested_6), 0); + assert!(test_left_lt_right_nested_vector(nested_6, nested_7), 0); + assert!(test_left_lt_right_nested_vector(nested_7, nested_8), 0); + assert!(test_left_lt_right_nested_vector(nested_8, nested_9), 0); + assert!(test_left_lt_right_nested_vector(nested_9, nested_10), 0); + + assert!(test_left_le_right_nested_vector(nested_1, nested_2), 0); + assert!(test_left_le_right_nested_vector(nested_2, nested_3), 0); + assert!(test_left_le_right_nested_vector(nested_3, nested_4), 0); + assert!(test_left_le_right_nested_vector(nested_4, nested_5), 0); + assert!(test_left_le_right_nested_vector(nested_5, nested_6), 0); + assert!(test_left_le_right_nested_vector(nested_6, nested_7), 0); + assert!(test_left_le_right_nested_vector(nested_7, nested_8), 0); + assert!(test_left_le_right_nested_vector(nested_8, nested_9), 0); + assert!(test_left_le_right_nested_vector(nested_9, nested_10), 0); + + assert!(test_left_gt_right_nested_vector(nested_2, nested_1), 0); + assert!(test_left_gt_right_nested_vector(nested_3, nested_2), 0); + assert!(test_left_gt_right_nested_vector(nested_4, nested_3), 0); + assert!(test_left_gt_right_nested_vector(nested_5, nested_4), 0); + assert!(test_left_gt_right_nested_vector(nested_6, nested_5), 0); + assert!(test_left_gt_right_nested_vector(nested_7, nested_6), 0); + assert!(test_left_gt_right_nested_vector(nested_8, nested_7), 0); + assert!(test_left_gt_right_nested_vector(nested_9, nested_8), 0); + assert!(test_left_gt_right_nested_vector(nested_10, nested_9), 0); + + assert!(test_left_ge_right_nested_vector(nested_2, nested_1), 0); + assert!(test_left_ge_right_nested_vector(nested_3, nested_2), 0); + assert!(test_left_ge_right_nested_vector(nested_4, nested_3), 0); + assert!(test_left_ge_right_nested_vector(nested_5, nested_4), 0); + assert!(test_left_ge_right_nested_vector(nested_6, nested_5), 0); + assert!(test_left_ge_right_nested_vector(nested_7, nested_6), 0); + 
assert!(test_left_ge_right_nested_vector(nested_8, nested_7), 0); + assert!(test_left_ge_right_nested_vector(nested_9, nested_8), 0); + assert!(test_left_ge_right_nested_vector(nested_10, nested_9), 0); + } +} + +/// Module for struct types + module 0x99::struct_cmp { + use std::cmp; + /// A simple struct + struct Int has drop, copy { + a: u8, + b: u16, + c: u32, + d: u64, + e: u128, + f: u256 + } + + /// A more complex struct + struct Complex has drop, copy { + a: u8, + b: Int, + } + + /// A more complex struct with vectors + struct ComplexWithVec has drop, copy { + a: u8, + b: Int, + c: vector, + d: vector> + } + + //* Simple struct group + fun test_simple_struct_lt(x: Int, y: Int): bool { + // a and b are created to test our optimization + let a = &x; + let b = &y; + + *a < *b && + x < y + } + fun test_simple_struct_le(x: Int, y: Int): bool { + x <= y + } + fun test_simple_struct_gt(x: Int, y: Int): bool { + x > y + } + fun test_simple_struct_ge(x: Int, y: Int): bool { + x >= y + } + + //* Complex struct group + fun test_complex_struct_lt(x: Complex, y: Complex): bool { + // a and b are created to test our optimization + let a = &x; + let b = &y; + + *a < *b && + x < y + } + fun test_complex_struct_le(x: Complex, y: Complex): bool { + x <= y + } + fun test_complex_struct_gt(x: Complex, y: Complex): bool { + x > y + } + fun test_complex_struct_ge(x: Complex, y: Complex): bool { + x >= y + } + + //* Complex struct with vector group + fun test_complex_struct_vec_lt(x: ComplexWithVec, y: ComplexWithVec): bool { + // a and b are created to test our optimization + let a = &x; + let b = &y; + + *a < *b && + x < y && + x.b < y.b && + x.c < y.c && + x.c[0] < y.c[0] && + x.d < y.d && + x.d[0] < y.d[0] && + x.d[0][0] < y.d[0][0] + } + + //* Enum as special struct group + fun test_special_struct_vec_lt(x: ComplexWithVec, y: ComplexWithVec): bool { + // a and b are created to test our optimization + let a = &cmp::compare(&x, &y); + let b = &cmp::compare(&y, &x); + + *a < *b && + cmp::compare(&x, &y) < cmp::compare(&y, &x) && + cmp::compare(&x.b, &y.b) < cmp::compare(&y.b, &x.b) && + cmp::compare(&x.c, &y.c) < cmp::compare(&y.c, &x.c) && + cmp::compare(&x.c[0], &y.c[0]) < cmp::compare(&y.c[0], &x.c[0]) && + cmp::compare(&x.d, &y.d) < cmp::compare(&y.d, &x.d) && + cmp::compare(&x.d[0], &y.d[0]) < cmp::compare(&y.d[0], &x.d[0]) && + cmp::compare(&x.d[0][0], &y.d[0][0]) < cmp::compare(&y.d[0][0], &x.d[0][0]) + } + + //* entry functions for testing struct types + entry fun test_simple_struct(){ + let x = Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }; + let y = Int { + a: 2, + b: 3, + c: 4, + d: 5, + e: 6, + f: 7 + }; + + assert!(test_simple_struct_lt(x, y), 0); + assert!(test_simple_struct_le(x, y), 0); + assert!(test_simple_struct_le(x, x), 0); + assert!(test_simple_struct_le(y, y), 0); + + assert!(test_simple_struct_gt(y, x), 0); + assert!(test_simple_struct_ge(y, x), 0); + assert!(test_simple_struct_ge(x, x), 0); + assert!(test_simple_struct_ge(y, y), 0); + } + + entry fun test_complex_struct(){ + + let x = Complex { + a: 1, + b: Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + } + }; + + let y = Complex { + a: 2, + b: Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + } + }; + + assert!(test_complex_struct_lt(x, y), 0); + assert!(test_complex_struct_le(x, y), 0); + assert!(test_complex_struct_le(x, x), 0); + assert!(test_complex_struct_le(y, y), 0); + + assert!(test_complex_struct_gt(y, x), 0); + assert!(test_complex_struct_ge(y, x), 0); + assert!(test_complex_struct_ge(x, x), 
0); + assert!(test_complex_struct_ge(y, y), 0); + } + + entry fun test_nested_complex_struct(){ + let x = ComplexWithVec { + a: 1, + b: Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + c: vector[ + Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + ], + d: vector[ + vector[ + Complex { + a: 1, + b: Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + } + } + ] + ], + }; + + let y = ComplexWithVec { + a: 2, + b: Int { + a: 2, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + c: vector[ + Int { + a: 2, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + ], + d: vector[ + vector[ + Complex { + a: 2, + b: Int { + a: 2, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + } + } + ] + ], + }; + + assert!(test_complex_struct_vec_lt(x, y), 0); + } + + entry fun test_special_complex_struct(){ + let x = ComplexWithVec { + a: 1, + b: Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + c: vector[ + Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + ], + d: vector[ + vector[ + Complex { + a: 1, + b: Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + } + } + ] + ], + }; + + let y = ComplexWithVec { + a: 2, + b: Int { + a: 2, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + c: vector[ + Int { + a: 2, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + ], + d: vector[ + vector[ + Complex { + a: 2, + b: Int { + a: 2, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + } + } + ] + ], + }; + + assert!(test_special_struct_vec_lt(x, y), 0); + } +} + +/// Module for testing generic types + module 0x99::generic_cmp { + use std::cmp; + public struct Int has drop, copy { + a: u8, + b: u16, + c: u32, + d: u64, + e: u128, + f: u256 + } + + public struct Complex has drop, copy { + a: u8, + b: Int, + } + + struct Foo has drop, copy { x: T } + + struct Bar has drop, copy { + x: T1, + y: vector, + } + + //* Simple generic arg group + fun test_generic_arg_lt(x: T, y: T): bool { + // a and b are created to test our optimization + let a = &cmp::compare(&x, &y); + let b = &cmp::compare(&y, &x); + + *a < *b && + x < y + } + fun test_generic_arg_le(x: T, y: T): bool { + x <= y + } + fun test_generic_arg_gt(x: T, y: T): bool { + x > y + } + fun test_generic_arg_ge(x: T, y: T): bool { + x >= y + } + + + //* Simple generic struct arg group + fun test_generic_struct_lt(x: Foo
<address>, y: Foo<address>): bool {
+        // a and b are created to test our optimization
+        let a = &cmp::compare(&x, &y);
+        let b = &cmp::compare(&y, &x);
+
+        *a < *b &&
+        x < y &&
+        x.x < y.x
+    }
+    fun test_generic_struct_le(x: Foo<address>, y: Foo<address>): bool {
+
+        x <= y &&
+        x.x <= y.x
+    }
+    fun test_generic_struct_gt(x: Foo<address>, y: Foo<address>): bool {
+        x > y &&
+        x.x > y.x
+    }
+    fun test_generic_struct_ge(x: Foo<address>, y: Foo<address>): bool {
+        x >= y &&
+        x.x >= y.x
+    }
+
+    //* Complex generic struct arg group
+    public fun test_generic_complex_struct_lt(x: Bar<Int, Complex>, y: Bar<Int, Complex>): bool {
+        // a and b are created to test our optimization
+        let a = &cmp::compare(&x, &y);
+        let b = &cmp::compare(&y, &x);
+
+        *a < *b &&
+        x < y &&
+        x.x < y.x &&
+        x.y < y.y &&
+        x.y[0] < y.y[0]
+    }
+    public fun test_generic_complex_struct_le(x: Bar<Int, Complex>, y: Bar<Int, Complex>): bool {
+        x <= y &&
+        x.x <= y.x &&
+        x.y <= y.y &&
+        x.y[0] <= y.y[0]
+    }
+    public fun test_generic_complex_struct_gt(x: Bar<Int, Complex>, y: Bar<Int, Complex>): bool {
+        x > y &&
+        x.x > y.x &&
+        x.y > y.y &&
+        x.y[0] > y.y[0]
+    }
+    public fun test_generic_complex_struct_ge(x: Bar<Int, Complex>, y: Bar<Int, Complex>): bool {
+        x >= y &&
+        x.x >= y.x &&
+        x.y >= y.y &&
+        x.y[0] >= y.y[0]
+    }
+
+    //* entry functions for testing generic types
+    entry fun test_generic_arg(){
+        let x = @0x1;
+        let y = @0x2;
+        assert!(test_generic_arg_lt(x, y), 0);
+        assert!(test_generic_arg_le(x, y), 0);
+        assert!(test_generic_arg_le(x, x), 0);
+        assert!(test_generic_arg_le(y, y), 0);
+
+        assert!(test_generic_arg_gt(y, x), 0);
+        assert!(test_generic_arg_ge(y, x), 0);
+        assert!(test_generic_arg_ge(x, x), 0);
+        assert!(test_generic_arg_ge(y, y), 0);
+
+    }
+
+    entry fun test_generic_struct(){
+        let x = Foo<address> {x: @0x1};
+        let y = Foo<address>
{x: @0x2}; + + assert!(test_generic_struct_lt(x, y), 0); + assert!(test_generic_struct_le(x, y), 0); + assert!(test_generic_struct_le(x, x), 0); + assert!(test_generic_struct_le(y, y), 0); + + assert!(test_generic_struct_gt(y, x), 0); + assert!(test_generic_struct_ge(y, x), 0); + assert!(test_generic_struct_ge(x, x), 0); + assert!(test_generic_struct_ge(y, y), 0); + } + + entry fun test_generic_complex_struct(){ + let x = Bar { + x: Int{ + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + y: vector[ + Complex{ + a: 1, + b: Int { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + } + } + ] + }; + + let y = Bar { + x: Int{ + a: 2, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + }, + y: vector[ + Complex{ + a: 2, + b: Int { + a: 2, + b: 2, + c: 3, + d: 4, + e: 5, + f: 6 + } + } + ] + }; + + assert!(test_generic_complex_struct_lt(x, y), 0); + assert!(test_generic_complex_struct_le(x, y), 0); + assert!(test_generic_complex_struct_le(x, x), 0); + assert!(test_generic_complex_struct_le(y, y), 0); + + assert!(test_generic_complex_struct_gt(y, x), 0); + assert!(test_generic_complex_struct_ge(y, x), 0); + assert!(test_generic_complex_struct_ge(x, x), 0); + assert!(test_generic_complex_struct_ge(y, y), 0); + } +} + +/// Modules for testing function values +module 0x99::module1 { + public fun test(): u64{ + 1 + } + public fun test1(): u64{ + 1 + } + public fun test2(_x: T){ + } + public fun test3(x: u64): u64{ + x + 1 + } +} +module 0x99::module2 { + public fun test(): u64{ + 1 + } +} + +/// Function values are compared in the following order: +/// 1. Module identification is compared by address and name +/// 2. Function name is compared based on identity string +/// 3. Type parameters are compared based on types (by discriminant index in their defining enum) +/// 4. 
Captured values are compared + +module 0x99::function_value_cmp { + use 0x99::module1 as module1; + use 0x99::module2 as module2; + + //* entry function for testing function values + entry fun test_module_name_cmp(){ + // f1 < f2 due to module name `module1` < `module2` + // - `f1` named to `closure#0module1::test;` + // - `f2` named to `closure#0module2::test;` + let f1: ||u64 has drop = module1::test; + let f2: ||u64 has drop = module2::test; + assert!(f1 < f2, 0); + } + entry fun test_function_name_cmp(){ + // f1 < f2 due to function name `test` < `test1` + // - `f1` named to `closure#0module1::test;` + // - `f2` named to `closure#0module1::test1;` + let f1: ||u64 has drop = module1::test; + let f2: ||u64 has drop = module1::test1; + assert!(f1 < f2, 0); + + // f3 < f4 due to function name by lambda order + // - `f3` named to `closure#0function_value_cmp::__lambda__1__test_function_name_cmp;` + // - `f4` named to `closure#0function_value_cmp::__lambda__2__test_function_name_cmp;` + let f3: ||u64 has drop = ||1; + let f4: ||u64 has drop = ||1; + assert!(f3 < f4, 0); + + // f5 < f6 due to function name by lambda order + // - `f5` named to `closure#0function_value_cmp::__lambda__3__test_function_name_cmp;` + // - `f6` named to `closure#0function_value_cmp::__lambda__4__test_function_name_cmp;` + let f5: ||u64 has drop = ||1; + let f6: ||u64 has drop = ||100; + assert!(f5 < f6, 0); + } + entry fun test_typed_arg_cmp(){ + // f1 < f2 due to type parameter `u8` < `u64` + let x: u8 = 1; + let y: u64 = 1; + let f1: || has drop = ||module1::test2(x); + let f2: || has drop = ||module1::test2(y); + assert!(f1 < f2, 0); + } + entry fun test_captured_var_cmp(){ + // f1 < f2 due to captured values `1` < `2` + let x = 1; + let y = 2; + let f1: ||u64 has drop = ||module1::test3(x); + let f2: ||u64 has drop = ||module1::test3(y); + assert!(f1 < f2, 0); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/script/Move.toml b/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/script/Move.toml new file mode 100644 index 0000000000000..ea3a9ed9652dd --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/script/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "two_signer_cmp" +version = "0.0.0" + +[dependencies] +AptosFramework = { local = "../../../../../framework/aptos-framework" } diff --git a/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/script/sources/script.move b/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/script/sources/script.move new file mode 100644 index 0000000000000..bdf4e4c325b82 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/cmp_generic.data/script/sources/script.move @@ -0,0 +1,8 @@ +script { + fun main( + first: signer, + second: signer + ) { + assert!(first <= second, 0); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/enum_upgrade_fv.rs b/aptos-move/e2e-move-tests/src/tests/enum_upgrade_fv.rs new file mode 100644 index 0000000000000..bdd8b965fe5a4 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/enum_upgrade_fv.rs @@ -0,0 +1,197 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! 
Tests for enum type upgrade compatibility + +use crate::{assert_success, assert_vm_status, tests::common, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_language_e2e_tests::account::Account; +use aptos_package_builder::PackageBuilder; +use aptos_types::{account_address::AccountAddress, transaction::TransactionStatus}; +use move_core_types::vm_status::StatusCode; + +#[test] +fn enum_upgrade() { + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x815").unwrap()); + + // Initial publish + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data has key { + V1{x: ||T has copy + store } + } + } + "#, + ); + assert_success!(result); + + // incompatible variant + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data has key { + V1 {x: ||T has store} + } + } + "#, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + // identical variant + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + enum Data has key { + V1 {x: ||T has copy + store + drop} + } + } + "#, + ); + assert_vm_status!(result, StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE); + + // prepare test for executing function value stored in an enum + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + use std::signer; + enum Data has key { + V1 {x: ||T has copy + store} + } + + public fun make_data(f: ||T has copy + drop + store): Data { + Data::V1 {x: f} + } + + public fun store_v1(s: &signer, data: Data) { + move_to(s, data); + } + + public fun retrieve_data_and_execute(s: &signer): T { + let data = borrow_global>(signer::address_of(s)); + (data.x)() + } + + } + + module 0x815::n { + use 0x815::m; + + public fun f(x: u64): u64 { + x + 3 + } + + public entry fun create_store_data(s: &signer) { + let k = 3; + let f: ||u64 has copy + drop + store = || f(k); + m::store_v1(s, m::make_data(f)); + } + + public entry fun execute_stored_data(s: &signer) { + assert!(m::retrieve_data_and_execute(s) == 6, 99); + } + + } + "#, + ); + assert_success!(result); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x815::n::create_store_data").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x815::n::execute_stored_data").unwrap(), + vec![], + vec![], + )); + + // update enum with a new variant + let result = publish( + &mut h, + &acc, + r#" + module 0x815::m { + use std::signer; + enum Data has key { + V1 {x: ||T has copy + store}, + V2 {x1: ||T has copy + drop + store, x: ||T has copy + store} + } + + public fun make_data(f: ||T has copy + drop + store): Data { + Data::V1 {x: f} + } + + public fun make_data_v2(f1: ||T has copy + drop + store, f2: ||T has copy + drop + store): Data { + Data::V2 {x1: f1, x: f2} + } + + public fun store_v1(s: &signer, data: Data) { + move_to(s, data); + } + + public fun retrieve_data_and_execute(s: &signer): T { + let data = borrow_global>(signer::address_of(s)); + (data.x)() + } + + } + + module 0x815::n { + use 0x815::m; + + public fun f(x: u64): u64 { + x + 3 + } + + public entry fun create_store_data(s: &signer) { + let k = 3; + let f: ||u64 has copy + drop + store = || f(k); + m::store_v1(s, m::make_data(f)); + } + + public entry fun execute_stored_data(s: &signer) { + assert!(m::retrieve_data_and_execute(s) == 6, 99); + } + + } + "#, + ); + assert_success!(result); + + // execution still exceeds after enum upgrade + assert_success!(h.run_entry_function( + &acc, + 
str::parse("0x815::n::execute_stored_data").unwrap(), + vec![], + vec![], + )); +} + +fn publish(h: &mut MoveHarness, account: &Account, source: &str) -> TransactionStatus { + let mut builder = PackageBuilder::new("Package"); + builder.add_source("m.move", source); + builder.add_local_dep( + "MoveStdlib", + &common::framework_dir_path("move-stdlib").to_string_lossy(), + ); + let path = builder.write_to_temp().unwrap(); + h.publish_package_with_options( + account, + path.path(), + BuildOptions::move_2().set_latest_language(), + ) +} diff --git a/aptos-move/e2e-move-tests/src/tests/fee_payer.rs b/aptos-move/e2e-move-tests/src/tests/fee_payer.rs index 4fe4aa3e4fe81..a774904fdd12b 100644 --- a/aptos-move/e2e-move-tests/src/tests/fee_payer.rs +++ b/aptos-move/e2e-move-tests/src/tests/fee_payer.rs @@ -38,7 +38,7 @@ fn test_existing_account_with_fee_payer() { FeatureFlag::GAS_PAYER_ENABLED, FeatureFlag::SPONSORED_AUTOMATIC_ACCOUNT_V1_CREATION, ], - vec![], + vec![FeatureFlag::DEFAULT_ACCOUNT_RESOURCE], ); let alice = h.new_account_with_balance_and_sequence_number(0, 0); @@ -73,7 +73,7 @@ fn test_existing_account_with_fee_payer_aborts() { FeatureFlag::GAS_PAYER_ENABLED, FeatureFlag::SPONSORED_AUTOMATIC_ACCOUNT_V1_CREATION, ], - vec![], + vec![FeatureFlag::DEFAULT_ACCOUNT_RESOURCE], ); let alice = h.new_account_with_balance_and_sequence_number(0, 0); @@ -109,7 +109,7 @@ fn test_account_not_exist_with_fee_payer() { FeatureFlag::GAS_PAYER_ENABLED, FeatureFlag::SPONSORED_AUTOMATIC_ACCOUNT_V1_CREATION, ], - vec![], + vec![FeatureFlag::DEFAULT_ACCOUNT_RESOURCE], ); let alice = Account::new(); @@ -151,7 +151,7 @@ fn test_account_not_exist_with_fee_payer_insufficient_gas() { FeatureFlag::GAS_PAYER_ENABLED, FeatureFlag::SPONSORED_AUTOMATIC_ACCOUNT_V1_CREATION, ], - vec![], + vec![FeatureFlag::DEFAULT_ACCOUNT_RESOURCE], ); let alice = Account::new(); @@ -195,7 +195,7 @@ fn test_account_not_exist_and_move_abort_with_fee_payer_create_account() { FeatureFlag::GAS_PAYER_ENABLED, FeatureFlag::SPONSORED_AUTOMATIC_ACCOUNT_V1_CREATION, ], - vec![], + vec![FeatureFlag::DEFAULT_ACCOUNT_RESOURCE], ); let alice = Account::new(); @@ -256,7 +256,7 @@ fn test_account_not_exist_out_of_gas_with_fee_payer() { FeatureFlag::GAS_PAYER_ENABLED, FeatureFlag::SPONSORED_AUTOMATIC_ACCOUNT_V1_CREATION, ], - vec![], + vec![FeatureFlag::DEFAULT_ACCOUNT_RESOURCE], ); let alice = Account::new(); @@ -298,7 +298,7 @@ fn test_account_not_exist_move_abort_with_fee_payer_out_of_gas() { FeatureFlag::GAS_PAYER_ENABLED, FeatureFlag::SPONSORED_AUTOMATIC_ACCOUNT_V1_CREATION, ], - vec![], + vec![FeatureFlag::DEFAULT_ACCOUNT_RESOURCE], ); let alice = Account::new(); diff --git a/aptos-move/e2e-move-tests/src/tests/function_value_depth.rs b/aptos-move/e2e-move-tests/src/tests/function_value_depth.rs new file mode 100644 index 0000000000000..06d5a9aca6a2f --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/function_value_depth.rs @@ -0,0 +1,65 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! Tests for deeply-nested function values. The Move VM must ensure that it is not possible to +//! construct values that are too deep, as this can cause stack overflow. 
+ +use crate::{assert_success, assert_vm_status, tests::common, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_package_builder::PackageBuilder; +use aptos_transaction_simulation::Account; +use aptos_types::transaction::TransactionStatus; +use move_core_types::{account_address::AccountAddress, vm_status::StatusCode}; + +#[test] +fn test_vm_value_too_deep_with_function_values() { + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x99").unwrap()); + + let status = publish( + &mut h, + &acc, + r#" + module 0x99::m { + public fun dummy2(_v: || has drop+copy) {} + + // Creates a very deep value that can be tested for off by 1 around the current maximum + // depth value. + public entry fun run2(n: u64) { + let f: || has copy+drop = || {}; + let i = 0; + while (i < n) { + f = || dummy2(f); + i = i + 1; + }; + } + } + "#, + ); + assert_success!(status); + + let status = h.run_entry_function(&acc, str::parse("0x99::m::run2").unwrap(), vec![], vec![ + bcs::to_bytes(&129_u64).unwrap(), + ]); + assert_vm_status!(status, StatusCode::VM_MAX_VALUE_DEPTH_REACHED); + + let status = h.run_entry_function(&acc, str::parse("0x99::m::run2").unwrap(), vec![], vec![ + bcs::to_bytes(&128_u64).unwrap(), + ]); + assert_success!(status); +} + +fn publish(h: &mut MoveHarness, account: &Account, source: &str) -> TransactionStatus { + let mut builder = PackageBuilder::new("Package"); + builder.add_source("m.move", source); + builder.add_local_dep( + "MoveStdlib", + &common::framework_dir_path("move-stdlib").to_string_lossy(), + ); + let path = builder.write_to_temp().unwrap(); + h.publish_package_with_options( + account, + path.path(), + BuildOptions::move_2().set_latest_language(), + ) +} diff --git a/aptos-move/e2e-move-tests/src/tests/fv_as_table_keys.rs b/aptos-move/e2e-move-tests/src/tests/fv_as_table_keys.rs new file mode 100644 index 0000000000000..1bee4d9c17670 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/fv_as_table_keys.rs @@ -0,0 +1,437 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! Tests for using function values as keys in tables. + +use crate::{assert_success, tests::common, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_language_e2e_tests::account::Account; +use aptos_package_builder::PackageBuilder; +use aptos_types::{account_address::AccountAddress, transaction::TransactionStatus}; + +#[test] +fn fv_in_table() { + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x99").unwrap()); + + // Initial publish + let result = publish( + &mut h, + &acc, + r#" + module 0x99::m { + use aptos_std::table; + use std::signer; + + struct Container has key { t: table::Table } + + #[persistent] + fun foo(f: ||u64): u64 { + if (f() == 1) + 1 + else + 2 + } + #[persistent] + fun bar(f: ||u64): u64 { + if (f() == 1) + 2 + else + 1 + } + + // Stores a function value of type `| ||u64 |u64` with a key of the same type in a table. + public entry fun test_store(account: &signer) { + let f1: | ||u64 |u64 has copy+store+drop = |x| foo(x); + let f2: | ||u64 |u64 has store+copy+drop = |x| foo(x); + + let table = table::new<| ||u64 |u64 has copy+store+drop, | ||u64 |u64 has store+copy+drop>(); + table::add(&mut table, f1, f2); + move_to>(account, Container {t: table}); + } + + // Fecth a function value from the table and call it with different arguments. 
+ public entry fun test_fetch(account: &signer) { + let f1: | ||u64 |u64 has copy+store+drop = |x| foo(x); + let table = borrow_global>(signer::address_of(account)); + let f2 = table::borrow(&(table.t), f1); + let arg = || 1; + assert!((*f2)(arg) == 1, 0); + let arg = || 2; + assert!((*f2)(arg) == 2, 0); + } + + // Test the non-existence of a key + public entry fun not_contain(account: &signer){ + let f1: | ||u64 |u64 has copy+store+drop = |x| bar(x); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), f1); + assert!(!contains_key, 0); + } + + // Test the existence of a key + public entry fun contain(account: &signer){ + let f1: | ||u64 |u64 has copy+store+drop = |x| foo(x); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), f1); + assert!(contains_key, 0); + } + + // Test the existence of a key (variant aspect 1: parameter name of function value used as key) + // Expected result: no impact + public entry fun contain_with_diff_param_name(account: &signer){ + let f1: | ||u64 |u64 has copy+store+drop = |y| foo(y); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), f1); + assert!(contains_key, 0); + } + + // Test updating a function value saved in table + public entry fun update(account: &signer){ + let f1: | ||u64 |u64 has copy+store+drop = |x| foo(x); + let table = borrow_global_mut>(signer::address_of(account)); + + let f2: | ||u64 |u64 has store+copy+drop = |x| bar(x); + table::upsert(&mut (table.t), f1, f2); + + let f2 = table::borrow(&(table.t), f1); + let arg = || 1; + assert!((*f2)(arg) == 2, 0); + let arg = || 2; + assert!((*f2)(arg) == 1, 0); + } + + // Test removing a function value saved in table + public entry fun remove(account: &signer){ + let f1: | ||u64 |u64 has copy+store+drop = |x| foo(x); + let table = borrow_global_mut>(signer::address_of(account)); + table::remove(&mut (table.t), f1); + let contains_key = table::contains(&(table.t), f1); + assert!(!contains_key, 0); + } + } + "#, + ); + assert_success!(result); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m::test_store").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m::test_fetch").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m::not_contain").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m::contain").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m::contain_with_diff_param_name").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m::update").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m::remove").unwrap(), + vec![], + vec![], + )); +} + +#[test] +fn fv_in_table_with_refs() { + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x99").unwrap()); + + // Initial publish + let result = publish( + &mut h, + &acc, + r#" + module 0x99::m1 { + use aptos_std::table; + use std::signer; + use std::vector; + + struct Container has key { t: table::Table } + + #[persistent] + fun foo(_f: &||u64, x: &u64): &u64 { + x + } + + // Stores a function value of type `| &||u64 | &||u64` with a key of the same type in a table. 
+ public entry fun test_store(account: &signer) { + let f1: | &||u64, &u64|&u64 has copy+store+drop = |f, x| foo(f, x); + let f2: | &||u64, &u64|&u64 has store+copy+drop = |f, x| foo(f, x); + + let table = table::new<| &||u64, &u64|&u64 has copy+store+drop, | &||u64, &u64|&u64 has copy+store+drop>(); + table::add(&mut table, f1, f2); + move_to>(account, Container {t: table}); + } + + // Test the existence of a key + public entry fun contain(account: &signer){ + let f1: | &||u64, &u64|&u64 has copy+store+drop = |g, y| foo(g, y); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), f1); + assert!(contains_key, 0); + } + + // Test saving references via function value args in vector + public entry fun ref_in_vec(){ + let f1: | &||u64, &u64|&u64 has copy+store+drop = |g, y| foo(g, y); + let f2: | &||u64, &u64|&u64 has copy+store+drop = |g, y| foo(g, y); + let v = vector::empty<| &||u64, &u64|&u64 has copy+store+drop>(); + vector::push_back(&mut v, f1); + vector::push_back(&mut v, f2); + assert!(v[0] == v[1]); + } + + // Test the existence of a key saved in a vector + public entry fun contain_via_vec(account: &signer){ + let f1: | &||u64, &u64|&u64 has copy+store+drop = |g, y| foo(g, y); + let f2: | &||u64, &u64|&u64 has copy+store+drop = |g, y| foo(g, y); + let v = vector::empty<| &||u64, &u64|&u64 has copy+store+drop>(); + vector::push_back(&mut v, f1); + vector::push_back(&mut v, f2); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), v[0]); + assert!(contains_key, 0); + } + + // Fecth a function value from the table and check the result. + public entry fun test_fetch(account: &signer) { + let f1: | &||u64, &u64|&u64 has copy+store+drop = |g, y| foo(g, y); + let table = borrow_global>(signer::address_of(account)); + let f2 = table::borrow(&(table.t), f1); + assert!(*f2 == f1); + } + } + "#, + ); + assert_success!(result); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m1::test_store").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m1::contain").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m1::ref_in_vec").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m1::contain_via_vec").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m1::test_fetch").unwrap(), + vec![], + vec![], + )); +} + +#[test] +fn fv_in_table_with_captured_vars() { + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x99").unwrap()); + + // Initial publish + let result = publish( + &mut h, + &acc, + r#" + module 0x99::m2 { + use aptos_std::table; + use std::signer; + + struct Container has key { t: table::Table } + + #[persistent] + fun foo(x: u64, _y: T):u64 { + if (x == 1) + 1 + else + 2 + } + + #[persistent] + fun bar(x: u64, _y: T):u64 { + if (x == 1) + 2 + else + 1 + } + + // Stores a function value of type `|u64, u64|u64` with a captured variable of a generic type, using a key of the same type in a table. 
+ public entry fun test_store(account: &signer) { + let y = 1; + let f1: |u64|u64 has copy+store+drop = |x| foo(x, y); + let f2: |u64|u64 has copy+store+drop = |x| foo(x, y); + + let table = table::new<|u64|u64 has copy+store+drop, |u64|u64 has copy+store+drop>(); + table::add(&mut table, f1, f2); + move_to>(account, Container {t: table}); + } + + // Test the existence of a key + public entry fun contain(account: &signer){ + let y = 1; + let f1: |u64|u64 has copy+store+drop = |x| foo(x, y); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), f1); + assert!(contains_key, 0); + } + + // Test the existence of a key (variant 1: different captured variable name) + public entry fun contain_var1(account: &signer){ + let z = 1; + let f1: |u64|u64 has copy+store+drop = |x| foo(x, z); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), f1); + assert!(contains_key, 0); + } + + // Test the non-existence of a key (variant 1: different captured variable value) + public entry fun not_contain_var1(account: &signer){ + let z = 2; + let f1: |u64|u64 has copy+store+drop = |x| foo(x, z); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), f1); + assert!(!contains_key, 0); + } + + // Test the non-existence of a key (variant 2: different captured variable type) + public entry fun not_contain_var2(account: &signer){ + let a1: address = @0x1; + let f1: |u64|u64 has copy+store+drop = |x| foo(x, a1); + let table = borrow_global>(signer::address_of(account)); + let contains_key = table::contains(&(table.t), f1); + assert!(!contains_key, 0); + } + + // Test updating a function value saved in table + public entry fun update(account: &signer) { + let table = borrow_global_mut>(signer::address_of(account)); + + // check the original value + let z = 1; + let f1: |u64|u64 has copy+store+drop = |x| foo(x, z); + let f2 = table::borrow(&(table.t), f1); + assert!((*f2)(1) == 1, 0); + + // update the value + let f2: |u64|u64 has copy+store+drop = |x| bar(x, z); + table::upsert(&mut (table.t), f1, f2); + + // check the updated value + let f2 = table::borrow(&(table.t), f1); + assert!((*f2)(1) == 2, 0); + } + + // Test removing a function value saved in table + public entry fun remove(account: &signer){ + let table = borrow_global_mut>(signer::address_of(account)); + let z = 1; + let f1: |u64|u64 has copy+store+drop = |x| foo(x, z); + table::remove(&mut (table.t), f1); + let contains_key = table::contains(&(table.t), f1); + assert!(!contains_key, 0); + } + } + "#, + ); + assert_success!(result); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m2::test_store").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m2::contain").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m2::contain_var1").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m2::not_contain_var1").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m2::not_contain_var2").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m2::update").unwrap(), + vec![], + vec![], + )); + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::m2::remove").unwrap(), + vec![], + vec![], + )); +} + +fn publish(h: &mut 
MoveHarness, account: &Account, source: &str) -> TransactionStatus { + let mut builder = PackageBuilder::new("Package"); + builder.add_source("m.move", source); + builder.add_local_dep( + "AptosStdlib", + &common::framework_dir_path("aptos-stdlib").to_string_lossy(), + ); + builder.add_local_dep( + "MoveStdlib", + &common::framework_dir_path("move-stdlib").to_string_lossy(), + ); + let path = builder.write_to_temp().unwrap(); + h.publish_package_with_options( + account, + path.path(), + BuildOptions::move_2().set_latest_language(), + ) +} diff --git a/aptos-move/e2e-move-tests/src/tests/generic_cmp.rs b/aptos-move/e2e-move-tests/src/tests/generic_cmp.rs new file mode 100644 index 0000000000000..5bf5891909500 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/generic_cmp.rs @@ -0,0 +1,158 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! Transactional tests for comparison operations, Lt/Le/Ge/Gt, over non-integer types, +//! introduced in Move language version 2.2 and onwards. + +use crate::{assert_success, tests::common, MoveHarness}; +use aptos_framework::{BuildOptions, BuiltPackage}; +use aptos_language_e2e_tests::account::TransactionBuilder; +use aptos_types::{account_address::AccountAddress, transaction::Script}; + +#[test] +fn function_generic_cmp() { + let mut h = MoveHarness::new(); + + // Load the code + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x99").unwrap()); + assert_success!(h.publish_package_with_options( + &acc, + &common::test_dir_path("cmp_generic.data/pack"), + BuildOptions::move_2().set_latest_language() + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::primitive_cmp::test_bool").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::primitive_cmp::test_address").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::primitive_cmp::test_vector").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::struct_cmp::test_simple_struct").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::struct_cmp::test_complex_struct").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::struct_cmp::test_nested_complex_struct").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::struct_cmp::test_special_complex_struct").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::generic_cmp::test_generic_arg").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::generic_cmp::test_generic_struct").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::generic_cmp::test_generic_complex_struct").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::function_value_cmp::test_module_name_cmp").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::function_value_cmp::test_function_name_cmp").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0x99::function_value_cmp::test_typed_arg_cmp").unwrap(), + vec![], + vec![], + )); + + assert_success!(h.run_entry_function( + &acc, + 
str::parse("0x99::function_value_cmp::test_captured_var_cmp").unwrap(), + vec![], + vec![], + )); +} + +/// Special case of comparing two signers +#[test] +fn function_signer_cmp() { + let mut h = MoveHarness::new(); + + let alice = h.new_account_at(AccountAddress::from_hex_literal("0xa11ce").unwrap()); + let bob = h.new_account_at(AccountAddress::from_hex_literal("0xb0b01").unwrap()); + + let build_options = BuildOptions { + with_srcs: false, + with_abis: false, + with_source_maps: false, + with_error_map: false, + ..BuildOptions::move_2().set_latest_language() + }; + + let package = BuiltPackage::build( + common::test_dir_path("cmp_generic.data/script"), + build_options, + ) + .expect("building package must succeed"); + + let code = package.extract_script_code()[0].clone(); + let script = Script::new(code, vec![], vec![]); + + let transaction = TransactionBuilder::new(alice.clone()) + .secondary_signers(vec![bob.clone()]) + .script(script) + .sequence_number(h.sequence_number(alice.address())) + .max_gas_amount(1_000_000) + .gas_unit_price(1) + .sign_multi_agent(); + + let output = h.executor.execute_transaction(transaction); + assert_success!(output.status().to_owned()); +} diff --git a/aptos-move/e2e-move-tests/src/tests/mod.rs b/aptos-move/e2e-move-tests/src/tests/mod.rs index ba57f93cd5988..ae60a3edf46b9 100644 --- a/aptos-move/e2e-move-tests/src/tests/mod.rs +++ b/aptos-move/e2e-move-tests/src/tests/mod.rs @@ -9,21 +9,27 @@ mod aggregator_v2_enums; mod aggregator_v2_events; mod aggregator_v2_function_values; mod aggregator_v2_runtime_checks; +mod any; mod attributes; +mod bcs; mod chain_id; mod code_publishing; mod common; mod constructor_args; mod dependencies; mod enum_upgrade; +mod enum_upgrade_fv; mod enum_variant_count; mod error_map; mod events; mod fee_payer; +mod function_value_depth; mod function_values; mod fungible_asset; +mod fv_as_table_keys; mod gas; mod generate_upgrade_script; +mod generic_cmp; mod governance_updates; mod infinite_loop; mod init_module; @@ -54,6 +60,8 @@ mod stake; mod state_metadata; mod storage_refund; mod string_args; +mod string_utils; +mod swap_function_values; mod test_self; mod token_event_store; mod token_objects; diff --git a/aptos-move/e2e-move-tests/src/tests/storage_refund.rs b/aptos-move/e2e-move-tests/src/tests/storage_refund.rs index 489dd56eca762..80fb3e67dc539 100644 --- a/aptos-move/e2e-move-tests/src/tests/storage_refund.rs +++ b/aptos-move/e2e-move-tests/src/tests/storage_refund.rs @@ -166,7 +166,7 @@ fn assert_result( deletes += 1 }, BaseStateOp::Modification { .. } => (), - BaseStateOp::MakeHot { .. } => unreachable!(), + BaseStateOp::MakeHot { .. } | BaseStateOp::Eviction { .. 
} => unreachable!(), } } if expect_success { diff --git a/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/Move.toml b/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/Move.toml new file mode 100644 index 0000000000000..3a3493eb88720 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "string_utils_test" +version = "0.0.0" + +[dependencies] +AptosStdlib= { local = "../../../../../framework/aptos-stdlib" } diff --git a/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/scripts/main.move b/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/scripts/main.move new file mode 100644 index 0000000000000..5dda6843b5461 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/scripts/main.move @@ -0,0 +1,11 @@ +script { + use 0x1::string_utils_test::{assert_eq, Test}; + + fun main() { + let f1: || has drop = 0x1::string_utils_test::test1; + assert_eq(&f1, b"0x1::string_utils_test::test1()", 1); + + let f2: |u64, vector| has drop = |a, b| 0x1::string_utils_test::test2(a, 20, @0x123, b); + assert_eq(&f2, b"0x1::string_utils_test::test2(_, 20, @0x123, ..)", 2); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/sources/string_utils_test.move b/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/sources/string_utils_test.move new file mode 100644 index 0000000000000..38ea517622309 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/string_utils.data/pack/sources/string_utils_test.move @@ -0,0 +1,88 @@ +module 0x1::string_utils_test { + use std::string; + use aptos_std::string_utils; + + struct Test has copy, drop { + x: u64, + } + + enum TestEnum has copy, drop { + V1 { x: u64, }, + V2 { x: u64, y: Test }, + } + + public fun dummy(_x: &u64, _v: u64) { } + + public fun test1() {} + + public fun test2(_a: u64, _b: u16, _c: address, _d: vector) {} + + public fun test3(f: |&u64|, x: u64) { f(&x) } + + public fun test4(_x: A, _y: B, _z: C) {} + + public entry fun run_all() { + + // === Lambda lifting === // + + let f1: || has drop = || {}; + assert_eq(&f1, b"0x1::string_utils_test::__lambda__1__run_all()", 1); + + let f2: |u8, u8| has drop = |a, b| { + let e = TestEnum::V2 { x: 10, y: Test { x: 20 } }; + test4(a, e, b) + }; + assert_eq(&f2, b"0x1::string_utils_test::__lambda__2__run_all()", 2); + + // === No capturing === // + + let f3: || has drop = test1; + assert_eq(&f3, b"0x1::string_utils_test::test1()", 3); + + let f4: |u64, u16, address, vector| has drop = test2; + assert_eq(&f4, b"0x1::string_utils_test::test2()", 4); + + let f5: |(|&u64|), u64| has drop = test3; + assert_eq(&f5, b"0x1::string_utils_test::test3()", 5); + + // === Capturing simple === // + + let f6: |u64, vector| has drop = |a, b| test2(a, 20, @0x123, b); + assert_eq(&f6, b"0x1::string_utils_test::test2(_, 20, @0x123, ..)", 6); + + let v = vector[Test { x: 1 }, Test { x: 2 }]; + let f7: |u16| has drop = |a| test2(10, a, @0x123, v); + assert_eq(&f7, b"0x1::string_utils_test::test2(10, _, @0x123, [ { 1 }, { 2 } ], ..)", 7); + + // === With type arguments === // + + let f8: |u64, Test, (|u64| has drop)| has drop = test4; + assert_eq( + &f8, + b"0x1::string_utils_test::test4()", + 8, + ); + + let e = TestEnum::V2 { x: 10, y: Test { x: 20 } }; + let f9: |u64, u8| has drop = |a, b| test4(a, e, b); + assert_eq( + &f9, + b"0x1::string_utils_test::test4(_, #1{ 10, { 20 } }, ..)", + 9, + ); + + let h1: |&u64| has drop = |x| dummy(x, 10); + let h2: || has drop = || test3(h1, 30); 
+ assert_eq( + &h2, + b"0x1::string_utils_test::test3(0x1::string_utils_test::dummy(_, 10, ..), 30, ..)", + 10, + ); + } + + public fun assert_eq(x: &T, expected: vector, abort_code: u64) { + let actual = string_utils::to_string(x); + let expected = string::utf8(expected); + assert!(actual == expected, abort_code); + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/string_utils.rs b/aptos-move/e2e-move-tests/src/tests/string_utils.rs new file mode 100644 index 0000000000000..c102015fae043 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/string_utils.rs @@ -0,0 +1,54 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{assert_success, tests::common, MoveHarness}; +use aptos_framework::{BuildOptions, BuiltPackage}; +use aptos_language_e2e_tests::executor::FakeExecutor; +use aptos_types::move_utils::MemberId; +use move_core_types::account_address::AccountAddress; +use std::str::FromStr; + +fn initialize(h: &mut MoveHarness) { + let build_options = BuildOptions::move_2().set_latest_language(); + let path = common::test_dir_path("string_utils.data/pack"); + + let framework_account = h.aptos_framework_account(); + let status = h.publish_package_with_options(&framework_account, path.as_path(), build_options); + assert_success!(status); +} + +#[test] +fn test_function_value_formatting_in_modules() { + let mut h = MoveHarness::new_with_executor(FakeExecutor::from_head_genesis()); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x123").unwrap()); + initialize(&mut h); + + let status = h.run_entry_function( + &acc, + MemberId::from_str("0x1::string_utils_test::run_all").unwrap(), + vec![], + vec![], + ); + assert_success!(status); +} + +#[test] +fn test_function_value_formatting_in_scripts() { + let build_options = BuildOptions::move_2().set_latest_language(); + let path = common::test_dir_path("string_utils.data/pack"); + let package = BuiltPackage::build(path.to_owned(), build_options.clone()) + .expect("Building a package must succeed"); + + let mut scripts = package.extract_script_code(); + assert_eq!(scripts.len(), 1); + let script = scripts.pop().expect("Script exists"); + + let mut h = MoveHarness::new_with_executor(FakeExecutor::from_head_genesis()); + let framework_account = h.aptos_framework_account(); + let txn = h.create_publish_built_package(&framework_account, &package, |_| {}); + assert_success!(h.run(txn)); + + let acc = h.new_account_at(AccountAddress::from_hex_literal("0x123").unwrap()); + let txn = h.create_script(&acc, script, vec![], vec![]); + assert_success!(h.run(txn)); +} diff --git a/aptos-move/e2e-move-tests/src/tests/swap_function_values.rs b/aptos-move/e2e-move-tests/src/tests/swap_function_values.rs new file mode 100644 index 0000000000000..9bab1e2174e71 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/swap_function_values.rs @@ -0,0 +1,57 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! Test swapping of function values via vector::replace. 
+ +use crate::{assert_success, tests::common, MoveHarness}; +use aptos_framework::BuildOptions; +use aptos_package_builder::PackageBuilder; +use aptos_types::account_address::AccountAddress; + +#[test] +fn swap_function_values() { + let mut builder = PackageBuilder::new("swap_function_values"); + let source = r#" + module 0xc0ffee::m { + + struct NoCopy; + + entry fun test() { + let nc = NoCopy; + + let f1 = || { + let NoCopy = nc; + 42 + }; + let f2 = || 44; + let v = vector[f2]; + let f3 = v.replace(0, f1); + assert!(f3() == 44, 0); + let f4 = v.pop_back(); + assert!(f4() == 42, 1); + v.destroy_empty(); + } + } + "#; + builder.add_source("swap_function_values.move", source); + builder.add_local_dep( + "AptosStdlib", + &common::framework_dir_path("aptos-stdlib").to_string_lossy(), + ); + let path = builder.write_to_temp().unwrap(); + + let mut h = MoveHarness::new(); + let acc = h.new_account_at(AccountAddress::from_hex_literal("0xc0ffee").unwrap()); + assert_success!(h.publish_package_with_options( + &acc, + path.path(), + BuildOptions::move_2().set_latest_language() + )); + + assert_success!(h.run_entry_function( + &acc, + str::parse("0xc0ffee::m::test").unwrap(), + vec![], + vec![], + )); +} diff --git a/aptos-move/e2e-move-tests/src/tests/token_event_store.rs b/aptos-move/e2e-move-tests/src/tests/token_event_store.rs index 2aa48699eb2be..e19c93c2a8d63 100644 --- a/aptos-move/e2e-move-tests/src/tests/token_event_store.rs +++ b/aptos-move/e2e-move-tests/src/tests/token_event_store.rs @@ -53,6 +53,6 @@ fn test_token_creation_with_token_events_store() { let event = events.pop().unwrap(); assert_eq!( "0x3::token_event_store::OptInTransfer".to_string(), - event.type_tag().to_string() + event.type_tag().to_canonical_string() ); } diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index e847ff0c49a74..a69240adef7c5 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -747,7 +747,7 @@ impl FakeExecutor { }, onchain: onchain_config, }; - let txn_provider = DefaultTxnProvider::new(txn_block); + let txn_provider = DefaultTxnProvider::new_without_info(txn_block); AptosVMBlockExecutorWrapper::execute_block_on_thread_pool::< _, NoOpTransactionCommitHook, @@ -1245,7 +1245,9 @@ impl FakeExecutor { let fun_name = Self::name(function_name); let should_error = fun_name.clone().into_string().ends_with(POSTFIX); - let storage = TraversalStorage::new(); + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + let result = session.execute_function_bypass_visibility( module, &fun_name, @@ -1262,7 +1264,7 @@ impl FakeExecutor { ), shared_buffer: Arc::clone(&a1), }), - &mut TraversalContext::new(&storage), + &mut traversal_context, &module_storage, ); if let Err(err) = result { @@ -1302,7 +1304,10 @@ impl FakeExecutor { let module_storage = self.state_store.as_aptos_code_storage(&env); let mut session = vm.new_session(&resolver, SessionId::void(), None); - let storage = TraversalStorage::new(); + + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + session .execute_function_bypass_visibility( &module_id, @@ -1311,7 +1316,7 @@ impl FakeExecutor { args, // TODO(Gas): we probably want to switch to metered execution in the future &mut UnmeteredGasMeter, - &mut TraversalContext::new(&storage), + &mut traversal_context, &module_storage, ) .unwrap_or_else(|e| { diff --git 
a/aptos-move/framework/Cargo.toml b/aptos-move/framework/Cargo.toml index f8088c64eb201..35030aaf0fb88 100644 --- a/aptos-move/framework/Cargo.toml +++ b/aptos-move/framework/Cargo.toml @@ -88,6 +88,7 @@ claims = { workspace = true } move-cli = { workspace = true } move-prover = { workspace = true } move-unit-test = { workspace = true } +move-vm-types = { workspace = true, features = ["testing"] } [features] default = [] diff --git a/aptos-move/framework/aptos-experimental/doc/active_order_book.md b/aptos-move/framework/aptos-experimental/doc/active_order_book.md index aaafb50fa6f7b..508776942f978 100644 --- a/aptos-move/framework/aptos-experimental/doc/active_order_book.md +++ b/aptos-move/framework/aptos-experimental/doc/active_order_book.md @@ -3,7 +3,11 @@ # Module `0x7::active_order_book` -(work in progress) +ActiveOrderBook: This is the main order book that keeps track of active orders and their states. The active order +book is backed by a BigOrderedMap, which is a data structure that allows for efficient insertion, deletion, and matching of the order +The orders are matched based on time-price priority. + +This is internal module, which cannot be used directly, use OrderBook instead. - [Struct `ActiveBidKey`](#0x7_active_order_book_ActiveBidKey) @@ -111,7 +115,7 @@ OrderBook tracking active (i.e. unconditional, immediately executable) limit orders. - invariant - all buys are smaller than sells, at all times. -- tie_breaker in sells is U256_MAX-value, to make sure largest value in the book +- tie_breaker in sells is U128_MAX-value, to make sure largest value in the book that is taken first, is the one inserted first, amongst those with same bid price. @@ -169,15 +173,6 @@ There is a code bug that breaks internal invariant - - - - -
const U256_MAX: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
-
- - - @@ -203,7 +198,7 @@ There is a code bug that breaks internal invariant -
public fun new_active_order_book(): active_order_book::ActiveOrderBook
+
public(friend) fun new_active_order_book(): active_order_book::ActiveOrderBook
 
@@ -212,7 +207,7 @@ There is a code bug that breaks internal invariant Implementation -
public fun new_active_order_book(): ActiveOrderBook {
+
public(friend) fun new_active_order_book(): ActiveOrderBook {
     // potentially add max value to both sides (that will be skipped),
     // so that max_key never changes, and doesn't create conflict.
     ActiveOrderBook::V1 {
@@ -234,7 +229,7 @@ Picks the best (i.e. highest) bid (i.e. buy) price from the active order book.
 aborts if there are no buys
 
 
-
public fun best_bid_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
+
public(friend) fun best_bid_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
 
@@ -243,7 +238,7 @@ aborts if there are no buys Implementation -
public fun best_bid_price(self: &ActiveOrderBook): Option<u64> {
+
public(friend) fun best_bid_price(self: &ActiveOrderBook): Option<u64> {
     if (self.buys.is_empty()) {
         option::none()
     } else {
@@ -265,7 +260,7 @@ Picks the best (i.e. lowest) ask (i.e. sell) price from the active order book.
 aborts if there are no sells
 
 
-
public fun best_ask_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
+
public(friend) fun best_ask_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
 
@@ -274,7 +269,7 @@ aborts if there are no sells Implementation -
public fun best_ask_price(self: &ActiveOrderBook): Option<u64> {
+
public(friend) fun best_ask_price(self: &ActiveOrderBook): Option<u64> {
     if (self.sells.is_empty()) {
         option::none()
     } else {
@@ -294,7 +289,7 @@ aborts if there are no sells
 
 
 
-
public fun get_mid_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
+
public(friend) fun get_mid_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
 
@@ -303,7 +298,7 @@ aborts if there are no sells Implementation -
public fun get_mid_price(self: &ActiveOrderBook): Option<u64> {
+
public(friend) fun get_mid_price(self: &ActiveOrderBook): Option<u64> {
     let best_bid = self.best_bid_price();
     let best_ask = self.best_ask_price();
     if (best_bid.is_none() || best_ask.is_none()) {
@@ -326,7 +321,7 @@ aborts if there are no sells
 
 
 
-
public fun get_slippage_price(self: &active_order_book::ActiveOrderBook, is_buy: bool, slippage_pct: u64): option::Option<u64>
+
public(friend) fun get_slippage_price(self: &active_order_book::ActiveOrderBook, is_bid: bool, slippage_pct: u64): option::Option<u64>
 
@@ -335,8 +330,8 @@ aborts if there are no sells Implementation -
public fun get_slippage_price(
-    self: &ActiveOrderBook, is_buy: bool, slippage_pct: u64
+
public(friend) fun get_slippage_price(
+    self: &ActiveOrderBook, is_bid: bool, slippage_pct: u64
 ): Option<u64> {
     let mid_price = self.get_mid_price();
     if (mid_price.is_none()) {
@@ -346,7 +341,7 @@ aborts if there are no sells
     let slippage = mul_div(
         mid_price, slippage_pct, get_slippage_pct_precision() * 100
     );
-    if (is_buy) {
+    if (is_bid) {
         option::some(mid_price + slippage)
     } else {
         option::some(mid_price - slippage)
@@ -388,9 +383,8 @@ aborts if there are no sells
             } else {
                 front_value.size
             };
-        total_value = total_value
-            + (matched_size as u128) * (front_key.price as u128);
-        total_size = total_size + matched_size;
+        total_value +=(matched_size as u128) * (front_key.price as u128);
+        total_size += matched_size;
         let next_key = orders.prev_key(&front_key);
         if (next_key.is_none()) {
             // TODO maybe we should return none if there is not enough depth?
@@ -437,9 +431,8 @@ aborts if there are no sells
             } else {
                 front_value.size
             };
-        total_value = total_value
-            + (matched_size as u128) * (front_key.price as u128);
-        total_size = total_size + matched_size;
+        total_value +=(matched_size as u128) * (front_key.price as u128);
+        total_size += matched_size;
         let next_key = orders.next_key(&front_key);
         if (next_key.is_none()) {
             break;
@@ -461,7 +454,7 @@ aborts if there are no sells
 
 
 
-
fun get_tie_breaker(unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool): order_book_types::UniqueIdxType
+
fun get_tie_breaker(unique_priority_idx: order_book_types::UniqueIdxType, is_bid: bool): order_book_types::UniqueIdxType
 
@@ -471,9 +464,9 @@ aborts if there are no sells
inline fun get_tie_breaker(
-    unique_priority_idx: UniqueIdxType, is_buy: bool
+    unique_priority_idx: UniqueIdxType, is_bid: bool
 ): UniqueIdxType {
-    if (is_buy) {
+    if (is_bid) {
         unique_priority_idx
     } else {
         unique_priority_idx.descending_idx()
@@ -491,7 +484,7 @@ aborts if there are no sells
 
 
 
-
public fun cancel_active_order(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool): u64
+
public(friend) fun cancel_active_order(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, is_bid: bool): u64
 
@@ -500,16 +493,16 @@ aborts if there are no sells Implementation -
public fun cancel_active_order(
+
public(friend) fun cancel_active_order(
     self: &mut ActiveOrderBook,
     price: u64,
     unique_priority_idx: UniqueIdxType,
-    is_buy: bool
+    is_bid: bool
 ): u64 {
-    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
     let key = ActiveBidKey { price: price, tie_breaker };
     let value =
-        if (is_buy) {
+        if (is_bid) {
             self.buys.remove(&key)
         } else {
             self.sells.remove(&key)
@@ -528,7 +521,7 @@ aborts if there are no sells
 
 
 
-
public fun is_active_order(self: &active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool): bool
+
public(friend) fun is_active_order(self: &active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, is_bid: bool): bool
 
@@ -537,15 +530,15 @@ aborts if there are no sells Implementation -
public fun is_active_order(
+
public(friend) fun is_active_order(
     self: &ActiveOrderBook,
     price: u64,
     unique_priority_idx: UniqueIdxType,
-    is_buy: bool
+    is_bid: bool
 ): bool {
-    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
     let key = ActiveBidKey { price: price, tie_breaker };
-    if (is_buy) {
+    if (is_bid) {
         self.buys.contains(&key)
     } else {
         self.sells.contains(&key)
@@ -564,7 +557,7 @@ aborts if there are no sells
 Check if the order is a taker order - i.e. if it can be immediately matched with the order book fully or partially.
 
 
-
public fun is_taker_order(self: &active_order_book::ActiveOrderBook, price: u64, is_buy: bool): bool
+
public fun is_taker_order(self: &active_order_book::ActiveOrderBook, price: option::Option<u64>, is_bid: bool): bool
 
@@ -574,14 +567,22 @@ Check if the order is a taker order - i.e. if it can be immediately matched with
public fun is_taker_order(
-    self: &ActiveOrderBook, price: u64, is_buy: bool
+    self: &ActiveOrderBook, price: Option<u64>, is_bid: bool
 ): bool {
-    if (is_buy) {
+    if (is_bid) {
         let best_ask_price = self.best_ask_price();
-        best_ask_price.is_some() && price >= best_ask_price.destroy_some()
+        best_ask_price.is_some()
+            && (
+                price.is_none()
+                    || price.destroy_some() >= best_ask_price.destroy_some()
+            )
     } else {
         let best_bid_price = self.best_bid_price();
-        best_bid_price.is_some() && price <= best_bid_price.destroy_some()
+        best_bid_price.is_some()
+            && (
+                price.is_none()
+                    || price.destroy_some() <= best_bid_price.destroy_some()
+            )
     }
 }
 
@@ -646,7 +647,7 @@ Check if the order is a taker order - i.e. if it can be immediately matched with -
fun get_single_match_for_buy_order(self: &mut active_order_book::ActiveOrderBook, price: u64, size: u64): order_book_types::ActiveMatchedOrder
+
fun get_single_match_for_buy_order(self: &mut active_order_book::ActiveOrderBook, price: option::Option<u64>, size: u64): order_book_types::ActiveMatchedOrder
 
@@ -656,10 +657,14 @@ Check if the order is a taker order - i.e. if it can be immediately matched with
fun get_single_match_for_buy_order(
-    self: &mut ActiveOrderBook, price: u64, size: u64
+    self: &mut ActiveOrderBook, price: Option<u64>, size: u64
 ): ActiveMatchedOrder {
     let (smallest_key, smallest_value) = self.sells.borrow_front();
-    assert!(price >= smallest_key.price, EINTERNAL_INVARIANT_BROKEN);
+    if (price.is_some()) {
+        assert!(
+            price.destroy_some() >= smallest_key.price, EINTERNAL_INVARIANT_BROKEN
+        );
+    };
     single_match_with_current_active_order(
         size,
         smallest_key,
@@ -679,7 +684,7 @@ Check if the order is a taker order - i.e. if it can be immediately matched with
 
 
 
-
fun get_single_match_for_sell_order(self: &mut active_order_book::ActiveOrderBook, price: u64, size: u64): order_book_types::ActiveMatchedOrder
+
fun get_single_match_for_sell_order(self: &mut active_order_book::ActiveOrderBook, price: option::Option<u64>, size: u64): order_book_types::ActiveMatchedOrder
 
@@ -689,10 +694,14 @@ Check if the order is a taker order - i.e. if it can be immediately matched with
fun get_single_match_for_sell_order(
-    self: &mut ActiveOrderBook, price: u64, size: u64
+    self: &mut ActiveOrderBook, price: Option<u64>, size: u64
 ): ActiveMatchedOrder {
     let (largest_key, largest_value) = self.buys.borrow_back();
-    assert!(price <= largest_key.price, EINTERNAL_INVARIANT_BROKEN);
+    if (price.is_some()) {
+        assert!(
+            price.destroy_some() <= largest_key.price, EINTERNAL_INVARIANT_BROKEN
+        );
+    };
     single_match_with_current_active_order(
         size,
         largest_key,
@@ -712,7 +721,7 @@ Check if the order is a taker order - i.e. if it can be immediately matched with
 
 
 
-
public fun get_single_match_result(self: &mut active_order_book::ActiveOrderBook, price: u64, size: u64, is_buy: bool): order_book_types::ActiveMatchedOrder
+
public(friend) fun get_single_match_result(self: &mut active_order_book::ActiveOrderBook, price: option::Option<u64>, size: u64, is_bid: bool): order_book_types::ActiveMatchedOrder
 
@@ -721,13 +730,13 @@ Check if the order is a taker order - i.e. if it can be immediately matched with Implementation -
public fun get_single_match_result(
+
public(friend) fun get_single_match_result(
     self: &mut ActiveOrderBook,
-    price: u64,
+    price: Option<u64>,
     size: u64,
-    is_buy: bool
+    is_bid: bool
 ): ActiveMatchedOrder {
-    if (is_buy) {
+    if (is_bid) {
         self.get_single_match_for_buy_order(price, size)
     } else {
         self.get_single_match_for_sell_order(price, size)
@@ -746,7 +755,7 @@ Check if the order is a taker order - i.e. if it can be immediately matched with
 Increase the size of the order in the orderbook without altering its position in the price-time priority.
 
 
-
public fun increase_order_size(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size_delta: u64, is_buy: bool)
+
public(friend) fun increase_order_size(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size_delta: u64, is_bid: bool)
 
@@ -755,16 +764,16 @@ Increase the size of the order in the orderbook without altering its position in Implementation -
public fun increase_order_size(
+
public(friend) fun increase_order_size(
     self: &mut ActiveOrderBook,
     price: u64,
     unique_priority_idx: UniqueIdxType,
     size_delta: u64,
-    is_buy: bool
+    is_bid: bool
 ) {
-    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
     let key = ActiveBidKey { price, tie_breaker };
-    if (is_buy) {
+    if (is_bid) {
         self.buys.borrow_mut(&key).size += size_delta;
     } else {
         self.sells.borrow_mut(&key).size += size_delta;
@@ -783,7 +792,7 @@ Increase the size of the order in the orderbook without altering its position in
 Decrease the size of the order in the order book without altering its position in the price-time priority.
 
 
-
public fun decrease_order_size(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size_delta: u64, is_buy: bool)
+
public(friend) fun decrease_order_size(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size_delta: u64, is_bid: bool)
 
@@ -792,16 +801,16 @@ Decrease the size of the order in the order book without altering its position i Implementation -
public fun decrease_order_size(
+
public(friend) fun decrease_order_size(
     self: &mut ActiveOrderBook,
     price: u64,
     unique_priority_idx: UniqueIdxType,
     size_delta: u64,
-    is_buy: bool
+    is_bid: bool
 ) {
-    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
     let key = ActiveBidKey { price, tie_breaker };
-    if (is_buy) {
+    if (is_bid) {
         self.buys.borrow_mut(&key).size -= size_delta;
     } else {
         self.sells.borrow_mut(&key).size -= size_delta;
@@ -819,7 +828,7 @@ Decrease the size of the order in the order book without altering its position i
 
 
 
-
public fun place_maker_order(self: &mut active_order_book::ActiveOrderBook, order_id: order_book_types::OrderIdType, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size: u64, is_buy: bool)
+
public(friend) fun place_maker_order(self: &mut active_order_book::ActiveOrderBook, order_id: order_book_types::OrderIdType, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size: u64, is_bid: bool)
 
@@ -828,20 +837,20 @@ Decrease the size of the order in the order book without altering its position i Implementation -
public fun place_maker_order(
+
public(friend) fun place_maker_order(
     self: &mut ActiveOrderBook,
     order_id: OrderIdType,
     price: u64,
     unique_priority_idx: UniqueIdxType,
     size: u64,
-    is_buy: bool
+    is_bid: bool
 ) {
-    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
     let key = ActiveBidKey { price, tie_breaker };
     let value = ActiveBidData { order_id, size };
     // Assert that this is not a taker order
-    assert!(!self.is_taker_order(price, is_buy), EINVALID_MAKER_ORDER);
-    if (is_buy) {
+    assert!(!self.is_taker_order(option::some(price), is_bid), EINVALID_MAKER_ORDER);
+    if (is_bid) {
         self.buys.add(key, value);
     } else {
         self.sells.add(key, value);
diff --git a/aptos-move/framework/aptos-experimental/doc/benchmark_utils.md b/aptos-move/framework/aptos-experimental/doc/benchmark_utils.md
index 828f397918990..64adfcc062071 100644
--- a/aptos-move/framework/aptos-experimental/doc/benchmark_utils.md
+++ b/aptos-move/framework/aptos-experimental/doc/benchmark_utils.md
@@ -32,7 +32,9 @@ and so actual costs of entry functions can be more precisely measured.
 Implementation
 
 
-
entry fun transfer_and_create_account(source: &signer, to: address, amount: u64) {
+
entry fun transfer_and_create_account(
+    source: &signer, to: address, amount: u64
+) {
     account::create_account_if_does_not_exist(to);
     aptos_account::transfer(source, to, amount);
 }
diff --git a/aptos-move/framework/aptos-experimental/doc/confidential_asset.md b/aptos-move/framework/aptos-experimental/doc/confidential_asset.md
index 3d0b0c0c23344..e1ee26be7a364 100644
--- a/aptos-move/framework/aptos-experimental/doc/confidential_asset.md
+++ b/aptos-move/framework/aptos-experimental/doc/confidential_asset.md
@@ -36,6 +36,7 @@ It enables private transfers by obfuscating token amounts while keeping sender a
 -  [Function `disable_token`](#0x7_confidential_asset_disable_token)
 -  [Function `set_auditor`](#0x7_confidential_asset_set_auditor)
 -  [Function `has_confidential_asset_store`](#0x7_confidential_asset_has_confidential_asset_store)
+-  [Function `confidential_asset_controller_exists`](#0x7_confidential_asset_confidential_asset_controller_exists)
 -  [Function `is_token_allowed`](#0x7_confidential_asset_is_token_allowed)
 -  [Function `is_allow_list_enabled`](#0x7_confidential_asset_is_allow_list_enabled)
 -  [Function `pending_balance`](#0x7_confidential_asset_pending_balance)
@@ -45,6 +46,7 @@ It enables private transfers by obfuscating token amounts while keeping sender a
 -  [Function `is_frozen`](#0x7_confidential_asset_is_frozen)
 -  [Function `get_auditor`](#0x7_confidential_asset_get_auditor)
 -  [Function `confidential_asset_balance`](#0x7_confidential_asset_confidential_asset_balance)
+-  [Function `get_pending_balance_transfer_count`](#0x7_confidential_asset_get_pending_balance_transfer_count)
 -  [Function `register_internal`](#0x7_confidential_asset_register_internal)
 -  [Function `deposit_to_internal`](#0x7_confidential_asset_deposit_to_internal)
 -  [Function `withdraw_to_internal`](#0x7_confidential_asset_withdraw_to_internal)
@@ -67,6 +69,7 @@ It enables private transfers by obfuscating token amounts while keeping sender a
 -  [Function `deserialize_auditor_eks`](#0x7_confidential_asset_deserialize_auditor_eks)
 -  [Function `deserialize_auditor_amounts`](#0x7_confidential_asset_deserialize_auditor_amounts)
 -  [Function `ensure_sufficient_fa`](#0x7_confidential_asset_ensure_sufficient_fa)
+-  [Function `init_module_for_genesis`](#0x7_confidential_asset_init_module_for_genesis)
 
 
 
use 0x1::bcs;
@@ -443,6 +446,26 @@ The confidential asset store has not been published for the given user-token pai
 
 
 
+
+
+The confidential asset controller is not installed.
+
+
+
const EFA_CONTROLLER_NOT_INSTALLED: u64 = 18;
+
+
+
+[TEST-ONLY] The confidential asset module initialization failed.
+
+
+const EINIT_MODULE_FAILED: u64 = 1000;
+
+
+
 The provided auditors or auditor proofs are invalid.
@@ -553,6 +576,16 @@ The maximum number of transactions can be aggregated on the pending balance befo
+
+
+The testnet chain ID.
+
+
+const TESTNET_CHAIN_ID: u8 = 2;
+
+
+
 ## Function `init_module`
@@ -1302,6 +1335,32 @@ Checks if the user has a confidential asset store for the specified token.
+
+
+
+
+## Function `confidential_asset_controller_exists`
+
+Checks if the confidential asset controller is installed.
+
+
+#[view]
+public fun confidential_asset_controller_exists(): bool
+
+
+
+
+Implementation
+
+
+public fun confidential_asset_controller_exists(): bool {
+    exists<FAController>(@aptos_experimental)
+}
+
+ + +
@@ -1360,6 +1419,7 @@ Otherwise, all tokens are allowed.
public fun is_allow_list_enabled(): bool acquires FAController {
+    assert!(confidential_asset_controller_exists(), error::invalid_state(EFA_CONTROLLER_NOT_INSTALLED));
     borrow_global<FAController>(@aptos_experimental).allow_list_enabled
 }
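Since `is_allow_list_enabled` now aborts with `EFA_CONTROLLER_NOT_INSTALLED` when the controller resource is missing, external callers on fresh deployments may want to probe first. A minimal caller-side sketch in Move; the wrapper module and function name are illustrative, only the two `confidential_asset` calls are from the change above:

```move
module example::ca_guard {
    use aptos_experimental::confidential_asset;

    /// Treat a missing controller as "allow list disabled" instead of aborting.
    public fun allow_list_enabled_or_default(): bool {
        if (confidential_asset::confidential_asset_controller_exists()) {
            confidential_asset::is_allow_list_enabled()
        } else {
            false
        }
    }
}
```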
 
@@ -1583,6 +1643,34 @@ Returns the circulating supply of the confidential asset.
+
+
+
+
+## Function `get_pending_balance_transfer_count`
+
+Returns the pending balance transfer count for the specified token.
+
+
+#[view]
+public fun get_pending_balance_transfer_count(user: address, token: object::Object<fungible_asset::Metadata>): u64
+
+
+
+
+Implementation
+
+
+public fun get_pending_balance_transfer_count(user: address, token: Object<Metadata>): u64 acquires ConfidentialAssetStore {
+    assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    borrow_global<ConfidentialAssetStore>(get_user_address(user, token)).pending_counter
+}
+
+ + +
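A hedged sketch of how a client-side helper might consume this view to decide when to roll over the pending balance; the module wrapper and the threshold constant are illustrative, not part of this change:

```move
module example::ca_pending {
    use aptos_framework::fungible_asset::Metadata;
    use aptos_framework::object::Object;
    use aptos_experimental::confidential_asset;

    /// Illustrative threshold; the real aggregation limit lives inside confidential_asset.
    const MAX_PENDING_TRANSFERS: u64 = 100;

    public fun needs_rollover(user: address, token: Object<Metadata>): bool {
        confidential_asset::get_pending_balance_transfer_count(user, token)
            >= MAX_PENDING_TRANSFERS
    }
}
```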
@@ -2482,6 +2570,33 @@ Returns Some(Object<Metadata>) if user has a sufficient amoun
+
+
+
+
+## Function `init_module_for_genesis`
+
+
+
+entry fun init_module_for_genesis(deployer: &signer)
+
+
+
+
+Implementation
+
+
+entry fun init_module_for_genesis(deployer: &signer) {
+    assert!(signer::address_of(deployer) == @aptos_experimental, error::invalid_argument(EINIT_MODULE_FAILED));
+    assert!(chain_id::get() != MAINNET_CHAIN_ID, error::invalid_state(EINIT_MODULE_FAILED));
+    assert!(chain_id::get() != TESTNET_CHAIN_ID, error::invalid_state(EINIT_MODULE_FAILED));
+    init_module(deployer)
+}
+
+ + +
diff --git a/aptos-move/framework/aptos-experimental/doc/market.md b/aptos-move/framework/aptos-experimental/doc/market.md
index 04ee9fb20effd..5c87f6f10619e 100644
--- a/aptos-move/framework/aptos-experimental/doc/market.md
+++ b/aptos-move/framework/aptos-experimental/doc/market.md
@@ -7,7 +7,7 @@ This module provides a generic trading engine implementation for a market. On a
 that stores an order book and provides APIs to place orders, cancel orders, and match orders. The market also acts
 as a wrapper around the order book and pluggable clearinghouse implementation.
 A clearing house implementation is expected to implement the following APIs
-- settle_trade(taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size): SettleTradeResult ->
+- settle_trade(taker, taker_order_id, maker, maker_order_id, fill_id, is_taker_long, price, size): SettleTradeResult ->
 Called by the market when there is a match between taker and maker. The clearinghouse is expected to settle the trade
 and return the result. Please note that the clearing house settlement size might not be the same as the order match
 size and the settlement might also fail. The fill_id is an incremental counter for matched orders and can be used to
 track specific fills
@@ -57,19 +57,12 @@ a sell order its triggered when the market price is greater than or equal to the
 TimeBased(time): The order is triggered when the current time is greater than or equal to the time.
 
-- [Struct `Market`](#0x7_market_Market)
-- [Struct `MarketConfig`](#0x7_market_MarketConfig)
+- [Enum `Market`](#0x7_market_Market)
+- [Enum `MarketConfig`](#0x7_market_MarketConfig)
 - [Struct `OrderEvent`](#0x7_market_OrderEvent)
 - [Enum `OrderCancellationReason`](#0x7_market_OrderCancellationReason)
 - [Struct `OrderMatchResult`](#0x7_market_OrderMatchResult)
 - [Constants](#@Constants_0)
-- [Function `good_till_cancelled`](#0x7_market_good_till_cancelled)
-- [Function `post_only`](#0x7_market_post_only)
-- [Function `immediate_or_cancel`](#0x7_market_immediate_or_cancel)
-- [Function `order_status_open`](#0x7_market_order_status_open)
-- [Function `order_status_filled`](#0x7_market_order_status_filled)
-- [Function `order_status_cancelled`](#0x7_market_order_status_cancelled)
-- [Function `order_status_rejected`](#0x7_market_order_status_rejected)
 - [Function `destroy_order_match_result`](#0x7_market_destroy_order_match_result)
 - [Function `number_of_fills`](#0x7_market_number_of_fills)
 - [Function `total_fill_size`](#0x7_market_total_fill_size)
@@ -86,14 +79,15 @@ TimeBased(time): The order is triggered when the current time is greater than or
 - [Function `best_bid_price`](#0x7_market_best_bid_price)
 - [Function `best_ask_price`](#0x7_market_best_ask_price)
 - [Function `is_taker_order`](#0x7_market_is_taker_order)
-- [Function `place_order`](#0x7_market_place_order)
+- [Function `place_limit_order`](#0x7_market_place_limit_order)
+- [Function `place_market_order`](#0x7_market_place_market_order)
 - [Function `next_order_id`](#0x7_market_next_order_id)
 - [Function `next_fill_id`](#0x7_market_next_fill_id)
 - [Function `emit_event_for_order`](#0x7_market_emit_event_for_order)
-- [Function `place_order_with_user_addr`](#0x7_market_place_order_with_user_addr)
 - [Function `place_maker_order_internal`](#0x7_market_place_maker_order_internal)
 - [Function `cancel_maker_order_internal`](#0x7_market_cancel_maker_order_internal)
 - [Function `cancel_order_internal`](#0x7_market_cancel_order_internal)
+- [Function `settle_single_trade`](#0x7_market_settle_single_trade)
 - [Function `place_order_with_order_id`](#0x7_market_place_order_with_order_id)
 - [Function `cancel_order`](#0x7_market_cancel_order)
 - [Function `decrease_order_size`](#0x7_market_decrease_order_size)
@@ -116,15 +110,23 @@ TimeBased(time): The order is triggered when the current time is greater than or
 
-## Struct `Market`
+## Enum `Market`
 
-struct Market<M: copy, drop, store> has store
+enum Market<M: copy, drop, store> has store
 
+
+Variants
+
+
+V1
+
+
 Fields
@@ -144,7 +146,7 @@ TimeBased(time): The order is triggered when the current time is greater than or
 Address of the market object of this market.
 
-last_order_id: u64
+order_id_generator: order_book_types::AscendingIdGenerator
@@ -170,19 +172,31 @@ TimeBased(time): The order is triggered when the current time is greater than or
+
+
+
-## Struct `MarketConfig`
+## Enum `MarketConfig`
 
-struct MarketConfig has store
+enum MarketConfig has store
 
+
+Variants
+
+
+V1
+
+
 Fields
@@ -203,6 +217,10 @@ TimeBased(time): The order is triggered when the current time is greater than or
+
+
+
+
@@ -235,7 +253,13 @@ TimeBased(time): The order is triggered when the current time is greater than or
-order_id: u64
+order_id: u128
+
+
+
+
+client_order_id: option::Option<u64>
@@ -268,13 +292,13 @@ TimeBased(time): The order is triggered when the current time is greater than or REJECTED - size_delta will always be 0
-price: u64
+price: option::Option<u64>
 
-is_buy: bool
+is_bid: bool
@@ -286,7 +310,7 @@ TimeBased(time): The order is triggered when the current time is greater than or Whether the order crosses the orderbook.
-status: u8
+status: market_types::OrderStatus
@@ -432,7 +456,7 @@ TimeBased(time): The order is triggered when the current time is greater than or
-order_id: u64
+order_id: order_book_types::OrderIdType
@@ -519,29 +543,20 @@ TimeBased(time): The order is triggered when the current time is greater than or
-
-
-
-const EINVALID_TIME_IN_FORCE_FOR_MAKER: u64 = 7;
-
-
-
-
-const EINVALID_TIME_IN_FORCE_FOR_TAKER: u64 = 8;
+const EMARKET_NOT_FOUND: u64 = 3;
 
-
-const EMARKET_NOT_FOUND: u64 = 3;
+const ENOT_ORDER_CREATOR: u64 = 12;
 
@@ -564,265 +579,13 @@ TimeBased(time): The order is triggered when the current time is greater than or
-
-
-
-const ORDER_SIZE_REDUCED: u8 = 4;
-
-
-
-Order has been cancelled by the user or engine.
-
-
-const ORDER_STATUS_CANCELLED: u8 = 2;
-
-
-
-Order has been fully or partially filled.
-
-
-const ORDER_STATUS_FILLED: u8 = 1;
-
-
-
-Order has been accepted by the engine.
-
-
-const ORDER_STATUS_OPEN: u8 = 0;
-
-
-
-Order has been rejected by the engine. Unlike cancelled orders, rejected
-orders are invalid orders. Rejection reasons:
-1. Insufficient margin
-2. Order is reduce_only but does not reduce
-
-
-const ORDER_STATUS_REJECTED: u8 = 3;
-
-
-
-Order time in force
-Good till cancelled order type
-
-
-const TIME_IN_FORCE_GTC: u8 = 0;
-
-
-
-Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the
-order as possible as taker order and cancel the rest.
-
-
-const TIME_IN_FORCE_IOC: u8 = 2;
-
-
-
-Post Only order type - ensures that the order is not a taker order
-
-
-const TIME_IN_FORCE_POST_ONLY: u8 = 1;
-
-
-
-## Function `good_till_cancelled`
-
-
-public fun good_till_cancelled(): u8
-
-
-Implementation
-
-
-public fun good_till_cancelled(): u8 {
-    TIME_IN_FORCE_GTC
-}
-
-
-
-## Function `post_only`
-
-
-public fun post_only(): u8
-
-
-Implementation
-
-
-public fun post_only(): u8 {
-    TIME_IN_FORCE_POST_ONLY
-}
-
-
-
-## Function `immediate_or_cancel`
-
-
-public fun immediate_or_cancel(): u8
-
-
-Implementation
-
-
-public fun immediate_or_cancel(): u8 {
-    TIME_IN_FORCE_IOC
-}
-
-
-
-## Function `order_status_open`
-
-
-public fun order_status_open(): u8
-
-
-Implementation
-
-
-public fun order_status_open(): u8 {
-    ORDER_STATUS_OPEN
-}
-
-
-
-## Function `order_status_filled`
-
-
-public fun order_status_filled(): u8
-
-
-Implementation
-
-
-public fun order_status_filled(): u8 {
-    ORDER_STATUS_FILLED
-}
-
-
-
-## Function `order_status_cancelled`
-
-
-public fun order_status_cancelled(): u8
-
-
-Implementation
-
-
-public fun order_status_cancelled(): u8 {
-    ORDER_STATUS_CANCELLED
-}
-
-
-
-## Function `order_status_rejected`
-
-
-public fun order_status_rejected(): u8
-
-
-Implementation
-
-
-public fun order_status_rejected(): u8 {
-    ORDER_STATUS_REJECTED
-}
-
 
 ## Function `destroy_order_match_result`
 
 
-public fun destroy_order_match_result(self: market::OrderMatchResult): (u64, u64, option::Option<market::OrderCancellationReason>, vector<u64>)
+public fun destroy_order_match_result(self: market::OrderMatchResult): (order_book_types::OrderIdType, u64, option::Option<market::OrderCancellationReason>, vector<u64>)
 
@@ -833,7 +596,7 @@ Post Only order type - ensures that the order is not a taker order
public fun destroy_order_match_result(
     self: OrderMatchResult
-): (u64, u64, Option<OrderCancellationReason>, vector<u64>) {
+): (OrderIdType, u64, Option<OrderCancellationReason>, vector<u64>) {
     let OrderMatchResult { order_id, remaining_size, cancel_reason, fill_sizes } =
         self;
     (order_id, remaining_size, cancel_reason, fill_sizes)
@@ -996,7 +759,7 @@ Post Only order type - ensures that the order is not a taker order
 
 
 
-public fun get_order_id(self: market::OrderMatchResult): u64
+public fun get_order_id(self: market::OrderMatchResult): order_book_types::OrderIdType
 
@@ -1005,7 +768,7 @@ Post Only order type - ensures that the order is not a taker order
 
 Implementation
 
-public fun get_order_id(self: OrderMatchResult): u64 {
+public fun get_order_id(self: OrderMatchResult): OrderIdType {
     self.order_id
 }
 
@@ -1032,7 +795,10 @@ Post Only order type - ensures that the order is not a taker order
public fun new_market_config(
     allow_self_matching: bool, allow_events_emission: bool
 ): MarketConfig {
-    MarketConfig { allow_self_trade: allow_self_matching, allow_events_emission: allow_events_emission }
+    MarketConfig::V1 {
+        allow_self_trade: allow_self_matching,
+        allow_events_emission: allow_events_emission
+    }
 }
 
@@ -1060,10 +826,10 @@ Post Only order type - ensures that the order is not a taker order
 ): Market<M> {
     // requiring signers, and not addresses, purely to guarantee different dexes
     // cannot pollute events to each other, accidentally or maliciously.
-    Market {
+    Market::V1 {
         parent: signer::address_of(parent),
         market: signer::address_of(market),
-        last_order_id: 0,
+        order_id_generator: new_ascending_id_generator(),
         next_fill_id: 0,
         config,
         order_book: new_order_book()
@@ -1203,7 +969,7 @@ Post Only order type - ensures that the order is not a taker order
 
-public fun is_taker_order<M: copy, drop, store>(self: &market::Market<M>, price: u64, is_buy: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>): bool
+public fun is_taker_order<M: copy, drop, store>(self: &market::Market<M>, price: option::Option<u64>, is_bid: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>): bool
 
@@ -1214,11 +980,11 @@ Post Only order type - ensures that the order is not a taker order
public fun is_taker_order<M: store + copy + drop>(
     self: &Market<M>,
-    price: u64,
-    is_buy: bool,
+    price: Option<u64>,
+    is_bid: bool,
     trigger_condition: Option<TriggerCondition>
 ): bool {
-    self.order_book.is_taker_order(price, is_buy, trigger_condition)
+    self.order_book.is_taker_order(price, is_bid, trigger_condition)
 }
 
@@ -1226,25 +992,28 @@ Post Only order type - ensures that the order is not a taker order
 
 
 
-## Function `place_order`
+## Function `place_limit_order`
 
-Places an order - If it's a taker order, it will be matched immediately and if it's a maker order, it will simply
+Places a limit order - If it's a taker order, it will be matched immediately and if it's a maker order, it will simply
 be placed in the order book. An order id is generated when the order is placed and this id can be used to
 uniquely identify the order for this market and can also be used to get the status of the order or cancel the order.
 The order is placed with the following parameters:
 - user: The user who is placing the order
 - price: The price at which the order is placed
 - orig_size: The original size of the order
-- is_buy: Whether the order is a buy order or a sell order
+- is_bid: Whether the order is a buy order or a sell order
 - time_in_force: The time in force for the order. This can be one of the following:
-- TIME_IN_FORCE_GTC: Good till cancelled order type
-- TIME_IN_FORCE_POST_ONLY: Post Only order type - ensures that the order is not a taker order
-- TIME_IN_FORCE_IOC: Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the
+- TimeInForce::GTC: Good till cancelled order type
+- TimeInForce::POST_ONLY: Post Only order type - ensures that the order is not a taker order
+- TimeInForce::IOC: Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the
 order as possible as taker order and cancel the rest.
 - trigger_condition: The trigger condition
 - metadata: The metadata for the order. This can be any type that the clearing house implementation supports.
+- client_order_id: The client order id for the order. This is an optional field that can be specified by the client and
+is solely used for their own tracking of the order. The client order id doesn't have semantic meaning and
+is not inspected by the order book internally.
 - max_fill_limit: The maximum fill limit for the order. This is the maximum number of fills to trigger for this order.
 This knob is present to configure maximum amount of gas any order placement transaction might consume and avoid
 hitting the maximum gas limit of the blockchain.
@@ -1256,7 +1025,7 @@ interface. This is used to validate the order and settle the trade.
 Returns the order id, remaining size, cancel reason and number of fills for the order.
 
 
-public fun place_order<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, price: u64, orig_size: u64, is_bid: bool, time_in_force: u8, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, max_fill_limit: u64, emit_cancel_on_fill_limit: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+public fun place_limit_order<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, limit_price: u64, orig_size: u64, is_bid: bool, time_in_force: market_types::TimeInForce, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, client_order_id: option::Option<u64>, max_fill_limit: u64, emit_cancel_on_fill_limit: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
 
@@ -1265,30 +1034,81 @@ Returns the order id, remaining size, cancel reason and number of fills for the
 
 Implementation
 
 
-public fun place_order<M: store + copy + drop>(
+public fun place_limit_order<M: store + copy + drop>(
     self: &mut Market<M>,
     user: &signer,
-    price: u64,
+    limit_price: u64,
     orig_size: u64,
     is_bid: bool,
-    time_in_force: u8,
+    time_in_force: TimeInForce,
     trigger_condition: Option<TriggerCondition>,
     metadata: M,
+    client_order_id: Option<u64>,
     max_fill_limit: u64,
     emit_cancel_on_fill_limit: bool,
     callbacks: &MarketClearinghouseCallbacks<M>
 ): OrderMatchResult {
-    let order_id = self.next_order_id();
     self.place_order_with_order_id(
         signer::address_of(user),
-        price,
+        option::some(limit_price),
         orig_size,
         orig_size,
         is_bid,
         time_in_force,
         trigger_condition,
         metadata,
-        order_id,
+        option::none(), // order_id
+        client_order_id,
+        max_fill_limit,
+        emit_cancel_on_fill_limit,
+        true,
+        callbacks
+    )
+}
+
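For orientation, a hedged sketch of the new call shape from a caller's perspective; the `market`, `user`, `metadata`, and `callbacks` bindings (and the usual `std::option` import) are assumed to already exist in the caller's scope:

```move
// Hypothetical call site: place a GTC limit bid for 10 units at price 100.
let result = market.place_limit_order(
    &user,
    100,                                  // limit_price
    10,                                   // orig_size
    true,                                 // is_bid
    market_types::good_till_cancelled(),  // time_in_force
    option::none(),                       // trigger_condition
    metadata,
    option::some(42),                     // client_order_id, caller-chosen tag
    1000,                                 // max_fill_limit
    false,                                // emit_cancel_on_fill_limit
    &callbacks
);
let (order_id, remaining_size, _cancel_reason, _fill_sizes) =
    result.destroy_order_match_result();
```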
+
+
+
+
+## Function `place_market_order`
+
+Places a market order - The order is guaranteed to be a taker order and will be matched immediately.
+
+
+public fun place_market_order<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, orig_size: u64, is_bid: bool, metadata: M, client_order_id: option::Option<u64>, max_fill_limit: u64, emit_cancel_on_fill_limit: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+
+
+
+
+Implementation
+
+
+public fun place_market_order<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user: &signer,
+    orig_size: u64,
+    is_bid: bool,
+    metadata: M,
+    client_order_id: Option<u64>,
+    max_fill_limit: u64,
+    emit_cancel_on_fill_limit: bool,
+    callbacks: &MarketClearinghouseCallbacks<M>
+): OrderMatchResult {
+    self.place_order_with_order_id(
+        signer::address_of(user),
+        option::none(),
+        orig_size,
+        orig_size,
+        is_bid,
+        market_types::immediate_or_cancel(), // market orders are always IOC
+        option::none(), // trigger_condition
+        metadata,
+        option::none(), // order_id
+        client_order_id,
         max_fill_limit,
         emit_cancel_on_fill_limit,
         true,
@@ -1307,7 +1127,7 @@ Returns the order id, remaining size, cancel reason and number of fills for the
 
 
 
-public fun next_order_id<M: copy, drop, store>(self: &mut market::Market<M>): u64
+public fun next_order_id<M: copy, drop, store>(self: &mut market::Market<M>): order_book_types::OrderIdType
 
@@ -1316,9 +1136,8 @@ Returns the order id, remaining size, cancel reason and number of fills for the
 
 Implementation
 
-public fun next_order_id<M: store + copy + drop>(self: &mut Market<M>): u64 {
-    self.last_order_id += 1;
-    self.last_order_id
+public fun next_order_id<M: store + copy + drop>(self: &mut Market<M>): OrderIdType {
+    new_order_id_type(self.order_id_generator.next_ascending_id())
 }
 
@@ -1358,7 +1177,7 @@ Returns the order id, remaining size, cancel reason and number of fills for the
 
-fun emit_event_for_order<M: copy, drop, store>(self: &market::Market<M>, order_id: u64, user: address, orig_size: u64, remaining_size: u64, size_delta: u64, price: u64, is_bid: bool, is_taker: bool, status: u8, details: &string::String)
+fun emit_event_for_order<M: copy, drop, store>(self: &market::Market<M>, order_id: order_book_types::OrderIdType, client_order_id: option::Option<u64>, user: address, orig_size: u64, remaining_size: u64, size_delta: u64, price: option::Option<u64>, is_bid: bool, is_taker: bool, status: market_types::OrderStatus, details: &string::String)
 
@@ -1369,15 +1188,16 @@ Returns the order id, remaining size, cancel reason and number of fills for the
fun emit_event_for_order<M: store + copy + drop>(
     self: &Market<M>,
-    order_id: u64,
+    order_id: OrderIdType,
+    client_order_id: Option<u64>,
     user: address,
     orig_size: u64,
     remaining_size: u64,
     size_delta: u64,
-    price: u64,
+    price: Option<u64>,
     is_bid: bool,
     is_taker: bool,
-    status: u8,
+    status: OrderStatus,
     details: &String
 ) {
     // Final check whether event sending is enabled
@@ -1386,13 +1206,14 @@ Returns the order id, remaining size, cancel reason and number of fills for the
             OrderEvent {
                 parent: self.parent,
                 market: self.market,
-                order_id,
+                order_id: order_id.get_order_id_value(),
+                client_order_id,
                 user,
                 orig_size,
                 remaining_size,
                 size_delta,
                 price,
-                is_buy: is_bid,
+                is_bid: is_bid,
                 is_taker,
                 status,
                 details: *details
@@ -1404,60 +1225,6 @@ Returns the order id, remaining size, cancel reason and number of fills for the
 
 
 
-
-
-
-## Function `place_order_with_user_addr`
-
-Similar to place_order API but instead of a signer, it takes a user address - can be used in case trading
-functionality is delegated to a different address. Please note that it is the responsibility of the caller
-to verify that the transaction signer is authorized to place orders on behalf of the user.
-
-
-public fun place_order_with_user_addr<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: u64, orig_size: u64, is_bid: bool, time_in_force: u8, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, max_fill_limit: u64, emit_cancel_on_fill_limit: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
-
-
-
-
-Implementation
-
-
-public fun place_order_with_user_addr<M: store + copy + drop>(
-    self: &mut Market<M>,
-    user_addr: address,
-    price: u64,
-    orig_size: u64,
-    is_bid: bool,
-    time_in_force: u8,
-    trigger_condition: Option<TriggerCondition>,
-    metadata: M,
-    max_fill_limit: u64,
-    emit_cancel_on_fill_limit: bool,
-    callbacks: &MarketClearinghouseCallbacks<M>
-): OrderMatchResult {
-    let order_id = self.next_order_id();
-    self.place_order_with_order_id(
-        user_addr,
-        price,
-        orig_size,
-        orig_size,
-        is_bid,
-        time_in_force,
-        trigger_condition,
-        metadata,
-        order_id,
-        max_fill_limit,
-        emit_cancel_on_fill_limit,
-        true,
-        callbacks
-    )
-}
-
- - -
@@ -1466,7 +1233,7 @@ to verify that the transaction signer is authorized to place orders on behalf of
 
-fun place_maker_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: u64, orig_size: u64, remaining_size: u64, fill_sizes: vector<u64>, is_bid: bool, time_in_force: u8, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, order_id: u64, emit_order_open: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+fun place_maker_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, limit_price: option::Option<u64>, orig_size: u64, remaining_size: u64, fill_sizes: vector<u64>, is_bid: bool, time_in_force: market_types::TimeInForce, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, order_id: order_book_types::OrderIdType, client_order_id: option::Option<u64>, emit_order_open: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
 
@@ -1478,24 +1245,26 @@ to verify that the transaction signer is authorized to place orders on behalf of
fun place_maker_order_internal<M: store + copy + drop>(
     self: &mut Market<M>,
     user_addr: address,
-    price: u64,
+    limit_price: Option<u64>,
     orig_size: u64,
     remaining_size: u64,
     fill_sizes: vector<u64>,
     is_bid: bool,
-    time_in_force: u8,
+    time_in_force: TimeInForce,
     trigger_condition: Option<TriggerCondition>,
     metadata: M,
-    order_id: u64,
+    order_id: OrderIdType,
+    client_order_id: Option<u64>,
     emit_order_open: bool,
     callbacks: &MarketClearinghouseCallbacks<M>
 ): OrderMatchResult {
     // Validate that the order is valid from position management perspective
-    if (time_in_force == TIME_IN_FORCE_IOC) {
+    if (time_in_force == market_types::immediate_or_cancel() || limit_price.is_none()) {
         return self.cancel_order_internal(
             user_addr,
-            price,
+            limit_price,
             order_id,
+            client_order_id,
             orig_size,
             remaining_size,
             fill_sizes,
@@ -1508,30 +1277,35 @@ to verify that the transaction signer is authorized to place orders on behalf of
     };
 
     if (emit_order_open) {
-        emit_event_for_order(
-            self,
+        self.emit_event_for_order(
             order_id,
+            client_order_id,
             user_addr,
             orig_size,
             remaining_size,
             orig_size,
-            price,
+            limit_price,
             is_bid,
-            false, // is_taker
-            ORDER_STATUS_OPEN,
+            false,
+            market_types::order_status_open(),
             &std::string::utf8(b"")
         );
     };
 
     callbacks.place_maker_order(
-        user_addr, order_id, is_bid, price, remaining_size, metadata
+        user_addr,
+        order_id,
+        is_bid,
+        limit_price.destroy_some(),
+        remaining_size,
+        metadata
     );
     self.order_book.place_maker_order(
         new_order_request(
             user_addr,
             order_id,
-            option::none(),
-            price,
+            client_order_id,
+            limit_price.destroy_some(),
             orig_size,
             remaining_size,
             is_bid,
@@ -1558,7 +1332,7 @@ to verify that the transaction signer is authorized to place orders on behalf of
 
 
 
-fun cancel_maker_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, maker_order: &order_book_types::Order<M>, order_id: u64, maker_address: address, maker_cancellation_reason: string::String, unsettled_size: u64, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
+fun cancel_maker_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, maker_order: &order_book_types::Order<M>, client_order_id: option::Option<u64>, maker_address: address, order_id: order_book_types::OrderIdType, maker_cancellation_reason: string::String, unsettled_size: u64, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
 
@@ -1570,25 +1344,25 @@ to verify that the transaction signer is authorized to place orders on behalf of
fun cancel_maker_order_internal<M: store + copy + drop>(
     self: &mut Market<M>,
     maker_order: &Order<M>,
-    order_id: u64,
+    client_order_id: Option<u64>,
     maker_address: address,
+    order_id: OrderIdType,
     maker_cancellation_reason: String,
     unsettled_size: u64,
     callbacks: &MarketClearinghouseCallbacks<M>
 ) {
     let maker_cancel_size = unsettled_size + maker_order.get_remaining_size();
-
-    emit_event_for_order(
-        self,
+    self.emit_event_for_order(
         order_id,
+        client_order_id,
         maker_address,
         maker_order.get_orig_size(),
         0,
         maker_cancel_size,
-        maker_order.get_price(),
+        option::some(maker_order.get_price()),
         maker_order.is_bid(),
         false,
-        ORDER_STATUS_CANCELLED,
+        market_types::order_status_cancelled(),
         &maker_cancellation_reason
     );
     // If the maker is invalid cancel the maker order and continue to the next maker order
@@ -1611,7 +1385,7 @@ to verify that the transaction signer is authorized to place orders on behalf of
 
 
 
-fun cancel_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: u64, order_id: u64, orig_size: u64, size_delta: u64, fill_sizes: vector<u64>, is_bid: bool, is_taker: bool, cancel_reason: market::OrderCancellationReason, cancel_details: string::String, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+fun cancel_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, limit_price: option::Option<u64>, order_id: order_book_types::OrderIdType, client_order_id: option::Option<u64>, orig_size: u64, size_delta: u64, fill_sizes: vector<u64>, is_bid: bool, is_taker: bool, cancel_reason: market::OrderCancellationReason, cancel_details: string::String, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
 
@@ -1623,8 +1397,9 @@ to verify that the transaction signer is authorized to place orders on behalf of
fun cancel_order_internal<M: store + copy + drop>(
     self: &mut Market<M>,
     user_addr: address,
-    price: u64,
-    order_id: u64,
+    limit_price: Option<u64>,
+    order_id: OrderIdType,
+    client_order_id: Option<u64>,
     orig_size: u64,
     size_delta: u64,
     fill_sizes: vector<u64>,
@@ -1634,17 +1409,17 @@ to verify that the transaction signer is authorized to place orders on behalf of
     cancel_details: String,
     callbacks: &MarketClearinghouseCallbacks<M>
 ): OrderMatchResult {
-    emit_event_for_order(
-        self,
+    self.emit_event_for_order(
         order_id,
+        client_order_id,
         user_addr,
         orig_size,
-        0, // remaining size
+        0,
         size_delta,
-        price,
+        limit_price,
         is_bid,
         is_taker,
-        ORDER_STATUS_CANCELLED,
+        market_types::order_status_cancelled(),
         &cancel_details
     );
     callbacks.cleanup_order(
@@ -1661,6 +1436,165 @@ to verify that the transaction signer is authorized to place orders on behalf of
 
 
 
+
+
+
+
+## Function `settle_single_trade`
+
+
+
+
fun settle_single_trade<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: option::Option<u64>, orig_size: u64, remaining_size: &mut u64, is_bid: bool, metadata: M, order_id: order_book_types::OrderIdType, client_order_id: option::Option<u64>, callbacks: &market_types::MarketClearinghouseCallbacks<M>, fill_sizes: &mut vector<u64>): option::Option<market::OrderCancellationReason>
+
+
+
+
+Implementation
+
+
+fun settle_single_trade<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user_addr: address,
+    price: Option<u64>,
+    orig_size: u64,
+    remaining_size: &mut u64,
+    is_bid: bool,
+    metadata: M,
+    order_id: OrderIdType,
+    client_order_id: Option<u64>,
+    callbacks: &MarketClearinghouseCallbacks<M>,
+    fill_sizes: &mut vector<u64>
+): Option<OrderCancellationReason> {
+    let result = self.order_book
+        .get_single_match_for_taker(price, *remaining_size, is_bid);
+    let (
+        maker_order, maker_matched_size
+    ) = result.destroy_single_order_match();
+    if (!self.config.allow_self_trade && maker_order.get_account() == user_addr) {
+        self.cancel_maker_order_internal(
+            &maker_order,
+            maker_order.get_client_order_id(),
+            maker_order.get_account(),
+            maker_order.get_order_id(),
+            std::string::utf8(b"Disallowed self trading"),
+            maker_matched_size,
+            callbacks
+        );
+        return option::none();
+    };
+    let fill_id = self.next_fill_id();
+    let settle_result = callbacks.settle_trade(
+        user_addr,
+        order_id,
+        maker_order.get_account(),
+        maker_order.get_order_id(),
+        fill_id,
+        is_bid,
+        maker_order.get_price(), // Order is always matched at the price of the maker
+        maker_matched_size,
+        metadata,
+        maker_order.get_metadata_from_order()
+    );
+
+    let unsettled_maker_size = maker_matched_size;
+    let settled_size = settle_result.get_settled_size();
+    if (settled_size > 0) {
+        *remaining_size -= settled_size;
+        unsettled_maker_size -= settled_size;
+        fill_sizes.push_back(settled_size);
+        // Event for taker fill
+        self.emit_event_for_order(
+            order_id,
+            client_order_id,
+            user_addr,
+            orig_size,
+            *remaining_size,
+            settled_size,
+            option::some(maker_order.get_price()),
+            is_bid,
+            true,
+            market_types::order_status_filled(),
+            &std::string::utf8(b"")
+        );
+        // Event for maker fill
+        self.emit_event_for_order(
+            maker_order.get_order_id(),
+            maker_order.get_client_order_id(),
+            maker_order.get_account(),
+            maker_order.get_orig_size(),
+            maker_order.get_remaining_size() + unsettled_maker_size,
+            settled_size,
+            option::some(maker_order.get_price()),
+            !is_bid,
+            false,
+            market_types::order_status_filled(),
+            &std::string::utf8(b"")
+        );
+    };
+
+    let maker_cancellation_reason = settle_result.get_maker_cancellation_reason();
+
+    let taker_cancellation_reason = settle_result.get_taker_cancellation_reason();
+    if (taker_cancellation_reason.is_some()) {
+        self.cancel_order_internal(
+            user_addr,
+            price,
+            order_id,
+            client_order_id,
+            orig_size,
+            *remaining_size,
+            *fill_sizes,
+            is_bid,
+            true, // is_taker
+            OrderCancellationReason::ClearinghouseSettleViolation,
+            taker_cancellation_reason.destroy_some(),
+            callbacks
+        );
+        if (maker_cancellation_reason.is_none() && unsettled_maker_size > 0) {
+            // If the taker is cancelled but the maker is not cancelled, then we need to re-insert
+            // the maker order back into the order book
+            self.order_book.reinsert_maker_order(
+                new_order_request(
+                    maker_order.get_account(),
+                    maker_order.get_order_id(),
+                    maker_order.get_client_order_id(),
+                    maker_order.get_price(),
+                    maker_order.get_orig_size(),
+                    unsettled_maker_size,
+                    !is_bid,
+                    option::none(),
+                    maker_order.get_metadata_from_order()
+                ),
+                maker_order
+            );
+        };
+        return option::some(OrderCancellationReason::ClearinghouseSettleViolation);
+    };
+    if (maker_cancellation_reason.is_some()) {
+        self.cancel_maker_order_internal(
+            &maker_order,
+            maker_order.get_client_order_id(),
+            maker_order.get_account(),
+            maker_order.get_order_id(),
+            maker_cancellation_reason.destroy_some(),
+            unsettled_maker_size,
+            callbacks
+        );
+    } else if (maker_order.get_remaining_size() == 0) {
+        callbacks.cleanup_order(
+            maker_order.get_account(),
+            maker_order.get_order_id(),
+            !is_bid, // is_bid is inverted for maker orders
+            0 // 0 because the order is fully filled
+        );
+    };
+    option::none()
+}
+
+ + +
@@ -1675,7 +1609,7 @@ the caller does not want to emit an open order event for a taker in case the take
 of fill limit violation in the previous transaction and the order is just a continuation of the previous order.
 
 
-public fun place_order_with_order_id<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: u64, orig_size: u64, remaining_size: u64, is_bid: bool, time_in_force: u8, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, order_id: u64, max_fill_limit: u64, cancel_on_fill_limit: bool, emit_taker_order_open: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+public fun place_order_with_order_id<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, limit_price: option::Option<u64>, orig_size: u64, remaining_size: u64, is_bid: bool, time_in_force: market_types::TimeInForce, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, order_id: option::Option<order_book_types::OrderIdType>, client_order_id: option::Option<u64>, max_fill_limit: u64, cancel_on_fill_limit: bool, emit_taker_order_open: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
 
@@ -1687,14 +1621,15 @@ of fill limit violation in the previous transaction and the order is just a con
public fun place_order_with_order_id<M: store + copy + drop>(
     self: &mut Market<M>,
     user_addr: address,
-    price: u64,
+    limit_price: Option<u64>,
     orig_size: u64,
     remaining_size: u64,
     is_bid: bool,
-    time_in_force: u8,
+    time_in_force: TimeInForce,
     trigger_condition: Option<TriggerCondition>,
     metadata: M,
-    order_id: u64,
+    order_id: Option<OrderIdType>,
+    client_order_id: Option<u64>,
     max_fill_limit: u64,
     cancel_on_fill_limit: bool,
     emit_taker_order_open: bool,
@@ -1704,23 +1639,31 @@ of fill limit violation  in the previous transaction and the order is just a con
         orig_size > 0 && remaining_size > 0,
         EINVALID_ORDER
     );
+    if (order_id.is_none()) {
+        // If order id is not provided, generate a new order id
+        order_id = option::some(self.next_order_id());
+    };
+    let order_id = order_id.destroy_some();
     // TODO(skedia) is_taker_order API can actually return false positive as the maker orders might not be valid.
     // Changes are needed to ensure the maker order is valid for this order to be a valid taker order.
     // TODO(skedia) reconcile the semantics around global order id vs account local id.
+    let is_taker_order =
+        self.order_book.is_taker_order(limit_price, is_bid, trigger_condition);
     if (
         !callbacks.validate_order_placement(
             user_addr,
             order_id,
-            true, // is_taker
+            is_taker_order, // is_taker
             is_bid,
-            price,
+            limit_price,
             remaining_size,
             metadata
         )) {
         return self.cancel_order_internal(
             user_addr,
-            price,
+            limit_price,
             order_id,
+            client_order_id,
             orig_size,
             0, // 0 because order was never placed
             vector[],
@@ -1732,27 +1675,25 @@ of fill limit violation  in the previous transaction and the order is just a con
         );
     };
 
-    let is_taker_order =
-        self.order_book.is_taker_order(price, is_bid, trigger_condition);
     if (emit_taker_order_open) {
-        emit_event_for_order(
-            self,
+        self.emit_event_for_order(
             order_id,
+            client_order_id,
             user_addr,
             orig_size,
             remaining_size,
             orig_size,
-            price,
+            limit_price,
             is_bid,
             is_taker_order,
-            ORDER_STATUS_OPEN,
+            market_types::order_status_open(),
             &std::string::utf8(b"")
         );
     };
     if (!is_taker_order) {
         return self.place_maker_order_internal(
             user_addr,
-            price,
+            limit_price,
             orig_size,
             remaining_size,
             vector[],
@@ -1761,6 +1702,7 @@ of fill limit violation  in the previous transaction and the order is just a con
             trigger_condition,
             metadata,
             order_id,
+            client_order_id,
             false,
             callbacks
         );
@@ -1768,11 +1710,12 @@ of fill limit violation  in the previous transaction and the order is just a con
 
     // NOTE: We should always use is_taker: true for this order past this
     // point so that indexer can consistently track the order's status
-    if (time_in_force == TIME_IN_FORCE_POST_ONLY) {
+    if (time_in_force == market_types::post_only()) {
         return self.cancel_order_internal(
             user_addr,
-            price,
+            limit_price,
             order_id,
+            client_order_id,
             orig_size,
             remaining_size,
             vector[],
@@ -1785,130 +1728,26 @@ of fill limit violation  in the previous transaction and the order is just a con
     };
     let fill_sizes = vector::empty();
     loop {
-        let result =
-            self.order_book.get_single_match_for_taker(price, remaining_size, is_bid);
-        let (maker_order, maker_matched_size) = result.destroy_single_order_match();
-        let (maker_address, maker_order_id) =
-            maker_order.get_order_id().destroy_order_id_type();
-        if (!self.config.allow_self_trade && maker_address == user_addr) {
-            self.cancel_maker_order_internal(
-                &maker_order,
-                maker_order_id,
-                maker_address,
-                std::string::utf8(b"Disallowed self trading"),
-                maker_matched_size,
-                callbacks
-            );
-            continue;
-        };
-
-        let fill_id = self.next_fill_id();
-
-        let settle_result =
-            callbacks.settle_trade(
+        let taker_cancellation_reason =
+            self.settle_single_trade(
                 user_addr,
-                maker_address,
-                order_id,
-                maker_order_id,
-                fill_id,
+                limit_price,
+                orig_size,
+                &mut remaining_size,
                 is_bid,
-                maker_order.get_price(), // Order is always matched at the price of the maker
-                maker_matched_size,
                 metadata,
-                maker_order.get_metadata_from_order()
-            );
-
-        let unsettled_maker_size = maker_matched_size;
-        let settled_size = settle_result.get_settled_size();
-        if (settled_size > 0) {
-            remaining_size -= settled_size;
-            unsettled_maker_size -= settled_size;
-            fill_sizes.push_back(settled_size);
-            // Event for taker fill
-            emit_event_for_order(
-                self,
                 order_id,
-                user_addr,
-                orig_size,
-                remaining_size,
-                settled_size,
-                maker_order.get_price(),
-                is_bid,
-                true, // is_taker
-                ORDER_STATUS_FILLED,
-                &std::string::utf8(b"")
-            );
-            // Event for maker fill
-            emit_event_for_order(
-                self,
-                maker_order_id,
-                maker_address,
-                maker_order.get_orig_size(),
-                maker_order.get_remaining_size() + unsettled_maker_size,
-                settled_size,
-                maker_order.get_price(),
-                !is_bid,
-                false, // is_taker
-                ORDER_STATUS_FILLED,
-                &std::string::utf8(b"")
-            );
-        };
-
-        let maker_cancellation_reason = settle_result.get_maker_cancellation_reason();
-        if (maker_cancellation_reason.is_some()) {
-            self.cancel_maker_order_internal(
-                &maker_order,
-                maker_order_id,
-                maker_address,
-                maker_cancellation_reason.destroy_some(),
-                unsettled_maker_size,
-                callbacks
+                client_order_id,
+                callbacks,
+                &mut fill_sizes
             );
-        };
-
-        let taker_cancellation_reason = settle_result.get_taker_cancellation_reason();
         if (taker_cancellation_reason.is_some()) {
-            let result =
-                self.cancel_order_internal(
-                    user_addr,
-                    price,
-                    order_id,
-                    orig_size,
-                    remaining_size,
-                    fill_sizes,
-                    is_bid,
-                    true, // is_taker
-                    OrderCancellationReason::ClearinghouseSettleViolation,
-                    taker_cancellation_reason.destroy_some(),
-                    callbacks
-                );
-            if (maker_cancellation_reason.is_none() && unsettled_maker_size > 0) {
-                // If the taker is cancelled but the maker is not cancelled, then we need to re-insert
-                // the maker order back into the order book
-                self.order_book.reinsert_maker_order(
-                    new_order_request(
-                        maker_address,
-                        maker_order_id,
-                        option::some(maker_order.get_unique_priority_idx()),
-                        maker_order.get_price(),
-                        maker_order.get_orig_size(),
-                        unsettled_maker_size,
-                        !is_bid,
-                        option::none(),
-                        maker_order.get_metadata_from_order()
-                    )
-                );
-            };
-            return result;
-        };
-
-        if (maker_order.get_remaining_size() == 0) {
-            callbacks.cleanup_order(
-                maker_address,
-                maker_order_id,
-                !is_bid, // is_bid is inverted for maker orders
-                0 // 0 because the order is fully filled
-            );
+            return OrderMatchResult {
+                order_id,
+                remaining_size,
+                cancel_reason: taker_cancellation_reason,
+                fill_sizes
+            }
         };
         if (remaining_size == 0) {
             callbacks.cleanup_order(
@@ -1919,13 +1758,14 @@ of fill limit violation  in the previous transaction and the order is just a con
 
         // Check if the next iteration will still match
         let is_taker_order =
-            self.order_book.is_taker_order(price, is_bid, option::none());
+            self.order_book.is_taker_order(limit_price, is_bid, option::none());
         if (!is_taker_order) {
-            if (time_in_force == TIME_IN_FORCE_IOC) {
+            if (time_in_force == market_types::immediate_or_cancel()) {
                 return self.cancel_order_internal(
                     user_addr,
-                    price,
+                    limit_price,
                     order_id,
+                    client_order_id,
                     orig_size,
                     remaining_size,
                     fill_sizes,
@@ -1939,7 +1779,7 @@ of fill limit violation  in the previous transaction and the order is just a con
                 // If the order is not a taker order, then we can place it as a maker order
                 return self.place_maker_order_internal(
                     user_addr,
-                    price,
+                    limit_price,
                     orig_size,
                     remaining_size,
                     fill_sizes,
@@ -1948,6 +1788,7 @@ of fill limit violation  in the previous transaction and the order is just a con
                     trigger_condition,
                     metadata,
                     order_id,
+                    client_order_id,
                     true, // emit_order_open
                     callbacks
                 );
@@ -1958,8 +1799,9 @@ of fill limit violation  in the previous transaction and the order is just a con
             if (cancel_on_fill_limit) {
                 return self.cancel_order_internal(
                     user_addr,
-                    price,
+                    limit_price,
                     order_id,
+                    client_order_id,
                     orig_size,
                     remaining_size,
                     fill_sizes,
@@ -2001,7 +1843,7 @@ of fill limit violation  in the previous transaction and the order is just a con
 Cancels an order - this will cancel the order and emit an event for the order cancellation.
 
 
-public fun cancel_order<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, order_id: u64, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
+public fun cancel_order<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, order_id: order_book_types::OrderIdType, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
 
@@ -2013,41 +1855,39 @@ Cancels an order - this will cancel the order and emit an event for the order ca
public fun cancel_order<M: store + copy + drop>(
     self: &mut Market<M>,
     user: &signer,
-    order_id: u64,
+    order_id: OrderIdType,
     callbacks: &MarketClearinghouseCallbacks<M>
 ) {
     let account = signer::address_of(user);
-    let maybe_order = self.order_book.cancel_order(account, order_id);
-    if (maybe_order.is_some()) {
-        let order = maybe_order.destroy_some();
-        let (
-            order_id_type,
-            _unique_priority_idx,
-            price,
-            orig_size,
-            remaining_size,
-            is_bid,
-            _trigger_condition,
-            _metadata
-        ) = order.destroy_order();
-        callbacks.cleanup_order(
-            account, order_id, is_bid, remaining_size
-        );
-        let (user, order_id) = order_id_type.destroy_order_id_type();
-        emit_event_for_order(
-            self,
-            order_id,
-            user,
-            orig_size,
-            remaining_size,
-            remaining_size,
-            price,
-            is_bid,
-            false, // is_taker
-            ORDER_STATUS_CANCELLED,
-            &std::string::utf8(b"Order cancelled")
-        );
-    }
+    let order = self.order_book.cancel_order(account, order_id);
+    assert!(account == order.get_account(), ENOT_ORDER_CREATOR);
+    let (
+        account,
+        order_id,
+        client_order_id,
+        price,
+        orig_size,
+        remaining_size,
+        is_bid,
+        _trigger_condition,
+        _metadata
+    ) = order.destroy_order();
+    callbacks.cleanup_order(
+        account, order_id, is_bid, remaining_size
+    );
+    self.emit_event_for_order(
+        order_id,
+        client_order_id,
+        account,
+        orig_size,
+        remaining_size,
+        remaining_size,
+        option::some(price),
+        is_bid,
+        false,
+        market_types::order_status_cancelled(),
+        &std::string::utf8(b"Order cancelled")
+    );
 }
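A short hedged sketch of the new cancellation flow with typed order ids, continuing the hypothetical placement sketch above; it assumes `get_remaining_size` returns 0 for orders that are no longer in the book:

```move
// Hypothetical call site: cancel by OrderIdType and confirm removal.
market.cancel_order(&user, order_id, &callbacks);
assert!(market.get_remaining_size(order_id) == 0, 1);
```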
 
@@ -2062,7 +1902,7 @@ Cancels an order - this will cancel the order and emit an event for the order ca
 
 Cancels an order - this will cancel the order and emit an event for the order cancellation.
 
 
-public fun decrease_order_size<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, order_id: u64, size_delta: u64, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
+public fun decrease_order_size<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, order_id: order_book_types::OrderIdType, size_delta: u64, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
 
@@ -2074,18 +1914,20 @@ Cancels an order - this will cancel the order and emit an event for the order ca
public fun decrease_order_size<M: store + copy + drop>(
     self: &mut Market<M>,
     user: &signer,
-    order_id: u64,
+    order_id: OrderIdType,
     size_delta: u64,
     callbacks: &MarketClearinghouseCallbacks<M>
 ) {
     let account = signer::address_of(user);
     self.order_book.decrease_order_size(account, order_id, size_delta);
-    let maybe_order = self.order_book.get_order(account, order_id);
+    let maybe_order = self.order_book.get_order(order_id);
     assert!(maybe_order.is_some(), EORDER_DOES_NOT_EXIST);
     let (order, _) = maybe_order.destroy_some().destroy_order_from_state();
+    assert!(order.get_account() == account, ENOT_ORDER_CREATOR);
     let (
-        order_id_type,
-        _unique_priority_idx,
+        user,
+        order_id,
+        client_order_id,
         price,
         orig_size,
         remaining_size,
@@ -2093,22 +1935,21 @@ Cancels an order - this will cancel the order and emit an event for the order ca
         _trigger_condition,
         _metadata
     ) = order.destroy_order();
-    let (user, order_id) = order_id_type.destroy_order_id_type();
     callbacks.decrease_order_size(
         user, order_id, is_bid, price, remaining_size
     );
 
-    emit_event_for_order(
-        self,
+    self.emit_event_for_order(
         order_id,
+        client_order_id,
         user,
         orig_size,
         remaining_size,
         size_delta,
-        price,
+        option::some(price),
         is_bid,
-        false, // is_taker
-        ORDER_SIZE_REDUCED,
+        false,
+        market_types::order_status_size_reduced(),
         &std::string::utf8(b"Order size reduced")
     );
 }
@@ -2125,7 +1966,7 @@ Cancels an order - this will cancel the order and emit an event for the order ca
 Remaining size of the order in the order book.
 
 
-public fun get_remaining_size<M: copy, drop, store>(self: &market::Market<M>, user: address, order_id: u64): u64
+public fun get_remaining_size<M: copy, drop, store>(self: &market::Market<M>, order_id: order_book_types::OrderIdType): u64
 
@@ -2135,9 +1976,9 @@ Remaining size of the order in the order book.
public fun get_remaining_size<M: store + copy + drop>(
-    self: &Market<M>, user: address, order_id: u64
+    self: &Market<M>, order_id: OrderIdType
 ): u64 {
-    self.order_book.get_remaining_size(user, order_id)
+    self.order_book.get_remaining_size(order_id)
 }
 
 
diff --git a/aptos-move/framework/aptos-experimental/doc/market_types.md b/aptos-move/framework/aptos-experimental/doc/market_types.md
index 5febf56cc442d..a8bdaa1f4f2d7 100644
--- a/aptos-move/framework/aptos-experimental/doc/market_types.md
+++ b/aptos-move/framework/aptos-experimental/doc/market_types.md
@@ -5,9 +5,20 @@
 
-- [Struct `SettleTradeResult`](#0x7_market_types_SettleTradeResult)
-- [Struct `MarketClearinghouseCallbacks`](#0x7_market_types_MarketClearinghouseCallbacks)
+- [Enum `TimeInForce`](#0x7_market_types_TimeInForce)
+- [Enum `OrderStatus`](#0x7_market_types_OrderStatus)
+- [Enum `SettleTradeResult`](#0x7_market_types_SettleTradeResult)
+- [Enum `MarketClearinghouseCallbacks`](#0x7_market_types_MarketClearinghouseCallbacks)
 - [Constants](#@Constants_0)
+- [Function `time_in_force_from_index`](#0x7_market_types_time_in_force_from_index)
+- [Function `good_till_cancelled`](#0x7_market_types_good_till_cancelled)
+- [Function `post_only`](#0x7_market_types_post_only)
+- [Function `immediate_or_cancel`](#0x7_market_types_immediate_or_cancel)
+- [Function `order_status_open`](#0x7_market_types_order_status_open)
+- [Function `order_status_filled`](#0x7_market_types_order_status_filled)
+- [Function `order_status_cancelled`](#0x7_market_types_order_status_cancelled)
+- [Function `order_status_rejected`](#0x7_market_types_order_status_rejected)
+- [Function `order_status_size_reduced`](#0x7_market_types_order_status_size_reduced)
 - [Function `new_settle_trade_result`](#0x7_market_types_new_settle_trade_result)
 - [Function `new_market_clearinghouse_callbacks`](#0x7_market_types_new_market_clearinghouse_callbacks)
 - [Function `get_settled_size`](#0x7_market_types_get_settled_size)
@@ -22,21 +33,193 @@
use 0x1::option;
 use 0x1::string;
+use 0x7::order_book_types;
 
+
+## Enum `TimeInForce`
+
+Order time in force
+
+enum TimeInForce has copy, drop, store
+
+Variants (all without fields): GTC, POST_ONLY, IOC
+
+
+## Enum `OrderStatus`
+
+enum OrderStatus has copy, drop, store
+
+Variants (all without fields): OPEN, FILLED, CANCELLED, REJECTED, SIZE_REDUCED
+
+ -## Struct `SettleTradeResult` +## Enum `SettleTradeResult` -
struct SettleTradeResult has drop
+
enum SettleTradeResult has drop
 
+
+Variants + + +
+V1 + +
Fields @@ -63,57 +246,73 @@
+ + + + -## Struct `MarketClearinghouseCallbacks` +## Enum `MarketClearinghouseCallbacks` -
struct MarketClearinghouseCallbacks<M: copy, drop, store> has drop
+
enum MarketClearinghouseCallbacks<M: copy, drop, store> has drop
 
+
+Variants + + +
+V1 + +
Fields
-settle_trade_f: |(address, address, u64, u64, u64, bool, u64, u64, M, M)|market_types::SettleTradeResult has copy + drop
+settle_trade_f: |(address, order_book_types::OrderIdType, address, order_book_types::OrderIdType, u64, bool, u64, u64, M, M)|market_types::SettleTradeResult has copy + drop
+ settle_trade_f arguments: taker, taker_order_id, maker, maker_order_id, fill_id, is_taker_long, price, size
-validate_order_placement_f: |(address, u64, bool, bool, u64, u64, M)|bool has copy + drop
+validate_order_placement_f: |(address, order_book_types::OrderIdType, bool, bool, option::Option<u64>, u64, M)|bool has copy + drop
+ validate_order_placement_f arguments: account, order_id, is_taker, is_bid, price, size
-place_maker_order_f: |(address, u64, bool, u64, u64, M)| has copy + drop
+place_maker_order_f: |(address, order_book_types::OrderIdType, bool, u64, u64, M)| has copy + drop
+ place_maker_order_f arguments: account, order_id, is_bid, price, size, order_metadata
-cleanup_order_f: |(address, u64, bool, u64)| has copy + drop
+cleanup_order_f: |(address, order_book_types::OrderIdType, bool, u64)| has copy + drop
+ cleanup_order_f arguments: account, order_id, is_bid, remaining_size
-decrease_order_size_f: |(address, u64, bool, u64, u64)| has copy + drop
+decrease_order_size_f: |(address, order_book_types::OrderIdType, bool, u64, u64)| has copy + drop
+ decrease_order_size_f arguments: account, order_id, is_bid, price, size
@@ -139,6 +338,239 @@
 
+const EINVALID_TIME_IN_FORCE: u64 = 3;
+
+
+## Function `time_in_force_from_index`
+
+public fun time_in_force_from_index(index: u8): market_types::TimeInForce
+
+Implementation
+
+public fun time_in_force_from_index(index: u8): TimeInForce {
+    if (index == 0) {
+        TimeInForce::GTC
+    } else if (index == 1) {
+        TimeInForce::POST_ONLY
+    } else if (index == 2) {
+        TimeInForce::IOC
+    } else {
+        abort EINVALID_TIME_IN_FORCE
+    }
+}
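
The index mapping above is the entry-function boundary for time-in-force values. A minimal sketch of the round trip, assuming a test-only module with access to `market_types` (module and test names are illustrative, not part of this diff):

```move
#[test_only]
module aptos_experimental::tif_mapping_test {
    use aptos_experimental::market_types;

    #[test]
    fun index_round_trip() {
        // 0 = GTC, 1 = POST_ONLY, 2 = IOC; any other index aborts with EINVALID_TIME_IN_FORCE.
        assert!(market_types::time_in_force_from_index(0) == market_types::good_till_cancelled(), 0);
        assert!(market_types::time_in_force_from_index(1) == market_types::post_only(), 1);
        assert!(market_types::time_in_force_from_index(2) == market_types::immediate_or_cancel(), 2);
    }
}
```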
+
+## Function `good_till_cancelled`
+
+public fun good_till_cancelled(): market_types::TimeInForce
+
+Implementation
+
+public fun good_till_cancelled(): TimeInForce {
+    TimeInForce::GTC
+}
+
+## Function `post_only`
+
+public fun post_only(): market_types::TimeInForce
+
+Implementation
+
+public fun post_only(): TimeInForce {
+    TimeInForce::POST_ONLY
+}
+
+## Function `immediate_or_cancel`
+
+public fun immediate_or_cancel(): market_types::TimeInForce
+
+Implementation
+
+public fun immediate_or_cancel(): TimeInForce {
+    TimeInForce::IOC
+}
+
+## Function `order_status_open`
+
+public fun order_status_open(): market_types::OrderStatus
+
+Implementation
+
+public fun order_status_open(): OrderStatus {
+    OrderStatus::OPEN
+}
+
+## Function `order_status_filled`
+
+public fun order_status_filled(): market_types::OrderStatus
+
+Implementation
+
+public fun order_status_filled(): OrderStatus {
+    OrderStatus::FILLED
+}
+
+## Function `order_status_cancelled`
+
+public fun order_status_cancelled(): market_types::OrderStatus
+
+Implementation
+
+public fun order_status_cancelled(): OrderStatus {
+    OrderStatus::CANCELLED
+}
+
+## Function `order_status_rejected`
+
+public fun order_status_rejected(): market_types::OrderStatus
+
+Implementation
+
+public fun order_status_rejected(): OrderStatus {
+    OrderStatus::REJECTED
+}
+
+## Function `order_status_size_reduced`
+
+public fun order_status_size_reduced(): market_types::OrderStatus
+
+Implementation
+
+public fun order_status_size_reduced(): OrderStatus {
+    OrderStatus::SIZE_REDUCED
+}
+
 
## Function `new_settle_trade_result`
 
@@ -159,7 +591,7 @@
     maker_cancellation_reason: Option<String>,
     taker_cancellation_reason: Option<String>
 ): SettleTradeResult {
-    SettleTradeResult {
+    SettleTradeResult::V1 {
         settled_size,
         maker_cancellation_reason,
         taker_cancellation_reason
@@ -177,7 +609,7 @@
-public fun new_market_clearinghouse_callbacks<M: copy, drop, store>(settle_trade_f: |(address, address, u64, u64, u64, bool, u64, u64, M, M)|market_types::SettleTradeResult has copy + drop, validate_order_placement_f: |(address, u64, bool, bool, u64, u64, M)|bool has copy + drop, place_maker_order_f: |(address, u64, bool, u64, u64, M)| has copy + drop, cleanup_order_f: |(address, u64, bool, u64)| has copy + drop, decrease_order_size_f: |(address, u64, bool, u64, u64)| has copy + drop): market_types::MarketClearinghouseCallbacks<M>
+public fun new_market_clearinghouse_callbacks<M: copy, drop, store>(settle_trade_f: |(address, order_book_types::OrderIdType, address, order_book_types::OrderIdType, u64, bool, u64, u64, M, M)|market_types::SettleTradeResult has copy + drop, validate_order_placement_f: |(address, order_book_types::OrderIdType, bool, bool, option::Option<u64>, u64, M)|bool has copy + drop, place_maker_order_f: |(address, order_book_types::OrderIdType, bool, u64, u64, M)| has copy + drop, cleanup_order_f: |(address, order_book_types::OrderIdType, bool, u64)| has copy + drop, decrease_order_size_f: |(address, order_book_types::OrderIdType, bool, u64, u64)| has copy + drop): market_types::MarketClearinghouseCallbacks<M>
 
@@ -187,15 +619,18 @@
public fun new_market_clearinghouse_callbacks<M: store + copy + drop>(
-    // settle_trade_f arguments: taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size
-    settle_trade_f: |address, address, u64, u64, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy,
-    // validate_settlement_update_f arguments: accoun, is_taker, is_long, price, size
-    validate_order_placement_f: |address, u64, bool, bool, u64, u64, M| bool has drop + copy,
-    place_maker_order_f: |address, u64, bool, u64, u64, M| has drop + copy,
-    cleanup_order_f: |address, u64, bool, u64| has drop + copy,
-    decrease_order_size_f: |address, u64, bool, u64, u64| has drop + copy,
+    // settle_trade_f arguments: taker, taker_order_id, maker, maker_order_id, fill_id, is_taker_long, price, size
+    settle_trade_f: |address, OrderIdType, address, OrderIdType, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy,
+    // validate_order_placement_f arguments: account, order_id, is_taker, is_bid, price, size
+    validate_order_placement_f: |address, OrderIdType, bool, bool, Option<u64>, u64, M| bool has drop + copy,
+    // place_maker_order_f arguments: account, order_id, is_bid, price, size, order_metadata
+    place_maker_order_f: |address, OrderIdType, bool, u64, u64, M| has drop + copy,
+    // cleanup_order_f arguments: account, order_id, is_bid, remaining_size
+    cleanup_order_f: |address, OrderIdType, bool, u64| has drop + copy,
+    // decrease_order_size_f arguments: account, order_id, is_bid, price, size
+    decrease_order_size_f: |address, OrderIdType, bool, u64, u64| has drop + copy,
 ): MarketClearinghouseCallbacks<M> {
-    MarketClearinghouseCallbacks {
+    MarketClearinghouseCallbacks::V1 {
         settle_trade_f,
         validate_order_placement_f,
         place_maker_order_f,
@@ -287,7 +722,7 @@
 
 
 
-public fun settle_trade<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, taker: address, maker: address, taker_order_id: u64, maker_order_id: u64, fill_id: u64, is_taker_long: bool, price: u64, size: u64, taker_metadata: M, maker_metadata: M): market_types::SettleTradeResult
+public(friend) fun settle_trade<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, taker: address, taker_order_id: order_book_types::OrderIdType, maker: address, maker_order_id: order_book_types::OrderIdType, fill_id: u64, is_taker_long: bool, price: u64, size: u64, taker_metadata: M, maker_metadata: M): market_types::SettleTradeResult
 
@@ -296,19 +731,19 @@
-public fun settle_trade<M: store + copy + drop>(
+public(friend) fun settle_trade<M: store + copy + drop>(
     self: &MarketClearinghouseCallbacks<M>,
     taker: address,
+    taker_order_id: OrderIdType,
     maker: address,
-    taker_order_id: u64,
-    maker_order_id:u64,
+    maker_order_id: OrderIdType,
     fill_id: u64,
     is_taker_long: bool,
     price: u64,
     size: u64,
     taker_metadata: M,
     maker_metadata: M): SettleTradeResult {
-    (self.settle_trade_f)(taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size, taker_metadata, maker_metadata)
+    (self.settle_trade_f)(taker, taker_order_id, maker, maker_order_id, fill_id, is_taker_long, price, size, taker_metadata, maker_metadata)
 }
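
For orientation, a sketch of how a clearinghouse might construct these callbacks against the new `OrderIdType`-based signatures. The metadata type `M` and all lambda bodies are illustrative assumptions, not the framework's own implementation:

```move
// Assumes a caller-defined metadata type `M` with copy + drop + store.
let callbacks = market_types::new_market_clearinghouse_callbacks<M>(
    // settle_trade_f: settle the full match; a real clearinghouse may settle less or cancel.
    |_taker, _taker_order_id, _maker, _maker_order_id, _fill_id, _is_taker_long, _price, size, _taker_md, _maker_md| {
        market_types::new_settle_trade_result(size, option::none(), option::none())
    },
    // validate_order_placement_f: accept every order in this sketch.
    |_account, _order_id, _is_taker, _is_bid, _price, _size, _md| true,
    // place_maker_order_f, cleanup_order_f, decrease_order_size_f: no-ops here.
    |_account, _order_id, _is_bid, _price, _size, _md| {},
    |_account, _order_id, _is_bid, _remaining_size| {},
    |_account, _order_id, _is_bid, _price, _size| {}
);
```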
 
@@ -322,7 +757,7 @@
 
-public fun validate_order_placement<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: u64, is_taker: bool, is_bid: bool, price: u64, size: u64, order_metadata: M): bool
+public(friend) fun validate_order_placement<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: order_book_types::OrderIdType, is_taker: bool, is_bid: bool, price: option::Option<u64>, size: u64, order_metadata: M): bool
 
@@ -331,13 +766,13 @@
-public fun validate_order_placement<M: store + copy + drop>(
+public(friend) fun validate_order_placement<M: store + copy + drop>(
     self: &MarketClearinghouseCallbacks<M>,
     account: address,
-    order_id: u64,
+    order_id: OrderIdType,
     is_taker: bool,
     is_bid: bool,
-    price: u64,
+    price: Option<u64>,
     size: u64,
     order_metadata: M): bool {
     (self.validate_order_placement_f)(account, order_id, is_taker, is_bid, price, size, order_metadata)
@@ -354,7 +789,7 @@
 
 
 
-public fun place_maker_order<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: u64, is_bid: bool, price: u64, size: u64, order_metadata: M)
+public(friend) fun place_maker_order<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: order_book_types::OrderIdType, is_bid: bool, price: u64, size: u64, order_metadata: M)
 
@@ -363,10 +798,10 @@
-public fun place_maker_order<M: store + copy + drop>(
+public(friend) fun place_maker_order<M: store + copy + drop>(
     self: &MarketClearinghouseCallbacks<M>,
     account: address,
-    order_id: u64,
+    order_id: OrderIdType,
     is_bid: bool,
     price: u64,
     size: u64,
@@ -385,7 +820,7 @@
 
 
 
-public fun cleanup_order<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: u64, is_bid: bool, remaining_size: u64)
+public(friend) fun cleanup_order<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: order_book_types::OrderIdType, is_bid: bool, remaining_size: u64)
 
@@ -394,10 +829,10 @@
-public fun cleanup_order<M: store + copy + drop>(
+public(friend) fun cleanup_order<M: store + copy + drop>(
     self: &MarketClearinghouseCallbacks<M>,
     account: address,
-    order_id: u64,
+    order_id: OrderIdType,
     is_bid: bool,
     remaining_size: u64) {
     (self.cleanup_order_f)(account, order_id, is_bid, remaining_size)
@@ -414,7 +849,7 @@
 
 
 
-public fun decrease_order_size<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: u64, is_bid: bool, price: u64, size: u64)
+public(friend) fun decrease_order_size<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: order_book_types::OrderIdType, is_bid: bool, price: u64, size: u64)
 
@@ -423,10 +858,10 @@
-public fun decrease_order_size<M: store + copy + drop>(
+public(friend) fun decrease_order_size<M: store + copy + drop>(
     self: &MarketClearinghouseCallbacks<M>,
     account: address,
-    order_id: u64,
+    order_id: OrderIdType,
     is_bid: bool,
     price: u64,
     size: u64,) {
diff --git a/aptos-move/framework/aptos-experimental/doc/order_book.md b/aptos-move/framework/aptos-experimental/doc/order_book.md
index bde80d0e7a1e7..fac9530ab3c60 100644
--- a/aptos-move/framework/aptos-experimental/doc/order_book.md
+++ b/aptos-move/framework/aptos-experimental/doc/order_book.md
@@ -16,7 +16,7 @@ types of pending orders are supported.
 3. Orders: This is a BigOrderMap of order id to order details.
 
 
--  [Struct `OrderRequest`](#0x7_order_book_OrderRequest)
+-  [Enum `OrderRequest`](#0x7_order_book_OrderRequest)
 -  [Enum `OrderBook`](#0x7_order_book_OrderBook)
 -  [Enum `OrderType`](#0x7_order_book_OrderType)
 -  [Struct `TestMetadata`](#0x7_order_book_TestMetadata)
@@ -38,7 +38,6 @@ types of pending orders are supported.
 -  [Function `best_ask_price`](#0x7_order_book_best_ask_price)
 -  [Function `get_slippage_price`](#0x7_order_book_get_slippage_price)
 -  [Function `take_ready_time_based_orders`](#0x7_order_book_take_ready_time_based_orders)
--  [Function `place_order_and_get_matches`](#0x7_order_book_place_order_and_get_matches)
 
 
 
use 0x1::big_ordered_map;
@@ -54,15 +53,23 @@ types of pending orders are supported.
 
 
 
-## Struct `OrderRequest`
+## Enum `OrderRequest`
 
 
 
-struct OrderRequest<M: copy, drop, store> has copy, drop
+enum OrderRequest<M: copy, drop, store> has copy, drop
 
+Variants: V1
+
 Fields
@@ -75,13 +82,13 @@ types of pending orders are supported.
-account_order_id: u64
+order_id: order_book_types::OrderIdType
-unique_priority_idx: option::Option<order_book_types::UniqueIdxType>
+client_order_id: option::Option<u64>
@@ -105,7 +112,7 @@ types of pending orders are supported.
-is_buy: bool
+is_bid: bool
@@ -125,6 +132,10 @@ types of pending orders are supported.
@@ -168,6 +179,12 @@ types of pending orders are supported.
+ascending_id_generator: order_book_types::AscendingIdGenerator
@@ -275,38 +292,38 @@ types of pending orders are supported.
 
## Constants
 
-const U256_MAX: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
+const EORDER_ALREADY_EXISTS: u64 = 1;
 
-const EORDER_ALREADY_EXISTS: u64 = 1;
+const EINVALID_ADD_SIZE_TO_ORDER: u64 = 6;
 
-const EINVALID_ADD_SIZE_TO_ORDER: u64 = 6;
+const EINVALID_INACTIVE_ORDER_STATE: u64 = 5;
 
-const EINVALID_INACTIVE_ORDER_STATE: u64 = 5;
+const EORDER_CREATOR_MISMATCH: u64 = 9;
 
@@ -338,13 +355,22 @@ types of pending orders are supported.
+const E_REINSERT_ORDER_MISMATCH: u64 = 8;
+
 
## Function `new_order_request`
 
-public fun new_order_request<M: copy, drop, store>(account: address, account_order_id: u64, unique_priority_idx: option::Option<order_book_types::UniqueIdxType>, price: u64, orig_size: u64, remaining_size: u64, is_buy: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M): order_book::OrderRequest<M>
+public fun new_order_request<M: copy, drop, store>(account: address, order_id: order_book_types::OrderIdType, client_order_id: option::Option<u64>, price: u64, orig_size: u64, remaining_size: u64, is_bid: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M): order_book::OrderRequest<M>
 
@@ -355,23 +381,23 @@ types of pending orders are supported.
public fun new_order_request<M: store + copy + drop>(
     account: address,
-    account_order_id: u64,
-    unique_priority_idx: Option<UniqueIdxType>,
+    order_id: OrderIdType,
+    client_order_id: Option<u64>,
     price: u64,
     orig_size: u64,
     remaining_size: u64,
-    is_buy: bool,
+    is_bid: bool,
     trigger_condition: Option<TriggerCondition>,
     metadata: M
 ): OrderRequest<M> {
-    OrderRequest {
+    OrderRequest::V1 {
         account,
-        account_order_id,
-        unique_priority_idx,
+        order_id,
+        client_order_id,
         price,
         orig_size,
         remaining_size,
-        is_buy,
+        is_bid,
         trigger_condition,
         metadata
     }
@@ -401,7 +427,8 @@ types of pending orders are supported.
     OrderBook::V1 {
         orders: new_default_big_ordered_map(),
         active_orders: new_active_order_book(),
-        pending_orders: new_pending_order_book_index()
+        pending_orders: new_pending_order_book_index(),
+        ascending_id_generator: new_ascending_id_generator()
     }
 }
 
@@ -415,11 +442,13 @@ types of pending orders are supported.
 
## Function `cancel_order`
 
 Cancels an order from the order book. If the order is active, it is removed from the active order book else
-it is removed from the pending order book. The API doesn't abort if the order is not found in the order book -
-this is a TODO for now.
+it is removed from the pending order book.
+If the order doesn't exist, it aborts with EORDER_NOT_FOUND.
+
+order_creator is passed only to verify that the order cancellation is authorized correctly.
 
-public fun cancel_order<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, account: address, account_order_id: u64): option::Option<order_book_types::Order<M>>
+public fun cancel_order<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_creator: address, order_id: order_book_types::OrderIdType): order_book_types::Order<M>
 
@@ -429,32 +458,35 @@ this is a TODO for now.
public fun cancel_order<M: store + copy + drop>(
-    self: &mut OrderBook<M>, account: address, account_order_id: u64
-): Option<Order<M>> {
-    let order_id = new_order_id_type(account, account_order_id);
+    self: &mut OrderBook<M>, order_creator: address, order_id: OrderIdType
+): Order<M> {
     assert!(self.orders.contains(&order_id), EORDER_NOT_FOUND);
     let order_with_state = self.orders.remove(&order_id);
     let (order, is_active) = order_with_state.destroy_order_from_state();
+    assert!(order_creator == order.get_account(), EORDER_CREATOR_MISMATCH);
     if (is_active) {
-        let (_, unique_priority_idx, bid_price, _orig_size, _size, is_buy, _, _) =
+        let unique_priority_idx = order.get_unique_priority_idx();
+        let (_account, _order_id, _client_order_id, bid_price, _orig_size, _size, is_bid, _, _) =
             order.destroy_order();
-        self.active_orders.cancel_active_order(bid_price, unique_priority_idx, is_buy);
+        self.active_orders.cancel_active_order(bid_price, unique_priority_idx, is_bid);
     } else {
+        let unique_priority_idx = order.get_unique_priority_idx();
         let (
-            _,
-            unique_priority_idx,
+            _account,
+            _order_id,
+            _client_order_id,
             _bid_price,
             _orig_size,
             _size,
-            is_buy,
+            is_bid,
             trigger_condition,
             _
         ) = order.destroy_order();
         self.pending_orders.cancel_pending_order(
-            trigger_condition.destroy_some(), unique_priority_idx, is_buy
+            trigger_condition.destroy_some(), unique_priority_idx, is_bid
         );
     };
-    return option::some(order)
+    return order
 }
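
A usage sketch of the new signature, assuming the caller kept the `OrderIdType` returned at placement time (variable names are illustrative):

```move
// Aborts with EORDER_NOT_FOUND if the id is unknown, and with
// EORDER_CREATOR_MISMATCH if `creator` did not place the order.
let order = order_book.cancel_order(creator, order_id);
// The returned Order<M> can be destructured for any follow-up cleanup.
let (_account, _order_id, _client_order_id, _price, _orig_size,
     _remaining_size, _is_bid, _trigger_condition, _metadata) = order.destroy_order();
```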
 
@@ -469,7 +501,7 @@
 
 Checks if the order is a taker order, i.e., matched immediately against the active order book.
 
-public fun is_taker_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, price: u64, is_buy: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>): bool
+public fun is_taker_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, price: option::Option<u64>, is_bid: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>): bool
 
@@ -480,14 +512,14 @@ Checks if the order is a taker order i.e., matched immediatedly with the active
public fun is_taker_order<M: store + copy + drop>(
     self: &OrderBook<M>,
-    price: u64,
-    is_buy: bool,
+    price: Option<u64>,
+    is_bid: bool,
     trigger_condition: Option<TriggerCondition>
 ): bool {
     if (trigger_condition.is_some()) {
         return false;
     };
-    return self.active_orders.is_taker_order(price, is_buy)
+    return self.active_orders.is_taker_order(price, is_bid)
 }
 
@@ -519,37 +551,34 @@ else it is added to the active order book. The API aborts if its not a maker ord
         return self.place_pending_maker_order(order_req);
     };
-    let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
-    let unique_priority_idx =
-        if (order_req.unique_priority_idx.is_some()) {
-            order_req.unique_priority_idx.destroy_some()
-        } else {
-            generate_unique_idx_fifo_tiebraker()
-        };
+    let ascending_idx =
+        new_unique_idx_type(self.ascending_id_generator.next_ascending_id());
     assert!(
-        !self.orders.contains(&order_id),
+        !self.orders.contains(&order_req.order_id),
         error::invalid_argument(EORDER_ALREADY_EXISTS)
     );
     let order = new_order(
-        order_id,
-        unique_priority_idx,
+        order_req.order_id,
+        order_req.account,
+        ascending_idx,
+        order_req.client_order_id,
         order_req.price,
         order_req.orig_size,
         order_req.remaining_size,
-        order_req.is_buy,
+        order_req.is_bid,
         order_req.trigger_condition,
         order_req.metadata
     );
-    self.orders.add(order_id, new_order_with_state(order, true));
+    self.orders.add(order_req.order_id, new_order_with_state(order, true));
     self.active_orders.place_maker_order(
-        order_id,
+        order_req.order_id,
         order_req.price,
-        unique_priority_idx,
+        ascending_idx,
         order_req.remaining_size,
-        order_req.is_buy
+        order_req.is_bid
     );
 }
@@ -567,7 +596,7 @@ but the clearinghouse fails to settle all or part of the order. If the order doe
 it is added to the order book; if it exists, its size is updated.
 
-public fun reinsert_maker_order<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_req: order_book::OrderRequest<M>)
+public fun reinsert_maker_order<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_req: order_book::OrderRequest<M>, original_order: order_book_types::Order<M>)
 
@@ -577,21 +606,40 @@ it is added to the order book, if it exists, it's size is updated.
public fun reinsert_maker_order<M: store + copy + drop>(
-    self: &mut OrderBook<M>, order_req: OrderRequest<M>
+    self: &mut OrderBook<M>, order_req: OrderRequest<M>, original_order: Order<M>
 ) {
+    assert!(
+        &original_order.get_order_id() == &order_req.order_id,
+        E_REINSERT_ORDER_MISMATCH
+    );
+    assert!(
+        &original_order.get_account() == &order_req.account,
+        E_REINSERT_ORDER_MISMATCH
+    );
+    assert!(
+        original_order.get_orig_size() == order_req.orig_size,
+        E_REINSERT_ORDER_MISMATCH
+    );
+    // TODO: decide what the rule should be for remaining_size; see the test_maker_order_reinsert_not_exists unit test.
+    // assert!(
+    //     original_order.get_remaining_size() >= order_req.remaining_size,
+    //     E_REINSERT_ORDER_MISMATCH
+    // );
+    assert!(original_order.get_price() == order_req.price, E_REINSERT_ORDER_MISMATCH);
+    assert!(original_order.is_bid() == order_req.is_bid, E_REINSERT_ORDER_MISMATCH);
+
     assert!(order_req.trigger_condition.is_none(), E_NOT_ACTIVE_ORDER);
-    let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
-    if (!self.orders.contains(&order_id)) {
+    if (!self.orders.contains(&order_req.order_id)) {
         return self.place_maker_order(order_req);
     };
-    let order_with_state = self.orders.remove(&order_id);
+    let order_with_state = self.orders.remove(&order_req.order_id);
     order_with_state.increase_remaining_size(order_req.remaining_size);
-    self.orders.add(order_id, order_with_state);
+    self.orders.add(order_req.order_id, order_with_state);
     self.active_orders.increase_order_size(
         order_req.price,
-        order_req.unique_priority_idx.destroy_some(),
+        original_order.get_unique_priority_idx(),
         order_req.remaining_size,
-        order_req.is_buy
+        order_req.is_bid
     );
 }
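
A sketch of the intended call pattern: after the clearinghouse fails to settle part of a match, the caller rebuilds an `OrderRequest` for the unsettled remainder and passes the matched `Order` back as evidence. Sizes and names here are illustrative assumptions:

```move
let order_req = order_book::new_order_request(
    maker,                 // account that owns the order
    order_id,              // must equal original_order's id, or the call aborts
    client_order_id,
    price,
    orig_size,
    unsettled_size,        // remainder to put back on the book
    is_bid,
    option::none(),        // reinsertion is only for active (non-triggered) orders
    metadata
);
order_book.reinsert_maker_order(order_req, original_order);
```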
 
@@ -618,21 +666,19 @@ it is added to the order book, if it exists, it's size is updated.
fun place_pending_maker_order<M: store + copy + drop>(
     self: &mut OrderBook<M>, order_req: OrderRequest<M>
 ) {
-    let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
-    let unique_priority_idx =
-        if (order_req.unique_priority_idx.is_some()) {
-            order_req.unique_priority_idx.destroy_some()
-        } else {
-            generate_unique_idx_fifo_tiebraker()
-        };
+    let order_id = order_req.order_id;
+    let ascending_idx =
+        new_unique_idx_type(self.ascending_id_generator.next_ascending_id());
     let order =
         new_order(
             order_id,
-            unique_priority_idx,
+            order_req.account,
+            ascending_idx,
+            order_req.client_order_id,
             order_req.price,
             order_req.orig_size,
             order_req.remaining_size,
-            order_req.is_buy,
+            order_req.is_bid,
             order_req.trigger_condition,
             order_req.metadata
         );
@@ -642,8 +688,8 @@ it is added to the order book, if it exists, it's size is updated.
     self.pending_orders.place_pending_maker_order(
         order_id,
         order_req.trigger_condition.destroy_some(),
-        unique_priority_idx,
-        order_req.is_buy
+        ascending_idx,
+        order_req.is_bid
     );
 }
 
@@ -660,7 +706,7 @@
 Returns a single match for a taker order. It is the responsibility of the caller to ensure
 (via the is_taker_order API) that the order is a taker order before calling this API; otherwise it will abort.
 
-public fun get_single_match_for_taker<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, price: u64, size: u64, is_buy: bool): order_book_types::SingleOrderMatch<M>
+public fun get_single_match_for_taker<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, price: option::Option<u64>, size: u64, is_bid: bool): order_book_types::SingleOrderMatch<M>
 
@@ -671,11 +717,11 @@ API to ensure that the order is a taker order before calling this API, otherwise
public fun get_single_match_for_taker<M: store + copy + drop>(
     self: &mut OrderBook<M>,
-    price: u64,
+    price: Option<u64>,
     size: u64,
-    is_buy: bool
+    is_bid: bool
 ): SingleOrderMatch<M> {
-    let result = self.active_orders.get_single_match_result(price, size, is_buy);
+    let result = self.active_orders.get_single_match_result(price, size, is_bid);
     let (order_id, matched_size, remaining_size) =
         result.destroy_active_matched_order();
     let order_with_state = self.orders.remove(&order_id);
@@ -702,8 +748,10 @@ if the size delta is greater than or equal to the remaining size of the order. P
 not cancel the order if the size delta is equal to the remaining size of the order, to avoid unintended
 cancellation of the order. Please use the cancel_order API to cancel the order.
 
+order_creator is passed only to verify that the order cancellation is authorized correctly
 
-public fun decrease_order_size<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, account: address, account_order_id: u64, size_delta: u64)
+public fun decrease_order_size<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_creator: address, order_id: order_book_types::OrderIdType, size_delta: u64)
 
@@ -713,11 +761,11 @@ cancellation of the order. Please use the cancel_order API to cance
public fun decrease_order_size<M: store + copy + drop>(
-    self: &mut OrderBook<M>, account: address, account_order_id: u64, size_delta: u64
+    self: &mut OrderBook<M>, order_creator: address, order_id: OrderIdType, size_delta: u64
 ) {
-    let order_id = new_order_id_type(account, account_order_id);
     assert!(self.orders.contains(&order_id), EORDER_NOT_FOUND);
     let order_with_state = self.orders.remove(&order_id);
+    assert!(order_creator == order_with_state.get_order_from_state().get_account(), EORDER_CREATOR_MISMATCH);
     order_with_state.decrease_remaining_size(size_delta);
     if (order_with_state.is_active_order()) {
         let order = order_with_state.get_order_from_state();
@@ -742,7 +790,7 @@ cancellation of the order. Please use the cancel_order API to cance
 
 
 
-public fun is_active_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, account: address, account_order_id: u64): bool
+public fun is_active_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, order_id: order_book_types::OrderIdType): bool
 
@@ -752,9 +800,8 @@ cancellation of the order. Please use the cancel_order API to cance
public fun is_active_order<M: store + copy + drop>(
-    self: &OrderBook<M>, account: address, account_order_id: u64
+    self: &OrderBook<M>, order_id: OrderIdType
 ): bool {
-    let order_id = new_order_id_type(account, account_order_id);
     if (!self.orders.contains(&order_id)) {
         return false;
     };
@@ -772,7 +819,7 @@ cancellation of the order. Please use the cancel_order API to cance
 
 
 
-public fun get_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, account: address, account_order_id: u64): option::Option<order_book_types::OrderWithState<M>>
+public fun get_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, order_id: order_book_types::OrderIdType): option::Option<order_book_types::OrderWithState<M>>
 
@@ -782,9 +829,8 @@ cancellation of the order. Please use the cancel_order API to cance
public fun get_order<M: store + copy + drop>(
-    self: &OrderBook<M>, account: address, account_order_id: u64
+    self: &OrderBook<M>, order_id: OrderIdType
 ): Option<OrderWithState<M>> {
-    let order_id = new_order_id_type(account, account_order_id);
     if (!self.orders.contains(&order_id)) {
         return option::none();
     };
@@ -802,7 +848,7 @@ cancellation of the order. Please use the cancel_order API to cance
 
 
 
-public fun get_remaining_size<M: copy, drop, store>(self: &order_book::OrderBook<M>, account: address, account_order_id: u64): u64
+public fun get_remaining_size<M: copy, drop, store>(self: &order_book::OrderBook<M>, order_id: order_book_types::OrderIdType): u64
 
@@ -812,9 +858,8 @@ cancellation of the order. Please use the cancel_order API to cance
public fun get_remaining_size<M: store + copy + drop>(
-    self: &OrderBook<M>, account: address, account_order_id: u64
+    self: &OrderBook<M>, order_id: OrderIdType
 ): u64 {
-    let order_id = new_order_id_type(account, account_order_id);
     if (!self.orders.contains(&order_id)) {
         return 0;
     };
@@ -916,7 +961,7 @@ Removes and returns the orders that are ready to be executed based on the curren
 
 
 
-public fun get_slippage_price<M: copy, drop, store>(self: &order_book::OrderBook<M>, is_buy: bool, slippage_pct: u64): option::Option<u64>
+public fun get_slippage_price<M: copy, drop, store>(self: &order_book::OrderBook<M>, is_bid: bool, slippage_pct: u64): option::Option<u64>
 
@@ -926,9 +971,9 @@ Removes and returns the orders that are ready to be executed based on the curren
public fun get_slippage_price<M: store + copy + drop>(
-    self: &OrderBook<M>, is_buy: bool, slippage_pct: u64
+    self: &OrderBook<M>, is_bid: bool, slippage_pct: u64
 ): Option<u64> {
-    self.active_orders.get_slippage_price(is_buy, slippage_pct)
+    self.active_orders.get_slippage_price(is_bid, slippage_pct)
 }
 
@@ -970,59 +1015,6 @@ Removes and returns the orders that are ready to be executed based on the time c
-
-## Function `place_order_and_get_matches`
-
-public fun place_order_and_get_matches<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_req: order_book::OrderRequest<M>): vector<order_book_types::SingleOrderMatch<M>>
-
-Implementation
-
-public fun place_order_and_get_matches<M: store + copy + drop>(
-    self: &mut OrderBook<M>, order_req: OrderRequest<M>
-): vector<SingleOrderMatch<M>> {
-    let match_results = vector::empty();
-    let remainig_size = order_req.remaining_size;
-    while (remainig_size > 0) {
-        if (!self.is_taker_order(order_req.price, order_req.is_buy, order_req.trigger_condition)) {
-            self.place_maker_order(
-                OrderRequest {
-                    account: order_req.account,
-                    account_order_id: order_req.account_order_id,
-                    unique_priority_idx: option::none(),
-                    price: order_req.price,
-                    orig_size: order_req.orig_size,
-                    remaining_size: remainig_size,
-                    is_buy: order_req.is_buy,
-                    trigger_condition: order_req.trigger_condition,
-                    metadata: order_req.metadata
-                }
-            );
-            return match_results;
-        };
-        let match_result =
-            self.get_single_match_for_taker(
-                order_req.price, remainig_size, order_req.is_buy
-            );
-        let matched_size = match_result.get_matched_size();
-        match_results.push_back(match_result);
-        remainig_size -= matched_size;
-    };
-    return match_results
-}
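
With `place_order_and_get_matches` gone from the order book, the matching loop lives in the caller (the market module). A condensed sketch of the equivalent flow under the new APIs, with illustrative variable names:

```move
// `price` is now Option<u64> (none for market orders).
let remaining = order_req_remaining_size;
while (remaining > 0
    && order_book.is_taker_order(price, is_bid, option::none())) {
    let single_match = order_book.get_single_match_for_taker(price, remaining, is_bid);
    remaining -= single_match.get_matched_size();
    // ...settle the match against the taker via the clearinghouse callbacks here...
};
// Any remainder would be placed as a maker order via place_maker_order.
```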
diff --git a/aptos-move/framework/aptos-experimental/doc/order_book_types.md b/aptos-move/framework/aptos-experimental/doc/order_book_types.md
index 885eaf1beee35..3f4454a7f4f7d 100644
--- a/aptos-move/framework/aptos-experimental/doc/order_book_types.md
+++ b/aptos-move/framework/aptos-experimental/doc/order_book_types.md
@@ -8,17 +8,19 @@
 - [Struct `OrderIdType`](#0x7_order_book_types_OrderIdType)
 - [Struct `UniqueIdxType`](#0x7_order_book_types_UniqueIdxType)
+- [Enum `AscendingIdGenerator`](#0x7_order_book_types_AscendingIdGenerator)
 - [Struct `ActiveMatchedOrder`](#0x7_order_book_types_ActiveMatchedOrder)
-- [Struct `SingleOrderMatch`](#0x7_order_book_types_SingleOrderMatch)
-- [Struct `Order`](#0x7_order_book_types_Order)
+- [Enum `SingleOrderMatch`](#0x7_order_book_types_SingleOrderMatch)
+- [Enum `Order`](#0x7_order_book_types_Order)
 - [Enum `TriggerCondition`](#0x7_order_book_types_TriggerCondition)
-- [Struct `OrderWithState`](#0x7_order_book_types_OrderWithState)
+- [Enum `OrderWithState`](#0x7_order_book_types_OrderWithState)
 - [Constants](#@Constants_0)
 - [Function `new_default_big_ordered_map`](#0x7_order_book_types_new_default_big_ordered_map)
 - [Function `get_slippage_pct_precision`](#0x7_order_book_types_get_slippage_pct_precision)
 - [Function `new_time_based_trigger_condition`](#0x7_order_book_types_new_time_based_trigger_condition)
 - [Function `new_order_id_type`](#0x7_order_book_types_new_order_id_type)
-- [Function `generate_unique_idx_fifo_tiebraker`](#0x7_order_book_types_generate_unique_idx_fifo_tiebraker)
+- [Function `new_ascending_id_generator`](#0x7_order_book_types_new_ascending_id_generator)
+- [Function `next_ascending_id`](#0x7_order_book_types_next_ascending_id)
 - [Function `new_unique_idx_type`](#0x7_order_book_types_new_unique_idx_type)
 - [Function `descending_idx`](#0x7_order_book_types_descending_idx)
 - [Function `new_active_matched_order`](#0x7_order_book_types_new_active_matched_order)
@@ -34,6 +36,7 @@
 - [Function `get_order_from_state`](#0x7_order_book_types_get_order_from_state)
 - [Function `get_metadata_from_state`](#0x7_order_book_types_get_metadata_from_state)
 - [Function `get_order_id`](#0x7_order_book_types_get_order_id)
+- [Function `get_account`](#0x7_order_book_types_get_account)
 - [Function `get_unique_priority_idx`](#0x7_order_book_types_get_unique_priority_idx)
 - [Function `get_metadata_from_order`](#0x7_order_book_types_get_metadata_from_order)
 - [Function `get_trigger_condition_from_order`](#0x7_order_book_types_get_trigger_condition_from_order)
@@ -44,21 +47,19 @@
 - [Function `get_unique_priority_idx_from_state`](#0x7_order_book_types_get_unique_priority_idx_from_state)
 - [Function `get_remaining_size`](#0x7_order_book_types_get_remaining_size)
 - [Function `get_orig_size`](#0x7_order_book_types_get_orig_size)
+- [Function `get_client_order_id`](#0x7_order_book_types_get_client_order_id)
 - [Function `destroy_order_from_state`](#0x7_order_book_types_destroy_order_from_state)
 - [Function `destroy_active_match_order`](#0x7_order_book_types_destroy_active_match_order)
 - [Function `destroy_order`](#0x7_order_book_types_destroy_order)
 - [Function `destroy_single_order_match`](#0x7_order_book_types_destroy_single_order_match)
-- [Function `destroy_order_id_type`](#0x7_order_book_types_destroy_order_id_type)
+- [Function `get_order_id_value`](#0x7_order_book_types_get_order_id_value)
 - [Function `is_active_order`](#0x7_order_book_types_is_active_order)
 - [Function `get_price`](#0x7_order_book_types_get_price)
 - [Function `is_bid`](#0x7_order_book_types_is_bid)
 
-use 0x1::bcs;
-use 0x1::big_ordered_map;
-use 0x1::from_bcs;
+use 0x1::big_ordered_map;
 use 0x1::option;
-use 0x1::transaction_context;
 
@@ -80,13 +81,34 @@
-account: address
-account_order_id: u64
+order_id: u128
 
## Struct `UniqueIdxType`
 
struct UniqueIdxType has copy, drop, store
 
 Fields
 
+idx: u128
@@ -96,24 +118,32 @@
-## Struct `UniqueIdxType`
+## Enum `AscendingIdGenerator`
 
-struct UniqueIdxType has copy, drop, store
+enum AscendingIdGenerator has drop, store
 
+Variants: FromCounter
+
 Fields
 
-idx: u256
+value: u64
 
@@ -121,6 +151,10 @@
@@ -164,15 +198,23 @@
-## Struct `SingleOrderMatch`
+## Enum `SingleOrderMatch`
 
-struct SingleOrderMatch<M: copy, drop, store> has copy, drop
+enum SingleOrderMatch<M: copy, drop, store> has copy, drop
 
+Variants: V1
+
 Fields
@@ -193,19 +235,31 @@
-## Struct `Order`
+## Enum `Order`
 
-struct Order<M: copy, drop, store> has copy, drop, store
+enum Order<M: copy, drop, store> has copy, drop, store
 
+Variants: V1
+
 Fields
@@ -216,6 +270,18 @@
+account: address
+
+client_order_id: option::Option<u64>
+
 unique_priority_idx: order_book_types::UniqueIdxType
@@ -262,6 +328,10 @@
@@ -349,15 +419,23 @@
-## Struct `OrderWithState`
+## Enum `OrderWithState`
 
-struct OrderWithState<M: copy, drop, store> has copy, drop, store
+enum OrderWithState<M: copy, drop, store> has copy, drop, store
 
+Variants: V1
+
 Fields
@@ -380,18 +458,13 @@
-## Constants
-
-const U256_MAX: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
-
+## Constants
@@ -457,6 +530,15 @@
+const U128_MAX: u128 = 340282366920938463463374607431768211455;
+
 
## Function `new_order_id_type`
 
@@ -539,7 +621,7 @@
-public fun new_order_id_type(account: address, account_order_id: u64): order_book_types::OrderIdType
+public fun new_order_id_type(order_id: u128): order_book_types::OrderIdType
 
@@ -548,8 +630,8 @@
-public fun new_order_id_type(account: address, account_order_id: u64): OrderIdType {
-    OrderIdType { account, account_order_id }
+public fun new_order_id_type(order_id: u128): OrderIdType {
+    OrderIdType { order_id }
 }
 
@@ -557,13 +639,13 @@
-## Function `generate_unique_idx_fifo_tiebraker`
+## Function `new_ascending_id_generator`
 
-public fun generate_unique_idx_fifo_tiebraker(): order_book_types::UniqueIdxType
+public(friend) fun new_ascending_id_generator(): order_book_types::AscendingIdGenerator
 
@@ -572,13 +654,33 @@
-public fun generate_unique_idx_fifo_tiebraker(): UniqueIdxType {
-    // TODO change from random to monothonically increasing value
-    new_unique_idx_type(
-        from_bcs::to_u256(
-            bcs::to_bytes(&transaction_context::generate_auid_address())
-        )
-    )
+public(friend) fun new_ascending_id_generator(): AscendingIdGenerator {
+    AscendingIdGenerator::FromCounter { value: 0 }
+}
+
+## Function `next_ascending_id`
+
+public(friend) fun next_ascending_id(self: &mut order_book_types::AscendingIdGenerator): u128
+
+Implementation
+
+public(friend) fun next_ascending_id(self: &mut AscendingIdGenerator): u128 {
+    self.value += 1;
+    self.value as u128
 }
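
The generator replaces the old AUID-based tiebreaker with a strictly increasing counter, so FIFO priority now follows insertion order deterministically. A sketch of its use from a friend module such as `order_book` (both functions are `public(friend)`):

```move
let generator = order_book_types::new_ascending_id_generator();
let first = generator.next_ascending_id();   // 1
let second = generator.next_ascending_id();  // 2 -- later orders always get larger ids
assert!(second > first, 0);
```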
 
@@ -592,7 +694,7 @@
-public fun new_unique_idx_type(idx: u256): order_book_types::UniqueIdxType
+public(friend) fun new_unique_idx_type(idx: u128): order_book_types::UniqueIdxType
 
@@ -601,7 +703,7 @@
-public fun new_unique_idx_type(idx: u256): UniqueIdxType {
+public(friend) fun new_unique_idx_type(idx: u128): UniqueIdxType {
     UniqueIdxType { idx }
 }
 
@@ -616,7 +718,7 @@
-public fun descending_idx(self: &order_book_types::UniqueIdxType): order_book_types::UniqueIdxType
+public(friend) fun descending_idx(self: &order_book_types::UniqueIdxType): order_book_types::UniqueIdxType
 
@@ -625,8 +727,8 @@
-public fun descending_idx(self: &UniqueIdxType): UniqueIdxType {
-    UniqueIdxType { idx: U256_MAX - self.idx }
+public(friend) fun descending_idx(self: &UniqueIdxType): UniqueIdxType {
+    UniqueIdxType { idx: U128_MAX - self.idx }
 }
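
Switching the constant from U256_MAX to U128_MAX keeps the mirroring trick intact: subtracting the index from the maximum value flips the sort order, so the side of the book that needs reverse iteration can reuse the same ascending map. An illustrative check (comparisons shown in comments, since ordering is applied by the map's comparator, not by `<` on the struct):

```move
// With idx values 5 < 7, the descending keys invert the ordering:
// U128_MAX - 5 > U128_MAX - 7, so the older order (idx 5) sorts after the
// newer one (idx 7) in an ascending scan of the mirrored index.
let _a = order_book_types::new_unique_idx_type(5).descending_idx();
let _b = order_book_types::new_unique_idx_type(7).descending_idx();
```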
 
@@ -640,7 +742,7 @@
-public fun new_active_matched_order(order_id: order_book_types::OrderIdType, matched_size: u64, remaining_size: u64): order_book_types::ActiveMatchedOrder
+public(friend) fun new_active_matched_order(order_id: order_book_types::OrderIdType, matched_size: u64, remaining_size: u64): order_book_types::ActiveMatchedOrder
 
@@ -649,7 +751,7 @@
-public fun new_active_matched_order(
+public(friend) fun new_active_matched_order(
     order_id: OrderIdType, matched_size: u64, remaining_size: u64
 ): ActiveMatchedOrder {
     ActiveMatchedOrder { order_id, matched_size, remaining_size }
@@ -666,7 +768,7 @@
 
 
 
-public fun destroy_active_matched_order(self: order_book_types::ActiveMatchedOrder): (order_book_types::OrderIdType, u64, u64)
+public(friend) fun destroy_active_matched_order(self: order_book_types::ActiveMatchedOrder): (order_book_types::OrderIdType, u64, u64)
 
@@ -675,7 +777,9 @@
-public fun destroy_active_matched_order(self: ActiveMatchedOrder): (OrderIdType, u64, u64) {
+public(friend) fun destroy_active_matched_order(
+    self: ActiveMatchedOrder
+): (OrderIdType, u64, u64) {
     (self.order_id, self.matched_size, self.remaining_size)
 }
 
@@ -690,7 +794,7 @@
-public fun new_order<M: copy, drop, store>(order_id: order_book_types::OrderIdType, unique_priority_idx: order_book_types::UniqueIdxType, price: u64, orig_size: u64, size: u64, is_buy: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M): order_book_types::Order<M>
+public(friend) fun new_order<M: copy, drop, store>(order_id: order_book_types::OrderIdType, account: address, unique_priority_idx: order_book_types::UniqueIdxType, client_order_id: option::Option<u64>, price: u64, orig_size: u64, size: u64, is_bid: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M): order_book_types::Order<M>
 
@@ -699,23 +803,27 @@
-public fun new_order<M: store + copy + drop>(
+public(friend) fun new_order<M: store + copy + drop>(
     order_id: OrderIdType,
+    account: address,
     unique_priority_idx: UniqueIdxType,
+    client_order_id: Option<u64>,
     price: u64,
     orig_size: u64,
     size: u64,
-    is_buy: bool,
+    is_bid: bool,
     trigger_condition: Option<TriggerCondition>,
     metadata: M
 ): Order<M> {
-    Order {
+    Order::V1 {
         order_id,
+        account,
         unique_priority_idx,
+        client_order_id,
         price,
         orig_size,
         remaining_size: size,
-        is_bid: is_buy,
+        is_bid,
         trigger_condition,
         metadata
     }
@@ -732,7 +840,7 @@
 
 
 
-public fun new_single_order_match<M: copy, drop, store>(order: order_book_types::Order<M>, matched_size: u64): order_book_types::SingleOrderMatch<M>
+public(friend) fun new_single_order_match<M: copy, drop, store>(order: order_book_types::Order<M>, matched_size: u64): order_book_types::SingleOrderMatch<M>
 
@@ -741,10 +849,10 @@
-public fun new_single_order_match<M: store + copy + drop>(
+public(friend) fun new_single_order_match<M: store + copy + drop>(
     order: Order<M>, matched_size: u64
 ): SingleOrderMatch<M> {
-    SingleOrderMatch { order, matched_size }
+    SingleOrderMatch::V1 { order, matched_size }
 }
 
@@ -758,7 +866,7 @@
-public fun get_active_matched_size(self: &order_book_types::ActiveMatchedOrder): u64
+public(friend) fun get_active_matched_size(self: &order_book_types::ActiveMatchedOrder): u64
 
@@ -767,7 +875,7 @@
-public fun get_active_matched_size(self: &ActiveMatchedOrder): u64 {
+public(friend) fun get_active_matched_size(self: &ActiveMatchedOrder): u64 {
     self.matched_size
 }
 
@@ -820,7 +928,7 @@
public fun new_order_with_state<M: store + copy + drop>(
     order: Order<M>, is_active: bool
 ): OrderWithState<M> {
-    OrderWithState { order, is_active }
+    OrderWithState::V1 { order, is_active }
 }
 
@@ -882,7 +990,7 @@
-public fun index(self: &order_book_types::TriggerCondition, is_buy: bool): (option::Option<u64>, option::Option<u64>, option::Option<u64>)
+public fun index(self: &order_book_types::TriggerCondition, is_bid: bool): (option::Option<u64>, option::Option<u64>, option::Option<u64>)
 
@@ -891,18 +999,18 @@
-public fun index(self: &TriggerCondition, is_buy: bool):
+public fun index(self: &TriggerCondition, is_bid: bool):
     (Option<u64>, Option<u64>, Option<u64>) {
     match(self) {
         TriggerCondition::TakeProfit(tp) => {
-            if (is_buy) {
+            if (is_bid) {
                 (option::some(*tp), option::none(), option::none())
             } else {
                 (option::none(), option::some(*tp), option::none())
             }
         }
         TriggerCondition::StopLoss(sl) => {
-            if (is_buy) {
+            if (is_bid) {
                 (option::none(), option::some(*sl), option::none())
             } else {
                 (option::some(*sl), option::none(), option::none())
@@ -993,6 +1101,30 @@
 
 
 
+
+## Function `get_account`
+
+public fun get_account<M: copy, drop, store>(self: &order_book_types::Order<M>): address
+
+Implementation
+
+public fun get_account<M: store + copy + drop>(self: &Order<M>): address {
+    self.account
+}
+
@@ -1001,7 +1133,7 @@
-public fun get_unique_priority_idx<M: copy, drop, store>(self: &order_book_types::Order<M>): order_book_types::UniqueIdxType
+public(friend) fun get_unique_priority_idx<M: copy, drop, store>(self: &order_book_types::Order<M>): order_book_types::UniqueIdxType
 
@@ -1010,7 +1142,9 @@
-public fun get_unique_priority_idx<M: store + copy + drop>(self: &Order<M>): UniqueIdxType {
+public(friend) fun get_unique_priority_idx<M: store + copy + drop>(
+    self: &Order<M>
+): UniqueIdxType {
     self.unique_priority_idx
 }
 
@@ -1246,6 +1380,30 @@
+
+## Function `get_client_order_id`
+
+public fun get_client_order_id<M: copy, drop, store>(self: &order_book_types::Order<M>): option::Option<u64>
+
+Implementation
+
+public fun get_client_order_id<M: store + copy + drop>(self: &Order<M>): Option<u64> {
+    self.client_order_id
+}
+
@@ -1304,7 +1462,7 @@
-public fun destroy_order<M: copy, drop, store>(self: order_book_types::Order<M>): (order_book_types::OrderIdType, order_book_types::UniqueIdxType, u64, u64, u64, bool, option::Option<order_book_types::TriggerCondition>, M)
+public fun destroy_order<M: copy, drop, store>(self: order_book_types::Order<M>): (address, order_book_types::OrderIdType, option::Option<u64>, u64, u64, u64, bool, option::Option<order_book_types::TriggerCondition>, M)
 
@@ -1315,16 +1473,29 @@
public fun destroy_order<M: store + copy + drop>(
     self: Order<M>
-): (OrderIdType, UniqueIdxType, u64, u64, u64, bool, Option<TriggerCondition>, M) {
+): (address, OrderIdType, Option<u64>, u64, u64, u64, bool, Option<TriggerCondition>, M) {
+    let Order::V1 {
+        order_id,
+        account,
+        client_order_id,
+        unique_priority_idx: _,
+        price,
+        orig_size,
+        remaining_size,
+        is_bid,
+        trigger_condition,
+        metadata
+    } = self;
     (
-        self.order_id,
-        self.unique_priority_idx,
-        self.price,
-        self.orig_size,
-        self.remaining_size,
-        self.is_bid,
-        self.trigger_condition,
-        self.metadata
+        account,
+        order_id,
+        client_order_id,
+        price,
+        orig_size,
+        remaining_size,
+        is_bid,
+        trigger_condition,
+        metadata
     )
 }
 
@@ -1359,13 +1530,13 @@
-## Function `destroy_order_id_type`
+## Function `get_order_id_value`
 
-public fun destroy_order_id_type(self: order_book_types::OrderIdType): (address, u64)
+public fun get_order_id_value(self: &order_book_types::OrderIdType): u128
 
@@ -1374,8 +1545,8 @@
-public fun destroy_order_id_type(self: OrderIdType): (address, u64) {
-    (self.account, self.account_order_id)
+public fun get_order_id_value(self: &OrderIdType): u128 {
+    self.order_id
 }
 
diff --git a/aptos-move/framework/aptos-experimental/doc/pending_order_book_index.md b/aptos-move/framework/aptos-experimental/doc/pending_order_book_index.md
index f115008451451..1fcabb08e06e0 100644
--- a/aptos-move/framework/aptos-experimental/doc/pending_order_book_index.md
+++ b/aptos-move/framework/aptos-experimental/doc/pending_order_book_index.md
@@ -141,7 +141,7 @@
-public(friend) fun cancel_pending_order(self: &mut pending_order_book_index::PendingOrderBookIndex, trigger_condition: order_book_types::TriggerCondition, unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool)
+public(friend) fun cancel_pending_order(self: &mut pending_order_book_index::PendingOrderBookIndex, trigger_condition: order_book_types::TriggerCondition, unique_priority_idx: order_book_types::UniqueIdxType, is_bid: bool)
 
@@ -154,10 +154,10 @@
     self: &mut PendingOrderBookIndex,
     trigger_condition: TriggerCondition,
     unique_priority_idx: UniqueIdxType,
-    is_buy: bool
+    is_bid: bool
 ) {
     let (price_move_up_index, price_move_down_index, time_based_index) =
-        trigger_condition.index(is_buy);
+        trigger_condition.index(is_bid);
     if (price_move_up_index.is_some()) {
         self.price_move_up_index.remove(
             &PendingOrderKey {
@@ -190,7 +190,7 @@
-public(friend) fun place_pending_maker_order(self: &mut pending_order_book_index::PendingOrderBookIndex, order_id: order_book_types::OrderIdType, trigger_condition: order_book_types::TriggerCondition, unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool)
+public(friend) fun place_pending_maker_order(self: &mut pending_order_book_index::PendingOrderBookIndex, order_id: order_book_types::OrderIdType, trigger_condition: order_book_types::TriggerCondition, unique_priority_idx: order_book_types::UniqueIdxType, is_bid: bool)
 
@@ -204,11 +204,11 @@
     order_id: OrderIdType,
     trigger_condition: TriggerCondition,
     unique_priority_idx: UniqueIdxType,
-    is_buy: bool
+    is_bid: bool
 ) {
     // Add this order to the pending order book index
     let (price_move_down_index, price_move_up_index, time_based_index) =
-        trigger_condition.index(is_buy);
+        trigger_condition.index(is_bid);
     if (price_move_up_index.is_some()) {
         self.price_move_up_index.add(
@@ -242,7 +242,7 @@
-public fun take_ready_price_based_orders(self: &mut pending_order_book_index::PendingOrderBookIndex, current_price: u64): vector<order_book_types::OrderIdType>
+public(friend) fun take_ready_price_based_orders(self: &mut pending_order_book_index::PendingOrderBookIndex, current_price: u64): vector<order_book_types::OrderIdType>
 
@@ -251,7 +251,7 @@
-public fun take_ready_price_based_orders(
+public(friend) fun take_ready_price_based_orders(
     self: &mut PendingOrderBookIndex, current_price: u64
 ): vector<OrderIdType> {
     let orders = vector::empty();
@@ -287,7 +287,7 @@
 
 
 
-public fun take_time_time_based_orders(self: &mut pending_order_book_index::PendingOrderBookIndex): vector<order_book_types::OrderIdType>
+public(friend) fun take_time_time_based_orders(self: &mut pending_order_book_index::PendingOrderBookIndex): vector<order_book_types::OrderIdType>
 
@@ -296,7 +296,7 @@
-public fun take_time_time_based_orders(
+public(friend) fun take_time_time_based_orders(
     self: &mut PendingOrderBookIndex
 ): vector<OrderIdType> {
     let orders = vector::empty();
diff --git a/aptos-move/framework/aptos-experimental/sources/benchmark_utils.move b/aptos-move/framework/aptos-experimental/sources/benchmark_utils.move
index 5ff6796a12101..45398aa576467 100644
--- a/aptos-move/framework/aptos-experimental/sources/benchmark_utils.move
+++ b/aptos-move/framework/aptos-experimental/sources/benchmark_utils.move
@@ -5,7 +5,9 @@ module aptos_experimental::benchmark_utils {
     /// Entry function that creates account resource, and funds the account.
     /// This makes sure that transactions later don't need to create an account,
     /// and so actual costs of entry functions can be more precisely measured.
-    entry fun transfer_and_create_account(source: &signer, to: address, amount: u64) {
+    entry fun transfer_and_create_account(
+        source: &signer, to: address, amount: u64
+    ) {
         account::create_account_if_does_not_exist(to);
         aptos_account::transfer(source, to, amount);
     }
diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.move
index 32c07f8c04f59..f669aba424d48 100644
--- a/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.move
+++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.move
@@ -82,6 +82,12 @@ module aptos_experimental::confidential_asset {
     /// Sender and recipient amounts encrypt different transfer amounts
     const EINVALID_SENDER_AMOUNT: u64 = 17;
 
+    /// The confidential asset controller is not installed.
+    const EFA_CONTROLLER_NOT_INSTALLED: u64 = 18;
+
+    /// [TEST-ONLY] The confidential asset module initialization failed.
+    const EINIT_MODULE_FAILED: u64 = 1000;
+
     //
     // Constants
     //
@@ -92,6 +98,9 @@ module aptos_experimental::confidential_asset {
     /// The mainnet chain ID. If the chain ID is 1, the allow list is enabled.
     const MAINNET_CHAIN_ID: u8 = 1;
 
+    /// The testnet chain ID.
+    const TESTNET_CHAIN_ID: u8 = 2;
+
     //
     // Structs
     //
@@ -511,6 +520,12 @@ module aptos_experimental::confidential_asset {
         exists<ConfidentialAssetStore>(get_user_address(user, token))
     }
 
+    #[view]
+    /// Checks if the confidential asset controller is installed.
+    public fun confidential_asset_controller_exists(): bool {
+        exists<FAController>(@aptos_experimental)
+    }
+
     #[view]
     /// Checks if the token is allowed for confidential transfers.
     public fun is_token_allowed(token: Object<Metadata>): bool acquires FAController, FAConfig {
@@ -532,6 +547,7 @@ module aptos_experimental::confidential_asset {
     /// If the allow list is enabled, only tokens from the allow list can be transferred.
     /// Otherwise, all tokens are allowed.
     public fun is_allow_list_enabled(): bool acquires FAController {
+        assert!(confidential_asset_controller_exists(), error::invalid_state(EFA_CONTROLLER_NOT_INSTALLED));
         borrow_global<FAController>(@aptos_experimental).allow_list_enabled
     }
 
@@ -612,6 +628,14 @@ module aptos_experimental::confidential_asset {
         primary_fungible_store::balance(fa_store_address, token)
     }
 
+    #[view]
+    /// Returns the pending balance transfer count for the specified token.
+    public fun get_pending_balance_transfer_count(user: address, token: Object<Metadata>): u64 acquires ConfidentialAssetStore {
+        assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+        borrow_global(get_user_address(user, token)).pending_counter
+    }
+
     //
     // Public functions that correspond to the entry functions and don't require serializtion of the input data.
     // These function can be useful for external contracts that want to integrate with the Confidential Asset protocol.
@@ -1080,10 +1104,16 @@ module aptos_experimental::confidential_asset {
         fa
     }
 
+    entry fun init_module_for_genesis(deployer: &signer) {
+        assert!(signer::address_of(deployer) == @aptos_experimental, error::invalid_argument(EINIT_MODULE_FAILED));
+        assert!(chain_id::get() != MAINNET_CHAIN_ID, error::invalid_state(EINIT_MODULE_FAILED));
+        assert!(chain_id::get() != TESTNET_CHAIN_ID, error::invalid_state(EINIT_MODULE_FAILED));
+        init_module(deployer)
+    }
+
     //
     // Test-only functions
     //
-
     #[test_only]
     public fun init_module_for_testing(deployer: &signer) {
         init_module(deployer)
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/market/market.move b/aptos-move/framework/aptos-experimental/sources/trading/market/market.move
index 0771349adb5f7..2907bbe60f7a9 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/market/market.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/market/market.move
@@ -2,7 +2,7 @@
 /// that stores an order book and provides APIs to place orders, cancel orders, and match orders. The market also acts
 /// as a wrapper around the order book and pluggable clearinghouse implementation.
 /// A clearing house implementation is expected to implement the following APIs
-///  - settle_trade(taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size): SettleTradeResult ->
+///  - settle_trade(taker, taker_order_id, maker, maker_order_id, fill_id, is_taker_long, price, size): SettleTradeResult ->
 /// Called by the market when there is an match between taker and maker. The clearinghouse is expected to settle the trade
 /// and return the result. Please note that the clearing house settlment size might not be the same as the order match size and
 /// the settlement might also fail. The fill_id is an incremental counter for matched orders and can be used to track specific fills
@@ -60,8 +60,20 @@ module aptos_experimental::market {
     use std::vector;
     use aptos_framework::event;
     use aptos_experimental::order_book::{OrderBook, new_order_book, new_order_request};
-    use aptos_experimental::order_book_types::{TriggerCondition, Order};
-    use aptos_experimental::market_types::MarketClearinghouseCallbacks;
+    use aptos_experimental::order_book_types::{
+        new_order_id_type,
+        new_ascending_id_generator,
+        AscendingIdGenerator,
+        TriggerCondition,
+        Order,
+        OrderIdType
+    };
+    use aptos_experimental::market_types::{
+        Self,
+        TimeInForce,
+        OrderStatus,
+        MarketClearinghouseCallbacks
+    };
 
     // Error codes
     const EINVALID_ORDER: u64 = 1;
@@ -70,88 +82,41 @@ module aptos_experimental::market {
     const ENOT_ADMIN: u64 = 4;
     const EINVALID_FEE_TIER: u64 = 5;
     const EORDER_DOES_NOT_EXIST: u64 = 6;
-    const EINVALID_TIME_IN_FORCE_FOR_MAKER: u64 = 7;
-    const EINVALID_TIME_IN_FORCE_FOR_TAKER: u64 = 8;
     const EINVALID_MATCHING_FOR_MAKER_REINSERT: u64 = 9;
     const EINVALID_TAKER_POSITION_UPDATE: u64 = 10;
     const EINVALID_LIQUIDATION: u64 = 11;
-
-    /// Order time in force
-    /// Good till cancelled order type
-    const TIME_IN_FORCE_GTC: u8 = 0;
-    /// Post Only order type - ensures that the order is not a taker order
-    const TIME_IN_FORCE_POST_ONLY: u8 = 1;
-    /// Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the
-    /// order as possible as taker order and cancel the rest.
-    const TIME_IN_FORCE_IOC: u8 = 2;
-
-    public fun good_till_cancelled(): u8 {
-        TIME_IN_FORCE_GTC
-    }
-
-    public fun post_only(): u8 {
-        TIME_IN_FORCE_POST_ONLY
-    }
-
-    public fun immediate_or_cancel(): u8 {
-        TIME_IN_FORCE_IOC
-    }
-
-    struct Market has store {
-        /// Address of the parent object that created this market
-        /// Purely for grouping events based on the source DEX, not used otherwise
-        parent: address,
-        /// Address of the market object of this market.
-        market: address,
-        // TODO: remove sequential order id generation
-        last_order_id: u64,
-        // Incremental fill id for matched orders
-        next_fill_id: u64,
-        config: MarketConfig,
-        order_book: OrderBook
-    }
-
-    struct MarketConfig has store {
-        /// Weather to allow self matching orders
-        allow_self_trade: bool,
-        /// Whether to allow sending all events for the markett
-        allow_events_emission: bool
-    }
-
-    /// Order has been accepted by the engine.
-    const ORDER_STATUS_OPEN: u8 = 0;
-    /// Order has been fully or partially filled.
-    const ORDER_STATUS_FILLED: u8 = 1;
-    /// Order has been cancelled by the user or engine.
-    const ORDER_STATUS_CANCELLED: u8 = 2;
-    /// Order has been rejected by the engine. Unlike cancelled orders, rejected
-    /// orders are invalid orders. Rejection reasons:
-    /// 1. Insufficient margin
-    /// 2. Order is reduce_only but does not reduce
-    const ORDER_STATUS_REJECTED: u8 = 3;
-    const ORDER_SIZE_REDUCED: u8 = 4;
-
-    public fun order_status_open(): u8 {
-        ORDER_STATUS_OPEN
-    }
-
-    public fun order_status_filled(): u8 {
-        ORDER_STATUS_FILLED
-    }
-
-    public fun order_status_cancelled(): u8 {
-        ORDER_STATUS_CANCELLED
+    const ENOT_ORDER_CREATOR: u64 = 12;
+
+    enum Market has store {
+        V1 {
+            /// Address of the parent object that created this market
+            /// Purely for grouping events based on the source DEX, not used otherwise
+            parent: address,
+            /// Address of the market object of this market.
+            market: address,
+            order_id_generator: AscendingIdGenerator,
+            // Incremental fill id for matched orders
+            next_fill_id: u64,
+            config: MarketConfig,
+            order_book: OrderBook
+        }
     }
 
-    public fun order_status_rejected(): u8 {
-        ORDER_STATUS_REJECTED
+    enum MarketConfig has store {
+        V1 {
+            /// Weather to allow self matching orders
+            allow_self_trade: bool,
+            /// Whether to allow sending all events for the markett
+            allow_events_emission: bool
+        }
     }
 
     #[event]
     struct OrderEvent has drop, copy, store {
         parent: address,
         market: address,
-        order_id: u64,
+        order_id: u128,
+        client_order_id: Option,
         user: address,
         /// Original size of the order
         orig_size: u64,
@@ -164,11 +129,11 @@ module aptos_experimental::market {
         /// FILLED - size_delta will be amount of size filled
         /// REJECTED - size_delta will always be 0
         size_delta: u64,
-        price: u64,
-        is_buy: bool,
+        price: Option,
+        is_bid: bool,
         /// Whether the order crosses the orderbook.
         is_taker: bool,
-        status: u8,
+        status: OrderStatus,
         details: std::string::String
     }
 
@@ -182,7 +147,7 @@ module aptos_experimental::market {
     }
 
     struct OrderMatchResult has drop {
-        order_id: u64,
+        order_id: OrderIdType,
         remaining_size: u64,
         cancel_reason: Option,
         fill_sizes: vector
@@ -190,7 +155,7 @@ module aptos_experimental::market {
 
     public fun destroy_order_match_result(
         self: OrderMatchResult
-    ): (u64, u64, Option, vector) {
+    ): (OrderIdType, u64, Option, vector) {
         let OrderMatchResult { order_id, remaining_size, cancel_reason, fill_sizes } =
             self;
         (order_id, remaining_size, cancel_reason, fill_sizes)
@@ -222,14 +187,17 @@ module aptos_experimental::market {
         return cancel_reason == OrderCancellationReason::MaxFillLimitViolation
     }
 
-    public fun get_order_id(self: OrderMatchResult): u64 {
+    public fun get_order_id(self: OrderMatchResult): OrderIdType {
         self.order_id
     }
 
     public fun new_market_config(
         allow_self_matching: bool, allow_events_emission: bool
     ): MarketConfig {
-        MarketConfig { allow_self_trade: allow_self_matching, allow_events_emission: allow_events_emission }
+        MarketConfig::V1 {
+            allow_self_trade: allow_self_matching,
+            allow_events_emission: allow_events_emission
+        }
     }
 
     public fun new_market(
@@ -237,10 +205,10 @@ module aptos_experimental::market {
     ): Market {
         // requiring signers, and not addresses, purely to guarantee different dexes
         // cannot polute events to each other, accidentally or maliciously.
-        Market {
+        Market::V1 {
             parent: signer::address_of(parent),
             market: signer::address_of(market),
-            last_order_id: 0,
+            order_id_generator: new_ascending_id_generator(),
             next_fill_id: 0,
             config,
             order_book: new_order_book()
@@ -271,28 +239,31 @@ module aptos_experimental::market {
 
     public fun is_taker_order(
         self: &Market,
-        price: u64,
-        is_buy: bool,
+        price: Option,
+        is_bid: bool,
         trigger_condition: Option
     ): bool {
-        self.order_book.is_taker_order(price, is_buy, trigger_condition)
+        self.order_book.is_taker_order(price, is_bid, trigger_condition)
     }
 
-    /// Places an order - If its a taker order, it will be matched immediately and if its a maker order, it will simply
+    /// Places a limt order - If its a taker order, it will be matched immediately and if its a maker order, it will simply
     /// be placed in the order book. An order id is generated when the order is placed and this id can be used to
     /// uniquely identify the order for this market and can also be used to get the status of the order or cancel the order.
     /// The order is placed with the following parameters:
     /// - user: The user who is placing the order
     /// - price: The price at which the order is placed
     /// - orig_size: The original size of the order
-    /// - is_buy: Whether the order is a buy order or a sell order
+    /// - is_bid: Whether the order is a buy order or a sell order
     /// - time_in_force: The time in force for the order. This can be one of the following:
-    ///  - TIME_IN_FORCE_GTC: Good till cancelled order type
-    /// - TIME_IN_FORCE_POST_ONLY: Post Only order type - ensures that the order is not a taker order
-    /// - TIME_IN_FORCE_IOC: Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the
+    ///  - TimeInForce::GTC: Good till cancelled order type
+    /// - TimeInForce::POST_ONLY: Post Only order type - ensures that the order is not a taker order
+    /// - TimeInForce::IOC: Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the
     /// order as possible as taker order and cancel the rest.
     /// - trigger_condition: The trigger condition
     /// - metadata: The metadata for the order. This can be any type that the clearing house implementation supports.
+    /// - client_order_id: The client order id for the order. This is an optional field that can be specified by the client
+    ///   is solely used for their own tracking of the order. client order id doesn't have semantic meaning and
+    ///   is not be inspected by the orderbook internally.
     /// - max_fill_limit: The maximum fill limit for the order. This is the maximum number of fills to trigger for this order.
     /// This knob is present to configure maximum amount of gas any order placement transaction might consume and avoid
     /// hitting the maximum has limit of the blockchain.
@@ -302,30 +273,31 @@ module aptos_experimental::market {
     /// - callbacks: The callbacks for the market clearinghouse. This is a struct that implements the MarketClearinghouseCallbacks
     /// interface. This is used to validate the order and settle the trade.
     /// Returns the order id, remaining size, cancel reason and number of fills for the order.
-    public fun place_order(
+    public fun place_limit_order(
         self: &mut Market,
         user: &signer,
-        price: u64,
+        limit_price: u64,
         orig_size: u64,
         is_bid: bool,
-        time_in_force: u8,
+        time_in_force: TimeInForce,
         trigger_condition: Option,
         metadata: M,
+        client_order_id: Option,
         max_fill_limit: u64,
         emit_cancel_on_fill_limit: bool,
         callbacks: &MarketClearinghouseCallbacks
     ): OrderMatchResult {
-        let order_id = self.next_order_id();
         self.place_order_with_order_id(
             signer::address_of(user),
-            price,
+            option::some(limit_price),
             orig_size,
             orig_size,
             is_bid,
             time_in_force,
             trigger_condition,
             metadata,
-            order_id,
+            option::none(), // order_id
+            client_order_id,
             max_fill_limit,
             emit_cancel_on_fill_limit,
             true,
@@ -333,9 +305,38 @@ module aptos_experimental::market {
         )
     }
 
-    public fun next_order_id(self: &mut Market): u64 {
-        self.last_order_id += 1;
-        self.last_order_id
+    /// Places a market order - The order is guaranteed to be a taker order and will be matched immediately.
+    public fun place_market_order(
+        self: &mut Market,
+        user: &signer,
+        orig_size: u64,
+        is_bid: bool,
+        metadata: M,
+        client_order_id: Option,
+        max_fill_limit: u64,
+        emit_cancel_on_fill_limit: bool,
+        callbacks: &MarketClearinghouseCallbacks
+    ): OrderMatchResult {
+        self.place_order_with_order_id(
+            signer::address_of(user),
+            option::none(),
+            orig_size,
+            orig_size,
+            is_bid,
+            market_types::immediate_or_cancel(), // market orders are always IOC
+            option::none(), // trigger_condition
+            metadata,
+            option::none(), // order_id
+            client_order_id,
+            max_fill_limit,
+            emit_cancel_on_fill_limit,
+            true,
+            callbacks
+        )
+    }
+
+    public fun next_order_id(self: &mut Market): OrderIdType {
+        new_order_id_type(self.order_id_generator.next_ascending_id())
     }
 
     fun next_fill_id(self: &mut Market): u64 {
@@ -346,15 +347,16 @@ module aptos_experimental::market {
 
     fun emit_event_for_order(
         self: &Market,
-        order_id: u64,
+        order_id: OrderIdType,
+        client_order_id: Option,
         user: address,
         orig_size: u64,
         remaining_size: u64,
         size_delta: u64,
-        price: u64,
+        price: Option,
         is_bid: bool,
         is_taker: bool,
-        status: u8,
+        status: OrderStatus,
         details: &String
     ) {
         // Final check whether event sending is enabled
@@ -363,13 +365,14 @@ module aptos_experimental::market {
                 OrderEvent {
                     parent: self.parent,
                     market: self.market,
-                    order_id,
+                    order_id: order_id.get_order_id_value(),
+                    client_order_id,
                     user,
                     orig_size,
                     remaining_size,
                     size_delta,
                     price,
-                    is_buy: is_bid,
+                    is_bid: is_bid,
                     is_taker,
                     status,
                     details: *details
@@ -378,61 +381,29 @@ module aptos_experimental::market {
         };
     }
 
-    /// Similar to `place_order` API but instead of a signer, it takes a user address - can be used in case trading
-    /// functionality is delegated to a different address. Please note that it is the responsibility of the caller
-    /// to verify that the transaction signer is authorized to place orders on behalf of the user.
-    public fun place_order_with_user_addr(
-        self: &mut Market,
-        user_addr: address,
-        price: u64,
-        orig_size: u64,
-        is_bid: bool,
-        time_in_force: u8,
-        trigger_condition: Option,
-        metadata: M,
-        max_fill_limit: u64,
-        emit_cancel_on_fill_limit: bool,
-        callbacks: &MarketClearinghouseCallbacks
-    ): OrderMatchResult {
-        let order_id = self.next_order_id();
-        self.place_order_with_order_id(
-            user_addr,
-            price,
-            orig_size,
-            orig_size,
-            is_bid,
-            time_in_force,
-            trigger_condition,
-            metadata,
-            order_id,
-            max_fill_limit,
-            emit_cancel_on_fill_limit,
-            true,
-            callbacks
-        )
-    }
-
     fun place_maker_order_internal(
         self: &mut Market,
         user_addr: address,
-        price: u64,
+        limit_price: Option,
         orig_size: u64,
         remaining_size: u64,
         fill_sizes: vector,
         is_bid: bool,
-        time_in_force: u8,
+        time_in_force: TimeInForce,
         trigger_condition: Option,
         metadata: M,
-        order_id: u64,
+        order_id: OrderIdType,
+        client_order_id: Option,
         emit_order_open: bool,
         callbacks: &MarketClearinghouseCallbacks
     ): OrderMatchResult {
         // Validate that the order is valid from position management perspective
-        if (time_in_force == TIME_IN_FORCE_IOC) {
+        if (time_in_force == market_types::immediate_or_cancel() || limit_price.is_none()) {
             return self.cancel_order_internal(
                 user_addr,
-                price,
+                limit_price,
                 order_id,
+                client_order_id,
                 orig_size,
                 remaining_size,
                 fill_sizes,
@@ -445,30 +416,35 @@ module aptos_experimental::market {
         };
 
         if (emit_order_open) {
-            emit_event_for_order(
-                self,
+            self.emit_event_for_order(
                 order_id,
+                client_order_id,
                 user_addr,
                 orig_size,
                 remaining_size,
                 orig_size,
-                price,
+                limit_price,
                 is_bid,
-                false, // is_taker
-                ORDER_STATUS_OPEN,
+                false,
+                market_types::order_status_open(),
                 &std::string::utf8(b"")
             );
         };
 
         callbacks.place_maker_order(
-            user_addr, order_id, is_bid, price, remaining_size, metadata
+            user_addr,
+            order_id,
+            is_bid,
+            limit_price.destroy_some(),
+            remaining_size,
+            metadata
         );
         self.order_book.place_maker_order(
             new_order_request(
                 user_addr,
                 order_id,
-                option::none(),
-                price,
+                client_order_id,
+                limit_price.destroy_some(),
                 orig_size,
                 remaining_size,
                 is_bid,
@@ -487,25 +463,25 @@ module aptos_experimental::market {
     fun cancel_maker_order_internal(
         self: &mut Market,
         maker_order: &Order,
-        order_id: u64,
+        client_order_id: Option,
         maker_address: address,
+        order_id: OrderIdType,
         maker_cancellation_reason: String,
         unsettled_size: u64,
         callbacks: &MarketClearinghouseCallbacks
     ) {
         let maker_cancel_size = unsettled_size + maker_order.get_remaining_size();
-
-        emit_event_for_order(
-            self,
+        self.emit_event_for_order(
             order_id,
+            client_order_id,
             maker_address,
             maker_order.get_orig_size(),
             0,
             maker_cancel_size,
-            maker_order.get_price(),
+            option::some(maker_order.get_price()),
             maker_order.is_bid(),
             false,
-            ORDER_STATUS_CANCELLED,
+            market_types::order_status_cancelled(),
             &maker_cancellation_reason
         );
         // If the maker is invalid cancel the maker order and continue to the next maker order
@@ -520,8 +496,9 @@ module aptos_experimental::market {
     fun cancel_order_internal(
         self: &mut Market,
         user_addr: address,
-        price: u64,
-        order_id: u64,
+        limit_price: Option,
+        order_id: OrderIdType,
+        client_order_id: Option,
         orig_size: u64,
         size_delta: u64,
         fill_sizes: vector,
@@ -531,17 +508,17 @@ module aptos_experimental::market {
         cancel_details: String,
         callbacks: &MarketClearinghouseCallbacks
     ): OrderMatchResult {
-        emit_event_for_order(
-            self,
+        self.emit_event_for_order(
             order_id,
+            client_order_id,
             user_addr,
             orig_size,
-            0, // remaining size
+            0,
             size_delta,
-            price,
+            limit_price,
             is_bid,
             is_taker,
-            ORDER_STATUS_CANCELLED,
+            market_types::order_status_cancelled(),
             &cancel_details
         );
         callbacks.cleanup_order(
@@ -555,6 +532,145 @@ module aptos_experimental::market {
         }
     }
 
+    fun settle_single_trade(
+        self: &mut Market,
+        user_addr: address,
+        price: Option,
+        orig_size: u64,
+        remaining_size: &mut u64,
+        is_bid: bool,
+        metadata: M,
+        order_id: OrderIdType,
+        client_order_id: Option,
+        callbacks: &MarketClearinghouseCallbacks,
+        fill_sizes: &mut vector
+    ): Option {
+        let result = self.order_book
+            .get_single_match_for_taker(price, *remaining_size, is_bid);
+        let (
+            maker_order, maker_matched_size
+        ) = result.destroy_single_order_match();
+        if (!self.config.allow_self_trade && maker_order.get_account() == user_addr) {
+            self.cancel_maker_order_internal(
+                &maker_order,
+                maker_order.get_client_order_id(),
+                maker_order.get_account(),
+                maker_order.get_order_id(),
+                std::string::utf8(b"Disallowed self trading"),
+                maker_matched_size,
+                callbacks
+            );
+            return option::none();
+        };
+        let fill_id = self.next_fill_id();
+        let settle_result = callbacks.settle_trade(
+            user_addr,
+            order_id,
+            maker_order.get_account(),
+            maker_order.get_order_id(),
+            fill_id,
+            is_bid,
+            maker_order.get_price(), // Order is always matched at the price of the maker
+            maker_matched_size,
+            metadata,
+            maker_order.get_metadata_from_order()
+        );
+
+        let unsettled_maker_size = maker_matched_size;
+        let settled_size = settle_result.get_settled_size();
+        if (settled_size > 0) {
+            *remaining_size -= settled_size;
+            unsettled_maker_size -= settled_size;
+            fill_sizes.push_back(settled_size);
+                // Event for taker fill
+            self.emit_event_for_order(
+                order_id,
+                client_order_id,
+                user_addr,
+                orig_size,
+                *remaining_size,
+                settled_size,
+                option::some(maker_order.get_price()),
+                is_bid,
+                true,
+                market_types::order_status_filled(),
+                &std::string::utf8(b"")
+            );
+            // Event for maker fill
+            self.emit_event_for_order(
+                maker_order.get_order_id(),
+                maker_order.get_client_order_id(),
+                maker_order.get_account(),
+                maker_order.get_orig_size(),
+                maker_order.get_remaining_size() + unsettled_maker_size,
+                settled_size,
+                option::some(maker_order.get_price()),
+                !is_bid,
+                false,
+                market_types::order_status_filled(),
+                &std::string::utf8(b"")
+            );
+        };
+
+        let maker_cancellation_reason = settle_result.get_maker_cancellation_reason();
+
+        let taker_cancellation_reason = settle_result.get_taker_cancellation_reason();
+        if (taker_cancellation_reason.is_some()) {
+            self.cancel_order_internal(
+                user_addr,
+                price,
+                order_id,
+                client_order_id,
+                orig_size,
+                *remaining_size,
+                *fill_sizes,
+                is_bid,
+                true, // is_taker
+                OrderCancellationReason::ClearinghouseSettleViolation,
+                taker_cancellation_reason.destroy_some(),
+                callbacks
+            );
+            if (maker_cancellation_reason.is_none() && unsettled_maker_size > 0) {
+                // If the taker is cancelled but the maker is not cancelled, then we need to re-insert
+                // the maker order back into the order book
+                self.order_book.reinsert_maker_order(
+                    new_order_request(
+                        maker_order.get_account(),
+                        maker_order.get_order_id(),
+                        maker_order.get_client_order_id(),
+                        maker_order.get_price(),
+                        maker_order.get_orig_size(),
+                        unsettled_maker_size,
+                        !is_bid,
+                        option::none(),
+                        maker_order.get_metadata_from_order()
+                    ),
+                    maker_order
+                );
+            };
+            return option::some(OrderCancellationReason::ClearinghouseSettleViolation);
+        };
+        if (maker_cancellation_reason.is_some()) {
+            self.cancel_maker_order_internal(
+                &maker_order,
+                maker_order.get_client_order_id(),
+                maker_order.get_account(),
+                maker_order.get_order_id(),
+                maker_cancellation_reason.destroy_some(),
+                unsettled_maker_size,
+                callbacks
+            );
+        } else if (maker_order.get_remaining_size() == 0) {
+            callbacks.cleanup_order(
+                maker_order.get_account(),
+                maker_order.get_order_id(),
+                !is_bid, // is_bid is inverted for maker orders
+                0 // 0 because the order is fully filled
+            );
+        };
+        option::none()
+    }
+
     /// Similar to `place_order` API but allows few extra parameters as follows
     /// - order_id: The order id for the order - this is needed because for orders with trigger conditions, the order
     /// id is generated when the order is placed and when they are triggered, the same order id is used to match the order.
@@ -564,14 +680,15 @@ module aptos_experimental::market {
     public fun place_order_with_order_id(
         self: &mut Market,
         user_addr: address,
-        price: u64,
+        limit_price: Option,
         orig_size: u64,
         remaining_size: u64,
         is_bid: bool,
-        time_in_force: u8,
+        time_in_force: TimeInForce,
         trigger_condition: Option,
         metadata: M,
-        order_id: u64,
+        order_id: Option,
+        client_order_id: Option,
         max_fill_limit: u64,
         cancel_on_fill_limit: bool,
         emit_taker_order_open: bool,
@@ -581,23 +698,31 @@ module aptos_experimental::market {
             orig_size > 0 && remaining_size > 0,
             EINVALID_ORDER
         );
+        if (order_id.is_none()) {
+            // If order id is not provided, generate a new order id
+            order_id = option::some(self.next_order_id());
+        };
+        let order_id = order_id.destroy_some();
         // TODO(skedia) is_taker_order API can actually return false positive as the maker orders might not be valid.
         // Changes are needed to ensure the maker order is valid for this order to be a valid taker order.
         // TODO(skedia) reconsile the semantics around global order id vs account local id.
+        let is_taker_order =
+            self.order_book.is_taker_order(limit_price, is_bid, trigger_condition);
         if (
             !callbacks.validate_order_placement(
                 user_addr,
                 order_id,
-                true, // is_taker
+                is_taker_order, // is_taker
                 is_bid,
-                price,
+                limit_price,
                 remaining_size,
                 metadata
             )) {
             return self.cancel_order_internal(
                 user_addr,
-                price,
+                limit_price,
                 order_id,
+                client_order_id,
                 orig_size,
                 0, // 0 because order was never placed
                 vector[],
@@ -609,27 +734,25 @@ module aptos_experimental::market {
             );
         };
 
-        let is_taker_order =
-            self.order_book.is_taker_order(price, is_bid, trigger_condition);
         if (emit_taker_order_open) {
-            emit_event_for_order(
-                self,
+            self.emit_event_for_order(
                 order_id,
+                client_order_id,
                 user_addr,
                 orig_size,
                 remaining_size,
                 orig_size,
-                price,
+                limit_price,
                 is_bid,
                 is_taker_order,
-                ORDER_STATUS_OPEN,
+                market_types::order_status_open(),
                 &std::string::utf8(b"")
             );
         };
         if (!is_taker_order) {
             return self.place_maker_order_internal(
                 user_addr,
-                price,
+                limit_price,
                 orig_size,
                 remaining_size,
                 vector[],
@@ -638,6 +761,7 @@ module aptos_experimental::market {
                 trigger_condition,
                 metadata,
                 order_id,
+                client_order_id,
                 false,
                 callbacks
             );
@@ -645,11 +769,12 @@ module aptos_experimental::market {
 
         // NOTE: We should always use is_taker: true for this order past this
         // point so that indexer can consistently track the order's status
-        if (time_in_force == TIME_IN_FORCE_POST_ONLY) {
+        if (time_in_force == market_types::post_only()) {
             return self.cancel_order_internal(
                 user_addr,
-                price,
+                limit_price,
                 order_id,
+                client_order_id,
                 orig_size,
                 remaining_size,
                 vector[],
@@ -662,130 +787,26 @@ module aptos_experimental::market {
         };
         let fill_sizes = vector::empty();
         loop {
-            let result =
-                self.order_book.get_single_match_for_taker(price, remaining_size, is_bid);
-            let (maker_order, maker_matched_size) = result.destroy_single_order_match();
-            let (maker_address, maker_order_id) =
-                maker_order.get_order_id().destroy_order_id_type();
-            if (!self.config.allow_self_trade && maker_address == user_addr) {
-                self.cancel_maker_order_internal(
-                    &maker_order,
-                    maker_order_id,
-                    maker_address,
-                    std::string::utf8(b"Disallowed self trading"),
-                    maker_matched_size,
-                    callbacks
-                );
-                continue;
-            };
-
-            let fill_id = self.next_fill_id();
-
-            let settle_result =
-                callbacks.settle_trade(
+            let taker_cancellation_reason =
+                self.settle_single_trade(
                     user_addr,
-                    maker_address,
-                    order_id,
-                    maker_order_id,
-                    fill_id,
+                    limit_price,
+                    orig_size,
+                    &mut remaining_size,
                     is_bid,
-                    maker_order.get_price(), // Order is always matched at the price of the maker
-                    maker_matched_size,
                     metadata,
-                    maker_order.get_metadata_from_order()
-                );
-
-            let unsettled_maker_size = maker_matched_size;
-            let settled_size = settle_result.get_settled_size();
-            if (settled_size > 0) {
-                remaining_size -= settled_size;
-                unsettled_maker_size -= settled_size;
-                fill_sizes.push_back(settled_size);
-                // Event for taker fill
-                emit_event_for_order(
-                    self,
                     order_id,
-                    user_addr,
-                    orig_size,
-                    remaining_size,
-                    settled_size,
-                    maker_order.get_price(),
-                    is_bid,
-                    true, // is_taker
-                    ORDER_STATUS_FILLED,
-                    &std::string::utf8(b"")
-                );
-                // Event for maker fill
-                emit_event_for_order(
-                    self,
-                    maker_order_id,
-                    maker_address,
-                    maker_order.get_orig_size(),
-                    maker_order.get_remaining_size() + unsettled_maker_size,
-                    settled_size,
-                    maker_order.get_price(),
-                    !is_bid,
-                    false, // is_taker
-                    ORDER_STATUS_FILLED,
-                    &std::string::utf8(b"")
+                    client_order_id,
+                    callbacks,
+                    &mut fill_sizes
                 );
-            };
-
-            let maker_cancellation_reason = settle_result.get_maker_cancellation_reason();
-            if (maker_cancellation_reason.is_some()) {
-                self.cancel_maker_order_internal(
-                    &maker_order,
-                    maker_order_id,
-                    maker_address,
-                    maker_cancellation_reason.destroy_some(),
-                    unsettled_maker_size,
-                    callbacks
-                );
-            };
-
-            let taker_cancellation_reason = settle_result.get_taker_cancellation_reason();
             if (taker_cancellation_reason.is_some()) {
-                let result =
-                    self.cancel_order_internal(
-                        user_addr,
-                        price,
-                        order_id,
-                        orig_size,
-                        remaining_size,
-                        fill_sizes,
-                        is_bid,
-                        true, // is_taker
-                        OrderCancellationReason::ClearinghouseSettleViolation,
-                        taker_cancellation_reason.destroy_some(),
-                        callbacks
-                    );
-                if (maker_cancellation_reason.is_none() && unsettled_maker_size > 0) {
-                    // If the taker is cancelled but the maker is not cancelled, then we need to re-insert
-                    // the maker order back into the order book
-                    self.order_book.reinsert_maker_order(
-                        new_order_request(
-                            maker_address,
-                            maker_order_id,
-                            option::some(maker_order.get_unique_priority_idx()),
-                            maker_order.get_price(),
-                            maker_order.get_orig_size(),
-                            unsettled_maker_size,
-                            !is_bid,
-                            option::none(),
-                            maker_order.get_metadata_from_order()
-                        )
-                    );
-                };
-                return result;
-            };
-
-            if (maker_order.get_remaining_size() == 0) {
-                callbacks.cleanup_order(
-                    maker_address,
-                    maker_order_id,
-                    !is_bid, // is_bid is inverted for maker orders
-                    0 // 0 because the order is fully filled
-                );
+                return OrderMatchResult {
+                    order_id,
+                    remaining_size,
+                    cancel_reason: taker_cancellation_reason,
+                    fill_sizes
+                }
             };
             if (remaining_size == 0) {
                 callbacks.cleanup_order(
@@ -796,13 +817,14 @@ module aptos_experimental::market {
 
             // Check if the next iteration will still match
             let is_taker_order =
-                self.order_book.is_taker_order(price, is_bid, option::none());
+                self.order_book.is_taker_order(limit_price, is_bid, option::none());
             if (!is_taker_order) {
-                if (time_in_force == TIME_IN_FORCE_IOC) {
+                if (time_in_force == market_types::immediate_or_cancel()) {
                     return self.cancel_order_internal(
                         user_addr,
-                        price,
+                        limit_price,
                         order_id,
+                        client_order_id,
                         orig_size,
                         remaining_size,
                         fill_sizes,
@@ -816,7 +838,7 @@ module aptos_experimental::market {
                     // If the order is not a taker order, then we can place it as a maker order
                     return self.place_maker_order_internal(
                         user_addr,
-                        price,
+                        limit_price,
                         orig_size,
                         remaining_size,
                         fill_sizes,
@@ -825,6 +847,7 @@ module aptos_experimental::market {
                         trigger_condition,
                         metadata,
                         order_id,
+                        client_order_id,
                         true, // emit_order_open
                         callbacks
                     );
@@ -835,8 +858,9 @@ module aptos_experimental::market {
                 if (cancel_on_fill_limit) {
                     return self.cancel_order_internal(
                         user_addr,
-                        price,
+                        limit_price,
                         order_id,
+                        client_order_id,
                         orig_size,
                         remaining_size,
                         fill_sizes,
@@ -870,59 +894,59 @@ module aptos_experimental::market {
     public fun cancel_order(
         self: &mut Market,
         user: &signer,
-        order_id: u64,
+        order_id: OrderIdType,
         callbacks: &MarketClearinghouseCallbacks
     ) {
         let account = signer::address_of(user);
-        let maybe_order = self.order_book.cancel_order(account, order_id);
-        if (maybe_order.is_some()) {
-            let order = maybe_order.destroy_some();
-            let (
-                order_id_type,
-                _unique_priority_idx,
-                price,
-                orig_size,
-                remaining_size,
-                is_bid,
-                _trigger_condition,
-                _metadata
-            ) = order.destroy_order();
-            callbacks.cleanup_order(
-                account, order_id, is_bid, remaining_size
-            );
-            let (user, order_id) = order_id_type.destroy_order_id_type();
-            emit_event_for_order(
-                self,
-                order_id,
-                user,
-                orig_size,
-                remaining_size,
-                remaining_size,
-                price,
-                is_bid,
-                false, // is_taker
-                ORDER_STATUS_CANCELLED,
-                &std::string::utf8(b"Order cancelled")
-            );
-        }
+        let order = self.order_book.cancel_order(account, order_id);
+        assert!(account == order.get_account(), ENOT_ORDER_CREATOR);
+        let (
+            account,
+            order_id,
+            client_order_id,
+            price,
+            orig_size,
+            remaining_size,
+            is_bid,
+            _trigger_condition,
+            _metadata
+        ) = order.destroy_order();
+        callbacks.cleanup_order(
+            account, order_id, is_bid, remaining_size
+        );
+        self.emit_event_for_order(
+            order_id,
+            client_order_id,
+            account,
+            orig_size,
+            remaining_size,
+            remaining_size,
+            option::some(price),
+            is_bid,
+            false,
+            market_types::order_status_cancelled(),
+            &std::string::utf8(b"Order cancelled")
+        );
     }
 
     /// Cancels an order - this will cancel the order and emit an event for the order cancellation.
     public fun decrease_order_size(
         self: &mut Market,
         user: &signer,
-        order_id: u64,
+        order_id: OrderIdType,
         size_delta: u64,
         callbacks: &MarketClearinghouseCallbacks
     ) {
         let account = signer::address_of(user);
         self.order_book.decrease_order_size(account, order_id, size_delta);
-        let maybe_order = self.order_book.get_order(account, order_id);
+        let maybe_order = self.order_book.get_order(order_id);
         assert!(maybe_order.is_some(), EORDER_DOES_NOT_EXIST);
         let (order, _) = maybe_order.destroy_some().destroy_order_from_state();
+        assert!(order.get_account() == account, ENOT_ORDER_CREATOR);
         let (
-            order_id_type,
-            _unique_priority_idx,
+            user,
+            order_id,
+            client_order_id,
             price,
             orig_size,
             remaining_size,
@@ -930,31 +954,30 @@ module aptos_experimental::market {
             _trigger_condition,
             _metadata
         ) = order.destroy_order();
-        let (user, order_id) = order_id_type.destroy_order_id_type();
         callbacks.decrease_order_size(
             user, order_id, is_bid, price, remaining_size
         );
 
-        emit_event_for_order(
-            self,
+        self.emit_event_for_order(
             order_id,
+            client_order_id,
             user,
             orig_size,
             remaining_size,
             size_delta,
-            price,
+            option::some(price),
             is_bid,
-            false, // is_taker
-            ORDER_SIZE_REDUCED,
+            false,
+            market_types::order_status_size_reduced(),
             &std::string::utf8(b"Order size reduced")
         );
     }
 
     /// Remaining size of the order in the order book.
     public fun get_remaining_size(
-        self: &Market, user: address, order_id: u64
+        self: &Market, order_id: OrderIdType
     ): u64 {
-        self.order_book.get_remaining_size(user, order_id)
+        self.order_book.get_remaining_size(order_id)
     }
 
     /// Returns all the pending order ready to be executed based on the oracle price. The caller is responsible to
@@ -976,15 +999,15 @@ module aptos_experimental::market {
     // ============================= test_only APIs ====================================
     #[test_only]
     public fun destroy_market(self: Market) {
-        let Market {
+        let Market::V1 {
             parent: _parent,
             market: _market,
-            last_order_id: _last_order_id,
+            order_id_generator: _order_id_generator,
             next_fill_id: _next_fill_id,
             config,
             order_book
         } = self;
-        let MarketConfig { allow_self_trade: _, allow_events_emission: _ } = config;
+        let MarketConfig::V1 { allow_self_trade: _, allow_events_emission: _ } = config;
         order_book.destroy_order_book()
     }
 
@@ -1000,32 +1023,34 @@ module aptos_experimental::market {
     }
 
     #[test_only]
-    public fun get_order_id_from_event(self: OrderEvent): u64 {
-        self.order_id
+    public fun get_order_id_from_event(self: OrderEvent): OrderIdType {
+        new_order_id_type(self.order_id)
     }
 
     #[test_only]
     public fun verify_order_event(
         self: OrderEvent,
-        order_id: u64,
+        order_id: OrderIdType,
+        client_order_id: Option,
         market: address,
         user: address,
         orig_size: u64,
         remaining_size: u64,
         size_delta: u64,
-        price: u64,
-        is_buy: bool,
+        price: Option,
+        is_bid: bool,
         is_taker: bool,
-        status: u8
+        status: OrderStatus
     ) {
-        assert!(self.order_id == order_id);
+        assert!(self.order_id == order_id.get_order_id_value());
+        assert!(self.client_order_id == client_order_id);
         assert!(self.market == market);
         assert!(self.user == user);
         assert!(self.orig_size == orig_size);
         assert!(self.remaining_size == remaining_size);
         assert!(self.size_delta == size_delta);
         assert!(self.price == price);
-        assert!(self.is_buy == is_buy);
+        assert!(self.is_bid == is_bid);
         assert!(self.is_taker == is_taker);
         assert!(self.status == status);
     }
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/market/market_types.move b/aptos-move/framework/aptos-experimental/sources/trading/market/market_types.move
index 3d2251cc6ee74..1f02f37031570 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/market/market_types.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/market/market_types.move
@@ -2,26 +2,105 @@ module aptos_experimental::market_types {
     use std::option::Option;
     use std::string::String;
 
+    use aptos_experimental::order_book_types::OrderIdType;
+
+    friend aptos_experimental::market;
+
     const EINVALID_ADDRESS: u64 = 1;
     const EINVALID_SETTLE_RESULT: u64 = 2;
+    const EINVALID_TIME_IN_FORCE: u64 = 3;
 
-    struct SettleTradeResult has drop {
-        settled_size: u64,
-        maker_cancellation_reason: Option,
-        taker_cancellation_reason: Option
+    /// Order time in force
+    enum TimeInForce has drop, copy, store {
+        /// Good till cancelled order type
+        GTC,
+        /// Post Only order type - ensures that the order is not a taker order
+        POST_ONLY,
+        /// Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the
+        /// order as possible as taker order and cancel the rest.
+        IOC
     }
 
-    struct MarketClearinghouseCallbacks has drop {
-        // settle_trade_f arguments: taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size
-        settle_trade_f:  |address, address, u64, u64, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy,
-        // validate_settlement_update_f arguments: account, is_taker, is_long, price, size
-        validate_order_placement_f: |address, u64, bool, bool, u64, u64, M| bool has drop + copy,
-        // place_maker_order_f arguments: account, order_id, is_bid, price, size, order_metadata
-        place_maker_order_f: |address, u64, bool, u64, u64, M| has drop + copy,
-        // cleanup_order_f arguments: account, order_id, is_bid, remaining_size
-        cleanup_order_f: |address, u64, bool, u64| has drop + copy,
-        // decrease_order_size_f arguments: account, order_id, is_bid, price, size
-        decrease_order_size_f: |address, u64, bool, u64, u64| has drop + copy,
+    public fun time_in_force_from_index(index: u8): TimeInForce {
+        if (index == 0) {
+            TimeInForce::GTC
+        } else if (index == 1) {
+            TimeInForce::POST_ONLY
+        } else if (index == 2) {
+            TimeInForce::IOC
+        } else {
+            abort EINVALID_TIME_IN_FORCE
+        }
+    }
+
+    public fun good_till_cancelled(): TimeInForce {
+        TimeInForce::GTC
+    }
+
+    public fun post_only(): TimeInForce {
+        TimeInForce::POST_ONLY
+    }
+
+    public fun immediate_or_cancel(): TimeInForce {
+        TimeInForce::IOC
+    }
+
+    enum OrderStatus has drop, copy, store {
+        /// Order has been accepted by the engine.
+        OPEN,
+        /// Order has been fully or partially filled.
+        FILLED,
+        /// Order has been cancelled by the user or engine.
+        CANCELLED,
+        /// Order has been rejected by the engine. Unlike cancelled orders, rejected
+        /// orders are invalid orders. Rejection reasons:
+        /// 1. Insufficient margin
+        /// 2. Order is reduce_only but does not reduce
+        REJECTED,
+        SIZE_REDUCED
+    }
+
+    public fun order_status_open(): OrderStatus {
+        OrderStatus::OPEN
+    }
+
+    public fun order_status_filled(): OrderStatus {
+        OrderStatus::FILLED
+    }
+
+    public fun order_status_cancelled(): OrderStatus {
+        OrderStatus::CANCELLED
+    }
+
+    public fun order_status_rejected(): OrderStatus {
+        OrderStatus::REJECTED
+    }
+
+    public fun order_status_size_reduced(): OrderStatus {
+        OrderStatus::SIZE_REDUCED
+    }
+
+    enum SettleTradeResult has drop {
+        V1 {
+            settled_size: u64,
+            maker_cancellation_reason: Option,
+            taker_cancellation_reason: Option,
+        }
+    }
+
+    enum MarketClearinghouseCallbacks has drop {
+        V1 {
+            /// settle_trade_f arguments: taker, taker_order_id, maker, maker_order_id, fill_id, is_taker_long, price, size
+            settle_trade_f:  |address, OrderIdType, address, OrderIdType, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy,
+            /// validate_settlement_update_f arguments: account, order_id, is_taker, is_long, price, size
+            validate_order_placement_f: |address, OrderIdType, bool, bool, Option, u64, M| bool has drop + copy,
+            /// place_maker_order_f arguments: account, order_id, is_bid, price, size, order_metadata
+            place_maker_order_f: |address, OrderIdType, bool, u64, u64, M| has drop + copy,
+            /// cleanup_order_f arguments: account, order_id, is_bid, remaining_size
+            cleanup_order_f: |address, OrderIdType, bool, u64| has drop + copy,
+            /// decrease_order_size_f arguments: account, order_id, is_bid, price, size
+            decrease_order_size_f: |address, OrderIdType, bool, u64, u64| has drop + copy,
+        }
     }
 
     public fun new_settle_trade_result(
@@ -29,7 +108,7 @@ module aptos_experimental::market_types {
         maker_cancellation_reason: Option,
         taker_cancellation_reason: Option
     ): SettleTradeResult {
-        SettleTradeResult {
+        SettleTradeResult::V1 {
             settled_size,
             maker_cancellation_reason,
             taker_cancellation_reason
@@ -37,15 +116,18 @@ module aptos_experimental::market_types {
     }
 
     public fun new_market_clearinghouse_callbacks(
-        // settle_trade_f arguments: taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size
-        settle_trade_f: |address, address, u64, u64, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy,
-        // validate_settlement_update_f arguments: accoun, is_taker, is_long, price, size
-        validate_order_placement_f: |address, u64, bool, bool, u64, u64, M| bool has drop + copy,
-        place_maker_order_f: |address, u64, bool, u64, u64, M| has drop + copy,
-        cleanup_order_f: |address, u64, bool, u64| has drop + copy,
-        decrease_order_size_f: |address, u64, bool, u64, u64| has drop + copy,
+        // settle_trade_f arguments: taker, taker_order_id, maker, maker_order_id, fill_id, is_taker_long, price, size
+        settle_trade_f: |address, OrderIdType, address, OrderIdType, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy,
+        // validate_settlement_update_f arguments: account, order_id, is_taker, is_long, price, size
+        validate_order_placement_f: |address, OrderIdType, bool, bool, Option, u64, M| bool has drop + copy,
+        // place_maker_order_f arguments: account, order_id, is_bid, price, size, order_metadata
+        place_maker_order_f: |address, OrderIdType, bool, u64, u64, M| has drop + copy,
+        // cleanup_order_f arguments: account, order_id, is_bid, remaining_size
+        cleanup_order_f: |address, OrderIdType, bool, u64| has drop + copy,
+        /// decrease_order_size_f arguments: account, order_id, is_bid, price, size
+        decrease_order_size_f: |address, OrderIdType, bool, u64, u64| has drop + copy,
     ): MarketClearinghouseCallbacks {
-        MarketClearinghouseCallbacks {
+        MarketClearinghouseCallbacks::V1 {
             settle_trade_f,
             validate_order_placement_f,
             place_maker_order_f,
@@ -66,37 +148,37 @@ module aptos_experimental::market_types {
         self.taker_cancellation_reason
     }
 
-    public fun settle_trade(
+    public(friend) fun settle_trade(
         self: &MarketClearinghouseCallbacks,
         taker: address,
+        taker_order_id: OrderIdType,
         maker: address,
-        taker_order_id: u64,
-        maker_order_id:u64,
+        maker_order_id: OrderIdType,
         fill_id: u64,
         is_taker_long: bool,
         price: u64,
         size: u64,
         taker_metadata: M,
         maker_metadata: M): SettleTradeResult {
-        (self.settle_trade_f)(taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size, taker_metadata, maker_metadata)
+        (self.settle_trade_f)(taker, taker_order_id, maker, maker_order_id, fill_id, is_taker_long, price, size, taker_metadata, maker_metadata)
     }
 
-    public fun validate_order_placement(
+    public(friend) fun validate_order_placement(
         self: &MarketClearinghouseCallbacks,
         account: address,
-        order_id: u64,
+        order_id: OrderIdType,
         is_taker: bool,
         is_bid: bool,
-        price: u64,
+        price: Option,
         size: u64,
         order_metadata: M): bool {
         (self.validate_order_placement_f)(account, order_id, is_taker, is_bid, price, size, order_metadata)
     }
 
-    public fun place_maker_order(
+    public(friend) fun place_maker_order(
         self: &MarketClearinghouseCallbacks,
         account: address,
-        order_id: u64,
+        order_id: OrderIdType,
         is_bid: bool,
         price: u64,
         size: u64,
@@ -104,19 +186,19 @@ module aptos_experimental::market_types {
         (self.place_maker_order_f)(account, order_id, is_bid, price, size, order_metadata)
     }
 
-    public fun cleanup_order(
+    public(friend) fun cleanup_order(
         self: &MarketClearinghouseCallbacks,
         account: address,
-        order_id: u64,
+        order_id: OrderIdType,
         is_bid: bool,
         remaining_size: u64) {
         (self.cleanup_order_f)(account, order_id, is_bid, remaining_size)
     }
 
-    public fun decrease_order_size(
+    public(friend) fun decrease_order_size(
         self: &MarketClearinghouseCallbacks,
         account: address,
-        order_id: u64,
+        order_id: OrderIdType,
         is_bid: bool,
         price: u64,
         size: u64,) {
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/order_book/active_order_book.move b/aptos-move/framework/aptos-experimental/sources/trading/order_book/active_order_book.move
index 660b52dde2726..c101b8929b5ba 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/order_book/active_order_book.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/order_book/active_order_book.move
@@ -1,4 +1,8 @@
-/// (work in progress)
+/// ActiveOrderBook: the main order book that keeps track of active orders and their states. It is
+/// backed by a BigOrderedMap, a data structure that supports efficient insertion, deletion, and matching of orders.
+/// Orders are matched based on price-time priority.
+///
+/// This is an internal module that cannot be used directly; use OrderBook instead.
 module aptos_experimental::active_order_book {
     use std::option::{Self, Option};
     use aptos_std::math64::mul_div;
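
To make the price-time rule concrete, a small worked example (all numbers invented):

```move
// Price-time priority, illustrated. Asks at the same price fill in insertion
// order; a better-priced ask fills before a worse-priced one.
//
//   asks: 100 (placed first, size 10), 100 (placed second, size 10), 101 (size 10)
//
// An incoming bid for 15 units at limit 101 takes 10 @ 100 from the first ask,
// then 5 @ 100 from the second; the ask at 101 is untouched.
```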
@@ -31,10 +35,6 @@ module aptos_experimental::active_order_book {
 
     const U64_MAX: u64 = 0xffffffffffffffff;
 
-    const U256_MAX: u256 =
-        0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff;
-    // 115792089237316195423570985008687907853269984665640564039457584007913129639935;
-
     struct ActiveBidKey has store, copy, drop {
         price: u64,
         tie_breaker: UniqueIdxType
@@ -48,7 +48,7 @@ module aptos_experimental::active_order_book {
     /// OrderBook tracking active (i.e. unconditional, immediately executable) limit orders.
     ///
     /// - invariant - all buys are smaller than sells, at all times.
-    /// - tie_breaker in sells is U256_MAX-value, to make sure largest value in the book
+    /// - tie_breaker in sells is U128_MAX - value, to make sure the largest value in the book
     ///   that is taken first is the one inserted first, amongst those with the same price.
     enum ActiveOrderBook has store {
         V1 {
@@ -57,7 +57,7 @@ module aptos_experimental::active_order_book {
         }
     }
 
-    public fun new_active_order_book(): ActiveOrderBook {
+    public(friend) fun new_active_order_book(): ActiveOrderBook {
         // potentially add max value to both sides (that will be skipped),
         // so that max_key never changes, and doesn't create conflict.
         ActiveOrderBook::V1 {
@@ -66,10 +66,9 @@ module aptos_experimental::active_order_book {
         }
     }
 
-
     /// Picks the best (i.e. highest) bid (i.e. buy) price from the active order book.
     /// aborts if there are no buys
-    public fun best_bid_price(self: &ActiveOrderBook): Option<u64> {
+    public(friend) fun best_bid_price(self: &ActiveOrderBook): Option<u64> {
         if (self.buys.is_empty()) {
             option::none()
         } else {
@@ -80,7 +79,7 @@ module aptos_experimental::active_order_book {
 
     /// Picks the best (i.e. lowest) ask (i.e. sell) price from the active order book.
     /// aborts if there are no sells
-    public fun best_ask_price(self: &ActiveOrderBook): Option<u64> {
+    public(friend) fun best_ask_price(self: &ActiveOrderBook): Option<u64> {
         if (self.sells.is_empty()) {
             option::none()
         } else {
@@ -89,7 +88,7 @@ module aptos_experimental::active_order_book {
         }
     }
 
-    public fun get_mid_price(self: &ActiveOrderBook): Option<u64> {
+    public(friend) fun get_mid_price(self: &ActiveOrderBook): Option<u64> {
         let best_bid = self.best_bid_price();
         let best_ask = self.best_ask_price();
         if (best_bid.is_none() || best_ask.is_none()) {
@@ -101,8 +100,8 @@ module aptos_experimental::active_order_book {
         }
     }
 
-    public fun get_slippage_price(
-        self: &ActiveOrderBook, is_buy: bool, slippage_pct: u64
+    public(friend) fun get_slippage_price(
+        self: &ActiveOrderBook, is_bid: bool, slippage_pct: u64
     ): Option<u64> {
         let mid_price = self.get_mid_price();
         if (mid_price.is_none()) {
@@ -112,7 +111,7 @@ module aptos_experimental::active_order_book {
         let slippage = mul_div(
             mid_price, slippage_pct, get_slippage_pct_precision() * 100
         );
-        if (is_buy) {
+        if (is_bid) {
             option::some(mid_price + slippage)
         } else {
             option::some(mid_price - slippage)
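
A worked example of the slippage arithmetic, assuming `get_slippage_pct_precision()` returns 100 (that constant is defined outside this hunk, so treat it as an assumption):

```move
// Invented numbers: best_bid = 9_900, best_ask = 10_100 => mid_price = 10_000.
// With slippage_pct = 150 (i.e. 1.50% under the precision-100 assumption):
//   slippage = mul_div(10_000, 150, 100 * 100) = 150
//   bid side:  10_000 + 150 = 10_150
//   ask side:  10_000 - 150 = 9_850
```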
@@ -136,9 +135,8 @@ module aptos_experimental::active_order_book {
                 } else {
                     front_value.size
                 };
-            total_value = total_value
-                + (matched_size as u128) * (front_key.price as u128);
-            total_size = total_size + matched_size;
+            total_value += (matched_size as u128) * (front_key.price as u128);
+            total_size += matched_size;
             let next_key = orders.prev_key(&front_key);
             if (next_key.is_none()) {
                 // TODO maybe we should return none if there is not enough depth?
@@ -165,9 +163,8 @@ module aptos_experimental::active_order_book {
                 } else {
                     front_value.size
                 };
-            total_value = total_value
-                + (matched_size as u128) * (front_key.price as u128);
-            total_size = total_size + matched_size;
+            total_value += (matched_size as u128) * (front_key.price as u128);
+            total_size += matched_size;
             let next_key = orders.next_key(&front_key);
             if (next_key.is_none()) {
                 break;
@@ -179,25 +176,25 @@ module aptos_experimental::active_order_book {
     }
 
     inline fun get_tie_breaker(
-        unique_priority_idx: UniqueIdxType, is_buy: bool
+        unique_priority_idx: UniqueIdxType, is_bid: bool
     ): UniqueIdxType {
-        if (is_buy) {
+        if (is_bid) {
             unique_priority_idx
         } else {
             unique_priority_idx.descending_idx()
         }
     }
 
-    public fun cancel_active_order(
+    public(friend) fun cancel_active_order(
         self: &mut ActiveOrderBook,
         price: u64,
         unique_priority_idx: UniqueIdxType,
-        is_buy: bool
+        is_bid: bool
     ): u64 {
-        let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+        let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
         let key = ActiveBidKey { price: price, tie_breaker };
         let value =
-            if (is_buy) {
+            if (is_bid) {
                 self.buys.remove(&key)
             } else {
                 self.sells.remove(&key)
@@ -205,15 +202,15 @@ module aptos_experimental::active_order_book {
         value.size
     }
 
-    public fun is_active_order(
+    public(friend) fun is_active_order(
         self: &ActiveOrderBook,
         price: u64,
         unique_priority_idx: UniqueIdxType,
-        is_buy: bool
+        is_bid: bool
     ): bool {
-        let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+        let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
         let key = ActiveBidKey { price: price, tie_breaker };
-        if (is_buy) {
+        if (is_bid) {
             self.buys.contains(&key)
         } else {
             self.sells.contains(&key)
@@ -222,14 +219,22 @@ module aptos_experimental::active_order_book {
 
     /// Check if the order is a taker order - i.e. if it can be immediately matched with the order book fully or partially.
     public fun is_taker_order(
-        self: &ActiveOrderBook, price: u64, is_buy: bool
+        self: &ActiveOrderBook, price: Option<u64>, is_bid: bool
     ): bool {
-        if (is_buy) {
+        if (is_bid) {
             let best_ask_price = self.best_ask_price();
-            best_ask_price.is_some() && price >= best_ask_price.destroy_some()
+            best_ask_price.is_some()
+                && (
+                    price.is_none()
+                        || price.destroy_some() >= best_ask_price.destroy_some()
+                )
         } else {
             let best_bid_price = self.best_bid_price();
-            best_bid_price.is_some() && price <= best_bid_price.destroy_some()
+            best_bid_price.is_some()
+                && (
+                    price.is_none()
+                        || price.destroy_some() <= best_bid_price.destroy_some()
+                )
         }
     }
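
With `price` now an `Option`, `option::none()` expresses a market order: it crosses whenever the opposite side is non-empty, while a limit price is compared against the best opposite price. A sketch, assuming a book whose best ask is 105 (invented state):

```move
// Invented state: the book's best ask is 105.
assert!(book.is_taker_order(option::none(), /*is_bid=*/ true));  // market buy always crosses
assert!(book.is_taker_order(option::some(105), true));           // 105 >= best ask 105
assert!(!book.is_taker_order(option::some(104), true));          // 104 < best ask 105, maker
```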
 
@@ -264,10 +269,14 @@ module aptos_experimental::active_order_book {
     }
 
     fun get_single_match_for_buy_order(
-        self: &mut ActiveOrderBook, price: u64, size: u64
+        self: &mut ActiveOrderBook, price: Option<u64>, size: u64
     ): ActiveMatchedOrder {
         let (smallest_key, smallest_value) = self.sells.borrow_front();
-        assert!(price >= smallest_key.price, EINTERNAL_INVARIANT_BROKEN);
+        if (price.is_some()) {
+            assert!(
+                price.destroy_some() >= smallest_key.price, EINTERNAL_INVARIANT_BROKEN
+            );
+        };
         single_match_with_current_active_order(
             size,
             smallest_key,
@@ -277,10 +286,14 @@ module aptos_experimental::active_order_book {
     }
 
     fun get_single_match_for_sell_order(
-        self: &mut ActiveOrderBook, price: u64, size: u64
+        self: &mut ActiveOrderBook, price: Option<u64>, size: u64
     ): ActiveMatchedOrder {
         let (largest_key, largest_value) = self.buys.borrow_back();
-        assert!(price <= largest_key.price, EINTERNAL_INVARIANT_BROKEN);
+        if (price.is_some()) {
+            assert!(
+                price.destroy_some() <= largest_key.price, EINTERNAL_INVARIANT_BROKEN
+            );
+        };
         single_match_with_current_active_order(
             size,
             largest_key,
@@ -289,13 +302,13 @@ module aptos_experimental::active_order_book {
         )
     }
 
-    public fun get_single_match_result(
+    public(friend) fun get_single_match_result(
         self: &mut ActiveOrderBook,
-        price: u64,
+        price: Option<u64>,
         size: u64,
-        is_buy: bool
+        is_bid: bool
     ): ActiveMatchedOrder {
-        if (is_buy) {
+        if (is_bid) {
             self.get_single_match_for_buy_order(price, size)
         } else {
             self.get_single_match_for_sell_order(price, size)
@@ -303,16 +316,16 @@ module aptos_experimental::active_order_book {
     }
 
     /// Increase the size of the order in the orderbook without altering its position in the price-time priority.
-    public fun increase_order_size(
+    public(friend) fun increase_order_size(
         self: &mut ActiveOrderBook,
         price: u64,
         unique_priority_idx: UniqueIdxType,
         size_delta: u64,
-        is_buy: bool
+        is_bid: bool
     ) {
-        let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+        let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
         let key = ActiveBidKey { price, tie_breaker };
-        if (is_buy) {
+        if (is_bid) {
             self.buys.borrow_mut(&key).size += size_delta;
         } else {
             self.sells.borrow_mut(&key).size += size_delta;
@@ -320,36 +333,36 @@ module aptos_experimental::active_order_book {
     }
 
     /// Decrease the size of the order in the order book without altering its position in the price-time priority.
-    public fun decrease_order_size(
+    public(friend) fun decrease_order_size(
         self: &mut ActiveOrderBook,
         price: u64,
         unique_priority_idx: UniqueIdxType,
         size_delta: u64,
-        is_buy: bool
+        is_bid: bool
     ) {
-        let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+        let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
         let key = ActiveBidKey { price, tie_breaker };
-        if (is_buy) {
+        if (is_bid) {
             self.buys.borrow_mut(&key).size -= size_delta;
         } else {
             self.sells.borrow_mut(&key).size -= size_delta;
         };
     }
 
-    public fun place_maker_order(
+    public(friend) fun place_maker_order(
         self: &mut ActiveOrderBook,
         order_id: OrderIdType,
         price: u64,
         unique_priority_idx: UniqueIdxType,
         size: u64,
-        is_buy: bool
+        is_bid: bool
     ) {
-        let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+        let tie_breaker = get_tie_breaker(unique_priority_idx, is_bid);
         let key = ActiveBidKey { price, tie_breaker };
         let value = ActiveBidData { order_id, size };
         // Assert that this is not a taker order
-        assert!(!self.is_taker_order(price, is_buy), EINVALID_MAKER_ORDER);
-        if (is_buy) {
+        assert!(!self.is_taker_order(option::some(price), is_bid), EINVALID_MAKER_ORDER);
+        if (is_bid) {
             self.buys.add(key, value);
         } else {
             self.sells.add(key, value);
@@ -366,11 +379,11 @@ module aptos_experimental::active_order_book {
     #[test_only]
     struct TestOrder has copy, drop {
         account: address,
-        account_order_id: u64,
+        order_id: OrderIdType,
         price: u64,
         size: u64,
         unique_idx: UniqueIdxType,
-        is_buy: bool
+        is_bid: bool
     }
 
     #[test_only]
@@ -379,18 +392,14 @@ module aptos_experimental::active_order_book {
         let result = vector::empty();
         let remaining_size = order.size;
         while (remaining_size > 0) {
-            if (!self.is_taker_order(order.price, order.is_buy)) {
+            if (!self.is_taker_order(option::some(order.price), order.is_bid)) {
                 self.place_maker_order(
-                    new_order_id_type(order.account, order.account_order_id),
-                    order.price,
-                    order.unique_idx,
-                    order.size,
-                    order.is_buy
+                    order.order_id, order.price, order.unique_idx, order.size, order.is_bid
                 );
                 return result;
             };
             let match_result =
-                self.get_single_match_result(order.price, remaining_size, order.is_buy);
+                self.get_single_match_result(option::some(order.price), remaining_size, order.is_bid);
             remaining_size -= match_result.get_active_matched_size();
             result.push_back(match_result);
         };
@@ -411,11 +420,11 @@ module aptos_experimental::active_order_book {
             active_order_book.place_test_order(
                 TestOrder {
                     account: @0xAA,
-                    account_order_id: 0,
+                    order_id: new_order_id_type(0),
                     price: 200,
                     size: 1000,
                     unique_idx: new_unique_idx_type(0),
-                    is_buy: false
+                    is_bid: false
                 }
             );
         assert!(match_result.is_empty());
@@ -427,11 +436,11 @@ module aptos_experimental::active_order_book {
             active_order_book.place_test_order(
                 TestOrder {
                     account: @0xAA,
-                    account_order_id: 1,
+                    order_id: new_order_id_type(1),
                     price: 100,
                     size: 1000,
                     unique_idx: new_unique_idx_type(1),
-                    is_buy: true
+                    is_bid: true
                 }
             );
         assert!(match_result.is_empty());
@@ -447,11 +456,11 @@ module aptos_experimental::active_order_book {
             active_order_book.place_test_order(
                 TestOrder {
                     account: @0xAA,
-                    account_order_id: 2,
+                    order_id: new_order_id_type(2),
                     price: 150,
                     size: 100,
                     unique_idx: new_unique_idx_type(2),
-                    is_buy: false
+                    is_bid: false
                 }
             );
         assert!(match_result.is_empty());
@@ -465,11 +474,11 @@ module aptos_experimental::active_order_book {
             active_order_book.place_test_order(
                 TestOrder {
                     account: @0xAA,
-                    account_order_id: 3,
+                    order_id: new_order_id_type(3),
                     price: 175,
                     size: 100,
                     unique_idx: new_unique_idx_type(3),
-                    is_buy: false
+                    is_bid: false
                 }
             );
         assert!(match_result.is_empty());
@@ -486,11 +495,11 @@ module aptos_experimental::active_order_book {
             active_order_book.place_test_order(
                 TestOrder {
                     account: @0xAA,
-                    account_order_id: 4,
+                    order_id: new_order_id_type(4),
                     price: 160,
                     size: 50,
                     unique_idx: new_unique_idx_type(4),
-                    is_buy: true
+                    is_bid: true
                 }
             );
         assert!(match_result.length() == 1);
@@ -501,7 +510,7 @@ module aptos_experimental::active_order_book {
             match_result
                 == vector[
                     new_active_matched_order(
-                        new_order_id_type(@0xAA, 2),
+                        new_order_id_type(2),
                         50, // matched size
                         50 // remaining size
                     )
@@ -519,33 +528,33 @@ module aptos_experimental::active_order_book {
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 1,
+                order_id: new_order_id_type(1),
                 price: 100,
                 size: 50,
                 unique_idx: new_unique_idx_type(1),
-                is_buy: false
+                is_bid: false
             }
         );
 
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 2,
+                order_id: new_order_id_type(2),
                 price: 150,
                 size: 100,
                 unique_idx: new_unique_idx_type(2),
-                is_buy: false
+                is_bid: false
             }
         );
 
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 3,
+                order_id: new_order_id_type(3),
                 price: 200,
                 size: 150,
                 unique_idx: new_unique_idx_type(3),
-                is_buy: false
+                is_bid: false
             }
         );
 
@@ -576,33 +585,33 @@ module aptos_experimental::active_order_book {
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 1,
+                order_id: new_order_id_type(1),
                 price: 200,
                 size: 50,
                 unique_idx: new_unique_idx_type(1),
-                is_buy: true
+                is_bid: true
             }
         );
 
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 2,
+                order_id: new_order_id_type(2),
                 price: 150,
                 size: 100,
                 unique_idx: new_unique_idx_type(2),
-                is_buy: true
+                is_bid: true
             }
         );
 
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 3,
+                order_id: new_order_id_type(3),
                 price: 100,
                 size: 150,
                 unique_idx: new_unique_idx_type(3),
-                is_buy: true
+                is_bid: true
             }
         );
 
@@ -633,33 +642,33 @@ module aptos_experimental::active_order_book {
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 1,
+                order_id: new_order_id_type(1),
                 price: 101,
                 size: 50,
                 unique_idx: new_unique_idx_type(1),
-                is_buy: false
+                is_bid: false
             }
         );
 
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 2,
+                order_id: new_order_id_type(2),
                 price: 102,
                 size: 100,
                 unique_idx: new_unique_idx_type(2),
-                is_buy: false
+                is_bid: false
             }
         );
 
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 3,
+                order_id: new_order_id_type(3),
                 price: 103,
                 size: 150,
                 unique_idx: new_unique_idx_type(3),
-                is_buy: false
+                is_bid: false
             }
         );
 
@@ -667,22 +676,22 @@ module aptos_experimental::active_order_book {
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 4,
+                order_id: new_order_id_type(4),
                 price: 99,
                 size: 50,
                 unique_idx: new_unique_idx_type(4),
-                is_buy: true
+                is_bid: true
             }
         );
 
         active_order_book.place_test_order(
             TestOrder {
                 account: @0xAA,
-                account_order_id: 5,
+                order_id: new_order_id_type(5),
                 price: 98,
                 size: 100,
                 unique_idx: new_unique_idx_type(5),
-                is_buy: true
+                is_bid: true
             }
         );
 
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book.move b/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book.move
index 9382ec7f33d59..6e38c41fd137b 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book.move
@@ -19,16 +19,16 @@ module aptos_experimental::order_book {
     use aptos_experimental::order_book_types::{
         OrderIdType,
         OrderWithState,
-        generate_unique_idx_fifo_tiebraker,
-        new_order_id_type,
         new_order,
         new_order_with_state,
         new_single_order_match,
         new_default_big_ordered_map,
+        new_ascending_id_generator,
+        new_unique_idx_type,
         TriggerCondition,
-        UniqueIdxType,
         SingleOrderMatch,
-        Order
+        Order,
+        AscendingIdGenerator
     };
     use aptos_experimental::active_order_book::{ActiveOrderBook, new_active_order_book};
     use aptos_experimental::pending_order_book_index::{
@@ -36,10 +36,7 @@ module aptos_experimental::order_book {
         new_pending_order_book_index
     };
     #[test_only]
-    use aptos_experimental::order_book_types::tp_trigger_condition;
-
-    const U256_MAX: u256 =
-        0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff;
+    use aptos_experimental::order_book_types::{new_order_id_type, tp_trigger_condition, UniqueIdxType};
 
     const EORDER_ALREADY_EXISTS: u64 = 1;
     const EPOST_ONLY_FILLED: u64 = 2;
@@ -47,24 +44,29 @@ module aptos_experimental::order_book {
     const EINVALID_INACTIVE_ORDER_STATE: u64 = 5;
     const EINVALID_ADD_SIZE_TO_ORDER: u64 = 6;
     const E_NOT_ACTIVE_ORDER: u64 = 7;
+    const E_REINSERT_ORDER_MISMATCH: u64 = 8;
+    const EORDER_CREATOR_MISMATCH: u64 = 9;
 
-    struct OrderRequest<M: store + copy + drop> has copy, drop {
-        account: address,
-        account_order_id: u64,
-        unique_priority_idx: Option<UniqueIdxType>,
-        price: u64,
-        orig_size: u64,
-        remaining_size: u64,
-        is_buy: bool,
-        trigger_condition: Option<TriggerCondition>,
-        metadata: M
+    enum OrderRequest<M: store + copy + drop> has copy, drop {
+        V1 {
+            account: address,
+            order_id: OrderIdType,
+            client_order_id: Option<u64>,
+            price: u64,
+            orig_size: u64,
+            remaining_size: u64,
+            is_bid: bool,
+            trigger_condition: Option<TriggerCondition>,
+            metadata: M
+        }
     }
 
     enum OrderBook<M: store + copy + drop> has store {
         V1 {
             orders: BigOrderedMap<OrderIdType, OrderWithState<M>>,
             active_orders: ActiveOrderBook,
-            pending_orders: PendingOrderBookIndex
+            pending_orders: PendingOrderBookIndex,
+            ascending_id_generator: AscendingIdGenerator
         }
     }
 
@@ -76,23 +78,23 @@ module aptos_experimental::order_book {
 
     public fun new_order_request<M: store + copy + drop>(
         account: address,
-        account_order_id: u64,
-        unique_priority_idx: Option<UniqueIdxType>,
+        order_id: OrderIdType,
+        client_order_id: Option<u64>,
         price: u64,
         orig_size: u64,
         remaining_size: u64,
-        is_buy: bool,
+        is_bid: bool,
         trigger_condition: Option<TriggerCondition>,
         metadata: M
     ): OrderRequest<M> {
-        OrderRequest {
+        OrderRequest::V1 {
             account,
-            account_order_id,
-            unique_priority_idx,
+            order_id,
+            client_order_id,
             price,
             orig_size,
             remaining_size,
-            is_buy,
+            is_bid,
             trigger_condition,
             metadata
         }
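
A test-style sketch of assembling a request (all values invented; `new_order_id_type` is test-only in this module and `TestMetadata` mirrors the test type used further down):

```move
// Hypothetical limit bid: 500 units at price 100, no trigger condition.
let req = new_order_request(
    @0xCAFE,                   // account
    new_order_id_type(42),     // order_id, globally unique
    option::some(7),           // client_order_id chosen by the caller
    100,                       // price
    500,                       // orig_size
    500,                       // remaining_size
    true,                      // is_bid
    option::none(),            // no trigger condition => active order
    TestMetadata {}
);
```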
@@ -102,55 +104,59 @@ module aptos_experimental::order_book {
         OrderBook::V1 {
             orders: new_default_big_ordered_map(),
             active_orders: new_active_order_book(),
-            pending_orders: new_pending_order_book_index()
+            pending_orders: new_pending_order_book_index(),
+            ascending_id_generator: new_ascending_id_generator()
         }
     }
 
-
-
     /// Cancels an order from the order book. If the order is active, it is removed from the active order book; else
-    /// it is removed from the pending order book. The API doesn't abort if the order is not found in the order book -
-    /// this is a TODO for now.
+    /// it is removed from the pending order book.
+    /// If the order doesn't exist, it aborts with EORDER_NOT_FOUND.
+    ///
+    /// `order_creator` is passed only to verify that the order cancellation is authorized.
     public fun cancel_order<M: store + copy + drop>(
-        self: &mut OrderBook<M>, account: address, account_order_id: u64
-    ): Option<Order<M>> {
-        let order_id = new_order_id_type(account, account_order_id);
+        self: &mut OrderBook<M>, order_creator: address, order_id: OrderIdType
+    ): Order<M> {
         assert!(self.orders.contains(&order_id), EORDER_NOT_FOUND);
         let order_with_state = self.orders.remove(&order_id);
         let (order, is_active) = order_with_state.destroy_order_from_state();
+        assert!(order_creator == order.get_account(), EORDER_CREATOR_MISMATCH);
         if (is_active) {
-            let (_, unique_priority_idx, bid_price, _orig_size, _size, is_buy, _, _) =
+            let unique_priority_idx = order.get_unique_priority_idx();
+            let (_account, _order_id, _client_order_id, bid_price, _orig_size, _size, is_bid, _, _) =
                 order.destroy_order();
-            self.active_orders.cancel_active_order(bid_price, unique_priority_idx, is_buy);
+            self.active_orders.cancel_active_order(bid_price, unique_priority_idx, is_bid);
         } else {
+            let unique_priority_idx = order.get_unique_priority_idx();
             let (
-                _,
-                unique_priority_idx,
+                _account,
+                _order_id,
+                _client_order_id,
                 _bid_price,
                 _orig_size,
                 _size,
-                is_buy,
+                is_bid,
                 trigger_condition,
                 _
             ) = order.destroy_order();
             self.pending_orders.cancel_pending_order(
-                trigger_condition.destroy_some(), unique_priority_idx, is_buy
+                trigger_condition.destroy_some(), unique_priority_idx, is_bid
             );
         };
-        return option::some(order)
+        return order
     }
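
The `order_creator` check turns a mismatched cancellation into an abort instead of a silent success; a test-style sketch (ids invented):

```move
// @0xAA placed order 1; only @0xAA may cancel it.
let order = order_book.cancel_order(@0xAA, new_order_id_type(1)); // ok, returns the Order
// order_book.cancel_order(@0xBB, new_order_id_type(1));  // aborts: EORDER_CREATOR_MISMATCH
// order_book.cancel_order(@0xAA, new_order_id_type(99)); // aborts: EORDER_NOT_FOUND
```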
 
     /// Checks if the order is a taker order, i.e., whether it can be matched immediately against the active order book.
     public fun is_taker_order<M: store + copy + drop>(
         self: &OrderBook<M>,
-        price: u64,
-        is_buy: bool,
+        price: Option<u64>,
+        is_bid: bool,
         trigger_condition: Option<TriggerCondition>
     ): bool {
         if (trigger_condition.is_some()) {
             return false;
         };
-        return self.active_orders.is_taker_order(price, is_buy)
+        return self.active_orders.is_taker_order(price, is_bid)
     }
 
     /// Places a maker order into the order book. If the order is a pending order, it is added to the pending order book
@@ -162,37 +168,34 @@ module aptos_experimental::order_book {
             return self.place_pending_maker_order(order_req);
         };
 
-        let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
-        let unique_priority_idx =
-            if (order_req.unique_priority_idx.is_some()) {
-                order_req.unique_priority_idx.destroy_some()
-            } else {
-                generate_unique_idx_fifo_tiebraker()
-            };
+        let ascending_idx =
+            new_unique_idx_type(self.ascending_id_generator.next_ascending_id());
 
         assert!(
-            !self.orders.contains(&order_id),
+            !self.orders.contains(&order_req.order_id),
             error::invalid_argument(EORDER_ALREADY_EXISTS)
         );
 
         let order =
             new_order(
-                order_id,
-                unique_priority_idx,
+                order_req.order_id,
+                order_req.account,
+                ascending_idx,
+                order_req.client_order_id,
                 order_req.price,
                 order_req.orig_size,
                 order_req.remaining_size,
-                order_req.is_buy,
+                order_req.is_bid,
                 order_req.trigger_condition,
                 order_req.metadata
             );
-        self.orders.add(order_id, new_order_with_state(order, true));
+        self.orders.add(order_req.order_id, new_order_with_state(order, true));
         self.active_orders.place_maker_order(
-            order_id,
+            order_req.order_id,
             order_req.price,
-            unique_priority_idx,
+            ascending_idx,
             order_req.remaining_size,
-            order_req.is_buy
+            order_req.is_bid
         );
     }
 
@@ -200,42 +203,59 @@ module aptos_experimental::order_book {
     /// but the clearinghouse fails to settle all or part of the order. If the order doesn't exist in the order book,
     /// it is added to the order book; if it exists, its size is updated.
     public fun reinsert_maker_order<M: store + copy + drop>(
-        self: &mut OrderBook<M>, order_req: OrderRequest<M>
+        self: &mut OrderBook<M>, order_req: OrderRequest<M>, original_order: Order<M>
     ) {
+        assert!(
+            &original_order.get_order_id() == &order_req.order_id,
+            E_REINSERT_ORDER_MISMATCH
+        );
+        assert!(
+            &original_order.get_account() == &order_req.account,
+            E_REINSERT_ORDER_MISMATCH
+        );
+        assert!(
+            original_order.get_orig_size() == order_req.orig_size,
+            E_REINSERT_ORDER_MISMATCH
+        );
+        // TODO: decide what the rule should be for remaining_size; see the test_maker_order_reinsert_not_exists unit test.
+        // assert!(
+        //     original_order.get_remaining_size() >= order_req.remaining_size,
+        //     E_REINSERT_ORDER_MISMATCH
+        // );
+        assert!(original_order.get_price() == order_req.price, E_REINSERT_ORDER_MISMATCH);
+        assert!(original_order.is_bid() == order_req.is_bid, E_REINSERT_ORDER_MISMATCH);
+
         assert!(order_req.trigger_condition.is_none(), E_NOT_ACTIVE_ORDER);
-        let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
-        if (!self.orders.contains(&order_id)) {
+        if (!self.orders.contains(&order_req.order_id)) {
             return self.place_maker_order(order_req);
         };
-        let order_with_state = self.orders.remove(&order_id);
+        let order_with_state = self.orders.remove(&order_req.order_id);
         order_with_state.increase_remaining_size(order_req.remaining_size);
-        self.orders.add(order_id, order_with_state);
+        self.orders.add(order_req.order_id, order_with_state);
         self.active_orders.increase_order_size(
             order_req.price,
-            order_req.unique_priority_idx.destroy_some(),
+            original_order.get_unique_priority_idx(),
             order_req.remaining_size,
-            order_req.is_buy
+            order_req.is_bid
         );
     }
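
Sketched end to end (sizes invented, free variables schematic): a match pulls size out of the book, the clearinghouse settles only part of it, and the unsettled remainder is reinserted under the original order's identity.

```move
// Suppose a match consumed 400 units of a maker order but only 300 settled.
// The caller reinserts the unsettled 100 with the same id, account, price,
// side, and orig_size; any mismatch aborts with E_REINSERT_ORDER_MISMATCH.
let order_req = new_order_request(
    maker, maker_order_id, client_order_id,
    price, orig_size, /*remaining_size=*/ 100,
    is_bid, option::none(), metadata
);
order_book.reinsert_maker_order(order_req, original_order);
```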
 
     fun place_pending_maker_order<M: store + copy + drop>(
         self: &mut OrderBook<M>, order_req: OrderRequest<M>
     ) {
-        let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
-        let unique_priority_idx =
-            if (order_req.unique_priority_idx.is_some()) {
-                order_req.unique_priority_idx.destroy_some()
-            } else {
-                generate_unique_idx_fifo_tiebraker()
-            };
+        let order_id = order_req.order_id;
+        let ascending_idx =
+            new_unique_idx_type(self.ascending_id_generator.next_ascending_id());
         let order =
             new_order(
                 order_id,
-                unique_priority_idx,
+                order_req.account,
+                ascending_idx,
+                order_req.client_order_id,
                 order_req.price,
                 order_req.orig_size,
                 order_req.remaining_size,
-                order_req.is_buy,
+                order_req.is_bid,
                 order_req.trigger_condition,
                 order_req.metadata
             );
@@ -245,8 +265,8 @@ module aptos_experimental::order_book {
         self.pending_orders.place_pending_maker_order(
             order_id,
             order_req.trigger_condition.destroy_some(),
-            unique_priority_idx,
-            order_req.is_buy
+            ascending_idx,
+            order_req.is_bid
         );
     }
 
@@ -254,11 +274,11 @@ module aptos_experimental::order_book {
     /// API to ensure that the order is a taker order before calling this API, otherwise it will abort.
     public fun get_single_match_for_taker<M: store + copy + drop>(
         self: &mut OrderBook<M>,
-        price: u64,
+        price: Option<u64>,
         size: u64,
-        is_buy: bool
+        is_bid: bool
     ): SingleOrderMatch<M> {
-        let result = self.active_orders.get_single_match_result(price, size, is_buy);
+        let result = self.active_orders.get_single_match_result(price, size, is_bid);
         let (order_id, matched_size, remaining_size) =
             result.destroy_active_matched_order();
         let order_with_state = self.orders.remove(&order_id);
@@ -275,12 +295,14 @@ module aptos_experimental::order_book {
     /// if the size delta is greater than or equal to the remaining size of the order. Please note that the API will abort and
     /// not cancel the order if the size delta is equal to the remaining size of the order, to avoid unintended
     /// cancellation of the order. Please use the `cancel_order` API to cancel the order.
+    ///
+    /// `order_creator` is passed only to verify that the size change is authorized.
     public fun decrease_order_size<M: store + copy + drop>(
-        self: &mut OrderBook<M>, account: address, account_order_id: u64, size_delta: u64
+        self: &mut OrderBook<M>, order_creator: address, order_id: OrderIdType, size_delta: u64
     ) {
-        let order_id = new_order_id_type(account, account_order_id);
         assert!(self.orders.contains(&order_id), EORDER_NOT_FOUND);
         let order_with_state = self.orders.remove(&order_id);
+        assert!(order_creator == order_with_state.get_order_from_state().get_account(), EORDER_CREATOR_MISMATCH);
         order_with_state.decrease_remaining_size(size_delta);
         if (order_with_state.is_active_order()) {
             let order = order_with_state.get_order_from_state();
@@ -295,9 +317,8 @@ module aptos_experimental::order_book {
     }
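
Unlike cancel-and-replace, this keeps the order's position in the price-time queue; note the abort when the delta would take the order to zero. A test-style sketch (values invented):

```move
// Order 1 by @0xAA has remaining_size 600.
order_book.decrease_order_size(@0xAA, new_order_id_type(1), 100);  // 600 -> 500, priority kept
// order_book.decrease_order_size(@0xBB, new_order_id_type(1), 1);   // aborts: EORDER_CREATOR_MISMATCH
// order_book.decrease_order_size(@0xAA, new_order_id_type(1), 500); // aborts: use cancel_order instead
```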
 
     public fun is_active_order<M: store + copy + drop>(
-        self: &OrderBook<M>, account: address, account_order_id: u64
+        self: &OrderBook<M>, order_id: OrderIdType
     ): bool {
-        let order_id = new_order_id_type(account, account_order_id);
         if (!self.orders.contains(&order_id)) {
             return false;
         };
@@ -305,9 +326,8 @@ module aptos_experimental::order_book {
     }
 
     public fun get_order<M: store + copy + drop>(
-        self: &OrderBook<M>, account: address, account_order_id: u64
+        self: &OrderBook<M>, order_id: OrderIdType
     ): Option<OrderWithState<M>> {
-        let order_id = new_order_id_type(account, account_order_id);
         if (!self.orders.contains(&order_id)) {
             return option::none();
         };
@@ -315,9 +335,8 @@ module aptos_experimental::order_book {
     }
 
     public fun get_remaining_size<M: store + copy + drop>(
-        self: &OrderBook<M>, account: address, account_order_id: u64
+        self: &OrderBook<M>, order_id: OrderIdType
     ): u64 {
-        let order_id = new_order_id_type(account, account_order_id);
         if (!self.orders.contains(&order_id)) {
             return 0;
         };
@@ -349,9 +368,9 @@ module aptos_experimental::order_book {
     }
 
     public fun get_slippage_price<M: store + copy + drop>(
-        self: &OrderBook<M>, is_buy: bool, slippage_pct: u64
+        self: &OrderBook<M>, is_bid: bool, slippage_pct: u64
     ): Option<u64> {
-        self.active_orders.get_slippage_price(is_buy, slippage_pct)
+        self.active_orders.get_slippage_price(is_bid, slippage_pct)
     }
 
     /// Removes and returns the orders that are ready to be executed based on the time condition.
@@ -374,7 +393,12 @@ module aptos_experimental::order_book {
 
     #[test_only]
     public fun destroy_order_book<M: store + copy + drop>(self: OrderBook<M>) {
-        let OrderBook::V1 { orders, active_orders, pending_orders } = self;
+        let OrderBook::V1 {
+            orders,
+            active_orders,
+            pending_orders,
+            ascending_id_generator: _
+        } = self;
         orders.destroy(|_v| {});
         active_orders.destroy_active_order_book();
         pending_orders.destroy_pending_order_book_index();
@@ -382,31 +406,31 @@ module aptos_experimental::order_book {
 
     #[test_only]
     public fun get_unique_priority_idx<M: store + copy + drop>(
-        self: &OrderBook<M>, account: address, account_order_id: u64
+        self: &OrderBook<M>, order_id: OrderIdType
     ): Option<UniqueIdxType> {
-        let order_id = new_order_id_type(account, account_order_id);
         if (!self.orders.contains(&order_id)) {
             return option::none();
         };
         option::some(self.orders.borrow(&order_id).get_unique_priority_idx_from_state())
     }
 
+    #[test_only]
     public fun place_order_and_get_matches<M: store + copy + drop>(
         self: &mut OrderBook<M>, order_req: OrderRequest<M>
     ): vector<SingleOrderMatch<M>> {
         let match_results = vector::empty();
-        let remainig_size = order_req.remaining_size;
-        while (remainig_size > 0) {
-            if (!self.is_taker_order(order_req.price, order_req.is_buy, order_req.trigger_condition)) {
+        let remaining_size = order_req.remaining_size;
+        while (remaining_size > 0) {
+            if (!self.is_taker_order(option::some(order_req.price), order_req.is_bid, order_req.trigger_condition)) {
                 self.place_maker_order(
-                    OrderRequest {
+                    OrderRequest::V1 {
                         account: order_req.account,
-                        account_order_id: order_req.account_order_id,
-                        unique_priority_idx: option::none(),
+                        order_id: order_req.order_id,
+                        client_order_id: order_req.client_order_id,
                         price: order_req.price,
                         orig_size: order_req.orig_size,
-                        remaining_size: remainig_size,
-                        is_buy: order_req.is_buy,
+                        remaining_size,
+                        is_bid: order_req.is_bid,
                         trigger_condition: order_req.trigger_condition,
                         metadata: order_req.metadata
                     }
@@ -415,11 +439,11 @@ module aptos_experimental::order_book {
             };
             let match_result =
                 self.get_single_match_for_taker(
-                    order_req.price, remainig_size, order_req.is_buy
+                    option::some(order_req.price), remaining_size, order_req.is_bid
                 );
             let matched_size = match_result.get_matched_size();
             match_results.push_back(match_result);
-            remainig_size -= matched_size;
+            remaining_size -= matched_size;
         };
         return match_results
     }
@@ -428,19 +452,17 @@ module aptos_experimental::order_book {
     public fun update_order_and_get_matches<M: store + copy + drop>(
         self: &mut OrderBook<M>, order_req: OrderRequest<M>
     ): vector<SingleOrderMatch<M>> {
-        let unique_priority_idx =
-            self.get_unique_priority_idx(order_req.account, order_req.account_order_id);
+        let unique_priority_idx = self.get_unique_priority_idx(order_req.order_id);
         assert!(unique_priority_idx.is_some(), EORDER_NOT_FOUND);
-        let unique_priority_idx = unique_priority_idx.destroy_some();
-        self.cancel_order(order_req.account, order_req.account_order_id);
-        let order_req = OrderRequest {
+        self.cancel_order(order_req.account, order_req.order_id);
+        let order_req = OrderRequest::V1 {
             account: order_req.account,
-            account_order_id: order_req.account_order_id,
-            unique_priority_idx: option::some(unique_priority_idx),
+            order_id: order_req.order_id,
+            client_order_id: order_req.client_order_id,
             price: order_req.price,
             orig_size: order_req.orig_size,
             remaining_size: order_req.remaining_size,
-            is_buy: order_req.is_buy,
+            is_bid: order_req.is_bid,
             trigger_condition: order_req.trigger_condition,
             metadata: order_req.metadata
         };
@@ -456,31 +478,23 @@ module aptos_experimental::order_book {
         let i = 0;
         while (i < ready_orders.length()) {
             let order = ready_orders[i];
-            let (
-                order_id,
-                unique_priority_idx,
-                price,
-                orig_size,
-                remaining_size,
-                is_buy,
-                _,
-                metadata
-            ) = order.destroy_order();
-            let (account, account_order_id) = order_id.destroy_order_id_type();
-            let order_req = OrderRequest {
+            let (account, order_id, client_order_id, price, orig_size, remaining_size, is_bid, _, metadata) =
+                order.destroy_order();
+            let order_req = OrderRequest::V1 {
                 account,
-                account_order_id,
-                unique_priority_idx: option::some(unique_priority_idx),
+                order_id,
+                client_order_id,
                 price,
                 orig_size,
                 remaining_size,
-                is_buy,
+                is_bid,
                 trigger_condition: option::none(),
                 metadata
             };
             let match_results = self.place_order_and_get_matches(order_req);
             all_matches.append(match_results);
-            i = i + 1;
+            i += 1;
         };
         all_matches
     }
@@ -492,9 +506,8 @@ module aptos_experimental::order_book {
         let total_matched_size = 0;
         let i = 0;
         while (i < match_results.length()) {
-            total_matched_size = total_matched_size
-                + match_results[i].get_matched_size();
-            i = i + 1;
+            total_matched_size += match_results[i].get_matched_size();
+            i += 1;
         };
         total_matched_size
     }
@@ -508,14 +521,14 @@ module aptos_experimental::order_book {
         let order_book = new_order_book();
 
         // Place a GTC sell order
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xAA,
-            account_order_id: 1,
-            unique_priority_idx: option::none(),
+            order_id: new_order_id_type(1),
+            client_order_id: option::some(1),
             price: 100,
             orig_size: 1000,
             remaining_size: 1000,
-            is_buy: false,
+            is_bid: false,
             trigger_condition: option::none(),
             metadata: TestMetadata {}
         };
@@ -523,41 +536,43 @@ module aptos_experimental::order_book {
         assert!(match_results.is_empty()); // No matches for first order
 
         // Verify order exists and is active
-        let order_id = new_order_id_type(@0xAA, 1);
+        let order_id = new_order_id_type(1);
         let order_state = *order_book.orders.borrow(&order_id);
         let (order, is_active) = order_state.destroy_order_from_state();
-        let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) =
+        let (_account, _order_id, client_order_id, price, orig_size, size, is_bid, _, _) =
             order.destroy_order();
         assert!(is_active == true);
         assert!(price == 100);
         assert!(orig_size == 1000);
         assert!(size == 1000);
-        assert!(is_buy == false);
+        assert!(is_bid == false);
+        assert!(client_order_id == option::some(1));
 
         // Place a matching buy order for partial fill
         let match_results =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(2),
+                    client_order_id: option::some(2),
                     price: 100,
                     orig_size: 400,
                     remaining_size: 400,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
             );
         // Verify taker match details
         assert!(total_matched_size(&match_results) == 400);
-        assert!(order_book.get_remaining_size(@0xBB, 1) == 0);
+        assert!(order_book.get_remaining_size(new_order_id_type(2)) == 0);
 
         // Verify maker match details
         assert!(match_results.length() == 1); // One match result
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 400);
         assert!(order.get_orig_size() == 1000);
         assert!(order.get_remaining_size() == 600); // Maker order partially filled
@@ -565,19 +580,19 @@ module aptos_experimental::order_book {
         // Verify original order still exists but with reduced size
         let order_state = *order_book.orders.borrow(&order_id);
         let (order, is_active) = order_state.destroy_order_from_state();
-        let (_, _unique_priority_idx, price, orig_size, size, is_buy, _, _) =
-            order.destroy_order();
+        let (_, _, client_order_id, price, orig_size, size, is_bid, _, _) = order.destroy_order();
         assert!(is_active == true);
         assert!(price == 100);
         assert!(orig_size == 1000);
         assert!(size == 600);
-        assert!(is_buy == false);
+        assert!(is_bid == false);
+        assert!(client_order_id == option::some(1));
 
         // Cancel the remaining order
-        order_book.cancel_order(@0xAA, 1);
+        order_book.cancel_order(@0xAA, new_order_id_type(1));
 
         // Verify order no longer exists
-        assert!(order_book.get_remaining_size(@0xAA, 1) == 0);
+        assert!(order_book.get_remaining_size(new_order_id_type(1)) == 0);
 
         // Since we cannot drop the order book, we move it to a test struct
         order_book.destroy_order_book();
@@ -590,14 +605,14 @@ module aptos_experimental::order_book {
         // Place a GTC sell order
         let match_results =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xAA,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(1),
+                    client_order_id: option::none(),
                     price: 101,
                     orig_size: 1000,
                     remaining_size: 1000,
-                    is_buy: false,
+                    is_bid: false,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -606,14 +621,14 @@ module aptos_experimental::order_book {
 
         let match_results =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(2),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 500,
                     remaining_size: 500,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -623,14 +638,14 @@ module aptos_experimental::order_book {
         // Update the order so that it would match immediately
         let match_results =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 2,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(3),
+                    client_order_id: option::none(),
                     price: 101,
                     orig_size: 500,
                     remaining_size: 500,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -638,12 +653,13 @@ module aptos_experimental::order_book {
 
         // Verify taker (buy order) was fully filled
         assert!(total_matched_size(&match_results) == 500);
-        assert!(order_book.get_remaining_size(@0xBB, 2) == 0);
+        assert!(order_book.get_remaining_size(new_order_id_type(3)) == 0);
 
         assert!(match_results.length() == 1);
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 500);
         assert!(order.get_orig_size() == 1000);
         assert!(order.get_remaining_size() == 500); // Partial fill
@@ -656,14 +672,14 @@ module aptos_experimental::order_book {
         let order_book = new_order_book();
 
         // Place a GTC sell order
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xAA,
-            account_order_id: 1,
-            unique_priority_idx: option::none(),
+            order_id: new_order_id_type(1),
+            client_order_id: option::some(1),
             price: 100,
             orig_size: 1000,
             remaining_size: 1000,
-            is_buy: false,
+            is_bid: false,
             trigger_condition: option::none(),
             metadata: TestMetadata {}
         };
@@ -673,14 +689,14 @@ module aptos_experimental::order_book {
         // Place a buy order at lower price
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(2),
+                    client_order_id: option::some(2),
                     price: 99,
                     orig_size: 500,
                     remaining_size: 500,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -690,14 +706,14 @@ module aptos_experimental::order_book {
         // Update sell order to match with buy order
         let match_results =
             order_book.update_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xAA,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(1),
+                    client_order_id: option::some(3),
                     price: 99,
                     orig_size: 1000,
                     remaining_size: 1000,
-                    is_buy: false,
+                    is_bid: false,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -709,9 +725,11 @@ module aptos_experimental::order_book {
         assert!(match_results.length() == 1); // One match result
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xBB, 1));
+        assert!(order.get_account() == @0xBB);
+        assert!(order.get_order_id() == new_order_id_type(2));
         assert!(matched_size == 500);
         assert!(order.get_orig_size() == 500);
+        assert!(order.get_client_order_id() == option::some(2));
         assert!(order.get_remaining_size() == 0); // Fully filled
 
         order_book.destroy_order_book();
@@ -725,14 +743,14 @@ module aptos_experimental::order_book {
         // Place a GTC sell order
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xAA,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(1),
+                    client_order_id: option::none(),
                     price: 101,
                     orig_size: 1000,
                     remaining_size: 1000,
-                    is_buy: false,
+                    is_bid: false,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -741,14 +759,14 @@ module aptos_experimental::order_book {
 
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(2),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 500,
                     remaining_size: 500,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -758,14 +776,14 @@ module aptos_experimental::order_book {
         // Try to update non existant order
         let match_result =
             order_book.update_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 3,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(3),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 500,
                     remaining_size: 500,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -782,14 +800,14 @@ module aptos_experimental::order_book {
         // Place a GTC sell order for 1000 units at price 100
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xAA,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(1),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 1000,
                     remaining_size: 1000,
-                    is_buy: false,
+                    is_bid: false,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -799,14 +817,14 @@ module aptos_experimental::order_book {
         // Place a smaller buy order (400 units) at the same price
         let match_results =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(2),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 400,
                     remaining_size: 400,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -819,7 +837,8 @@ module aptos_experimental::order_book {
         assert!(match_results.length() == 1);
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 400);
         assert!(order.get_orig_size() == 1000);
         assert!(order.get_remaining_size() == 600); // Partial fill
@@ -827,14 +846,14 @@ module aptos_experimental::order_book {
         // Place another buy order for 300 units
         let match_results =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 2,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(3),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 300,
                     remaining_size: 300,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -848,22 +867,23 @@ module aptos_experimental::order_book {
         assert!(match_results.length() == 1);
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 300);
         assert!(order.get_orig_size() == 1000);
         assert!(order.get_remaining_size() == 300); // Still partial as 300 units remain
 
         // Original sell order should still exist with 300 units remaining
-        let order_id = new_order_id_type(@0xAA, 1);
+        let order_id = new_order_id_type(1);
         let order_state = *order_book.orders.borrow(&order_id);
         let (order, is_active) = order_state.destroy_order_from_state();
-        let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) =
+        let (_account, _order_id, _, price, orig_size, size, is_bid, _, _) =
             order.destroy_order();
         assert!(is_active == true);
         assert!(price == 100);
         assert!(orig_size == 1000);
         assert!(size == 300); // 1000 - 400 - 300 = 300 remaining
-        assert!(is_buy == false);
+        assert!(is_bid == false);
 
         order_book.destroy_order_book();
     }
@@ -875,14 +895,14 @@ module aptos_experimental::order_book {
         // Place a GTC sell order for 500 units at price 100
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xAA,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(1),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 500,
                     remaining_size: 500,
-                    is_buy: false,
+                    is_bid: false,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -893,14 +913,14 @@ module aptos_experimental::order_book {
         // Should partially fill against the sell order and remain in book
         let match_results =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(2),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 800,
                     remaining_size: 800,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -913,26 +933,27 @@ module aptos_experimental::order_book {
         assert!(match_results.length() == 1);
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 500);
         assert!(order.get_orig_size() == 500);
         assert!(order.get_remaining_size() == 0); // Fully filled
 
         // Verify original sell order no longer exists (fully filled)
-        let order_id = new_order_id_type(@0xAA, 1);
+        let order_id = new_order_id_type(1);
         assert!(!order_book.orders.contains(&order_id));
 
         // Verify buy order still exists with remaining size
-        let order_id = new_order_id_type(@0xBB, 1);
+        let order_id = new_order_id_type(2);
         let order_state = *order_book.orders.borrow(&order_id);
         let (order, is_active) = order_state.destroy_order_from_state();
-        let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) =
+        let (_account, _order_id, _, price, orig_size, size, is_bid, _, _) =
             order.destroy_order();
         assert!(is_active == true);
         assert!(price == 100);
         assert!(orig_size == 800);
         assert!(size == 300); // 800 - 500 = 300 remaining
-        assert!(is_buy == true);
+        assert!(is_bid == true);
 
         order_book.destroy_order_book();
     }
@@ -944,14 +965,14 @@ module aptos_experimental::order_book {
         // Place a GTC sell order for 1000 units at price 100
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xAA,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(1),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 1000,
                     remaining_size: 1000,
-                    is_buy: false,
+                    is_bid: false,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -963,14 +984,14 @@ module aptos_experimental::order_book {
         // Place a smaller buy order (400 units) at the same price
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(2),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 400,
                     remaining_size: 400,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::some(tp_trigger_condition(90)),
                     metadata: TestMetadata {}
                 }
@@ -992,7 +1013,8 @@ module aptos_experimental::order_book {
         assert!(match_results.length() == 1);
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 400);
         assert!(order.get_orig_size() == 1000);
         assert!(order.get_remaining_size() == 600); // Partial fill
@@ -1000,14 +1022,14 @@ module aptos_experimental::order_book {
         // Place another buy order for 300 units
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 2,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(3),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 300,
                     remaining_size: 300,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::some(tp_trigger_condition(80)),
                     metadata: TestMetadata {}
                 }
@@ -1031,22 +1053,23 @@ module aptos_experimental::order_book {
         assert!(match_results.length() == 1);
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 300);
         assert!(order.get_orig_size() == 1000);
         assert!(order.get_remaining_size() == 300); // Still partial as 300 units remain
 
         // Original sell order should still exist with 300 units remaining
-        let order_id = new_order_id_type(@0xAA, 1);
+        let order_id = new_order_id_type(1);
         let order_state = *order_book.orders.borrow(&order_id);
         let (order, is_active) = order_state.destroy_order_from_state();
-        let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) =
+        let (_account, _order_id, _, price, orig_size, size, is_bid, _, _) =
             order.destroy_order();
         assert!(is_active == true);
         assert!(price == 100);
         assert!(orig_size == 1000);
         assert!(size == 300); // 1000 - 400 - 300 = 300 remaining
-        assert!(is_buy == false);
+        assert!(is_bid == false);
 
         order_book.destroy_order_book();
     }
@@ -1058,14 +1081,14 @@ module aptos_experimental::order_book {
         // Place a GTC sell order for 1000 units at price 100
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xAA,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(1),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 1000,
                     remaining_size: 1000,
-                    is_buy: true,
+                    is_bid: true,
                     trigger_condition: option::none(),
                     metadata: TestMetadata {}
                 }
@@ -1077,14 +1100,14 @@ module aptos_experimental::order_book {
         // Place a smaller buy order (400 units) at the same price
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 1,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(2),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 400,
                     remaining_size: 400,
-                    is_buy: false,
+                    is_bid: false,
                     trigger_condition: option::some(tp_trigger_condition(110)),
                     metadata: TestMetadata {}
                 }
@@ -1107,7 +1130,8 @@ module aptos_experimental::order_book {
         assert!(match_results.length() == 1);
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 400);
         assert!(order.get_orig_size() == 1000);
         assert!(order.get_remaining_size() == 600); // Partial fill
@@ -1115,14 +1139,14 @@ module aptos_experimental::order_book {
         // Place another buy order for 300 units
         let match_result =
             order_book.place_order_and_get_matches(
-                OrderRequest {
+                OrderRequest::V1 {
                     account: @0xBB,
-                    account_order_id: 2,
-                    unique_priority_idx: option::none(),
+                    order_id: new_order_id_type(3),
+                    client_order_id: option::none(),
                     price: 100,
                     orig_size: 300,
                     remaining_size: 300,
-                    is_buy: false,
+                    is_bid: false,
                     trigger_condition: option::some(tp_trigger_condition(120)),
                     metadata: TestMetadata {}
                 }
@@ -1147,22 +1171,23 @@ module aptos_experimental::order_book {
         assert!(match_results.length() == 1);
         let maker_match = match_results[0];
         let (order, matched_size) = maker_match.destroy_single_order_match();
-        assert!(order.get_order_id() == new_order_id_type(@0xAA, 1));
+        assert!(order.get_account() == @0xAA);
+        assert!(order.get_order_id() == new_order_id_type(1));
         assert!(matched_size == 300);
         assert!(order.get_orig_size() == 1000);
         assert!(order.get_remaining_size() == 300); // Still partial as 300 units remain
 
         // Original sell order should still exist with 300 units remaining
-        let order_id = new_order_id_type(@0xAA, 1);
+        let order_id = new_order_id_type(1);
         let order_state = *order_book.orders.borrow(&order_id);
         let (order, is_active) = order_state.destroy_order_from_state();
-        let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) =
+        let (_account, _order_id, _, price, orig_size, size, is_bid, _, _) =
             order.destroy_order();
         assert!(is_active == true);
         assert!(price == 100);
         assert!(orig_size == 1000);
         assert!(size == 300); // 1000 - 400 - 300 = 300 remaining
-        assert!(is_buy == true);
+        assert!(is_bid == true);
         order_book.destroy_order_book();
     }
 
@@ -1171,29 +1196,29 @@ module aptos_experimental::order_book {
         let order_book = new_order_book();
 
         // Place a GTC sell order
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xAA,
-            account_order_id: 1,
-            unique_priority_idx: option::none(),
+            order_id: new_order_id_type(1),
+            client_order_id: option::none(),
             price: 100,
             orig_size: 1000,
             remaining_size: 1000,
-            is_buy: false,
+            is_bid: false,
             trigger_condition: option::none(),
             metadata: TestMetadata {}
         };
         order_book.place_maker_order(order_req);
-        assert!(order_book.get_remaining_size(@0xAA, 1) == 1000);
+        assert!(order_book.get_remaining_size(new_order_id_type(1)) == 1000);
 
         // Taker order
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xBB,
-            account_order_id: 1,
-            unique_priority_idx: option::none(),
+            order_id: new_order_id_type(2),
+            client_order_id: option::none(),
             price: 100,
             orig_size: 100,
             remaining_size: 100,
-            is_buy: true,
+            is_bid: true,
             trigger_condition: option::none(),
             metadata: TestMetadata {}
         };
@@ -1203,30 +1228,31 @@ module aptos_experimental::order_book {
 
         let (matched_order, _) = match_results[0].destroy_single_order_match();
         let (
+            _account,
             _order_id,
-            unique_idx,
+            _client_order_id,
             price,
             orig_size,
             _remaining_size,
-            is_buy,
+            is_bid,
             _trigger_condition,
             metadata
         ) = matched_order.destroy_order();
         // Assume only half of the matched size settled, so the remaining 50 is reinserted into the order book
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xAA,
-            account_order_id: 1,
-            unique_priority_idx: option::some(unique_idx),
+            order_id: new_order_id_type(1),
+            client_order_id: option::none(),
             price,
             orig_size,
             remaining_size: 50,
-            is_buy,
+            is_bid,
             trigger_condition: option::none(),
             metadata
         };
-        order_book.reinsert_maker_order(order_req);
+        order_book.reinsert_maker_order(order_req, matched_order);
         // Verify order was reinserted with updated size
-        assert!(order_book.get_remaining_size(@0xAA, 1) == 950);
+        assert!(order_book.get_remaining_size(new_order_id_type(1)) == 950);
         order_book.destroy_order_book();
     }
 
@@ -1235,29 +1261,29 @@ module aptos_experimental::order_book {
         let order_book = new_order_book();
 
         // Place a GTC sell order
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xAA,
-            account_order_id: 1,
-            unique_priority_idx: option::none(),
+            order_id: new_order_id_type(1),
+            client_order_id: option::none(),
             price: 100,
             orig_size: 1000,
             remaining_size: 1000,
-            is_buy: false,
+            is_bid: false,
             trigger_condition: option::none(),
             metadata: TestMetadata {}
         };
         order_book.place_maker_order(order_req);
-        assert!(order_book.get_remaining_size(@0xAA, 1) == 1000);
+        assert!(order_book.get_remaining_size(new_order_id_type(1)) == 1000);
 
         // Taker order
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xBB,
-            account_order_id: 1,
-            unique_priority_idx: option::none(),
+            order_id: new_order_id_type(2),
+            client_order_id: option::none(),
             price: 100,
             orig_size: 1000,
             remaining_size: 1000,
-            is_buy: true,
+            is_bid: true,
             trigger_condition: option::none(),
             metadata: TestMetadata {}
         };
@@ -1267,30 +1293,31 @@ module aptos_experimental::order_book {
 
         let (matched_order, _) = match_results[0].destroy_single_order_match();
         let (
+            _account,
             _order_id,
-            unique_idx,
+            _client_order_id,
             price,
             orig_size,
             _remaining_size,
-            is_buy,
+            is_bid,
             _trigger_condition,
             metadata
         ) = matched_order.destroy_order();
         // Assume only half of the matched size settled, so the remaining 500 is reinserted into the order book
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xAA,
-            account_order_id: 1,
-            unique_priority_idx: option::some(unique_idx),
+            order_id: new_order_id_type(1),
+            client_order_id: option::none(),
             price,
             orig_size,
             remaining_size: 500,
-            is_buy,
+            is_bid,
             trigger_condition: option::none(),
             metadata
         };
-        order_book.reinsert_maker_order(order_req);
+        order_book.reinsert_maker_order(order_req, matched_order);
         // Verify order was reinserted with updated size
-        assert!(order_book.get_remaining_size(@0xAA, 1) == 500);
+        assert!(order_book.get_remaining_size(new_order_id_type(1)) == 500);
         order_book.destroy_order_book();
     }
 
@@ -1299,40 +1326,40 @@ module aptos_experimental::order_book {
         let order_book = new_order_book();
 
         // Place an active order
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xAA,
-            account_order_id: 1,
-            unique_priority_idx: option::none(),
+            order_id: new_order_id_type(1),
+            client_order_id: option::none(),
             price: 100,
             orig_size: 1000,
             remaining_size: 1000,
-            is_buy: false,
+            is_bid: false,
             trigger_condition: option::none(),
             metadata: TestMetadata {}
         };
         order_book.place_maker_order(order_req);
-        assert!(order_book.get_remaining_size(@0xAA, 1) ==  1000);
+        assert!(order_book.get_remaining_size(new_order_id_type(1)) == 1000);
 
-        order_book.decrease_order_size(@0xAA, 1, 700);
+        order_book.decrease_order_size(@0xAA, new_order_id_type(1), 700);
         // Verify order was decreased with updated size
-        assert!(order_book.get_remaining_size(@0xAA, 1) == 300);
+        assert!(order_book.get_remaining_size(new_order_id_type(1)) == 300);
 
-        let order_req = OrderRequest {
+        let order_req = OrderRequest::V1 {
             account: @0xBB,
-            account_order_id: 1,
-            unique_priority_idx: option::none(),
+            order_id: new_order_id_type(2),
+            client_order_id: option::none(),
             price: 100,
             orig_size: 1000,
             remaining_size: 1000,
-            is_buy: false,
+            is_bid: false,
             trigger_condition: option::some(tp_trigger_condition(90)),
             metadata: TestMetadata {}
         };
         order_book.place_maker_order(order_req);
-        assert!(order_book.get_remaining_size(@0xBB, 1) == 1000);
-        order_book.decrease_order_size(@0xBB, 1, 600);
+        assert!(order_book.get_remaining_size(new_order_id_type(2)) == 1000);
+        order_book.decrease_order_size(@0xBB, new_order_id_type(2), 600);
         // Verify order was decreased with updated size
-        assert!(order_book.get_remaining_size(@0xBB, 1) == 400);
+        assert!(order_book.get_remaining_size(new_order_id_type(2)) == 400);
 
         order_book.destroy_order_book();
     }
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book_types.move b/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book_types.move
index d3cccedfac7aa..b8f40af0c263a 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book_types.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book_types.move
@@ -2,16 +2,13 @@
 module aptos_experimental::order_book_types {
     use std::option;
     use std::option::Option;
-    use aptos_std::bcs;
-    use aptos_std::from_bcs;
-    use aptos_framework::transaction_context;
     use aptos_framework::big_ordered_map::{Self, BigOrderedMap};
     friend aptos_experimental::active_order_book;
     friend aptos_experimental::order_book;
     friend aptos_experimental::pending_order_book_index;
+    friend aptos_experimental::market;
 
-    const U256_MAX: u256 =
-        0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff;
+    const U128_MAX: u128 = 0xffffffffffffffffffffffffffffffff;
 
     const BIG_MAP_INNER_DEGREE: u16 = 64;
     const BIG_MAP_LEAF_DEGREE: u16 = 32;
@@ -25,12 +22,21 @@ module aptos_experimental::order_book_types {
 
     // to replace types:
     struct OrderIdType has store, copy, drop {
-        account: address,
-        account_order_id: u64
+        order_id: u128
     }
 
+    // Internal type representing the order in which trades are placed. Unique per instance of AscendingIdGenerator.
     struct UniqueIdxType has store, copy, drop {
-        idx: u256
+        idx: u128
+    }
+
+    // Struct providing ascending ids that can be used as a tie-breaker to respect the FIFO order of trades.
+    // Returned ids are ascending and unique within a single instance of AscendingIdGenerator.
+    enum AscendingIdGenerator has store, drop {
+        FromCounter {
+            value: u64
+        }
+        // TODO: add stateless (and thus fully parallel) support for id creation via a native function
     }
 
     struct ActiveMatchedOrder has copy, drop {
@@ -40,20 +46,26 @@ module aptos_experimental::order_book_types {
         remaining_size: u64
     }
 
-    struct SingleOrderMatch<M: store + copy + drop> has drop, copy {
-        order: Order<M>,
-        matched_size: u64
+    enum SingleOrderMatch<M: store + copy + drop> has drop, copy {
+        V1 {
+            order: Order<M>,
+            matched_size: u64
+        }
     }
 
-    struct Order<M: store + copy + drop> has store, copy, drop {
-        order_id: OrderIdType,
-        unique_priority_idx: UniqueIdxType,
-        price: u64,
-        orig_size: u64,
-        remaining_size: u64,
-        is_bid: bool,
-        trigger_condition: Option<TriggerCondition>,
-        metadata: M
+    enum Order<M: store + copy + drop> has store, copy, drop {
+        V1 {
+            order_id: OrderIdType,
+            account: address,
+            client_order_id: Option<u64>, // for client to track orders
+            unique_priority_idx: UniqueIdxType,
+            price: u64,
+            orig_size: u64,
+            remaining_size: u64,
+            is_bid: bool,
+            trigger_condition: Option<TriggerCondition>,
+            metadata: M
+        }
     }
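+    // Note: the single-variant enums (V1) keep these types upgradeable; future
+    // versions can add variants without breaking the stored data layout.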
 
     enum TriggerCondition has store, drop, copy {
@@ -62,9 +74,11 @@ module aptos_experimental::order_book_types {
         TimeBased(u64)
     }
 
-    struct OrderWithState<M: store + copy + drop> has store, drop, copy {
-        order: Order<M>,
-        is_active: bool // i.e. where to find it.
+    enum OrderWithState<M: store + copy + drop> has store, drop, copy {
+        V1 {
+            order: Order<M>,
+            is_active: bool // i.e. where to find it.
+        }
     }
 
     public(friend) fun new_default_big_ordered_map<K: store, V: store>(): BigOrderedMap<K, V> {
@@ -83,66 +97,72 @@ module aptos_experimental::order_book_types {
         TriggerCondition::TimeBased(time)
     }
 
-    public fun new_order_id_type(account: address, account_order_id: u64): OrderIdType {
-        OrderIdType { account, account_order_id }
+    public fun new_order_id_type(order_id: u128): OrderIdType {
+        OrderIdType { order_id }
     }
 
-    public fun generate_unique_idx_fifo_tiebraker(): UniqueIdxType {
-        // TODO change from random to monothonically increasing value
-        new_unique_idx_type(
-            from_bcs::to_u256(
-                bcs::to_bytes(&transaction_context::generate_auid_address())
-            )
-        )
+    public(friend) fun new_ascending_id_generator(): AscendingIdGenerator {
+        AscendingIdGenerator::FromCounter { value: 0 }
+    }
+
+    public(friend) fun next_ascending_id(self: &mut AscendingIdGenerator): u128 {
+        self.value += 1;
+        self.value as u128
     }
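+
+    // Illustrative use (an assumption, mirroring how the order book consumes it):
+    // each call returns a strictly larger id, giving earlier trades the smaller
+    // tie-breaker and hence FIFO priority.
+    //
+    //   let gen = new_ascending_id_generator();
+    //   let a = gen.next_ascending_id(); // 1
+    //   let b = gen.next_ascending_id(); // 2, so `a` wins ties against `b`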
 
-    public fun new_unique_idx_type(idx: u256): UniqueIdxType {
+    public(friend) fun new_unique_idx_type(idx: u128): UniqueIdxType {
         UniqueIdxType { idx }
     }
 
-    public fun descending_idx(self: &UniqueIdxType): UniqueIdxType {
-        UniqueIdxType { idx: U256_MAX - self.idx }
+    public(friend) fun descending_idx(self: &UniqueIdxType): UniqueIdxType {
+        UniqueIdxType { idx: U128_MAX - self.idx }
     }
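+
+    // The complement U128_MAX - idx inverts the ordering: since the maps iterate
+    // keys in ascending order, storing the complement lets the descending side
+    // of the book still visit earlier (smaller) ids first.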
 
-    public fun new_active_matched_order(
+    public(friend) fun new_active_matched_order(
         order_id: OrderIdType, matched_size: u64, remaining_size: u64
     ): ActiveMatchedOrder {
         ActiveMatchedOrder { order_id, matched_size, remaining_size }
     }
 
-    public fun destroy_active_matched_order(self: ActiveMatchedOrder): (OrderIdType, u64, u64) {
+    public(friend) fun destroy_active_matched_order(
+        self: ActiveMatchedOrder
+    ): (OrderIdType, u64, u64) {
         (self.order_id, self.matched_size, self.remaining_size)
     }
 
-    public fun new_order<M: store + copy + drop>(
+    public(friend) fun new_order<M: store + copy + drop>(
         order_id: OrderIdType,
+        account: address,
         unique_priority_idx: UniqueIdxType,
+        client_order_id: Option<u64>,
         price: u64,
         orig_size: u64,
         size: u64,
-        is_buy: bool,
+        is_bid: bool,
         trigger_condition: Option<TriggerCondition>,
         metadata: M
     ): Order<M> {
-        Order {
+        Order::V1 {
             order_id,
+            account,
             unique_priority_idx,
+            client_order_id,
             price,
             orig_size,
             remaining_size: size,
-            is_bid: is_buy,
+            is_bid,
             trigger_condition,
             metadata
         }
     }
 
-    public fun new_single_order_match<M: store + copy + drop>(
+    public(friend) fun new_single_order_match<M: store + copy + drop>(
         order: Order<M>, matched_size: u64
     ): SingleOrderMatch<M> {
-        SingleOrderMatch { order, matched_size }
+        SingleOrderMatch::V1 { order, matched_size }
     }
 
-    public fun get_active_matched_size(self: &ActiveMatchedOrder): u64 {
+    public(friend) fun get_active_matched_size(self: &ActiveMatchedOrder): u64 {
         self.matched_size
     }
 
@@ -155,7 +175,7 @@ module aptos_experimental::order_book_types {
     public fun new_order_with_state<M: store + copy + drop>(
         order: Order<M>, is_active: bool
     ): OrderWithState<M> {
-        OrderWithState { order, is_active }
+        OrderWithState::V1 { order, is_active }
     }
 
     public fun tp_trigger_condition(take_profit: u64): TriggerCondition {
@@ -167,18 +187,18 @@ module aptos_experimental::order_book_types {
     }
 
     // Returns the price-move-down index, price-move-up index and time-based index
     // entries for a particular trigger condition
-    public fun index(self: &TriggerCondition, is_buy: bool):
+    public fun index(self: &TriggerCondition, is_bid: bool):
         (Option<u64>, Option<u64>, Option<u64>) {
         match(self) {
             TriggerCondition::TakeProfit(tp) => {
-                if (is_buy) {
+                if (is_bid) {
                     (option::some(*tp), option::none(), option::none())
                 } else {
                     (option::none(), option::some(*tp), option::none())
                 }
             }
             TriggerCondition::StopLoss(sl) => {
-                if (is_buy) {
+                if (is_bid) {
                     (option::none(), option::some(*sl), option::none())
                 } else {
                     (option::some(*sl), option::none(), option::none())
@@ -206,7 +226,13 @@ module aptos_experimental::order_book_types {
         self.order_id
     }
 
-    public fun get_unique_priority_idx<M: store + copy + drop>(self: &Order<M>): UniqueIdxType {
+    public fun get_account<M: store + copy + drop>(self: &Order<M>): address {
+        self.account
+    }
+
+    public(friend) fun get_unique_priority_idx<M: store + copy + drop>(
+        self: &Order<M>
+    ): UniqueIdxType {
         self.unique_priority_idx
     }
 
@@ -259,6 +285,10 @@ module aptos_experimental::order_book_types {
         self.orig_size
     }
 
+    public fun get_client_order_id<M: store + copy + drop>(self: &Order<M>): Option<u64> {
+        self.client_order_id
+    }
+
     public fun destroy_order_from_state<M: store + copy + drop>(
         self: OrderWithState<M>
     ): (Order<M>, bool) {
@@ -271,16 +301,29 @@ module aptos_experimental::order_book_types {
 
     public fun destroy_order<M: store + copy + drop>(
         self: Order<M>
-    ): (OrderIdType, UniqueIdxType, u64, u64, u64, bool, Option<TriggerCondition>, M) {
+    ): (address, OrderIdType, Option<u64>, u64, u64, u64, bool, Option<TriggerCondition>, M) {
+        let Order::V1 {
+            order_id,
+            account,
+            client_order_id,
+            unique_priority_idx: _,
+            price,
+            orig_size,
+            remaining_size,
+            is_bid,
+            trigger_condition,
+            metadata
+        } = self;
         (
-            self.order_id,
-            self.unique_priority_idx,
-            self.price,
-            self.orig_size,
-            self.remaining_size,
-            self.is_bid,
-            self.trigger_condition,
-            self.metadata
+            account,
+            order_id,
+            client_order_id,
+            price,
+            orig_size,
+            remaining_size,
+            is_bid,
+            trigger_condition,
+            metadata
         )
     }
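+
+    // Callers unpack the full tuple in this order (as the updated tests do):
+    //   let (account, order_id, client_order_id, price, orig_size,
+    //        remaining_size, is_bid, trigger_condition, metadata) =
+    //       order.destroy_order();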
 
@@ -290,8 +333,8 @@ module aptos_experimental::order_book_types {
         (self.order, self.matched_size)
     }
 
-    public fun destroy_order_id_type(self: OrderIdType): (address, u64) {
-        (self.account, self.account_order_id)
+    public fun get_order_id_value(self: &OrderIdType): u128 {
+        self.order_id
     }
 
     public fun is_active_order(
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/order_book/pending_order_book_index.move b/aptos-move/framework/aptos-experimental/sources/trading/order_book/pending_order_book_index.move
index abdda107a9cc6..d111af435ce1c 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/order_book/pending_order_book_index.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/order_book/pending_order_book_index.move
@@ -23,7 +23,7 @@ module aptos_experimental::pending_order_book_index {
             price_move_down_index: BigOrderedMap<PendingOrderKey, OrderIdType>,
             // Orders to trigger when the oracle price moves greater than
             price_move_up_index: BigOrderedMap<PendingOrderKey, OrderIdType>,
-            //time_based_index: BigOrderedMap<u64, ActiveBidData>,
+            // time_based_index: BigOrderedMap<u64, ActiveBidData>,
             // Orders to trigger when the time is greater than
             time_based_index: BigOrderedMap<PendingOrderKey, OrderIdType>
         }
@@ -37,16 +37,14 @@ module aptos_experimental::pending_order_book_index {
         }
     }
 
-
-
     public(friend) fun cancel_pending_order(
         self: &mut PendingOrderBookIndex,
         trigger_condition: TriggerCondition,
         unique_priority_idx: UniqueIdxType,
-        is_buy: bool
+        is_bid: bool
     ) {
         let (price_move_up_index, price_move_down_index, time_based_index) =
-            trigger_condition.index(is_buy);
+            trigger_condition.index(is_bid);
         if (price_move_up_index.is_some()) {
             self.price_move_up_index.remove(
                 &PendingOrderKey {
@@ -73,11 +71,11 @@ module aptos_experimental::pending_order_book_index {
         order_id: OrderIdType,
         trigger_condition: TriggerCondition,
         unique_priority_idx: UniqueIdxType,
-        is_buy: bool
+        is_bid: bool
     ) {
         // Add this order to the pending order book index
         let (price_move_down_index, price_move_up_index, time_based_index) =
-            trigger_condition.index(is_buy);
+            trigger_condition.index(is_bid);
 
         if (price_move_up_index.is_some()) {
             self.price_move_up_index.add(
@@ -100,7 +98,7 @@ module aptos_experimental::pending_order_book_index {
         };
     }
 
-    public fun take_ready_price_based_orders(
+    public(friend) fun take_ready_price_based_orders(
         self: &mut PendingOrderBookIndex, current_price: u64
     ): vector<OrderIdType> {
         let orders = vector::empty();
@@ -125,7 +123,7 @@ module aptos_experimental::pending_order_book_index {
         orders
     }
 
-    public fun take_time_time_based_orders(
+    public(friend) fun take_time_time_based_orders(
         self: &mut PendingOrderBookIndex
     ): vector<OrderIdType> {
         let orders = vector::empty();
@@ -176,6 +174,4 @@ module aptos_experimental::pending_order_book_index {
     ): &BigOrderedMap<PendingOrderKey, OrderIdType> {
         &self.time_based_index
     }
-
-
 }
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/clearinghouse_test.move b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/clearinghouse_test.move
index 1065968a948f7..64c8e96b8d8e1 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/clearinghouse_test.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/clearinghouse_test.move
@@ -5,6 +5,7 @@ module aptos_experimental::clearinghouse_test {
     use std::signer;
     use aptos_std::table;
     use aptos_std::table::Table;
+    use aptos_experimental::order_book_types::OrderIdType;
     use aptos_experimental::market_types::{
         SettleTradeResult,
         new_settle_trade_result,
@@ -30,8 +31,8 @@ module aptos_experimental::clearinghouse_test {
 
     struct GlobalState has key {
         user_positions: Table,
-        open_orders: Table<u64, bool>,
-        maker_order_calls: Table<u64, bool>
+        open_orders: Table<OrderIdType, bool>,
+        maker_order_calls: Table<OrderIdType, bool>
     }
 
     public(package) fun initialize(admin: &signer) {
@@ -39,16 +40,22 @@ module aptos_experimental::clearinghouse_test {
             signer::address_of(admin) == @0x1,
             error::invalid_argument(EINVALID_ADDRESS)
         );
-        move_to(admin, GlobalState {
-            user_positions: table::new(),
-            open_orders: table::new(),
-            maker_order_calls: table::new()
-        });
+        move_to(
+            admin,
+            GlobalState {
+                user_positions: table::new(),
+                open_orders: table::new(),
+                maker_order_calls: table::new()
+            }
+        );
     }
 
-    public(package) fun validate_order_placement(order_id: u64): bool acquires GlobalState {
+    public(package) fun validate_order_placement(order_id: OrderIdType): bool acquires GlobalState {
         let open_orders = &mut borrow_global_mut<GlobalState>(@0x1).open_orders;
-        assert!(!open_orders.contains(order_id), error::invalid_argument(E_DUPLICATE_ORDER));
+        assert!(
+            !open_orders.contains(order_id),
+            error::invalid_argument(E_DUPLICATE_ORDER)
+        );
         open_orders.add(order_id, true);
         return true
     }
@@ -96,32 +103,31 @@ module aptos_experimental::clearinghouse_test {
         new_settle_trade_result(size, option::none(), option::none())
     }
 
-    public(package) fun place_maker_order(
-        order_id: u64,
-    ) acquires GlobalState {
-        let maker_order_calls = &mut borrow_global_mut<GlobalState>(@0x1).maker_order_calls;
-        assert!(!maker_order_calls.contains(order_id), error::invalid_argument(E_DUPLICATE_ORDER));
+    public(package) fun place_maker_order(order_id: OrderIdType) acquires GlobalState {
+        let maker_order_calls =
+            &mut borrow_global_mut<GlobalState>(@0x1).maker_order_calls;
+        assert!(
+            !maker_order_calls.contains(order_id),
+            error::invalid_argument(E_DUPLICATE_ORDER)
+        );
         maker_order_calls.add(order_id, true);
     }
 
-    public(package) fun is_maker_order_called(
-        order_id: u64
-    ): bool acquires GlobalState {
+    public(package) fun is_maker_order_called(order_id: OrderIdType): bool acquires GlobalState {
         let maker_order_calls = &borrow_global<GlobalState>(@0x1).maker_order_calls;
         maker_order_calls.contains(order_id)
     }
 
-    public(package) fun cleanup_order(
-        order_id: u64,
-    ) acquires GlobalState {
+    public(package) fun cleanup_order(order_id: OrderIdType) acquires GlobalState {
         let open_orders = &mut borrow_global_mut<GlobalState>(@0x1).open_orders;
-        assert!(open_orders.contains(order_id), error::invalid_argument(E_ORDER_NOT_FOUND));
+        assert!(
+            open_orders.contains(order_id),
+            error::invalid_argument(E_ORDER_NOT_FOUND)
+        );
         open_orders.remove(order_id);
     }
 
-    public(package) fun order_exists(
-        order_id: u64
-    ): bool acquires GlobalState {
+    public(package) fun order_exists(order_id: OrderIdType): bool acquires GlobalState {
         let open_orders = &borrow_global<GlobalState>(@0x1).open_orders;
         open_orders.contains(order_id)
     }
@@ -142,42 +148,42 @@ module aptos_experimental::clearinghouse_test {
     public(package) fun test_market_callbacks():
         MarketClearinghouseCallbacks acquires GlobalState {
         new_market_clearinghouse_callbacks(
-            |taker, maker, _taker_order_id, _maker_order_id, _fill_id, is_taker_long, _price, size, _taker_metadata, _maker_metadata| {
-                settle_trade(taker, maker, size, is_taker_long)
-            },
-            | _account, order_id, _is_taker, _is_bid, _price, _size, _order_metadata| {
+            |taker, _taker_order_id, maker, _maker_order_id, _fill_id, is_taker_long, _price, size, _taker_metadata, _maker_metadata
+            | { settle_trade(taker, maker, size, is_taker_long) },
+            |_account, order_id, _is_taker, _is_bid, _price, _size, _order_metadata| {
                 validate_order_placement(order_id)
             },
             |_account, order_id, _is_bid, _price, _size, _order_metadata| {
                 place_maker_order(order_id);
             },
-            | _account, _order_id, _is_bid, _remaining_size| {
+            |_account, _order_id, _is_bid, _remaining_size| {
                 cleanup_order(_order_id);
             },
-            | _account, _order_id, _is_bid, _price, _size| {
+            |_account, _order_id, _is_bid, _price, _size| {
                 // decrease order size is not used in this test
-            },
+            }
         )
     }
 
     public(package) fun test_market_callbacks_with_taker_cancelled():
         MarketClearinghouseCallbacks acquires GlobalState {
         new_market_clearinghouse_callbacks(
-            |taker, maker, _taker_order_id, _maker_order_id, _fill_id, is_taker_long, _price, size, _taker_metadata, _maker_metadata| {
+            |taker, _taker_order_id, maker, _maker_order_id, _fill_id, is_taker_long, _price, size, _taker_metadata, _maker_metadata
+            | {
                 settle_trade_with_taker_cancelled(taker, maker, size, is_taker_long)
             },
-            | _account, order_id, _is_taker, _is_bid, _price, _size, _order_metadata| {
+            |_account, order_id, _is_taker, _is_bid, _price, _size, _order_metadata| {
                 validate_order_placement(order_id)
             },
             |_account, _order_id, _is_bid, _price, _size, _order_metadata| {
                 // place_maker_order is not used in this test
             },
-            | _account, _order_id, _is_bid, _remaining_size| {
+            |_account, _order_id, _is_bid, _remaining_size| {
                 cleanup_order(_order_id);
             },
-            | _account, _order_id, _is_bid, _price, _size| {
+            |_account, _order_id, _is_bid, _price, _size| {
                 // decrease order size is not used in this test
-            },
+            }
         )
     }
 }
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_test_utils.move b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_test_utils.move
index 4cbc51af82328..de69f5f4b612d 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_test_utils.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_test_utils.move
@@ -5,42 +5,57 @@ module aptos_experimental::market_test_utils {
     use std::signer;
     use aptos_experimental::clearinghouse_test;
     use aptos_experimental::event_utils::{latest_emitted_events, EventStore};
-    use aptos_experimental::market_types::MarketClearinghouseCallbacks;
-
-    use aptos_experimental::market::{
+    use aptos_experimental::market_types::{
         order_status_cancelled,
         order_status_filled,
         order_status_open,
-        OrderEvent,
-        Market
+        TimeInForce,
+        MarketClearinghouseCallbacks
     };
+    use aptos_experimental::order_book_types::OrderIdType;
+
+    use aptos_experimental::market::{OrderEvent, Market};
 
-    public fun place_maker_order_and_verify<M: store + copy + drop>(
+    public fun place_order_and_verify<M: store + copy + drop>(
         market: &mut Market<M>,
         user: &signer,
-        price: u64,
+        limit_price: Option<u64>,
         size: u64,
-        is_buy: bool,
-        time_in_force: u8,
+        is_bid: bool,
+        time_in_force: TimeInForce,
         event_store: &mut EventStore,
         is_taker: bool,
         is_cancelled: bool,
         metadata: M,
         callbacks: &MarketClearinghouseCallbacks<M>
-    ): u64 {
+    ): OrderIdType {
         let user_addr = signer::address_of(user);
-        market.place_order(
-            user,
-            price,
-            size,
-            is_buy, // is_buy
-            time_in_force, // order_type
-            option::none(), // trigger_condition
-            metadata,
-            1000,
-            true,
-            callbacks
-        );
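+        // A present limit_price places a (possibly crossing) limit order;
+        // option::none() falls through to a market order instead.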
+        if (limit_price.is_some()) {
+            market.place_limit_order(
+                user,
+                limit_price.destroy_some(),
+                size,
+                is_bid, // is_bid
+                time_in_force, // order_type
+                option::none(), // trigger_condition
+                metadata,
+                option::none(),
+                1000,
+                true,
+                callbacks
+            );
+        } else {
+            market.place_market_order(
+                user,
+                size,
+                is_bid, // is_buy
+                metadata,
+                option::none(), // client_order_id
+                1000,
+                true,
+                callbacks
+            );
+        };
         let events = latest_emitted_events(event_store, option::none());
         if (!is_cancelled) {
             assert!(events.length() == 1);
@@ -51,13 +66,14 @@ module aptos_experimental::market_test_utils {
         let order_id = order_place_event.get_order_id_from_event();
         order_place_event.verify_order_event(
             order_id,
+            option::none(), // client_order_id
             market.get_market(),
             user_addr,
             size,
             size,
             size,
-            price,
-            is_buy,
+            limit_price,
+            is_bid,
             is_taker,
             order_status_open()
         );
@@ -72,13 +88,14 @@ module aptos_experimental::market_test_utils {
             let order_cancel_event = events[1];
             order_cancel_event.verify_order_event(
                 order_id,
+                option::none(),
                 market.get_market(),
                 user_addr,
                 size,
                 0, // Remaining size is always 0 when the order is cancelled
                 size,
-                price,
-                is_buy,
+                limit_price,
+                is_bid,
                 is_taker,
                 order_status_cancelled()
             )
@@ -89,15 +106,16 @@ module aptos_experimental::market_test_utils {
     public fun place_taker_order<M: store + copy + drop>(
         market: &mut Market<M>,
         taker: &signer,
-        taker_price: u64,
+        client_order_id: Option<u64>,
+        taker_price: Option<u64>,
         size: u64,
-        is_buy: bool,
-        time_in_force: u8,
+        is_bid: bool,
+        time_in_force: TimeInForce,
         event_store: &mut EventStore,
         max_fills: Option<u64>,
         metadata: M,
         callbacks: &MarketClearinghouseCallbacks<M>
-    ): u64 {
+    ): OrderIdType {
         let taker_addr = signer::address_of(taker);
         let max_fills =
             if (max_fills.is_none()) { 1000 }
@@ -105,18 +123,32 @@ module aptos_experimental::market_test_utils {
                 max_fills.destroy_some()
             };
         // Taker order will be immediately match in the same transaction
-        market.place_order(
-            taker,
-            taker_price,
-            size,
-            is_buy, // is_buy
-            time_in_force, // order_type
-            option::none(), // trigger_condition
-            metadata,
-            max_fills,
-            true,
-            callbacks
-        );
+        if (taker_price.is_some()) {
+            market.place_limit_order(
+                taker,
+                taker_price.destroy_some(),
+                size,
+                is_bid, // is_bid
+                time_in_force, // order_type
+                option::none(), // trigger_condition
+                metadata,
+                client_order_id,
+                max_fills,
+                true,
+                callbacks
+            );
+        } else {
+            market.place_market_order(
+                taker,
+                size,
+                is_bid, // is_bid
+                metadata,
+                client_order_id,
+                max_fills,
+                true,
+                callbacks
+            );
+        };
 
         let events = latest_emitted_events(event_store, option::some(1));
         let order_place_event = events[0];
@@ -124,13 +156,14 @@ module aptos_experimental::market_test_utils {
         // Taker order is opened
         order_place_event.verify_order_event(
             order_id,
+            client_order_id,
             market.get_market(),
             taker_addr,
             size,
             size,
             size,
             taker_price,
-            is_buy,
+            is_bid,
             true,
             order_status_open()
         );
@@ -140,14 +173,15 @@ module aptos_experimental::market_test_utils {
     public fun place_taker_order_and_verify_fill<M: store + copy + drop>(
         market: &mut Market<M>,
         taker: &signer,
-        taker_price: u64,
+        limit_price: Option<u64>,
         size: u64,
-        is_buy: bool,
-        time_in_force: u8,
+        is_bid: bool,
+        time_in_force: TimeInForce,
         fill_sizes: vector<u64>,
         fill_prices: vector<u64>,
         maker_addr: address,
-        maker_order_ids: vector<u64>,
+        maker_order_ids: vector<OrderIdType>,
+        maker_client_order_ids: vector<Option<u64>>,
         maker_orig_sizes: vector<u64>,
         maker_remaining_sizes: vector<u64>,
         event_store: &mut EventStore,
@@ -155,14 +189,15 @@ module aptos_experimental::market_test_utils {
         max_fills: Option<u64>,
         metadata: M,
         callbacks: &MarketClearinghouseCallbacks<M>
-    ): u64 {
+    ): OrderIdType {
         let order_id =
             place_taker_order(
                 market,
                 taker,
-                taker_price,
+                option::none(), // client_order_id
+                limit_price,
                 size,
-                is_buy,
+                is_bid,
                 time_in_force,
                 event_store,
                 max_fills,
@@ -174,13 +209,15 @@ module aptos_experimental::market_test_utils {
             market,
             taker,
             order_id, // taker_order_id
-            taker_price,
+            option::none(), // taker_client_order_id
+            limit_price,
             size,
-            is_buy,
+            is_bid,
             fill_sizes,
             fill_prices,
             maker_addr,
             maker_order_ids,
+            maker_client_order_ids,
             maker_orig_sizes,
             maker_remaining_sizes,
             event_store,
@@ -194,12 +231,13 @@ module aptos_experimental::market_test_utils {
         market: &mut Market<M>,
         user: &signer,
         is_taker: bool,
-        order_id: u64,
-        price: u64,
+        order_id: OrderIdType,
+        client_order_id: Option<u64>,
+        price: Option<u64>,
         orig_size: u64,
         remaining_size: u64,
         size_delta: u64,
-        is_buy: bool,
+        is_bid: bool,
         event_store: &mut EventStore
     ) {
         let user_addr = signer::address_of(user);
@@ -208,13 +246,14 @@ module aptos_experimental::market_test_utils {
         let order_cancel_event = events[0];
         order_cancel_event.verify_order_event(
             order_id,
+            client_order_id,
             market.get_market(),
             user_addr,
             orig_size,
             remaining_size,
             size_delta,
             price, // price
-            is_buy,
+            is_bid,
             is_taker,
             order_status_cancelled()
         );
@@ -223,14 +262,16 @@ module aptos_experimental::market_test_utils {
     public fun verify_fills<M: store + copy + drop>(
         market: &mut Market<M>,
         taker: &signer,
-        taker_order_id: u64,
-        taker_price: u64,
+        taker_order_id: OrderIdType,
+        taker_client_order_id: Option<u64>,
+        taker_price: Option<u64>,
         size: u64,
-        is_buy: bool,
+        is_bid: bool,
         fill_sizes: vector<u64>,
         fill_prices: vector<u64>,
         maker_addr: address,
-        maker_order_ids: vector<u64>,
+        maker_order_ids: vector<OrderIdType>,
+        maker_client_order_ids: vector<Option<u64>>,
         maker_orig_sizes: vector<u64>,
         maker_remaining_sizes: vector<u64>,
         event_store: &mut EventStore,
@@ -261,17 +302,19 @@ module aptos_experimental::market_test_utils {
             let maker_remaining_size = maker_remaining_sizes[fill_index];
             taker_total_fill += fill_size;
             let maker_order_id = maker_order_ids[fill_index];
+            let maker_client_order_id = maker_client_order_ids[fill_index];
             // Taker order is filled
             let taker_order_fill_event = events[2 * fill_index];
             taker_order_fill_event.verify_order_event(
                 taker_order_id,
+                taker_client_order_id,
                 market.get_market(),
                 taker_addr,
                 size,
                 size - taker_total_fill,
                 fill_size,
-                fill_price,
-                is_buy,
+                option::some(fill_price),
+                is_bid,
                 true,
                 order_status_filled()
             );
@@ -279,13 +322,14 @@ module aptos_experimental::market_test_utils {
             let maker_order_fill_event = events[1 + 2 * fill_index];
             maker_order_fill_event.verify_order_event(
                 maker_order_id,
+                maker_client_order_id,
                 market.get_market(),
                 maker_addr,
                 maker_orig_size,
                 maker_remaining_size - fill_size,
                 fill_size,
-                fill_price,
-                !is_buy,
+                option::some(fill_price),
+                !is_bid,
                 false,
                 order_status_filled()
             );
@@ -296,13 +340,14 @@ module aptos_experimental::market_test_utils {
             let order_cancel_event = events[num_expected_events - 1];
             order_cancel_event.verify_order_event(
                 taker_order_id,
+                taker_client_order_id,
                 market.get_market(),
                 taker_addr,
                 size,
                 0, // Remaining size is always 0 when the order is cancelled
                 size - taker_total_fill,
                 taker_price,
-                is_buy,
+                is_bid,
                 true,
                 order_status_cancelled()
             )
@@ -311,13 +356,14 @@ module aptos_experimental::market_test_utils {
             let order_open_event = events[num_expected_events - 1];
             order_open_event.verify_order_event(
                 taker_order_id,
+                taker_client_order_id,
                 market.get_market(),
                 taker_addr,
                 size,
                 size - total_fill_size,
                 size,
                 taker_price,
-                is_buy,
+                is_bid,
                 false,
                 order_status_open()
             )
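The central change in these helpers is that a taker's price is now `Option<u64>`: `option::some(p)` caps fills at limit price `p`, while `option::none()` marks a market order that takes whatever the book offers. A minimal sketch of the convention, using a hypothetical helper name and module address that are not part of this diff:

module example::price_option {
    use std::option::{Self, Option};

    /// Illustrative only: how a caller of the updated test utils builds the price argument.
    public fun price_for(is_market_order: bool, limit: u64): Option<u64> {
        if (is_market_order) {
            option::none() // market order: no price bound
        } else {
            option::some(limit) // limit order: never fill at a worse price
        }
    }
}
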
diff --git a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_tests.move b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_tests.move
index 8e32b2907abcc..0255bfaca0a96 100644
--- a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_tests.move
+++ b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_tests.move
@@ -1,6 +1,7 @@
 #[test_only]
 module aptos_experimental::market_tests {
     use std::option;
+    use std::option::Option;
     use std::signer;
     use std::vector;
     use aptos_experimental::clearinghouse_test;
@@ -11,20 +12,20 @@ module aptos_experimental::market_tests {
         test_market_callbacks_with_taker_cancelled
     };
     use aptos_experimental::market_test_utils::{
-        place_maker_order_and_verify,
+        place_order_and_verify,
         place_taker_order_and_verify_fill,
         place_taker_order,
         verify_cancel_event,
         verify_fills
     };
     use aptos_experimental::event_utils;
-    use aptos_experimental::market::{
+    use aptos_experimental::market_types::{
         good_till_cancelled,
         post_only,
-        immediate_or_cancel,
-        new_market,
-        new_market_config
+        immediate_or_cancel
     };
+    use aptos_experimental::market::{new_market, new_market_config};
+    use aptos_experimental::order_book_types::OrderIdType;
 
     #[test(
         admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789
@@ -47,10 +48,10 @@ module aptos_experimental::market_tests {
 
         let event_store = event_utils::new_event_store();
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker,
-                1000,
+                option::some(1000),
                 2000000,
                 true,
                 good_till_cancelled(),
@@ -69,7 +70,7 @@ module aptos_experimental::market_tests {
             place_taker_order_and_verify_fill(
                 &mut market,
                 taker,
-                1000,
+                option::some(1000),
                 1000000,
                 false,
                 good_till_cancelled(),
@@ -77,6 +78,7 @@ module aptos_experimental::market_tests {
                 vector[1000],
                 maker_addr,
                 vector[maker_order_id],
+                vector[option::none()],
                 vector[2000000],
                 vector[2000000],
                 &mut event_store,
@@ -94,7 +96,7 @@ module aptos_experimental::market_tests {
             place_taker_order_and_verify_fill(
                 &mut market,
                 taker,
-                1000,
+                option::some(1000),
                 1000000,
                 false,
                 good_till_cancelled(),
@@ -102,6 +104,7 @@ module aptos_experimental::market_tests {
                 vector[1000],
                 maker_addr,
                 vector[maker_order_id],
+                vector[option::none()],
                 vector[2000000],
                 vector[1000000],
                 &mut event_store,
@@ -140,10 +143,10 @@ module aptos_experimental::market_tests {
 
         let event_store = event_utils::new_event_store();
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker,
-                1000,
+                option::some(1000),
                 1000000,
                 true,
                 good_till_cancelled(),
@@ -158,7 +161,7 @@ module aptos_experimental::market_tests {
             place_taker_order_and_verify_fill(
                 &mut market,
                 taker,
-                1000,
+                option::some(1000),
                 2000000,
                 false,
                 good_till_cancelled(),
@@ -166,6 +169,7 @@ module aptos_experimental::market_tests {
                 vector[1000],
                 maker_addr,
                 vector[maker_order_id],
+                vector[option::none()],
                 vector[1000000],
                 vector[1000000],
                 &mut event_store,
@@ -203,10 +207,10 @@ module aptos_experimental::market_tests {
         let event_store = event_utils::new_event_store();
 
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker1,
-                1000,
+                option::some(1000),
                 1000000,
                 true,
                 good_till_cancelled(),
@@ -219,12 +223,12 @@ module aptos_experimental::market_tests {
 
         // Place a post only order that should not match with the maker order
         let maker2_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker2,
-                1100,
+                option::some(1100),
                 1000000,
-                false, // is_buy
+                false, // is_bid
                 post_only(), // order_type
                 &mut event_store,
                 false,
@@ -238,14 +242,8 @@ module aptos_experimental::market_tests {
         assert!(get_position_size(maker2_addr) == 0);
 
         // Ensure the post only order was posted to the order book
-        assert!(
-            market.get_remaining_size(signer::address_of(maker1), maker_order_id)
-                == 1000000
-        );
-        assert!(
-            market.get_remaining_size(signer::address_of(maker2), maker2_order_id)
-                == 1000000
-        );
+        assert!(market.get_remaining_size(maker_order_id) == 1000000);
+        assert!(market.get_remaining_size(maker2_order_id) == 1000000);
 
         // Verify that the maker order is still active
         assert!(clearinghouse_test::order_exists(maker_order_id));
@@ -276,12 +274,12 @@ module aptos_experimental::market_tests {
         let taker_addr = signer::address_of(taker);
 
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker,
-                1000,
+                option::some(1000),
                 1000000,
-                true, // is_buy
+                true, // is_bid
                 good_till_cancelled(), // order_type
                 &mut event_store,
                 false,
@@ -292,12 +290,12 @@ module aptos_experimental::market_tests {
 
         // Taker order which is marked as post only but will immediately match - this should fail
         let taker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 taker,
-                1000,
+                option::some(1000),
                 1000000,
-                false, // is_buy
+                false, // is_bid
                 post_only(), // order_type
                 &mut event_store,
                 true,
@@ -311,9 +309,7 @@ module aptos_experimental::market_tests {
         assert!(get_position_size(taker_addr) == 0);
 
         // Ensure the post only order was not posted in the order book
-        assert!(
-            market.get_remaining_size(signer::address_of(taker), taker_order_id) == 0
-        );
+        assert!(market.get_remaining_size(taker_order_id) == 0);
         // Verify that the taker order is not active
         assert!(!clearinghouse_test::order_exists(taker_order_id));
         // The maker order should still be active
@@ -321,14 +317,12 @@ module aptos_experimental::market_tests {
         market.destroy_market()
     }
 
-    #[test(
-        admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789
-    )]
-    public fun test_ioc_full_match(
+    public fun test_order_full_match(
         admin: &signer,
         market_signer: &signer,
         maker: &signer,
-        taker: &signer
+        taker: &signer,
+        is_market_order: bool
     ) {
         // Setup accounts
         let market = new_market(
@@ -342,12 +336,12 @@ module aptos_experimental::market_tests {
         let taker_addr = signer::address_of(taker);
 
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker,
-                1000,
+                option::some(1000),
                 1000000,
-                true, // is_buy
+                true, // is_bid
                 good_till_cancelled(), // order_type
                 &mut event_store,
                 false,
@@ -357,18 +351,25 @@ module aptos_experimental::market_tests {
             );
 
         // Taker order will be immediately match in the same transaction
+        let limit_price =
+            if (is_market_order) {
+                option::none() // Market order has no price
+            } else {
+                option::some(1000) // Limit price for limit order
+            };
         let taker_order_id =
             place_taker_order_and_verify_fill(
                 &mut market,
                 taker,
-                1000,
+                limit_price,
                 1000000,
-                false, // is_buy
+                false, // is_bid
                 immediate_or_cancel(), // order_type
                 vector[1000000],
                 vector[1000],
                 maker_addr,
                 vector[maker_order_id],
+                vector[option::none()],
                 vector[1000000],
                 vector[1000000],
                 &mut event_store,
@@ -385,23 +386,47 @@ module aptos_experimental::market_tests {
         assert!(!clearinghouse_test::order_exists(maker_order_id));
         assert!(!clearinghouse_test::order_exists(taker_order_id));
 
-        assert!(
-            market.get_remaining_size(signer::address_of(taker), taker_order_id) == 0
-        );
-        assert!(
-            market.get_remaining_size(signer::address_of(maker), maker_order_id) == 0
-        );
+        assert!(market.get_remaining_size(taker_order_id) == 0);
+        assert!(market.get_remaining_size(maker_order_id) == 0);
         market.destroy_market()
     }
 
     #[test(
         admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789
     )]
-    public fun test_ioc_partial_match(
+    public fun test_ioc_full_match(
+        admin: &signer,
+        market_signer: &signer,
+        maker: &signer,
+        taker: &signer
+    ) {
+        test_order_full_match(admin, market_signer, maker, taker, false);
+    }
+
+    #[test(
+        admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789
+    )]
+    public fun test_market_order_full_match(
         admin: &signer,
         market_signer: &signer,
         maker: &signer,
         taker: &signer
+    ) {
+        test_order_full_match(
+            admin,
+            market_signer,
+            maker,
+            taker,
+            true // is_market_order
+        );
+    }
+
+    public fun test_order_partial_match(
+        admin: &signer,
+        market_signer: &signer,
+        maker: &signer,
+        taker: &signer,
+        is_market_order: bool
     ) {
         // Setup accounts
         let market = new_market(
@@ -415,12 +440,12 @@ module aptos_experimental::market_tests {
         let taker_addr = signer::address_of(taker);
 
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker,
-                1000,
+                option::some(1000),
                 1000000,
-                true, // is_buy
+                true, // is_bid
                 good_till_cancelled(), // order_type
                 &mut event_store,
                 false,
@@ -430,18 +455,25 @@ module aptos_experimental::market_tests {
             );
 
         // Taker order is IOC, which will partially match and remaining will be cancelled
+        let limit_price =
+            if (is_market_order) {
+                option::none() // Market order has no price
+            } else {
+                option::some(1000) // Limit price for limit order
+            };
         let taker_order_id =
             place_taker_order_and_verify_fill(
                 &mut market,
                 taker,
-                1000,
+                limit_price,
                 2000000,
-                false, // is_buy
+                false, // is_bid
                 immediate_or_cancel(), // order_type
                 vector[1000000],
                 vector[1000],
                 maker_addr,
                 vector[maker_order_id],
+                vector[option::none()],
                 vector[1000000],
                 vector[1000000],
                 &mut event_store,
@@ -458,23 +490,53 @@ module aptos_experimental::market_tests {
         assert!(!clearinghouse_test::order_exists(maker_order_id));
         assert!(!clearinghouse_test::order_exists(taker_order_id));
 
-        assert!(
-            market.get_remaining_size(signer::address_of(taker), taker_order_id) == 0
-        );
-        assert!(
-            market.get_remaining_size(signer::address_of(maker), maker_order_id) == 0
-        );
+        assert!(market.get_remaining_size(taker_order_id) == 0);
+        assert!(market.get_remaining_size(maker_order_id) == 0);
         market.destroy_market()
     }
 
     #[test(
         admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789
     )]
-    public fun test_ioc_no_match(
+    public fun test_ioc_partial_match(
         admin: &signer,
         market_signer: &signer,
         maker: &signer,
         taker: &signer
+    ) {
+        test_order_partial_match(
+            admin,
+            market_signer,
+            maker,
+            taker,
+            false // is_market_order
+        );
+    }
+
+    #[test(
+        admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789
+    )]
+    public fun test_market_order_partial_match(
+        admin: &signer,
+        market_signer: &signer,
+        maker: &signer,
+        taker: &signer
+    ) {
+        test_order_partial_match(
+            admin,
+            market_signer,
+            maker,
+            taker,
+            true // is_market_order
+        );
+    }
+
+    public fun test_order_no_match(
+        admin: &signer,
+        market_signer: &signer,
+        maker: &signer,
+        taker: &signer,
+        is_market_order: bool
     ) {
         // Setup accounts
         let market = new_market(
@@ -488,12 +550,12 @@ module aptos_experimental::market_tests {
         let taker_addr = signer::address_of(taker);
 
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker,
-                1000,
+                option::some(1000),
                 1000000, // 1 BTC
-                true, // is_buy
+                true, // is_bid
                 good_till_cancelled(), // order_type
                 &mut event_store,
                 false,
@@ -503,13 +565,19 @@ module aptos_experimental::market_tests {
             );
 
         // Taker order is IOC, which will not be matched and should be cancelled
+        let limit_price =
+            if (is_market_order) {
+                option::none() // Market order has no price
+            } else {
+                option::some(1200) // Limit price for limit order
+            };
         let taker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 taker,
-                1200,
+                limit_price,
                 1000000, // 1 BTC
-                false, // is_buy
+                false, // is_bid
                 immediate_or_cancel(), // order_type
                 &mut event_store,
                 false, // Despite it being a "taker", this order will not cross
@@ -526,13 +594,56 @@ module aptos_experimental::market_tests {
         assert!(!clearinghouse_test::order_exists(taker_order_id));
         // The maker order should still be active
         assert!(clearinghouse_test::order_exists(maker_order_id));
-        assert!(
-            market.get_remaining_size(signer::address_of(maker), maker_order_id)
-                == 1000000
+        assert!(market.get_remaining_size(maker_order_id) == 1000000);
+        assert!(market.get_remaining_size(taker_order_id) == 0);
+        market.destroy_market()
+    }
+
+    #[test(
+        admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789
+    )]
+    public fun test_ioc_no_match(
+        admin: &signer,
+        market_signer: &signer,
+        maker: &signer,
+        taker: &signer
+    ) {
+        test_order_no_match(
+            admin,
+            market_signer,
+            maker,
+            taker,
+            false // is_market_order
         );
-        assert!(
-            market.get_remaining_size(signer::address_of(taker), taker_order_id) == 0
+    }
+
+    #[test(admin = @0x1, market_signer = @0x123, taker = @0x789)]
+    public fun test_market_order_no_match(
+        admin: &signer, market_signer: &signer, taker: &signer
+    ) {
+        // Setup accounts
+        let market = new_market(
+            admin,
+            market_signer,
+            new_market_config(false, true)
         );
+        clearinghouse_test::initialize(admin);
+        let event_store = event_utils::new_event_store();
+
+        let _taker_order_id =
+            place_order_and_verify(
+                &mut market,
+                taker,
+                option::none(),
+                1000000, // 1 BTC
+                false, // is_bid
+                immediate_or_cancel(), // order_type
+                &mut event_store,
+                false, // Despite it being a "taker", this order will not cross
+                true,
+                new_test_order_metadata(),
+                &test_market_callbacks()
+            );
         market.destroy_market()
     }
 
@@ -558,12 +669,12 @@ module aptos_experimental::market_tests {
 
         // Place maker order
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker,
-                1000, // price
+                option::some(1000), // price
                 500000, // 0.5 BTC
-                true, // is_buy
+                true, // is_bid
                 good_till_cancelled(),
                 &mut event_store,
                 false,
@@ -577,14 +688,15 @@ module aptos_experimental::market_tests {
             place_taker_order_and_verify_fill(
                 &mut market,
                 taker,
-                1000,
+                option::some(1000),
                 1000000, // 1 BTC
-                false, // is_buy
+                false, // is_bid
                 good_till_cancelled(),
                 vector[500000], // 0.5 BTC
                 vector[1000],
                 maker_addr,
                 vector[maker_order_id],
+                vector[option::none()],
                 vector[500000],
                 vector[500000],
                 &mut event_store,
@@ -599,12 +711,12 @@ module aptos_experimental::market_tests {
         assert!(get_position_size(taker_addr) == 500000); // Short 0.5 BTC
 
         // Verify maker order fully filled
-        assert!(market.get_remaining_size(maker_addr, maker_order_id) == 0);
+        assert!(market.get_remaining_size(maker_order_id) == 0);
         assert!(!clearinghouse_test::order_exists(maker_order_id));
 
         // Taker order partially filled
         assert!(
-            market.get_remaining_size(taker_addr, taker_order_id) == 500000 // 0.5 BTC remaining
+            market.get_remaining_size(taker_order_id) == 500000 // 0.5 BTC remaining
         );
         assert!(clearinghouse_test::order_exists(taker_order_id));
 
@@ -633,16 +745,17 @@ module aptos_experimental::market_tests {
 
         // Place several maker order with small sizes.
         let i = 1;
-        let maker_order_ids = vector::empty();
+        let maker_order_ids = vector::empty<OrderIdType>();
         let expected_fill_sizes = vector::empty();
         let fill_prices = vector::empty();
         let maker_orig_sizes = vector::empty();
+        let maker_client_order_ids = vector::empty<Option<u64>>();
         while (i < 6) {
             let maker_order_id =
-                place_maker_order_and_verify(
+                place_order_and_verify(
                     &mut market,
                     maker,
-                    1000 - i,
+                    option::some(1000 - i),
                     10000 * i,
                     true,
                     good_till_cancelled(),
@@ -653,6 +766,7 @@ module aptos_experimental::market_tests {
                     &test_market_callbacks()
                 );
             maker_order_ids.push_back(maker_order_id);
+            maker_client_order_ids.push_back(option::none());
             expected_fill_sizes.push_back(10000 * i);
             maker_orig_sizes.push_back(10000 * i);
             fill_prices.push_back(1000 - i);
@@ -667,7 +781,7 @@ module aptos_experimental::market_tests {
             place_taker_order_and_verify_fill(
                 &mut market,
                 taker,
-                990,
+                option::some(990),
                 1000000,
                 false,
                 good_till_cancelled(),
@@ -675,6 +789,7 @@ module aptos_experimental::market_tests {
                 fill_prices,
                 maker_addr,
                 maker_order_ids,
+                maker_client_order_ids,
                 maker_orig_sizes,
                 maker_orig_sizes,
                 &mut event_store,
@@ -716,10 +831,10 @@ module aptos_experimental::market_tests {
 
         let event_store = event_utils::new_event_store();
         let maker_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker,
-                1000,
+                option::some(1000),
                 2000000,
                 true,
                 good_till_cancelled(),
@@ -738,7 +853,7 @@ module aptos_experimental::market_tests {
             place_taker_order_and_verify_fill(
                 &mut market,
                 taker,
-                1000,
+                option::some(1000),
                 1000000,
                 false,
                 good_till_cancelled(),
@@ -746,6 +861,7 @@ module aptos_experimental::market_tests {
                 vector[1000],
                 maker_addr,
                 vector[maker_order_id],
+                vector[option::none()],
                 vector[2000000],
                 vector[2000000],
                 &mut event_store,
@@ -755,7 +871,7 @@ module aptos_experimental::market_tests {
                 &test_market_callbacks_with_taker_cancelled()
             );
         // Make sure the maker order is reinserted
-        assert!(market.get_remaining_size(maker_addr, maker_order_id) == 1500000);
+        assert!(market.get_remaining_size(maker_order_id) == 1500000);
         assert!(clearinghouse_test::order_exists(maker_order_id));
         assert!(!clearinghouse_test::order_exists(taker_order_id));
         market.destroy_market()
@@ -781,10 +897,10 @@ module aptos_experimental::market_tests {
         let maker2_addr = signer::address_of(maker2);
         let event_store = event_utils::new_event_store();
         let maker1_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker1,
-                1001,
+                option::some(1001),
                 2000000,
                 true,
                 good_till_cancelled(),
@@ -796,10 +912,10 @@ module aptos_experimental::market_tests {
             );
 
         let maker2_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker2,
-                1000,
+                option::some(1000),
                 2000000,
                 true,
                 good_till_cancelled(),
@@ -818,7 +934,8 @@ module aptos_experimental::market_tests {
             place_taker_order(
                 &mut market,
                 maker1,
-                1000,
+                option::none(),
+                option::some(1000),
                 1000000,
                 false,
                 good_till_cancelled(),
@@ -833,7 +950,8 @@ module aptos_experimental::market_tests {
             maker1,
             false,
             maker1_order_id,
-            1001,
+            option::none(),
+            option::some(1001),
             2000000,
             0,
             2000000,
@@ -845,13 +963,15 @@ module aptos_experimental::market_tests {
             &mut market,
             maker1,
             taker_order_id,
-            1000,
+            option::none(),
+            option::some(1000),
             1000000,
             false,
             vector[1000000],
             vector[1000],
             maker2_addr,
             vector[maker2_order_id],
+            vector[option::none()],
             vector[2000000],
             vector[2000000],
             &mut event_store,
@@ -882,10 +1002,10 @@ module aptos_experimental::market_tests {
         let maker1_addr = signer::address_of(maker1);
         let event_store = event_utils::new_event_store();
         let maker1_order_id =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker1,
-                1001,
+                option::some(1001),
                 2000000,
                 true,
                 good_till_cancelled(),
@@ -897,10 +1017,10 @@ module aptos_experimental::market_tests {
             );
 
         let _ =
-            place_maker_order_and_verify(
+            place_order_and_verify(
                 &mut market,
                 maker2,
-                1000,
+                option::some(1000),
                 2000000,
                 true,
                 good_till_cancelled(),
@@ -919,7 +1039,8 @@ module aptos_experimental::market_tests {
             place_taker_order(
                 &mut market,
                 maker1,
-                1000,
+                option::some(1),
+                option::some(1000),
                 1000000,
                 false,
                 good_till_cancelled(),
@@ -933,13 +1054,15 @@ module aptos_experimental::market_tests {
             &mut market,
             maker1,
             taker_order_id,
-            1001,
+            option::some(1),
+            option::some(1001),
             1000000,
             false,
             vector[1000000],
             vector[1001],
             maker1_addr,
             vector[maker1_order_id],
+            vector[option::none()],
             vector[2000000],
             vector[2000000],
             &mut event_store,
@@ -947,4 +1070,90 @@ module aptos_experimental::market_tests {
         );
         market.destroy_market()
     }
+
+    #[test(
+        admin = @0x1, market_signer = @0x123, maker1 = @0x456, maker2 = @0x789
+    )]
+    public fun test_self_matching_not_allowed_no_match(
+        admin: &signer,
+        market_signer: &signer,
+        maker1: &signer,
+        maker2: &signer
+    ) {
+        // Setup accounts
+        let market = new_market(
+            admin,
+            market_signer,
+            new_market_config(false, true)
+        );
+        clearinghouse_test::initialize(admin);
+        let maker1_addr = signer::address_of(maker1);
+        let maker2_addr = signer::address_of(maker2);
+        let event_store = event_utils::new_event_store();
+        let maker1_order_id =
+            place_order_and_verify(
+                &mut market,
+                maker1,
+                option::some(1001),
+                2000000,
+                true,
+                good_till_cancelled(),
+                &mut event_store,
+                false,
+                false,
+                new_test_order_metadata(),
+                &test_market_callbacks()
+            );
+
+        let _ =
+            place_order_and_verify(
+                &mut market,
+                maker2,
+                option::some(1000),
+                2000000,
+                true,
+                good_till_cancelled(),
+                &mut event_store,
+                false,
+                false,
+                new_test_order_metadata(),
+                &test_market_callbacks()
+            );
+
+        // Order not filled yet, so size is 0
+        assert!(get_position_size(maker1_addr) == 0);
+
+        // This should result in a self match order which should be cancelled and the taker order should not match
+        place_taker_order(
+            &mut market,
+            maker1,
+            option::none(),
+            option::some(1001),
+            1000000,
+            false,
+            good_till_cancelled(),
+            &mut event_store,
+            option::none(),
+            new_test_order_metadata(),
+            &test_market_callbacks()
+        );
+
+        verify_cancel_event(
+            &mut market,
+            maker1,
+            false,
+            maker1_order_id,
+            option::none(),
+            option::some(1001),
+            2000000,
+            0,
+            2000000,
+            true,
+            &mut event_store
+        );
+
+        assert!(get_position_size(maker1_addr) == 0);
+        assert!(get_position_size(maker2_addr) == 0);
+        market.destroy_market()
+    }
 }
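Alongside the `OrderIdType` ids assigned by the market, every order now carries an optional caller-chosen client order id that is echoed back unchanged in fill and cancel events (`option::none()` in most tests above, `option::some(1)` for the self-match taker). A small illustrative sketch, with hypothetical names:

module example::client_ids {
    use std::option::{Self, Option};

    /// Illustrative only: the market treats the client id as an opaque payload
    /// and reports it back verbatim in every order event.
    public fun echo_client_id(client_order_id: Option<u64>): Option<u64> {
        client_order_id
    }

    #[test]
    fun round_trips() {
        assert!(echo_client_id(option::none()) == option::none(), 0);
        assert!(echo_client_id(option::some(1)) == option::some(1), 1);
    }
}
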
diff --git a/aptos-move/framework/aptos-framework/doc/account.md b/aptos-move/framework/aptos-framework/doc/account.md
index e3bb969dae14f..126431165d300 100644
--- a/aptos-move/framework/aptos-framework/doc/account.md
+++ b/aptos-move/framework/aptos-framework/doc/account.md
@@ -846,6 +846,7 @@ Scheme identifier for Ed25519 signatures used to derive authentication keys for
 
 
 
+This account has exceeded the allocated GUIDs it can create. It should be impossible to reach this number for real applications.
 
 
 
const EEXCEEDED_MAX_GUID_CREATION_NUM: u64 = 20;
@@ -965,6 +966,7 @@ The caller does not have a digital-signature-based capability to call this funct
 
 
 
+The signer capability is not offered to any address
 
 
 
const ENO_SIGNER_CAPABILITY_OFFERED: u64 = 19;
@@ -1042,6 +1044,16 @@ Sequence number exceeds the maximum value for a u64
 
 
 
+
+The set_originating_address entry function is disabled due to potential poisoning from account abstraction.
+
+const ESET_ORIGINATING_ADDRESS_DISABLED: u64 = 27;
+

 Specified scheme is not recognized. Should be ED25519_SCHEME(0), MULTI_ED25519_SCHEME(1), SINGLE_KEY_SCHEME(2), or MULTI_KEY_SCHEME(3).

@@ -2191,6 +2203,8 @@ authority of the new authentication key.
entry fun set_originating_address(account: &signer) acquires Account, OriginatingAddress {
+    abort error::invalid_state(ESET_ORIGINATING_ADDRESS_DISABLED);
+
     let account_addr = signer::address_of(account);
     assert!(exists<Account>(account_addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
     let auth_key_as_address =
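The unconditional `abort` as the first statement is a lightweight way to decommission an entry function without changing its signature: callers keep a stable ABI, and everything after the abort becomes unreachable. A minimal sketch of the pattern, with a hypothetical function name and error code:

module example::deprecation {
    use std::error;

    const EDEPRECATED: u64 = 1;

    /// Hypothetical entry point disabled in place: every call aborts before
    /// any state is read or written.
    entry fun legacy_operation(_account: &signer) {
        abort error::invalid_state(EDEPRECATED)
    }
}
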
diff --git a/aptos-move/framework/aptos-framework/doc/big_ordered_map.md b/aptos-move/framework/aptos-framework/doc/big_ordered_map.md
index b5d0b75b5a487..115dc5eb5ddf7 100644
--- a/aptos-move/framework/aptos-framework/doc/big_ordered_map.md
+++ b/aptos-move/framework/aptos-framework/doc/big_ordered_map.md
@@ -101,6 +101,43 @@ allowing cleaner iterator APIs.
 -  [Function `update_key`](#0x1_big_ordered_map_update_key)
 -  [Function `remove_at`](#0x1_big_ordered_map_remove_at)
 -  [Specification](#@Specification_1)
+    -  [Enum `BigOrderedMap`](#@Specification_1_BigOrderedMap)
+    -  [Function `new`](#@Specification_1_new)
+    -  [Function `new_with_reusable`](#@Specification_1_new_with_reusable)
+    -  [Function `new_with_type_size_hints`](#@Specification_1_new_with_type_size_hints)
+    -  [Function `new_with_config`](#@Specification_1_new_with_config)
+    -  [Function `new_from`](#@Specification_1_new_from)
+    -  [Function `destroy_empty`](#@Specification_1_destroy_empty)
+    -  [Function `allocate_spare_slots`](#@Specification_1_allocate_spare_slots)
+    -  [Function `is_empty`](#@Specification_1_is_empty)
+    -  [Function `add`](#@Specification_1_add)
+    -  [Function `upsert`](#@Specification_1_upsert)
+    -  [Function `remove`](#@Specification_1_remove)
+    -  [Function `add_all`](#@Specification_1_add_all)
+    -  [Function `pop_front`](#@Specification_1_pop_front)
+    -  [Function `pop_back`](#@Specification_1_pop_back)
+    -  [Function `lower_bound`](#@Specification_1_lower_bound)
+    -  [Function `find`](#@Specification_1_find)
+    -  [Function `contains`](#@Specification_1_contains)
+    -  [Function `borrow`](#@Specification_1_borrow)
+    -  [Function `borrow_mut`](#@Specification_1_borrow_mut)
+    -  [Function `borrow_front`](#@Specification_1_borrow_front)
+    -  [Function `borrow_back`](#@Specification_1_borrow_back)
+    -  [Function `prev_key`](#@Specification_1_prev_key)
+    -  [Function `next_key`](#@Specification_1_next_key)
+    -  [Function `keys`](#@Specification_1_keys)
+    -  [Function `new_begin_iter`](#@Specification_1_new_begin_iter)
+    -  [Function `new_end_iter`](#@Specification_1_new_end_iter)
+    -  [Function `iter_is_begin`](#@Specification_1_iter_is_begin)
+    -  [Function `iter_is_end`](#@Specification_1_iter_is_end)
+    -  [Function `iter_borrow_key`](#@Specification_1_iter_borrow_key)
+    -  [Function `iter_borrow`](#@Specification_1_iter_borrow)
+    -  [Function `iter_borrow_mut`](#@Specification_1_iter_borrow_mut)
+    -  [Function `iter_next`](#@Specification_1_iter_next)
+    -  [Function `iter_prev`](#@Specification_1_iter_prev)
+    -  [Function `validate_dynamic_size_and_init_max_degrees`](#@Specification_1_validate_dynamic_size_and_init_max_degrees)
+    -  [Function `validate_static_size_and_init_max_degrees`](#@Specification_1_validate_static_size_and_init_max_degrees)
+    -  [Function `validate_size_and_init_max_degrees`](#@Specification_1_validate_size_and_init_max_degrees)
     -  [Function `add_at`](#@Specification_1_add_at)
     -  [Function `remove_at`](#@Specification_1_remove_at)
 
@@ -3052,6 +3089,794 @@ Given a path to node (excluding the node itself), which is currently stored unde
 
 
 
+
+### Enum `BigOrderedMap`
+
+enum BigOrderedMap<K: store, V: store> has store
+
+Variant `BPlusTreeMap`, with fields:
+
+- root: big_ordered_map::Node<K, V>
+  Root node. It is stored directly in the resource itself, unlike all other nodes.
+- nodes: storage_slots_allocator::StorageSlotsAllocator<big_ordered_map::Node<K, V>>
+  Storage of all non-root nodes. They are stored in separate storage slots.
+- min_leaf_index: u64
+  The node index of the leftmost node.
+- max_leaf_index: u64
+  The node index of the rightmost node.
+- constant_kv_size: bool
+  Whether Key and Value have constant serialized size, and if so, optimize out size checks on every insert.
+- inner_max_degree: u16
+  The max number of children an inner node can have.
+- leaf_max_degree: u16
+  The max number of children a leaf node can have.
+
+pragma intrinsic = map,
+    map_new = new,
+    map_destroy_empty = destroy_empty,
+    map_has_key = contains,
+    map_add_no_override = add,
+    map_borrow = borrow,
+    map_borrow_mut = borrow_mut,
+    map_spec_get = spec_get,
+    map_spec_set = spec_set,
+    map_spec_del = spec_remove,
+    map_spec_len = spec_len,
+    map_spec_has_key = spec_contains_key,
+    map_is_empty = is_empty;
+
+native fun spec_len<K, V>(t: BigOrderedMap<K, V>): num;
+
+native fun spec_contains_key<K, V>(t: BigOrderedMap<K, V>, k: K): bool;
+
+native fun spec_set<K, V>(t: BigOrderedMap<K, V>, k: K, v: V): BigOrderedMap<K, V>;
+
+native fun spec_remove<K, V>(t: BigOrderedMap<K, V>, k: K): BigOrderedMap<K, V>;
+
+native fun spec_get<K, V>(t: BigOrderedMap<K, V>, k: K): V;
+
+### Function `new`
+
+public fun new<K: store, V: store>(): big_ordered_map::BigOrderedMap<K, V>
+
+pragma intrinsic;
+
+### Function `new_with_reusable`
+
+public fun new_with_reusable<K: store, V: store>(): big_ordered_map::BigOrderedMap<K, V>
+
+pragma verify = false;
+pragma opaque;
+
+### Function `new_with_type_size_hints`
+
+public fun new_with_type_size_hints<K: store, V: store>(avg_key_bytes: u64, max_key_bytes: u64, avg_value_bytes: u64, max_value_bytes: u64): big_ordered_map::BigOrderedMap<K, V>
+
+pragma verify = false;
+pragma opaque;
+
+### Function `new_with_config`
+
+public fun new_with_config<K: store, V: store>(inner_max_degree: u16, leaf_max_degree: u16, reuse_slots: bool): big_ordered_map::BigOrderedMap<K, V>
+
+pragma verify = false;
+pragma opaque;
+
+### Function `new_from`
+
+public fun new_from<K: copy, drop, store, V: store>(keys: vector<K>, values: vector<V>): big_ordered_map::BigOrderedMap<K, V>
+
+pragma opaque;
+pragma verify = false;
+aborts_if [abstract] exists i in 0..len(keys), j in 0..len(keys) where i != j : keys[i] == keys[j];
+aborts_if [abstract] len(keys) != len(values);
+ensures [abstract] forall k: K {spec_contains_key(result, k)} : vector::spec_contains(keys,k) <==> spec_contains_key(result, k);
+ensures [abstract] forall i in 0..len(keys) : spec_get(result, keys[i]) == values[i];
+ensures [abstract] spec_len(result) == len(keys);
+
+### Function `destroy_empty`
+
+public fun destroy_empty<K: store, V: store>(self: big_ordered_map::BigOrderedMap<K, V>)
+
+pragma intrinsic;
+
+### Function `allocate_spare_slots`
+
+public fun allocate_spare_slots<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, num_to_allocate: u64)
+
+pragma verify = false;
+pragma opaque;
+
+### Function `is_empty`
+
+public fun is_empty<K: store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): bool
+
+pragma intrinsic;
+
+### Function `add`
+
+public fun add<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: K, value: V)
+
+pragma intrinsic;
+
+### Function `upsert`
+
+public fun upsert<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: K, value: V): option::Option<V>
+
+pragma opaque;
+pragma verify = false;
+ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result);
+ensures [abstract] spec_contains_key(self, key);
+ensures [abstract] spec_get(self, key) == value;
+ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result)) && (option::spec_borrow(result) == spec_get(old(self), key)));
+ensures [abstract] !spec_contains_key(old(self), key) ==> spec_len(old(self)) + 1 == spec_len(self);
+ensures [abstract] spec_contains_key(old(self), key) ==> spec_len(old(self)) == spec_len(self);
+ensures [abstract] forall k: K: spec_contains_key(old(self), k) && k != key ==> spec_get(old(self), k) == spec_get(self, k);
+ensures [abstract] forall k: K: spec_contains_key(old(self), k) ==> spec_contains_key(self, k);
+
+### Function `remove`
+
+public fun remove<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: &K): V
+
+pragma opaque;
+pragma verify = false;
+aborts_if [abstract] !spec_contains_key(self, key);
+ensures [abstract] !spec_contains_key(self, key);
+ensures [abstract] spec_get(old(self), key) == result;
+ensures [abstract] spec_len(old(self)) == spec_len(self) + 1;
+ensures [abstract] forall k: K where k != key: spec_contains_key(self, k) ==> spec_get(self, k) == spec_get(old(self), k);
+ensures [abstract] forall k: K where k != key: spec_contains_key(old(self), k) == spec_contains_key(self, k);
+
+### Function `add_all`
+
+public fun add_all<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, keys: vector<K>, values: vector<V>)
+
+pragma opaque;
+pragma verify = false;
+
+### Function `pop_front`
+
+public fun pop_front<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>): (K, V)
+
+pragma opaque;
+pragma verify = false;
+
+### Function `pop_back`
+
+public fun pop_back<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>): (K, V)
+
+pragma opaque;
+pragma verify = false;
+
+### Function `lower_bound`
+
+public(friend) fun lower_bound<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): big_ordered_map::IteratorPtr<K>
+
+pragma opaque;
+pragma verify = false;
+
+### Function `find`
+
+public(friend) fun find<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): big_ordered_map::IteratorPtr<K>
+
+pragma opaque;
+pragma verify = false;
+
+### Function `contains`
+
+public fun contains<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): bool
+
+pragma intrinsic;
+
+### Function `borrow`
+
+public fun borrow<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): &V
+
+pragma intrinsic;
+
+### Function `borrow_mut`
+
+public fun borrow_mut<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: &K): &mut V
+
+pragma intrinsic;
+
+### Function `borrow_front`
+
+public fun borrow_front<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): (K, &V)
+
+pragma opaque;
+pragma verify = false;
+ensures [abstract] spec_contains_key(self, result_1);
+ensures [abstract] spec_get(self, result_1) == result_2;
+ensures [abstract] forall k: K where k != result_1: spec_contains_key(self, k) ==> std::cmp::compare(result_1, k) == std::cmp::Ordering::Less;
+
+### Function `borrow_back`
+
+public fun borrow_back<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): (K, &V)
+
+pragma opaque;
+pragma verify = false;
+ensures [abstract] spec_contains_key(self, result_1);
+ensures [abstract] spec_get(self, result_1) == result_2;
+ensures [abstract] forall k: K where k != result_1: spec_contains_key(self, k) ==> std::cmp::compare(result_1, k) == std::cmp::Ordering::Greater;
+
+### Function `prev_key`
+
+public fun prev_key<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): option::Option<K>
+
+pragma opaque;
+pragma verify = false;
+ensures [abstract] result == std::option::spec_none() <==>
+    (forall k: K {spec_contains_key(self, k)} where spec_contains_key(self, k) && k != key: std::cmp::compare(key, k) == std::cmp::Ordering::Less);
+ensures [abstract] result.is_some() <==>
+    spec_contains_key(self, option::spec_borrow(result)) &&
+    (std::cmp::compare(option::spec_borrow(result), key) == std::cmp::Ordering::Less)
+    && (forall k: K {spec_contains_key(self, k), std::cmp::compare(option::spec_borrow(result), k), std::cmp::compare(key, k)} where k != option::spec_borrow(result): ((spec_contains_key(self, k) && std::cmp::compare(k, key) == std::cmp::Ordering::Less)) ==>
+    std::cmp::compare(option::spec_borrow(result), k) == std::cmp::Ordering::Greater);
+
+### Function `next_key`
+
+public fun next_key<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): option::Option<K>
+
+pragma opaque;
+pragma verify = false;
+ensures [abstract] result == std::option::spec_none() <==>
+    (forall k: K {spec_contains_key(self, k)} where spec_contains_key(self, k) && k != key: std::cmp::compare(key, k) == std::cmp::Ordering::Greater);
+ensures [abstract] result.is_some() <==>
+    spec_contains_key(self, option::spec_borrow(result)) &&
+    (std::cmp::compare(option::spec_borrow(result), key) == std::cmp::Ordering::Greater)
+    && (forall k: K {spec_contains_key(self, k)} where k != option::spec_borrow(result): ((spec_contains_key(self, k) && std::cmp::compare(k, key) == std::cmp::Ordering::Greater)) ==>
+    std::cmp::compare(option::spec_borrow(result), k) == std::cmp::Ordering::Less);
+
+### Function `keys`
+
+public fun keys<K: copy, drop, store, V: copy, store>(self: &big_ordered_map::BigOrderedMap<K, V>): vector<K>
+
+pragma verify = false;
+pragma opaque;
+ensures [abstract] forall k: K: vector::spec_contains(result, k) <==> spec_contains_key(self, k);
+
+### Function `new_begin_iter`
+
+public(friend) fun new_begin_iter<K: copy, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): big_ordered_map::IteratorPtr<K>
+
+pragma opaque;
+pragma verify = false;
+
+### Function `new_end_iter`
+
+public(friend) fun new_end_iter<K: copy, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): big_ordered_map::IteratorPtr<K>
+
+pragma opaque;
+pragma verify = false;
+
+### Function `iter_is_begin`
+
+public(friend) fun iter_is_begin<K: store, V: store>(self: &big_ordered_map::IteratorPtr<K>, map: &big_ordered_map::BigOrderedMap<K, V>): bool
+
+pragma opaque;
+pragma verify = false;
+
+### Function `iter_is_end`
+
+public(friend) fun iter_is_end<K: store, V: store>(self: &big_ordered_map::IteratorPtr<K>, _map: &big_ordered_map::BigOrderedMap<K, V>): bool
+
+pragma opaque;
+pragma verify = false;
+
+### Function `iter_borrow_key`
+
+public(friend) fun iter_borrow_key<K>(self: &big_ordered_map::IteratorPtr<K>): &K
+
+pragma opaque;
+pragma verify = false;
+
+### Function `iter_borrow`
+
+public(friend) fun iter_borrow<K: drop, store, V: store>(self: big_ordered_map::IteratorPtr<K>, map: &big_ordered_map::BigOrderedMap<K, V>): &V
+
+pragma opaque;
+pragma verify = false;
+
+### Function `iter_borrow_mut`
+
+public(friend) fun iter_borrow_mut<K: drop, store, V: store>(self: big_ordered_map::IteratorPtr<K>, map: &mut big_ordered_map::BigOrderedMap<K, V>): &mut V
+
+pragma opaque;
+pragma verify = false;
+
+### Function `iter_next`
+
+public(friend) fun iter_next<K: copy, drop, store, V: store>(self: big_ordered_map::IteratorPtr<K>, map: &big_ordered_map::BigOrderedMap<K, V>): big_ordered_map::IteratorPtr<K>
+
+pragma opaque;
+pragma verify = false;
+
+### Function `iter_prev`
+
+public(friend) fun iter_prev<K: copy, drop, store, V: store>(self: big_ordered_map::IteratorPtr<K>, map: &big_ordered_map::BigOrderedMap<K, V>): big_ordered_map::IteratorPtr<K>
+
+pragma opaque;
+pragma verify = false;
+
+### Function `validate_dynamic_size_and_init_max_degrees`
+
+fun validate_dynamic_size_and_init_max_degrees<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: &K, value: &V)
+
+pragma verify = false;
+pragma opaque;
+
+### Function `validate_static_size_and_init_max_degrees`
+
+fun validate_static_size_and_init_max_degrees<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>)
+
+pragma verify = false;
+pragma opaque;
+
+### Function `validate_size_and_init_max_degrees`
+
+fun validate_size_and_init_max_degrees<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key_size: u64, value_size: u64)
+
+pragma verify = false;
+pragma opaque;
+
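Most of these specs are `opaque` with explicit `ensures`, so the prover reasons about a `BigOrderedMap` only through `spec_len`, `spec_contains_key`, and `spec_get`. For intuition, a small runtime sketch of the behavior the `upsert` postconditions describe (module address and import path assumed):

#[test_only]
module example::big_map_usage {
    use std::option;
    use aptos_std::big_ordered_map;

    #[test]
    fun upsert_matches_spec() {
        let m = big_ordered_map::new<u64, u64>();
        let key = 1;
        // Fresh key: result is none and the map grows by one entry.
        let first = m.upsert(key, 10);
        assert!(option::is_none(&first), 0);
        // Existing key: result carries the old value and the size is unchanged.
        let second = m.upsert(key, 20);
        assert!(second == option::some(10), 1);
        assert!(*m.borrow(&key) == 20, 2);
        // BigOrderedMap has no drop ability, so empty it before destroying it.
        assert!(m.remove(&key) == 20, 3);
        m.destroy_empty();
    }
}
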
 ### Function `add_at`

diff --git a/aptos-move/framework/aptos-framework/doc/block.md b/aptos-move/framework/aptos-framework/doc/block.md
index ebb17f5d99997..ce82b3f28d036 100644
--- a/aptos-move/framework/aptos-framework/doc/block.md
+++ b/aptos-move/framework/aptos-framework/doc/block.md
@@ -20,6 +20,7 @@ This module defines a struct storing the metadata of the block and new block eve
 - [Function `block_prologue_common`](#0x1_block_block_prologue_common)
 - [Function `block_prologue`](#0x1_block_block_prologue)
 - [Function `block_prologue_ext`](#0x1_block_block_prologue_ext)
+- [Function `block_epilogue`](#0x1_block_block_epilogue)
 - [Function `get_current_block_height`](#0x1_block_get_current_block_height)
 - [Function `emit_new_block_event`](#0x1_block_emit_new_block_event)
 - [Function `emit_genesis_block_event`](#0x1_block_emit_genesis_block_event)
@@ -704,6 +705,34 @@ The runtime always runs this before executing the transactions in a block.
+
+## Function `block_epilogue`
+
+fun block_epilogue(vm: &signer, fee_distribution_validator_indices: vector<u64>, fee_amounts_octa: vector<u64>)
+
+Implementation:
+
+fun block_epilogue(
+    vm: &signer,
+    fee_distribution_validator_indices: vector<u64>,
+    fee_amounts_octa: vector<u64>,
+) {
+    stake::record_fee(vm, fee_distribution_validator_indices, fee_amounts_octa);
+}
+
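The two vectors are parallel: entry `i` of `fee_distribution_validator_indices` names the validator that receives entry `i` of `fee_amounts_octa`. A sketch with invented values, assuming a VM signer is available and that `stake::record_fee` is visible to the caller (in the framework it is restricted to friend modules such as `block`):

#[test_only]
module example::epilogue_input {
    use aptos_framework::stake;

    // Invented input: validator 0 earns 1_000 octas and validator 3 earns 2_500.
    public fun example(vm: &signer) {
        let fee_distribution_validator_indices = vector[0u64, 3u64];
        let fee_amounts_octa = vector[1_000u64, 2_500u64];
        // block_epilogue forwards both vectors to stake verbatim.
        stake::record_fee(vm, fee_distribution_validator_indices, fee_amounts_octa);
    }
}
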
diff --git a/aptos-move/framework/aptos-framework/doc/fungible_asset.md b/aptos-move/framework/aptos-framework/doc/fungible_asset.md index f43390ecf8dac..802572968b44f 100644 --- a/aptos-move/framework/aptos-framework/doc/fungible_asset.md +++ b/aptos-move/framework/aptos-framework/doc/fungible_asset.md @@ -4271,8 +4271,13 @@ Decrease the supply of a fungible asset by burning. ) acquires FungibleStore { assert!(object::owns(store, signer::address_of(owner)), error::permission_denied(ENOT_STORE_OWNER)); assert!(!is_frozen(store), error::invalid_argument(ESTORE_IS_FROZEN)); + let fungible_store_address = object::object_address(&store); + // be graceful if ConcurrentFungibleBalance already exists, but flag is off + if (exists<ConcurrentFungibleBalance>(fungible_store_address)) { + return + }; assert!(allow_upgrade_to_concurrent_fungible_balance(), error::invalid_argument(ECONCURRENT_BALANCE_NOT_ENABLED)); - ensure_store_upgraded_to_concurrent_internal(object::object_address(&store)); + ensure_store_upgraded_to_concurrent_internal(fungible_store_address); }
diff --git a/aptos-move/framework/aptos-framework/doc/genesis.md b/aptos-move/framework/aptos-framework/doc/genesis.md
index 9245805fe983a..f7aeb1d34f848 100644
--- a/aptos-move/framework/aptos-framework/doc/genesis.md
+++ b/aptos-move/framework/aptos-framework/doc/genesis.md
@@ -341,6 +341,7 @@ Genesis step 1: Initialize aptos framework account and core modules on chain.
     execution_config::set(&aptos_framework_account, execution_config);
     version::initialize(&aptos_framework_account, initial_version);
     stake::initialize(&aptos_framework_account);
+    stake::initialize_pending_transaction_fee(&aptos_framework_account);
     timestamp::set_time_has_started(&aptos_framework_account);
     staking_config::initialize(
         &aptos_framework_account,
diff --git a/aptos-move/framework/aptos-framework/doc/ordered_map.md b/aptos-move/framework/aptos-framework/doc/ordered_map.md
index 78995bdbb8acc..7ec6cdae2985f 100644
--- a/aptos-move/framework/aptos-framework/doc/ordered_map.md
+++ b/aptos-move/framework/aptos-framework/doc/ordered_map.md
@@ -82,6 +82,50 @@ allowing cleaner iterator APIs.
 -  [Function `new_iter`](#0x1_ordered_map_new_iter)
 -  [Function `binary_search`](#0x1_ordered_map_binary_search)
 -  [Specification](#@Specification_1)
+    -  [Enum `OrderedMap`](#@Specification_1_OrderedMap)
+    -  [Function `new`](#@Specification_1_new)
+    -  [Function `new_from`](#@Specification_1_new_from)
+    -  [Function `length`](#@Specification_1_length)
+    -  [Function `is_empty`](#@Specification_1_is_empty)
+    -  [Function `add`](#@Specification_1_add)
+    -  [Function `upsert`](#@Specification_1_upsert)
+    -  [Function `remove`](#@Specification_1_remove)
+    -  [Function `contains`](#@Specification_1_contains)
+    -  [Function `borrow`](#@Specification_1_borrow)
+    -  [Function `borrow_mut`](#@Specification_1_borrow_mut)
+    -  [Function `replace_key_inplace`](#@Specification_1_replace_key_inplace)
+    -  [Function `add_all`](#@Specification_1_add_all)
+    -  [Function `upsert_all`](#@Specification_1_upsert_all)
+    -  [Function `append`](#@Specification_1_append)
+    -  [Function `append_disjoint`](#@Specification_1_append_disjoint)
+    -  [Function `append_impl`](#@Specification_1_append_impl)
+    -  [Function `trim`](#@Specification_1_trim)
+    -  [Function `borrow_front`](#@Specification_1_borrow_front)
+    -  [Function `borrow_back`](#@Specification_1_borrow_back)
+    -  [Function `pop_front`](#@Specification_1_pop_front)
+    -  [Function `pop_back`](#@Specification_1_pop_back)
+    -  [Function `prev_key`](#@Specification_1_prev_key)
+    -  [Function `next_key`](#@Specification_1_next_key)
+    -  [Function `lower_bound`](#@Specification_1_lower_bound)
+    -  [Function `find`](#@Specification_1_find)
+    -  [Function `new_begin_iter`](#@Specification_1_new_begin_iter)
+    -  [Function `new_end_iter`](#@Specification_1_new_end_iter)
+    -  [Function `iter_next`](#@Specification_1_iter_next)
+    -  [Function `iter_prev`](#@Specification_1_iter_prev)
+    -  [Function `iter_is_begin`](#@Specification_1_iter_is_begin)
+    -  [Function `iter_is_begin_from_non_empty`](#@Specification_1_iter_is_begin_from_non_empty)
+    -  [Function `iter_is_end`](#@Specification_1_iter_is_end)
+    -  [Function `iter_borrow_key`](#@Specification_1_iter_borrow_key)
+    -  [Function `iter_borrow`](#@Specification_1_iter_borrow)
+    -  [Function `iter_borrow_mut`](#@Specification_1_iter_borrow_mut)
+    -  [Function `iter_remove`](#@Specification_1_iter_remove)
+    -  [Function `iter_replace`](#@Specification_1_iter_replace)
+    -  [Function `iter_add`](#@Specification_1_iter_add)
+    -  [Function `destroy_empty`](#@Specification_1_destroy_empty)
+    -  [Function `keys`](#@Specification_1_keys)
+    -  [Function `values`](#@Specification_1_values)
+    -  [Function `to_vec_pair`](#@Specification_1_to_vec_pair)
+    -  [Function `binary_search`](#@Specification_1_binary_search)
use 0x1::cmp;
@@ -1841,7 +1885,877 @@ to O(n).
 
 
 
+
pragma verify = true;
+
+ + + + + +### Enum `OrderedMap` + + +
enum OrderedMap<K, V> has copy, drop, store
+
+ + + +
+ +
+SortedVectorMap + + +
+Fields + + +
+
+entries: vector<ordered_map::Entry<K, V>> +
+
+ List of entries, sorted by key. +
+
+ + +
+ +
+
+ + + +
pragma intrinsic = map,
+    map_new = new,
+    map_len = length,
+    map_destroy_empty = destroy_empty,
+    map_has_key = contains,
+    map_add_no_override = add,
+    map_borrow = borrow,
+    map_borrow_mut = borrow_mut,
+    map_spec_get = spec_get,
+    map_spec_set = spec_set,
+    map_spec_del = spec_remove,
+    map_spec_len = spec_len,
+    map_spec_has_key = spec_contains_key,
+    map_is_empty = is_empty;
+
+
native fun spec_len<K, V>(t: OrderedMap<K, V>): num;
+
native fun spec_contains_key<K, V>(t: OrderedMap<K, V>, k: K): bool;
+
native fun spec_set<K, V>(t: OrderedMap<K, V>, k: K, v: V): OrderedMap<K, V>;
+
native fun spec_remove<K, V>(t: OrderedMap<K, V>, k: K): OrderedMap<K, V>;
+
native fun spec_get<K, V>(t: OrderedMap<K, V>, k: K): V;
+
+
+### Function `new`
+
public fun new<K, V>(): ordered_map::OrderedMap<K, V>
+
pragma intrinsic;
+
+### Function `new_from`
+
public fun new_from<K, V>(keys: vector<K>, values: vector<V>): ordered_map::OrderedMap<K, V>
+
pragma opaque;
+pragma verify = false;
+aborts_if [abstract] exists i in 0..len(keys), j in 0..len(keys) where i != j : keys[i] == keys[j];
+aborts_if [abstract] len(keys) != len(values);
+ensures [abstract] forall k: K {spec_contains_key(result, k)} : vector::spec_contains(keys,k) <==> spec_contains_key(result, k);
+ensures [abstract] forall i in 0..len(keys) : spec_get(result, keys[i]) == values[i];
+ensures [abstract] spec_len(result) == len(keys);
+
+### Function `length`
+
public fun length<K, V>(self: &ordered_map::OrderedMap<K, V>): u64
+
pragma intrinsic;
+
+### Function `is_empty`
+
public fun is_empty<K, V>(self: &ordered_map::OrderedMap<K, V>): bool
+
pragma intrinsic;
+
+### Function `add`
+
public fun add<K, V>(self: &mut ordered_map::OrderedMap<K, V>, key: K, value: V)
+
pragma intrinsic;
+
+
+### Function `upsert`
+
public fun upsert<K: drop, V>(self: &mut ordered_map::OrderedMap<K, V>, key: K, value: V): option::Option<V>
+
pragma opaque;
+pragma verify = false;
+ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result);
+ensures [abstract] spec_contains_key(self, key);
+ensures [abstract] spec_get(self, key) == value;
+ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result)) && (option::spec_borrow(result) == spec_get(old(self), key)));
+ensures [abstract] !spec_contains_key(old(self), key) ==> spec_len(old(self)) + 1 == spec_len(self);
+ensures [abstract] spec_contains_key(old(self), key) ==> spec_len(old(self)) == spec_len(self);
+ensures [abstract] forall k: K: spec_contains_key(old(self), k) && k != key ==> spec_get(old(self), k) == spec_get(self, k);
+ensures [abstract] forall k: K: spec_contains_key(old(self), k) ==> spec_contains_key(self, k);
+
+### Function `remove`
+
public fun remove<K: drop, V>(self: &mut ordered_map::OrderedMap<K, V>, key: &K): V
+
pragma opaque;
+pragma verify = false;
+aborts_if [abstract] !spec_contains_key(self, key);
+ensures [abstract] !spec_contains_key(self, key);
+ensures [abstract] spec_get(old(self), key) == result;
+ensures [abstract] spec_len(old(self)) == spec_len(self) + 1;
+ensures [abstract] forall k: K where k != key: spec_contains_key(self, k) ==> spec_get(self, k) == spec_get(old(self), k);
+ensures [abstract] forall k: K where k != key: spec_contains_key(old(self), k) == spec_contains_key(self, k);
+
+### Function `contains`
+
public fun contains<K, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): bool
+
pragma intrinsic;
+
+### Function `borrow`
+
public fun borrow<K, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): &V
+
pragma intrinsic;
+
+### Function `borrow_mut`
+
public fun borrow_mut<K, V>(self: &mut ordered_map::OrderedMap<K, V>, key: &K): &mut V
+
pragma intrinsic;
+
+### Function `replace_key_inplace`
+
public(friend) fun replace_key_inplace<K: drop, V>(self: &mut ordered_map::OrderedMap<K, V>, old_key: &K, new_key: K)
+
pragma opaque;
+pragma verify = false;
+
+
+### Function `add_all`
+
public fun add_all<K, V>(self: &mut ordered_map::OrderedMap<K, V>, keys: vector<K>, values: vector<V>)
+
pragma opaque;
+pragma verify = false;
+
+### Function `upsert_all`
+
public fun upsert_all<K: drop, V: drop>(self: &mut ordered_map::OrderedMap<K, V>, keys: vector<K>, values: vector<V>)
+
pragma opaque;
+pragma verify = false;
+
+### Function `append`
+
public fun append<K: drop, V: drop>(self: &mut ordered_map::OrderedMap<K, V>, other: ordered_map::OrderedMap<K, V>)
+
pragma opaque;
+pragma verify = false;
+
+### Function `append_disjoint`
+
public fun append_disjoint<K, V>(self: &mut ordered_map::OrderedMap<K, V>, other: ordered_map::OrderedMap<K, V>)
+
pragma opaque;
+pragma verify = false;
+
+### Function `append_impl`
+
fun append_impl<K, V>(self: &mut ordered_map::OrderedMap<K, V>, other: ordered_map::OrderedMap<K, V>): vector<ordered_map::Entry<K, V>>
+
pragma opaque;
+pragma verify = false;
+
+### Function `trim`
+
public fun trim<K, V>(self: &mut ordered_map::OrderedMap<K, V>, at: u64): ordered_map::OrderedMap<K, V>
+
pragma opaque;
+pragma verify = false;
+
+
+### Function `borrow_front`
+
public fun borrow_front<K, V>(self: &ordered_map::OrderedMap<K, V>): (&K, &V)
+
pragma opaque;
+pragma verify = false;
+ensures [abstract] spec_contains_key(self, result_1);
+ensures [abstract] spec_get(self, result_1) == result_2;
+ensures [abstract] forall k: K where k != result_1: spec_contains_key(self, k) ==> std::cmp::compare(result_1, k) == std::cmp::Ordering::Less;
+
+### Function `borrow_back`
+
public fun borrow_back<K, V>(self: &ordered_map::OrderedMap<K, V>): (&K, &V)
+
pragma opaque;
+pragma verify = false;
+ensures [abstract] spec_contains_key(self, result_1);
+ensures [abstract] spec_get(self, result_1) == result_2;
+ensures [abstract] forall k: K where k != result_1: spec_contains_key(self, k) ==> std::cmp::compare(result_1, k) == std::cmp::Ordering::Greater;
+
+### Function `pop_front`
+
public fun pop_front<K, V>(self: &mut ordered_map::OrderedMap<K, V>): (K, V)
+
pragma opaque;
+pragma verify = false;
+
+### Function `pop_back`
+
public fun pop_back<K, V>(self: &mut ordered_map::OrderedMap<K, V>): (K, V)
+
pragma opaque;
+pragma verify = false;
+
+
+### Function `prev_key`
+
public fun prev_key<K: copy, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): option::Option<K>
+
pragma opaque;
+pragma verify = false;
+ensures [abstract] result == std::option::spec_none() <==>
+(forall k: K {spec_contains_key(self, k)} where spec_contains_key(self, k)
+&& k != key: std::cmp::compare(key, k) == std::cmp::Ordering::Less);
+ensures [abstract] result.is_some() <==>
+    spec_contains_key(self, option::spec_borrow(result)) &&
+    (std::cmp::compare(option::spec_borrow(result), key) == std::cmp::Ordering::Less)
+    && (forall k: K {spec_contains_key(self, k), std::cmp::compare(option::spec_borrow(result), k), std::cmp::compare(key, k)} where k != option::spec_borrow(result): ((spec_contains_key(self, k) &&
+    std::cmp::compare(k, key) == std::cmp::Ordering::Less)) ==>
+    std::cmp::compare(option::spec_borrow(result), k) == std::cmp::Ordering::Greater);
+
+### Function `next_key`
+
public fun next_key<K: copy, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): option::Option<K>
+
pragma opaque;
+pragma verify = false;
+ensures [abstract] result == std::option::spec_none() <==>
+(forall k: K {spec_contains_key(self, k)} where spec_contains_key(self, k) && k != key:
+std::cmp::compare(key, k) == std::cmp::Ordering::Greater);
+ensures [abstract] result.is_some() <==>
+    spec_contains_key(self, option::spec_borrow(result)) &&
+    (std::cmp::compare(option::spec_borrow(result), key) == std::cmp::Ordering::Greater)
+    && (forall k: K {spec_contains_key(self, k)} where k != option::spec_borrow(result): ((spec_contains_key(self, k) &&
+    std::cmp::compare(k, key) == std::cmp::Ordering::Greater)) ==>
+    std::cmp::compare(option::spec_borrow(result), k) == std::cmp::Ordering::Less);
+
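Read concretely, these postconditions say that prev_key returns the largest key strictly below key and next_key the smallest key strictly above it; for keys [1, 2, 3], prev_key(&1) is none while prev_key(&3) is some(2). A sketch in the style of the verify-only tests added later in this change (assumed to live inside aptos_std::ordered_map so the spec helpers resolve):

    #[verify_only]
    fun demo_neighbor_keys() {
        let map = new_from(vector[1u64, 2, 3], vector[4u64, 5, 6]);
        let below_min = map.prev_key(&1);   // no key strictly below the minimum
        let below_max = map.prev_key(&3);   // largest key strictly below 3
        spec {
            assert option::spec_is_none(below_min);
            assert option::spec_is_some(below_max);
            assert option::spec_borrow(below_max) == 2;
        };
    }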
+
+### Function `lower_bound`
+
public(friend) fun lower_bound<K, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): ordered_map::IteratorPtr
+
pragma opaque;
+pragma verify = false;
+
+### Function `find`
+
public(friend) fun find<K, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): ordered_map::IteratorPtr
+
pragma opaque;
+pragma verify = false;
+
+### Function `new_begin_iter`
+
public(friend) fun new_begin_iter<K, V>(self: &ordered_map::OrderedMap<K, V>): ordered_map::IteratorPtr
+
pragma opaque;
+pragma verify = false;
+
+### Function `new_end_iter`
+
public(friend) fun new_end_iter<K, V>(self: &ordered_map::OrderedMap<K, V>): ordered_map::IteratorPtr
+
pragma opaque;
+pragma verify = false;
+
+
+### Function `iter_next`
+
public(friend) fun iter_next<K, V>(self: ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): ordered_map::IteratorPtr
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_prev`
+
public(friend) fun iter_prev<K, V>(self: ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): ordered_map::IteratorPtr
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_is_begin`
+
public(friend) fun iter_is_begin<K, V>(self: &ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): bool
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_is_begin_from_non_empty`
+
public(friend) fun iter_is_begin_from_non_empty(self: &ordered_map::IteratorPtr): bool
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_is_end`
+
public(friend) fun iter_is_end<K, V>(self: &ordered_map::IteratorPtr, _map: &ordered_map::OrderedMap<K, V>): bool
+
pragma opaque;
+pragma verify = false;
+
+
+### Function `iter_borrow_key`
+
public(friend) fun iter_borrow_key<K, V>(self: &ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): &K
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_borrow`
+
public(friend) fun iter_borrow<K, V>(self: ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): &V
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_borrow_mut`
+
public(friend) fun iter_borrow_mut<K, V>(self: ordered_map::IteratorPtr, map: &mut ordered_map::OrderedMap<K, V>): &mut V
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_remove`
+
public(friend) fun iter_remove<K: drop, V>(self: ordered_map::IteratorPtr, map: &mut ordered_map::OrderedMap<K, V>): V
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_replace`
+
public(friend) fun iter_replace<K: copy + drop, V>(self: ordered_map::IteratorPtr, map: &mut ordered_map::OrderedMap<K, V>, value: V): V
+
pragma opaque;
+pragma verify = false;
+
+### Function `iter_add`
+
public(friend) fun iter_add<K, V>(self: ordered_map::IteratorPtr, map: &mut ordered_map::OrderedMap<K, V>, key: K, value: V)
+
pragma opaque;
+pragma verify = false;
+
+
+### Function `destroy_empty`
+
public fun destroy_empty<K, V>(self: ordered_map::OrderedMap<K, V>)
+
pragma intrinsic;
+
+### Function `keys`
+
public fun keys<K: copy, V>(self: &ordered_map::OrderedMap<K, V>): vector<K>
+
pragma verify = false;
+pragma opaque;
+ensures [abstract] forall k: K: vector::spec_contains(result, k) <==> spec_contains_key(self, k);
+
+### Function `values`
+
public fun values<K, V: copy>(self: &ordered_map::OrderedMap<K, V>): vector<V>
+
pragma opaque;
+pragma verify = false;
+
+### Function `to_vec_pair`
+
public fun to_vec_pair<K, V>(self: ordered_map::OrderedMap<K, V>): (vector<K>, vector<V>)
+
pragma verify = false;
+pragma opaque;
+
+### Function `binary_search`
+
fun binary_search<K, V>(key: &K, entries: &vector<ordered_map::Entry<K, V>>, start: u64, end: u64): u64
+
pragma opaque;
+pragma verify = false;
 
diff --git a/aptos-move/framework/aptos-framework/doc/stake.md b/aptos-move/framework/aptos-framework/doc/stake.md
index 564db5d3f1434..b3cf8ee526ce3 100644
--- a/aptos-move/framework/aptos-framework/doc/stake.md
+++ b/aptos-move/framework/aptos-framework/doc/stake.md
@@ -29,6 +29,8 @@ or if their stake drops below the min required, they would get removed at the en
 -  [Resource `ValidatorConfig`](#0x1_stake_ValidatorConfig)
 -  [Struct `ValidatorInfo`](#0x1_stake_ValidatorInfo)
 -  [Resource `ValidatorSet`](#0x1_stake_ValidatorSet)
+-  [Resource `PendingTransactionFee`](#0x1_stake_PendingTransactionFee)
+-  [Struct `DistributeTransactionFee`](#0x1_stake_DistributeTransactionFee)
 -  [Resource `AptosCoinCapabilities`](#0x1_stake_AptosCoinCapabilities)
 -  [Struct `IndividualValidatorPerformance`](#0x1_stake_IndividualValidatorPerformance)
 -  [Resource `ValidatorPerformance`](#0x1_stake_ValidatorPerformance)
@@ -81,6 +83,8 @@ or if their stake drops below the min required, they would get removed at the en
 -  [Function `initialize`](#0x1_stake_initialize)
 -  [Function `store_aptos_coin_mint_cap`](#0x1_stake_store_aptos_coin_mint_cap)
 -  [Function `remove_validators`](#0x1_stake_remove_validators)
+-  [Function `initialize_pending_transaction_fee`](#0x1_stake_initialize_pending_transaction_fee)
+-  [Function `record_fee`](#0x1_stake_record_fee)
 -  [Function `initialize_stake_owner`](#0x1_stake_initialize_stake_owner)
 -  [Function `initialize_validator`](#0x1_stake_initialize_validator)
 -  [Function `initialize_owner`](#0x1_stake_initialize_owner)
@@ -168,7 +172,9 @@ or if their stake drops below the min required, they would get removed at the en
use 0x1::account;
+use 0x1::aggregator_v2;
 use 0x1::aptos_coin;
+use 0x1::big_ordered_map;
 use 0x1::bls12381;
 use 0x1::chain_status;
 use 0x1::coin;
@@ -512,6 +518,68 @@ Full ValidatorSet, stored in @aptos_framework.
 
 
 
+
+## Resource `PendingTransactionFee`
+
+Transaction fees collected in the current epoch, indexed by validator_index.
+
struct PendingTransactionFee has store, key
+
+Fields
+
+pending_fee_by_validator: big_ordered_map::BigOrderedMap<u64, aggregator_v2::Aggregator<u64>>
+
+## Struct `DistributeTransactionFee`
+
#[event]
+struct DistributeTransactionFee has drop, store
+
+Fields
+
+pool_address: address
+
+fee_amount: u64
+
@@ -1251,6 +1319,7 @@ This allows the Stake module to mint rewards to stakers.
 ## Struct `DistributeRewards`
+The amount includes transaction fee and staking rewards.
#[event]
@@ -1841,6 +1910,16 @@ Not enough stake to join validator set.
 
 
 
+
+Transaction fees were not fully distributed by the end of the epoch.
+
const ETRANSACTION_FEE_NOT_FULLY_DISTRIBUTED: u64 = 29;
+
Validator Config not published.
@@ -2452,6 +2531,79 @@ Allow on chain governance to remove validators from the validator set.
+
+## Function `initialize_pending_transaction_fee`
+
public fun initialize_pending_transaction_fee(framework: &signer)
+
+Implementation
+
public fun initialize_pending_transaction_fee(framework: &signer) {
+    system_addresses::assert_aptos_framework(framework);
+
+    if (!exists<PendingTransactionFee>(@aptos_framework)) {
+        move_to(framework, PendingTransactionFee {
+            // The max leaf order is set to 10 because there is an existing limitation that a
+            // resource can hold at most 10 aggregators.
+            pending_fee_by_validator: big_ordered_map::new_with_config(5, 10, true),
+        });
+    }
+}
+
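For context, new_with_config takes the maximum inner-node degree, the maximum leaf-node degree, and a slot-reuse flag, so the call above caps each leaf at 10 entries to stay under the 10-aggregators-per-resource limit. A standalone sketch of the same construction (hypothetical module, assuming this new_with_config signature):

    module 0x42::fee_map_demo {
        use aptos_std::big_ordered_map::{Self, BigOrderedMap};
        use aptos_framework::aggregator_v2::Aggregator;

        // Inner degree 5, leaf degree 10 (a leaf resource then holds at most
        // 10 aggregators), with slot reuse enabled, mirroring the call above.
        fun new_fee_map(): BigOrderedMap<u64, Aggregator<u64>> {
            big_ordered_map::new_with_config(5, 10, true)
        }
    }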
+
+## Function `record_fee`
+
public(friend) fun record_fee(vm: &signer, fee_distribution_validator_indices: vector<u64>, fee_amounts_octa: vector<u64>)
+
+
+Implementation
+
public(friend) fun record_fee(
+    vm: &signer,
+    fee_distribution_validator_indices: vector<u64>,
+    fee_amounts_octa: vector<u64>,
+) acquires PendingTransactionFee {
+    // Operational constraint: can only be invoked by the VM.
+    system_addresses::assert_vm(vm);
+
+    assert!(fee_distribution_validator_indices.length() == fee_amounts_octa.length());
+
+    let num_validators_to_distribute = fee_distribution_validator_indices.length();
+    let pending_fee = borrow_global_mut<PendingTransactionFee>(@aptos_framework);
+    let i = 0;
+    while (i < num_validators_to_distribute) {
+        let validator_index = fee_distribution_validator_indices[i];
+        let fee_octa = fee_amounts_octa[i];
+        pending_fee.pending_fee_by_validator.borrow_mut(&validator_index).add(fee_octa);
+        i = i + 1;
+    }
+}
+
+
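record_fee accumulates into Aggregator<u64> values rather than plain integers so that concurrent fee recordings commute and avoid read-write conflicts under parallel execution; the value is only materialized when read at epoch end. A minimal, self-contained sketch of that API (demo module invented for illustration):

    module 0x42::aggregator_demo {
        use aptos_framework::aggregator_v2;

        fun accumulate_and_read(): u64 {
            let agg = aggregator_v2::create_unbounded_aggregator<u64>();
            // `add` is commutative and conflict-free; only `read` materializes the value.
            agg.add(100);
            agg.add(250);
            agg.read() // 350
        }
    }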
@@ -3717,7 +3869,7 @@ power.
public(friend) fun on_new_epoch(
-) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+) acquires AptosCoinCapabilities, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
     let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
     let config = staking_config::get();
     let validator_perf = borrow_global_mut<ValidatorPerformance>(@aptos_framework);
@@ -3826,6 +3978,12 @@ power.
         validator_index = validator_index + 1;
     };
 
+    if (exists<PendingTransactionFee>(@aptos_framework)) {
+        let pending_fee_by_validator = &mut borrow_global_mut<PendingTransactionFee>(@aptos_framework).pending_fee_by_validator;
+        assert!(pending_fee_by_validator.is_empty(), error::internal(ETRANSACTION_FEE_NOT_FULLY_DISTRIBUTED));
+        validator_set.active_validators.for_each_ref(|v| pending_fee_by_validator.add(v.config.validator_index, aggregator_v2::create_unbounded_aggregator<u64>()));
+    };
+
     if (features::periodical_reward_rate_decrease_enabled()) {
         // Update rewards rate after reward distribution.
         staking_config::calculate_and_save_latest_epoch_rewards_rate();
@@ -4121,11 +4279,27 @@ This function shouldn't abort.
     validator_perf: &ValidatorPerformance,
     pool_address: address,
     staking_config: &StakingConfig,
-) acquires StakePool, AptosCoinCapabilities, ValidatorConfig {
+) acquires AptosCoinCapabilities, PendingTransactionFee, StakePool, ValidatorConfig {
     let stake_pool = borrow_global_mut<StakePool>(pool_address);
     let validator_config = borrow_global<ValidatorConfig>(pool_address);
-    let cur_validator_perf = vector::borrow(&validator_perf.validators, validator_config.validator_index);
+    let validator_index = validator_config.validator_index;
+    let cur_validator_perf = vector::borrow(&validator_perf.validators, validator_index);
     let num_successful_proposals = cur_validator_perf.successful_proposals;
+
+    let fee_pending_inactive = 0;
+    let fee_active = 0;
+
+    if (exists<PendingTransactionFee>(@aptos_framework)) {
+        let pending_fee_by_validator = &mut borrow_global_mut<PendingTransactionFee>(@aptos_framework).pending_fee_by_validator;
+        if (pending_fee_by_validator.contains(&validator_index)) {
+            let fee_octa = pending_fee_by_validator.remove(&validator_index).read();
+            let stake_active = (coin::value(&stake_pool.active) as u128);
+            let stake_pending_inactive = (coin::value(&stake_pool.pending_inactive) as u128);
+            fee_pending_inactive = (((fee_octa as u128) * stake_pending_inactive / (stake_active + stake_pending_inactive)) as u64);
+            fee_active = fee_octa - fee_pending_inactive;
+        }
+    };
+
     spec {
         // The following addition should not overflow because `num_total_proposals` cannot be larger than 86400,
         // the maximum number of proposals in a day (1 proposal per second).
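The fee split below computes the pending_inactive share first, widening to u128 so the fee-times-stake product cannot overflow, and gives the remainder (including rounding dust) to active stake, so the two parts always sum to fee_octa exactly. For instance, fee_octa = 1000 against 3,000,000 active and 1,000,000 pending_inactive yields 250 for pending_inactive and 750 for active. A hypothetical standalone helper mirroring that arithmetic:

    module 0x42::fee_split_demo {
        /// Pro-rata split of `fee_octa` between active and pending_inactive stake.
        /// Assumes total stake is non-zero, as at the call site above.
        fun split_fee(fee_octa: u64, stake_active: u64, stake_pending_inactive: u64): (u64, u64) {
            // Widen to u128 so fee * stake cannot overflow u64.
            let fee_pending_inactive = (((fee_octa as u128) * (stake_pending_inactive as u128)
                / ((stake_active as u128) + (stake_pending_inactive as u128))) as u64);
            // The remainder, including any rounding dust, stays with active stake.
            (fee_octa - fee_pending_inactive, fee_pending_inactive)
        }

        #[test]
        fun split_fee_example() {
            let (active, pending_inactive) = split_fee(1000, 3000000, 1000000);
            assert!(active == 750 && pending_inactive == 250, 0);
        }
    }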
@@ -4150,6 +4324,21 @@ This function shouldn't abort.
     spec {
         assume rewards_active + rewards_pending_inactive <= MAX_U64;
     };
+
+    if (std::features::is_distribute_transaction_fee_enabled()) {
+        let mint_cap = &borrow_global<AptosCoinCapabilities>(@aptos_framework).mint_cap;
+        if (fee_active > 0) {
+            coin::merge(&mut stake_pool.active, coin::mint(fee_active, mint_cap));
+        };
+        if (fee_pending_inactive > 0) {
+            coin::merge(&mut stake_pool.pending_inactive, coin::mint(fee_pending_inactive, mint_cap));
+        };
+        let fee_amount = fee_active + fee_pending_inactive;
+        if (fee_amount > 0) {
+            event::emit(DistributeTransactionFee { pool_address, fee_amount });
+        };
+    };
+
     let rewards_amount = rewards_active + rewards_pending_inactive;
     // Pending active stake can now be active.
     coin::merge(&mut stake_pool.active, coin::extract_all(&mut stake_pool.pending_active));
@@ -4677,62 +4866,6 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
-
-
-
fun spec_rewards_amount(
-   stake_amount: u64,
-   num_successful_proposals: u64,
-   num_total_proposals: u64,
-   rewards_rate: u64,
-   rewards_rate_denominator: u64,
-): u64;
-
-
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
-   exists i in 0..len(validators): validators[i].addr == addr
-}
-
-
fun spec_is_current_epoch_validator(pool_address: address): bool {
-   let validator_set = global<ValidatorSet>(@aptos_framework);
-   !spec_contains(validator_set.pending_active, pool_address)
-       && (spec_contains(validator_set.active_validators, pool_address)
-       || spec_contains(validator_set.pending_inactive, pool_address))
-}
-
-
schema ResourceRequirement {
-    requires exists<AptosCoinCapabilities>(@aptos_framework);
-    requires exists<ValidatorPerformance>(@aptos_framework);
-    requires exists<ValidatorSet>(@aptos_framework);
-    requires exists<StakingConfig>(@aptos_framework);
-    requires exists<StakingRewardsConfig>(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled();
-    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
-}
-
-
@@ -5603,6 +5736,247 @@ Returns validator's next epoch voting power, including pending_active, active, a
+
schema AddStakeWithCapAbortsIfAndEnsures {
+    owner_cap: OwnerCapability;
+    amount: u64;
+    let pool_address = owner_cap.pool_address;
+    aborts_if !exists<StakePool>(pool_address);
+    let config = global<staking_config::StakingConfig>(@aptos_framework);
+    let validator_set = global<ValidatorSet>(@aptos_framework);
+    let voting_power_increase_limit = config.voting_power_increase_limit;
+    let post post_validator_set = global<ValidatorSet>(@aptos_framework);
+    let update_voting_power_increase = amount != 0 && (spec_contains(validator_set.active_validators, pool_address)
+                                                       || spec_contains(validator_set.pending_active, pool_address));
+    aborts_if update_voting_power_increase && validator_set.total_joining_power + amount > MAX_U128;
+    ensures update_voting_power_increase ==> post_validator_set.total_joining_power == validator_set.total_joining_power + amount;
+    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
+            && validator_set.total_voting_power * voting_power_increase_limit > MAX_U128;
+    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
+            && validator_set.total_joining_power + amount > validator_set.total_voting_power * voting_power_increase_limit / 100;
+    let stake_pool = global<StakePool>(pool_address);
+    let post post_stake_pool = global<StakePool>(pool_address);
+    let value_pending_active = stake_pool.pending_active.value;
+    let value_active = stake_pool.active.value;
+    ensures amount != 0 && spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.pending_active.value == value_pending_active + amount;
+    ensures amount != 0 && !spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.active.value == value_active + amount;
+    let maximum_stake = config.maximum_stake;
+    let value_pending_inactive = stake_pool.pending_inactive.value;
+    let next_epoch_voting_power = value_pending_active + value_active + value_pending_inactive;
+    let voting_power = next_epoch_voting_power + amount;
+    aborts_if amount != 0 && voting_power > MAX_U64;
+    aborts_if amount != 0 && voting_power > maximum_stake;
+}
+
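The joining-power guard in this schema treats voting_power_increase_limit as a percentage: total_joining_power plus the new amount must not exceed limit% of total voting power. A hypothetical restatement with a worked instance (names invented for illustration):

    module 0x42::voting_power_limit_demo {
        /// With a percentage limit, total_joining + amount must stay within
        /// limit_pct% of total voting power, as in the schema above.
        fun exceeds_increase_limit(total_joining: u128, amount: u128, total_voting: u128, limit_pct: u128): bool {
            total_joining + amount > total_voting * limit_pct / 100
        }

        #[test]
        fun example() {
            // With 1000 total voting power and a 20% limit, at most 200 may join.
            assert!(!exceeds_increase_limit(150, 50, 1000, 20), 0);
            assert!(exceeds_increase_limit(150, 51, 1000, 20), 1);
        }
    }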
+
schema AddStakeAbortsIfAndEnsures {
+    owner: signer;
+    amount: u64;
+    let owner_address = signer::address_of(owner);
+    aborts_if !exists<OwnerCapability>(owner_address);
+    let owner_cap = global<OwnerCapability>(owner_address);
+    include AddStakeWithCapAbortsIfAndEnsures { owner_cap };
+}
+
+
fun spec_is_allowed(account: address): bool {
+   if (!exists<AllowedValidators>(@aptos_framework)) {
+       true
+   } else {
+       let allowed = global<AllowedValidators>(@aptos_framework);
+       contains(allowed.accounts, account)
+   }
+}
+
+
fun spec_find_validator(v: vector<ValidatorInfo>, addr: address): Option<u64>;
+
+
fun spec_validators_are_initialized(validators: vector<ValidatorInfo>): bool {
+   forall i in 0..len(validators):
+       spec_has_stake_pool(validators[i].addr) &&
+           spec_has_validator_config(validators[i].addr)
+}
+
+
fun spec_validators_are_initialized_addrs(addrs: vector<address>): bool {
+   forall i in 0..len(addrs):
+       spec_has_stake_pool(addrs[i]) &&
+           spec_has_validator_config(addrs[i])
+}
+
+
fun spec_validator_indices_are_valid(validators: vector<ValidatorInfo>): bool {
+   spec_validator_indices_are_valid_addr(validators, spec_validator_index_upper_bound()) &&
+       spec_validator_indices_are_valid_config(validators, spec_validator_index_upper_bound())
+}
+
+
fun spec_validator_indices_are_valid_addr(validators: vector<ValidatorInfo>, upper_bound: u64): bool {
+   forall i in 0..len(validators):
+       global<ValidatorConfig>(validators[i].addr).validator_index < upper_bound
+}
+
+
fun spec_validator_indices_are_valid_config(validators: vector<ValidatorInfo>, upper_bound: u64): bool {
+   forall i in 0..len(validators):
+       validators[i].config.validator_index < upper_bound
+}
+
+
fun spec_validator_indices_active_pending_inactive(validator_set: ValidatorSet): bool {
+   len(validator_set.pending_inactive) + len(validator_set.active_validators) == spec_validator_index_upper_bound()
+}
+
+
fun spec_validator_index_upper_bound(): u64 {
+   len(global<ValidatorPerformance>(@aptos_framework).validators)
+}
+
+
fun spec_has_stake_pool(a: address): bool {
+   exists<StakePool>(a)
+}
+
+
fun spec_has_validator_config(a: address): bool {
+   exists<ValidatorConfig>(a)
+}
+
+
fun spec_rewards_amount(
+   stake_amount: u64,
+   num_successful_proposals: u64,
+   num_total_proposals: u64,
+   rewards_rate: u64,
+   rewards_rate_denominator: u64,
+): u64;
+
+
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
+   exists i in 0..len(validators): validators[i].addr == addr
+}
+
+
fun spec_is_current_epoch_validator(pool_address: address): bool {
+   let validator_set = global<ValidatorSet>(@aptos_framework);
+   !spec_contains(validator_set.pending_active, pool_address)
+       && (spec_contains(validator_set.active_validators, pool_address)
+       || spec_contains(validator_set.pending_inactive, pool_address))
+}
+
+
schema ResourceRequirement {
+    requires exists<AptosCoinCapabilities>(@aptos_framework);
+    requires exists<ValidatorPerformance>(@aptos_framework);
+    requires exists<ValidatorSet>(@aptos_framework);
+    requires exists<StakingConfig>(@aptos_framework);
+    requires exists<StakingRewardsConfig>(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled();
+    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+}
+
+
### Function `update_stake_pool`
@@ -5700,44 +6074,6 @@ Returns validator's next epoch voting power, including pending_active, active, a
-
schema GetReconfigStartTimeRequirement {
-    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
-    include reconfiguration_state::StartTimeSecsRequirement;
-}
-
-
fun spec_get_reconfig_start_time_secs(): u64 {
-   if (exists<reconfiguration_state::State>(@aptos_framework)) {
-       reconfiguration_state::spec_start_time_secs()
-   } else {
-       timestamp::spec_now_seconds()
-   }
-}
-
-
fun spec_get_lockup_secs(pool_address: address): u64 {
-   global<StakePool>(pool_address).locked_until_secs
-}
-
-
### Function `calculate_rewards_amount`

diff --git a/aptos-move/framework/aptos-framework/doc/transaction_validation.md b/aptos-move/framework/aptos-framework/doc/transaction_validation.md
index 0986c0f52f816..7c23e497d9ef8 100644
--- a/aptos-move/framework/aptos-framework/doc/transaction_validation.md
+++ b/aptos-move/framework/aptos-framework/doc/transaction_validation.md
@@ -554,10 +554,16 @@ Only called during genesis to initialize system resources for this module.
     // Check if the authentication key is valid
     if (!skip_auth_key_check(is_simulation, &txn_authentication_key)) {
         if (option::is_some(&txn_authentication_key)) {
-            assert!(
-                txn_authentication_key == option::some(account::get_authentication_key(sender_address)),
-                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
-            );
+            if (
+                sender_address == gas_payer_address ||
+                account::exists_at(sender_address) ||
+                !features::sponsored_automatic_account_creation_enabled()
+            ) {
+                assert!(
+                    txn_authentication_key == option::some(account::get_authentication_key(sender_address)),
+                    error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+                );
+            };
         } else {
             assert!(
                 allow_missing_txn_authentication_key(sender_address),
@@ -2014,7 +2020,7 @@ not equal the number of signers.
pragma aborts_if_is_partial;
 pragma verify_duration_estimate = 120;
 aborts_if !features::spec_is_enabled(features::FEE_PAYER_ENABLED);
-let gas_payer = create_signer::create_signer(fee_payer_address);
+let gas_payer = create_signer::spec_create_signer(fee_payer_address);
 include PrologueCommonAbortsIf {
     gas_payer,
     replay_protector: ReplayProtector::SequenceNumber(txn_sequence_number),
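The new prologue guard narrows when the auth-key equality check applies: it is skipped only for a sponsored (fee-payer) transaction whose sender account does not yet exist while sponsored_automatic_account_creation is enabled; in every other case it still applies. A hedged restatement as a boolean helper (name and shape invented for readability only):

    // Hypothetical restatement of the prologue gate above.
    fun must_check_auth_key(sender_is_gas_payer: bool, sender_exists: bool, feature_on: bool): bool {
        // Skipped only when all three escape conditions hold: sponsored transaction,
        // sender account not yet created, and the feature flag enabled.
        sender_is_gas_payer || sender_exists || !feature_on
    }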
diff --git a/aptos-move/framework/aptos-framework/sources/account/account.move b/aptos-move/framework/aptos-framework/sources/account/account.move
index 7ac3144d9f453..62ec34f4644af 100644
--- a/aptos-move/framework/aptos-framework/sources/account/account.move
+++ b/aptos-move/framework/aptos-framework/sources/account/account.move
@@ -202,9 +202,9 @@ module aptos_framework::account {
     const EOFFERER_ADDRESS_DOES_NOT_EXIST: u64 = 17;
     /// The specified rotation capability offer does not exist at the specified offerer address
     const ENO_SUCH_ROTATION_CAPABILITY_OFFER: u64 = 18;
-    // The signer capability is not offered to any address
+    /// The signer capability is not offered to any address
     const ENO_SIGNER_CAPABILITY_OFFERED: u64 = 19;
-    // This account has exceeded the allocated GUIDs it can create. It should be impossible to reach this number for real applications.
+    /// This account has exceeded the allocated GUIDs it can create. It should be impossible to reach this number for real applications.
     const EEXCEEDED_MAX_GUID_CREATION_NUM: u64 = 20;
     /// The new authentication key already has an entry in the `OriginatingAddress` table
     const ENEW_AUTH_KEY_ALREADY_MAPPED: u64 = 21;
@@ -218,6 +218,8 @@ module aptos_framework::account {
     const ENOT_A_KEYLESS_PUBLIC_KEY: u64 = 25;
     /// The provided public key is not the original public key for the account
     const ENOT_THE_ORIGINAL_PUBLIC_KEY: u64 = 26;
+    /// The set_originating_address entry function is disabled due to potential poisoning from account abstraction
+    const ESET_ORIGINATING_ADDRESS_DISABLED: u64 = 27;
 
     /// Explicitly separate the GUID space between Object and Account to prevent accidental overlap.
     const MAX_GUID_CREATION_NUM: u64 = 0x4000000000000;
@@ -828,6 +830,8 @@ module aptos_framework::account {
     /// `rotate_authentication_key_call()`, the `OriginatingAddress` table is only updated under the
     /// authority of the new authentication key.
     entry fun set_originating_address(account: &signer) acquires Account, OriginatingAddress {
+        abort error::invalid_state(ESET_ORIGINATING_ADDRESS_DISABLED);
+
         let account_addr = signer::address_of(account);
         assert!(exists(account_addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
         let auth_key_as_address =
@@ -2102,4 +2106,10 @@ module aptos_framework::account {
         let event = CoinRegister { account: addr, type_info: type_info::type_of() };
         assert!(!event::was_event_emitted(&event), 3);
     }
+
+    #[test(account = @0x1234)]
+    #[expected_failure(abort_code = 0x3001b, location = Self)]
+    fun test_set_originating_address_fails(account: &signer) acquires Account, OriginatingAddress {
+        set_originating_address(account);
+    }
 }
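The expected abort_code in the test above follows from how std::error composes canonical codes: category << 16 | reason, with INVALID_STATE = 0x3 and reason 27 = 0x1b giving 0x3001b. A small sanity-check sketch (demo module invented for illustration):

    module 0x42::abort_code_demo {
        use std::error;

        #[test]
        fun canonical_abort_code_matches() {
            // error::invalid_state(r) == (INVALID_STATE << 16) | r, i.e. 0x30000 + 0x1b.
            assert!(error::invalid_state(27) == 0x3001b, 0);
        }
    }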
diff --git a/aptos-move/framework/aptos-framework/sources/block.move b/aptos-move/framework/aptos-framework/sources/block.move
index 3dc18b06480eb..b65feb0bca716 100644
--- a/aptos-move/framework/aptos-framework/sources/block.move
+++ b/aptos-move/framework/aptos-framework/sources/block.move
@@ -246,6 +246,14 @@ module aptos_framework::block {
         };
     }
 
+    fun block_epilogue(
+        vm: &signer,
+        fee_distribution_validator_indices: vector<u64>,
+        fee_amounts_octa: vector<u64>,
+    ) {
+        stake::record_fee(vm, fee_distribution_validator_indices, fee_amounts_octa);
+    }
+
     #[view]
     /// Get the current block height
     public fun get_current_block_height(): u64 acquires BlockResource {
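block_epilogue is the VM's entry point at block close: the two vectors are parallel (validator index, fee in octa), and stake::record_fee aborts for any signer other than the VM. A test-style sketch of the calling shape only (it assumes a friend context such as the stake module's own tests, the test-only account::create_signer_for_test helper, and prior initialization of PendingTransactionFee; setup is omitted):

    #[test_only]
    use aptos_framework::account::create_signer_for_test;

    #[test]
    fun record_fee_call_shape() acquires PendingTransactionFee {
        let vm = create_signer_for_test(@vm); // record_fee rejects any other signer
        // Validators 0 and 2 earned 500 and 300 octa of fees in this block.
        record_fee(&vm, vector[0, 2], vector[500, 300]);
    }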
diff --git a/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.move b/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.move
index f30d8c1922bd1..62978830f3e54 100644
--- a/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.move
+++ b/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.move
@@ -2271,4 +2271,219 @@ module aptos_std::big_ordered_map {
     // fun test_large_data_set_order_32_true() {
     //     test_large_data_set_helper(32, 32, true);
     // }
+
+    #[verify_only]
+    fun test_verify_borrow_front_key() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let (_key, _value) = map.borrow_front();
+        spec {
+            assert keys[0] == 1;
+            assert vector::spec_contains(keys, 1);
+            assert spec_contains_key(map, _key);
+            assert spec_get(map, _key) == _value;
+            assert _key == (1 as u64);
+        };
+        map.remove(&1);
+        map.remove(&2);
+        map.remove(&3);
+        map.destroy_empty();
+    }
+
+    spec test_verify_borrow_front_key {
+        pragma verify = true;
+    }
+
+    #[verify_only]
+    fun test_verify_borrow_back_key() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let (key, value) = map.borrow_back();
+        spec {
+            assert keys[2] == 3;
+            assert vector::spec_contains(keys, 3);
+            assert spec_contains_key(map, key);
+            assert spec_get(map, key) == value;
+            assert key == (3 as u64);
+        };
+        map.remove(&1);
+        map.remove(&2);
+        map.remove(&3);
+        map.destroy_empty();
+    }
+
+    spec test_verify_borrow_back_key {
+        pragma verify = true;
+    }
+
+    #[verify_only]
+    fun test_verify_upsert() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let (_key, _value) = map.borrow_back();
+        let result_1 = map.upsert(4, 5);
+        spec {
+            assert spec_contains_key(map, 4);
+            assert spec_get(map, 4) == 5;
+            assert option::spec_is_none(result_1);
+        };
+        let result_2 = map.upsert(4, 6);
+        spec {
+            assert spec_contains_key(map, 4);
+            assert spec_get(map, 4) == 6;
+            assert option::spec_is_some(result_2);
+            assert option::spec_borrow(result_2) == 5;
+        };
+        spec {
+            assert keys[0] == 1;
+            assert spec_contains_key(map, 1);
+            assert spec_get(map, 1) == 4;
+        };
+        let v = map.remove(&1);
+        spec {
+            assert v == 4;
+        };
+        map.remove(&2);
+        map.remove(&3);
+        map.remove(&4);
+        spec {
+            assert !spec_contains_key(map, 1);
+            assert !spec_contains_key(map, 2);
+            assert !spec_contains_key(map, 3);
+            assert !spec_contains_key(map, 4);
+            assert spec_len(map) == 0;
+        };
+        map.destroy_empty();
+    }
+
+    spec test_verify_upsert {
+        pragma verify = true;
+    }
+
+    #[verify_only]
+    fun test_verify_next_key() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let result_1 = map.next_key(&3);
+        spec {
+            assert option::spec_is_none(result_1);
+        };
+        let result_2 = map.next_key(&1);
+        spec {
+            assert keys[0] == 1;
+            assert spec_contains_key(map, 1);
+            assert keys[1] == 2;
+            assert spec_contains_key(map, 2);
+            assert option::spec_is_some(result_2);
+            assert option::spec_borrow(result_2) == 2;
+        };
+        map.remove(&1);
+        map.remove(&2);
+        map.remove(&3);
+        map.destroy_empty();
+    }
+
+    spec test_verify_next_key {
+        pragma verify = true;
+    }
+
+    #[verify_only]
+    fun test_verify_prev_key() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let result_1 = map.prev_key(&1);
+        spec {
+            assert option::spec_is_none(result_1);
+        };
+        let result_2 = map.prev_key(&3);
+        spec {
+            assert keys[0] == 1;
+            assert spec_contains_key(map, 1);
+            assert keys[1] == 2;
+            assert spec_contains_key(map, 2);
+            assert option::spec_is_some(result_2);
+        };
+        map.remove(&1);
+        map.remove(&2);
+        map.remove(&3);
+        map.destroy_empty();
+    }
+
+    spec test_verify_prev_key {
+        pragma verify = true;
+    }
+
+    #[verify_only]
+    fun test_verify_remove() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        spec {
+            assert keys[1] == 2;
+            assert vector::spec_contains(keys, 2);
+            assert spec_contains_key(map, 2);
+            assert spec_get(map, 2) == 5;
+            assert spec_len(map) == 3;
+        };
+        let v = map.remove(&1);
+        spec {
+            assert v == 4;
+            assert spec_contains_key(map, 2);
+            assert spec_get(map, 2) == 5;
+            assert spec_len(map) == 2;
+            assert !spec_contains_key(map, 1);
+        };
+        map.remove(&2);
+        map.remove(&3);
+        map.destroy_empty();
+    }
+
+    spec test_verify_remove {
+        pragma verify = true;
+    }
+
+     #[verify_only]
+     fun test_aborts_if_new_from_1(): BigOrderedMap<u64, u64> {
+        let keys: vector<u64> = vector[1, 2, 3, 1];
+        let values: vector<u64> = vector[4, 5, 6, 7];
+        spec {
+            assert keys[0] == 1;
+            assert keys[3] == 1;
+        };
+        let map = new_from(keys, values);
+        map
+     }
+
+     spec test_aborts_if_new_from_1 {
+        pragma verify = true;
+        aborts_if true;
+     }
+
+     #[verify_only]
+     fun test_aborts_if_new_from_2(keys: vector<u64>, values: vector<u64>): BigOrderedMap<u64, u64> {
+        let map = new_from(keys, values);
+        map
+     }
+
+     spec test_aborts_if_new_from_2 {
+        pragma verify = true;
+        aborts_if exists i in 0..len(keys), j in 0..len(keys) where i != j : keys[i] == keys[j];
+        aborts_if len(keys) != len(values);
+     }
+
+     #[verify_only]
+     fun test_aborts_if_remove(map: &mut BigOrderedMap<u64, u64>) {
+        map.remove(&1);
+     }
+
+     spec test_aborts_if_remove {
+        pragma verify = true;
+        aborts_if !spec_contains_key(map, 1);
+     }
+
 }
diff --git a/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.spec.move b/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.spec.move
new file mode 100644
index 0000000000000..273b995d63075
--- /dev/null
+++ b/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.spec.move
@@ -0,0 +1,249 @@
+spec aptos_std::big_ordered_map {
+
+    spec BigOrderedMap {
+        pragma intrinsic = map,
+            map_new = new,
+            map_destroy_empty = destroy_empty,
+            map_has_key = contains,
+            map_add_no_override = add,
+            map_borrow = borrow,
+            map_borrow_mut = borrow_mut,
+            map_spec_get = spec_get,
+            map_spec_set = spec_set,
+            map_spec_del = spec_remove,
+            map_spec_len = spec_len,
+            map_spec_has_key = spec_contains_key,
+            map_is_empty = is_empty;
+    }
+
+    spec native fun spec_len<K, V>(t: BigOrderedMap<K, V>): num;
+    spec native fun spec_contains_key<K, V>(t: BigOrderedMap<K, V>, k: K): bool;
+    spec native fun spec_set<K, V>(t: BigOrderedMap<K, V>, k: K, v: V): BigOrderedMap<K, V>;
+    spec native fun spec_remove<K, V>(t: BigOrderedMap<K, V>, k: K): BigOrderedMap<K, V>;
+    spec native fun spec_get<K, V>(t: BigOrderedMap<K, V>, k: K): V;
+
+
+    spec new_with_config {
+        pragma verify = false;
+        pragma opaque;
+    }
+
+    spec new {
+        pragma intrinsic;
+    }
+
+    spec new_with_reusable {
+        pragma verify = false;
+        pragma opaque;
+    }
+
+    spec new_with_type_size_hints {
+        pragma verify = false;
+        pragma opaque;
+    }
+
+    spec borrow {
+        pragma intrinsic;
+    }
+
+    spec borrow_mut {
+        pragma intrinsic;
+    }
+
+    spec contains {
+        pragma intrinsic;
+    }
+
+    spec destroy_empty {
+        pragma intrinsic;
+    }
+
+    spec add {
+        pragma intrinsic;
+    }
+
+    spec remove {
+        pragma opaque;
+        pragma verify = false;
+        aborts_if [abstract] !spec_contains_key(self, key);
+        ensures [abstract] !spec_contains_key(self, key);
+        ensures [abstract] spec_get(old(self), key) == result;
+        ensures [abstract] spec_len(old(self)) == spec_len(self) + 1;
+        ensures [abstract] forall k: K where k != key: spec_contains_key(self, k) ==> spec_get(self, k) == spec_get(old(self), k);
+        ensures [abstract] forall k: K where k != key: spec_contains_key(old(self), k) == spec_contains_key(self, k);
+    }
+
+    spec is_empty {
+        pragma intrinsic;
+    }
+
+    spec iter_is_end {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_borrow {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_borrow_mut {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_is_begin {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec lower_bound {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_borrow_key {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec allocate_spare_slots {
+        pragma verify = false;
+        pragma opaque;
+    }
+
+    spec validate_size_and_init_max_degrees {
+        pragma verify = false;
+        pragma opaque;
+    }
+
+    spec validate_dynamic_size_and_init_max_degrees {
+        pragma verify = false;
+        pragma opaque;
+    }
+
+    spec validate_static_size_and_init_max_degrees {
+        pragma verify = false;
+        pragma opaque;
+    }
+
+    spec keys {
+        pragma verify = false;
+        pragma opaque;
+        ensures [abstract] forall k: K: vector::spec_contains(result, k) <==> spec_contains_key(self, k);
+    }
+
+    spec new_from(keys: vector<K>, values: vector<V>): BigOrderedMap<K, V> {
+        pragma opaque;
+        pragma verify = false;
+        aborts_if [abstract] exists i in 0..len(keys), j in 0..len(keys) where i != j : keys[i] == keys[j];
+        aborts_if [abstract] len(keys) != len(values);
+        ensures [abstract] forall k: K {spec_contains_key(result, k)} : vector::spec_contains(keys,k) <==> spec_contains_key(result, k);
+        ensures [abstract] forall i in 0..len(keys) : spec_get(result, keys[i]) == values[i];
+        ensures [abstract] spec_len(result) == len(keys);
+    }
+
+    spec upsert {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result);
+        ensures [abstract] spec_contains_key(self, key);
+        ensures [abstract] spec_get(self, key) == value;
+        ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result)) && (option::spec_borrow(result) == spec_get(old(
+            self), key)));
+        ensures [abstract] !spec_contains_key(old(self), key) ==> spec_len(old(self)) + 1 == spec_len(self);
+        ensures [abstract] spec_contains_key(old(self), key) ==> spec_len(old(self)) == spec_len(self);
+        ensures [abstract] forall k: K: spec_contains_key(old(self), k) && k != key ==> spec_get(old(self), k) == spec_get(self, k);
+        ensures [abstract] forall k: K: spec_contains_key(old(self), k) ==> spec_contains_key(self, k);
+    }
+
+    spec add_all {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec borrow_front(self: &BigOrderedMap<K, V>): (K, &V) {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] spec_contains_key(self, result_1);
+        ensures [abstract] spec_get(self, result_1) == result_2;
+        ensures [abstract] forall k: K where k != result_1: spec_contains_key(self, k) ==>
+        std::cmp::compare(result_1, k) == std::cmp::Ordering::Less;
+    }
+
+    spec borrow_back {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] spec_contains_key(self, result_1);
+        ensures [abstract] spec_get(self, result_1) == result_2;
+        ensures [abstract] forall k: K where k != result_1: spec_contains_key(self, k) ==>
+        std::cmp::compare(result_1, k) == std::cmp::Ordering::Greater;
+    }
+
+    spec pop_front(self: &mut BigOrderedMap<K, V>): (K, V) {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec pop_back {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec prev_key(self: &BigOrderedMap<K, V>, key: &K): Option<K> {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] result == std::option::spec_none() <==>
+        (forall k: K {spec_contains_key(self, k)} where spec_contains_key(self, k)
+        && k != key: std::cmp::compare(key, k) == std::cmp::Ordering::Less);
+        ensures [abstract] result.is_some() <==>
+            spec_contains_key(self, option::spec_borrow(result)) &&
+            (std::cmp::compare(option::spec_borrow(result), key) == std::cmp::Ordering::Less)
+            && (forall k: K {spec_contains_key(self, k), std::cmp::compare(option::spec_borrow(result), k), std::cmp::compare(key, k)} where k != option::spec_borrow(result): ((spec_contains_key(self, k) &&
+            std::cmp::compare(k, key) == std::cmp::Ordering::Less)) ==>
+            std::cmp::compare(option::spec_borrow(result), k) == std::cmp::Ordering::Greater);
+    }
+
+
+    spec next_key(self: &BigOrderedMap<K, V>, key: &K): Option<K> {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] result == std::option::spec_none() <==>
+        (forall k: K {spec_contains_key(self, k)} where spec_contains_key(self, k) && k != key:
+        std::cmp::compare(key, k) == std::cmp::Ordering::Greater);
+        ensures [abstract] result.is_some() <==>
+            spec_contains_key(self, option::spec_borrow(result)) &&
+            (std::cmp::compare(option::spec_borrow(result), key) == std::cmp::Ordering::Greater)
+            && (forall k: K {spec_contains_key(self, k)} where k != option::spec_borrow(result): ((spec_contains_key(self, k) &&
+            std::cmp::compare(k, key) == std::cmp::Ordering::Greater)) ==>
+            std::cmp::compare(option::spec_borrow(result), k) == std::cmp::Ordering::Less);
+    }
+
+
+    spec find {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec new_begin_iter {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec new_end_iter {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_next {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_prev {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+
+}
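Because remove and upsert are opaque with abstract specs, callers of BigOrderedMap are verified purely against these contracts rather than the B+-tree internals. A hypothetical verified caller (module and names invented for illustration):

    module 0x42::bom_client {
        use aptos_std::big_ordered_map::{Self, BigOrderedMap};

        /// Moves the value stored under `from` to the key `to`.
        fun rekey(map: &mut BigOrderedMap<u64, u64>, from: u64, to: u64) {
            let v = map.remove(&from);
            map.add(to, v);
        }

        spec rekey {
            // Discharged purely from the abstract `remove` spec and the intrinsic `add`.
            aborts_if !big_ordered_map::spec_contains_key(map, from);
            aborts_if from != to && big_ordered_map::spec_contains_key(map, to);
            ensures big_ordered_map::spec_contains_key(map, to);
            ensures big_ordered_map::spec_get(map, to) == big_ordered_map::spec_get(old(map), from);
        }
    }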
diff --git a/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.move b/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.move
index 6b64fef04b2e1..23f487875a78d 100644
--- a/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.move
+++ b/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.move
@@ -678,7 +678,7 @@ module aptos_std::ordered_map {
     // }
 
     spec module {
-        pragma verify = false;
+        pragma verify = true;
     }
 
     // ================= Section for tests =====================
@@ -1276,4 +1276,155 @@ module aptos_std::ordered_map {
 
         map.destroy_empty();
     }
+
+    #[verify_only]
+    fun test_verify_borrow_front_key() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let (key, value) = map.borrow_front();
+        spec {
+            assert keys[0] == 1;
+            assert vector::spec_contains(keys, 1);
+            assert spec_contains_key(map, key);
+            assert spec_get(map, key) == value;
+            assert key == (1 as u64);
+        };
+    }
+
+    #[verify_only]
+    fun test_verify_borrow_back_key() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let (key, value) = map.borrow_back();
+        spec {
+            assert keys[2] == 3;
+            assert vector::spec_contains(keys, 3);
+            assert spec_contains_key(map, key);
+            assert spec_get(map, key) == value;
+            assert key == (3 as u64);
+        };
+    }
+
+    #[verify_only]
+    fun test_verify_upsert() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        spec {
+            assert spec_len(map) == 3;
+        };
+        let (_key, _value) = map.borrow_back();
+        let result_1 = map.upsert(4, 5);
+        spec {
+            assert spec_contains_key(map, 4);
+            assert spec_get(map, 4) == 5;
+            assert option::spec_is_none(result_1);
+            assert spec_len(map) == 4;
+        };
+        let result_2 = map.upsert(4, 6);
+        spec {
+            assert spec_contains_key(map, 4);
+            assert spec_get(map, 4) == 6;
+            assert option::spec_is_some(result_2);
+            assert option::spec_borrow(result_2) == 5;
+        };
+        spec {
+            assert keys[0] == 1;
+            assert spec_contains_key(map, 1);
+            assert spec_get(map, 1) == 4;
+        };
+        let v = map.remove(&1);
+        spec {
+            assert v == 4;
+        };
+        map.remove(&2);
+        map.remove(&3);
+        map.remove(&4);
+        spec {
+            assert !spec_contains_key(map, 1);
+            assert !spec_contains_key(map, 2);
+            assert !spec_contains_key(map, 3);
+            assert !spec_contains_key(map, 4);
+            assert spec_len(map) == 0;
+        };
+        map.destroy_empty();
+    }
+
+    #[verify_only]
+    fun test_verify_next_key() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let result_1 = map.next_key(&3);
+        spec {
+            assert option::spec_is_none(result_1);
+        };
+        let result_2 = map.next_key(&1);
+        spec {
+            assert keys[0] == 1;
+            assert spec_contains_key(map, 1);
+            assert keys[1] == 2;
+            assert spec_contains_key(map, 2);
+            assert option::spec_is_some(result_2);
+            assert option::spec_borrow(result_2) == 2;
+        };
+    }
+
+    #[verify_only]
+    fun test_verify_prev_key() {
+        let keys: vector<u64> = vector[1, 2, 3];
+        let values: vector<u64> = vector[4, 5, 6];
+        let map = new_from(keys, values);
+        let result_1 = map.prev_key(&1);
+        spec {
+            assert option::spec_is_none(result_1);
+        };
+        let result_2 = map.prev_key(&3);
+        spec {
+            assert keys[0] == 1;
+            assert spec_contains_key(map, 1);
+            assert keys[1] == 2;
+            assert spec_contains_key(map, 2);
+            assert option::spec_is_some(result_2);
+        };
+    }
+
+     #[verify_only]
+     fun test_aborts_if_new_from_1(): OrderedMap<u64, u64> {
+        let keys: vector<u64> = vector[1, 2, 3, 1];
+        let values: vector<u64> = vector[4, 5, 6, 7];
+        spec {
+            assert keys[0] == 1;
+            assert keys[3] == 1;
+        };
+        let map = new_from(keys, values);
+        map
+     }
+
+     spec test_aborts_if_new_from_1 {
+        aborts_if true;
+     }
+
+     #[verify_only]
+     fun test_aborts_if_new_from_2(keys: vector<u64>, values: vector<u64>): OrderedMap<u64, u64> {
+        let map = new_from(keys, values);
+        map
+     }
+
+     spec test_aborts_if_new_from_2 {
+        aborts_if exists i in 0..len(keys), j in 0..len(keys) where i != j : keys[i] == keys[j];
+        aborts_if len(keys) != len(values);
+     }
+
+     #[verify_only]
+     fun test_aborts_if_remove(map: &mut OrderedMap<u64, u64>) {
+        map.remove(&1);
+     }
+
+     spec test_aborts_if_remove {
+        aborts_if !spec_contains_key(map, 1);
+     }
+
 }
diff --git a/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.spec.move b/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.spec.move
new file mode 100644
index 0000000000000..2d3f6b321d308
--- /dev/null
+++ b/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.spec.move
@@ -0,0 +1,286 @@
+spec aptos_std::ordered_map {
+
+    spec OrderedMap {
+        pragma intrinsic = map,
+            map_new = new,
+            map_len = length,
+            map_destroy_empty = destroy_empty,
+            map_has_key = contains,
+            map_add_no_override = add,
+            map_borrow = borrow,
+            map_borrow_mut = borrow_mut,
+            map_spec_get = spec_get,
+            map_spec_set = spec_set,
+            map_spec_del = spec_remove,
+            map_spec_len = spec_len,
+            map_spec_has_key = spec_contains_key,
+            map_is_empty = is_empty;
+    }
+
+    spec native fun spec_len<K, V>(t: OrderedMap<K, V>): num;
+    spec native fun spec_contains_key<K, V>(t: OrderedMap<K, V>, k: K): bool;
+    spec native fun spec_set<K, V>(t: OrderedMap<K, V>, k: K, v: V): OrderedMap<K, V>;
+    spec native fun spec_remove<K, V>(t: OrderedMap<K, V>, k: K): OrderedMap<K, V>;
+    spec native fun spec_get<K, V>(t: OrderedMap<K, V>, k: K): V;
+
+    spec length {
+        pragma intrinsic;
+    }
+
+    spec new {
+        pragma intrinsic;
+    }
+
+    spec borrow {
+        pragma intrinsic;
+    }
+
+    spec borrow_mut {
+        pragma intrinsic;
+    }
+
+    spec contains {
+        pragma intrinsic;
+    }
+
+    spec destroy_empty {
+        pragma intrinsic;
+    }
+
+    spec add {
+        pragma intrinsic;
+    }
+
+    spec remove {
+        pragma opaque;
+        pragma verify = false;
+        aborts_if [abstract] !spec_contains_key(self, key);
+        ensures [abstract] !spec_contains_key(self, key);
+        ensures [abstract] spec_get(old(self), key) == result;
+        ensures [abstract] spec_len(old(self)) == spec_len(self) + 1;
+        ensures [abstract] forall k: K where k != key: spec_contains_key(self, k) ==> spec_get(self, k) == spec_get(old(self), k);
+        ensures [abstract] forall k: K where k != key: spec_contains_key(old(self), k) == spec_contains_key(self, k);
+    }
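+    // Example: removing key 3 from {3: 9, 5: 11} returns 9 and leaves {5: 11};
+    // removing a key that is absent aborts, per the abstract aborts_if above.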
+
+    spec is_empty {
+        pragma intrinsic;
+    }
+
+    spec iter_add {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+
+    spec iter_replace {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_remove {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_is_end {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_borrow {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_borrow_mut {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_is_begin_from_non_empty {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_is_begin {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec values {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+
+    spec binary_search {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+
+    spec lower_bound {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_borrow_key {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec keys {
+        pragma verify = false;
+        pragma opaque;
+        ensures [abstract] forall k: K: vector::spec_contains(result, k) <==> spec_contains_key(self, k);
+    }
+
+    spec to_vec_pair {
+        pragma verify = false;
+        pragma opaque;
+    }
+
+    spec new_from<K, V>(keys: vector<K>, values: vector<V>): OrderedMap<K, V> {
+        pragma opaque;
+        pragma verify = false;
+        aborts_if [abstract] exists i in 0..len(keys), j in 0..len(keys) where i != j : keys[i] == keys[j];
+        aborts_if [abstract] len(keys) != len(values);
+        ensures [abstract] forall k: K {spec_contains_key(result, k)}: vector::spec_contains(keys, k) <==> spec_contains_key(result, k);
+        ensures [abstract] forall i in 0..len(keys): spec_get(result, keys[i]) == values[i];
+        ensures [abstract] spec_len(result) == len(keys);
+    }
+
+    spec upsert {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result);
+        ensures [abstract] spec_contains_key(self, key);
+        ensures [abstract] spec_get(self, key) == value;
+        ensures [abstract] spec_contains_key(old(self), key) ==>
+            (option::is_some(result) && option::spec_borrow(result) == spec_get(old(self), key));
+        ensures [abstract] !spec_contains_key(old(self), key) ==> spec_len(old(self)) + 1 == spec_len(self);
+        ensures [abstract] spec_contains_key(old(self), key) ==> spec_len(old(self)) == spec_len(self);
+        ensures [abstract] forall k: K: spec_contains_key(old(self), k) && k != key ==> spec_get(old(self), k) == spec_get(self, k);
+        ensures [abstract] forall k: K: spec_contains_key(old(self), k) ==> spec_contains_key(self, k);
+    }
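+    // Example: upserting (7, 42) into a map without key 7 grows the length by one and
+    // returns option::none(); if 7 was already mapped to 1, the length is unchanged and
+    // option::some(1) is returned.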
+
+    spec replace_key_inplace {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec add_all {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec append {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec upsert_all {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec append_disjoint {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec append_impl {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec trim {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec borrow_front<K, V>(self: &OrderedMap<K, V>): (&K, &V) {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] spec_contains_key(self, result_1);
+        ensures [abstract] spec_get(self, result_1) == result_2;
+        ensures [abstract] forall k: K where k != result_1: spec_contains_key(self, k) ==>
+            std::cmp::compare(result_1, k) == std::cmp::Ordering::Less;
+    }
+
+    spec borrow_back {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] spec_contains_key(self, result_1);
+        ensures [abstract] spec_get(self, result_1) == result_2;
+        ensures [abstract] forall k: K where k != result_1: spec_contains_key(self, k) ==>
+            std::cmp::compare(result_1, k) == std::cmp::Ordering::Greater;
+    }
+
+    spec pop_front<K, V>(self: &mut OrderedMap<K, V>): (K, V) {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec pop_back {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec prev_key<K, V>(self: &OrderedMap<K, V>, key: &K): Option<K> {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] result == std::option::spec_none() <==>
+            (forall k: K {spec_contains_key(self, k)} where spec_contains_key(self, k) && k != key:
+                std::cmp::compare(key, k) == std::cmp::Ordering::Less);
+        ensures [abstract] result.is_some() <==>
+            spec_contains_key(self, option::spec_borrow(result)) &&
+            (std::cmp::compare(option::spec_borrow(result), key) == std::cmp::Ordering::Less) &&
+            (forall k: K {spec_contains_key(self, k), std::cmp::compare(option::spec_borrow(result), k), std::cmp::compare(key, k)}
+                where k != option::spec_borrow(result):
+                    (spec_contains_key(self, k) && std::cmp::compare(k, key) == std::cmp::Ordering::Less) ==>
+                        std::cmp::compare(option::spec_borrow(result), k) == std::cmp::Ordering::Greater);
+    }
+
+
+    spec next_key<K, V>(self: &OrderedMap<K, V>, key: &K): Option<K> {
+        pragma opaque;
+        pragma verify = false;
+        ensures [abstract] result == std::option::spec_none() <==>
+            (forall k: K {spec_contains_key(self, k)} where spec_contains_key(self, k) && k != key:
+                std::cmp::compare(key, k) == std::cmp::Ordering::Greater);
+        ensures [abstract] result.is_some() <==>
+            spec_contains_key(self, option::spec_borrow(result)) &&
+            (std::cmp::compare(option::spec_borrow(result), key) == std::cmp::Ordering::Greater) &&
+            (forall k: K {spec_contains_key(self, k)} where k != option::spec_borrow(result):
+                (spec_contains_key(self, k) && std::cmp::compare(k, key) == std::cmp::Ordering::Greater) ==>
+                    std::cmp::compare(option::spec_borrow(result), k) == std::cmp::Ordering::Less);
+    }
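+    // Example: for a map with keys {1, 2, 5}, prev_key(&5) is some(2) (the largest key
+    // strictly below 5) and prev_key(&1) is none; dually, next_key(&2) is some(5) and
+    // next_key(&5) is none.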
+
+
+    spec find {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec new_begin_iter {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec new_end_iter {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_next {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec iter_prev {
+        pragma opaque;
+        pragma verify = false;
+    }
+
+
+}
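
To make the intrinsic mapping above concrete: once `OrderedMap` is bound to the prover's built-in map theory, client specifications can be written directly against `spec_len`, `spec_contains_key`, and `spec_get`. A minimal sketch of such a client, assuming a hypothetical module and function (`0xcafe::ordered_map_client::set_and_get` is illustrative, not part of this change):

module 0xcafe::ordered_map_client {
    use aptos_std::ordered_map::{Self, OrderedMap};

    // Insert a key and read it back.
    fun set_and_get(map: &mut OrderedMap<u64, u64>): u64 {
        map.upsert(7, 42);
        *map.borrow(&7)
    }

    spec set_and_get {
        // These should be dischargeable from the abstract `upsert` spec and the
        // intrinsic `borrow` spec declared above.
        ensures result == 42;
        ensures ordered_map::spec_contains_key(map, 7);
        ensures ordered_map::spec_get(map, 7) == 42;
    }
}
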
diff --git a/aptos-move/framework/aptos-framework/sources/fungible_asset.move b/aptos-move/framework/aptos-framework/sources/fungible_asset.move
index cba8e81071cdc..03f78e5697e05 100644
--- a/aptos-move/framework/aptos-framework/sources/fungible_asset.move
+++ b/aptos-move/framework/aptos-framework/sources/fungible_asset.move
@@ -1361,8 +1361,13 @@ module aptos_framework::fungible_asset {
     ) acquires FungibleStore {
         assert!(object::owns(store, signer::address_of(owner)), error::permission_denied(ENOT_STORE_OWNER));
         assert!(!is_frozen(store), error::invalid_argument(ESTORE_IS_FROZEN));
+        let fungible_store_address = object::object_address(&store);
+        // Be graceful if ConcurrentFungibleBalance already exists but the flag is off.
+        if (exists<ConcurrentFungibleBalance>(fungible_store_address)) {
+            return
+        };
         assert!(allow_upgrade_to_concurrent_fungible_balance(), error::invalid_argument(ECONCURRENT_BALANCE_NOT_ENABLED));
-        ensure_store_upgraded_to_concurrent_internal(object::object_address(&store));
+        ensure_store_upgraded_to_concurrent_internal(fungible_store_address);
     }
 
     /// Ensure a known `FungibleStore` has `ConcurrentFungibleBalance`.
diff --git a/aptos-move/framework/aptos-framework/sources/genesis.move b/aptos-move/framework/aptos-framework/sources/genesis.move
index 0dffe26dcf9b3..c01170026a95e 100644
--- a/aptos-move/framework/aptos-framework/sources/genesis.move
+++ b/aptos-move/framework/aptos-framework/sources/genesis.move
@@ -109,6 +109,7 @@ module aptos_framework::genesis {
         execution_config::set(&aptos_framework_account, execution_config);
         version::initialize(&aptos_framework_account, initial_version);
         stake::initialize(&aptos_framework_account);
+        stake::initialize_pending_transaction_fee(&aptos_framework_account);
         timestamp::set_time_has_started(&aptos_framework_account);
         staking_config::initialize(
             &aptos_framework_account,
diff --git a/aptos-move/framework/aptos-framework/sources/stake.move b/aptos-move/framework/aptos-framework/sources/stake.move
index ab156ceff5af4..fd0d3263e6fd1 100644
--- a/aptos-move/framework/aptos-framework/sources/stake.move
+++ b/aptos-move/framework/aptos-framework/sources/stake.move
@@ -25,7 +25,9 @@ module aptos_framework::stake {
     use std::vector;
     use aptos_std::bls12381;
     use aptos_std::math64::min;
+    use aptos_std::big_ordered_map::{Self, BigOrderedMap};
     use aptos_std::table::Table;
+    use aptos_framework::aggregator_v2::{Self, Aggregator};
     use aptos_framework::aptos_coin::AptosCoin;
     use aptos_framework::account;
     use aptos_framework::coin::{Self, Coin, MintCapability};
@@ -84,6 +86,8 @@ module aptos_framework::stake {
     const ERECONFIGURATION_IN_PROGRESS: u64 = 20;
     /// Signer does not have permission to perform stake logic.
     const ENO_STAKE_PERMISSION: u64 = 28;
+    /// Transaction fee is not fully distributed at epoch ending.
+    const ETRANSACTION_FEE_NOT_FULLY_DISTRIBUTED: u64 = 29;
 
     /// Validator status enum. We can switch to proper enum later once Move supports it.
     const VALIDATOR_STATUS_PENDING_ACTIVE: u64 = 1;
@@ -188,6 +192,17 @@ module aptos_framework::stake {
         total_joining_power: u128,
     }
 
+    /// Transaction fees collected in the current epoch, indexed by `validator_index`.
+    struct PendingTransactionFee has key, store {
+        pending_fee_by_validator: BigOrderedMap<u64, Aggregator<u64>>,
+    }
+
+    #[event]
+    struct DistributeTransactionFee has drop, store {
+        pool_address: address,
+        fee_amount: u64,
+    }
+
     /// AptosCoin capabilities, set during genesis and stored in @CoreResource account.
     /// This allows the Stake module to mint rewards to stakers.
     struct AptosCoinCapabilities has key {
@@ -307,6 +322,7 @@ module aptos_framework::stake {
     }
 
     #[event]
+    /// The rewards amount includes both transaction fees and staking rewards.
     struct DistributeRewards has drop, store {
         pool_address: address,
         rewards_amount: u64,
@@ -544,6 +560,39 @@ module aptos_framework::stake {
         };
     }
 
+    public fun initialize_pending_transaction_fee(framework: &signer) {
+        system_addresses::assert_aptos_framework(framework);
+
+        if (!exists<PendingTransactionFee>(@aptos_framework)) {
+            move_to(framework, PendingTransactionFee {
+                // The max leaf order is set to 10 because of an existing limitation:
+                // a resource can hold at most 10 aggregators.
+                pending_fee_by_validator: big_ordered_map::new_with_config(5, 10, true),
+            });
+        }
+    }
+
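+    /// Accumulate `fee_amounts_octa[i]` into the pending-fee aggregator of the validator
+    /// at `fee_distribution_validator_indices[i]`. Repeated calls within an epoch add up;
+    /// the accumulated balance is drained and distributed at the next epoch boundary.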
+    public(friend) fun record_fee(
+        vm: &signer,
+        fee_distribution_validator_indices: vector<u64>,
+        fee_amounts_octa: vector<u64>,
+    ) acquires PendingTransactionFee {
+        // Operational constraint: can only be invoked by the VM.
+        system_addresses::assert_vm(vm);
+
+        assert!(fee_distribution_validator_indices.length() == fee_amounts_octa.length());
+
+        let num_validators_to_distribute = fee_distribution_validator_indices.length();
+        let pending_fee = borrow_global_mut<PendingTransactionFee>(@aptos_framework);
+        let i = 0;
+        while (i < num_validators_to_distribute) {
+            let validator_index = fee_distribution_validator_indices[i];
+            let fee_octa = fee_amounts_octa[i];
+            pending_fee.pending_fee_by_validator.borrow_mut(&validator_index).add(fee_octa);
+            i = i + 1;
+        }
+    }
+
     /// Initialize the validator account and give ownership to the signing account
     /// except it leaves the ValidatorConfig to be set by another entity.
     /// Note: this triggers setting the operator and owner, set it to the account's address
@@ -1252,7 +1301,7 @@ module aptos_framework::stake {
     /// 4. The validator's voting power in the validator set is updated to be the corresponding staking pool's voting
     /// power.
     public(friend) fun on_new_epoch(
-    ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AptosCoinCapabilities, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
         let config = staking_config::get();
         let validator_perf = borrow_global_mut<ValidatorPerformance>(@aptos_framework);
@@ -1361,6 +1410,12 @@ module aptos_framework::stake {
             validator_index = validator_index + 1;
         };
 
+        if (exists<PendingTransactionFee>(@aptos_framework)) {
+            let pending_fee_by_validator = &mut borrow_global_mut<PendingTransactionFee>(@aptos_framework).pending_fee_by_validator;
+            assert!(pending_fee_by_validator.is_empty(), error::internal(ETRANSACTION_FEE_NOT_FULLY_DISTRIBUTED));
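+            // Re-seed: give each validator in the new epoch's validator set a fresh
+            // unbounded aggregator, keyed by its (possibly re-assigned) validator_index.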
+            validator_set.active_validators.for_each_ref(|v| pending_fee_by_validator.add(v.config.validator_index, aggregator_v2::create_unbounded_aggregator()));
+        };
+
         if (features::periodical_reward_rate_decrease_enabled()) {
             // Update rewards rate after reward distribution.
             staking_config::calculate_and_save_latest_epoch_rewards_rate();
@@ -1557,11 +1612,27 @@ module aptos_framework::stake {
         validator_perf: &ValidatorPerformance,
         pool_address: address,
         staking_config: &StakingConfig,
-    ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig {
+    ) acquires AptosCoinCapabilities, PendingTransactionFee, StakePool, ValidatorConfig {
         let stake_pool = borrow_global_mut<StakePool>(pool_address);
         let validator_config = borrow_global<ValidatorConfig>(pool_address);
-        let cur_validator_perf = vector::borrow(&validator_perf.validators, validator_config.validator_index);
+        let validator_index = validator_config.validator_index;
+        let cur_validator_perf = vector::borrow(&validator_perf.validators, validator_index);
         let num_successful_proposals = cur_validator_perf.successful_proposals;
+
+        let fee_pending_inactive = 0;
+        let fee_active = 0;
+
+        if (exists<PendingTransactionFee>(@aptos_framework)) {
+            let pending_fee_by_validator = &mut borrow_global_mut<PendingTransactionFee>(@aptos_framework).pending_fee_by_validator;
+            if (pending_fee_by_validator.contains(&validator_index)) {
+                let fee_octa = pending_fee_by_validator.remove(&validator_index).read();
+                let stake_active = (coin::value(&stake_pool.active) as u128);
+                let stake_pending_inactive = (coin::value(&stake_pool.pending_inactive) as u128);
+                fee_pending_inactive = (((fee_octa as u128) * stake_pending_inactive / (stake_active + stake_pending_inactive)) as u64);
+                fee_active = fee_octa - fee_pending_inactive;
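+                // E.g. fee_octa = 100 with stake_active = 300 and stake_pending_inactive = 100
+                // yields fee_pending_inactive = 100 * 100 / 400 = 25 and fee_active = 75.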
+            }
+        };
+
         spec {
             // The following addition should not overflow because `num_total_proposals` cannot be larger than 86400,
             // the maximum number of proposals in a day (1 proposal per second).
@@ -1586,6 +1657,21 @@ module aptos_framework::stake {
         spec {
             assume rewards_active + rewards_pending_inactive <= MAX_U64;
         };
+
+        if (std::features::is_distribute_transaction_fee_enabled()) {
+            let mint_cap = &borrow_global<AptosCoinCapabilities>(@aptos_framework).mint_cap;
+            if (fee_active > 0) {
+                coin::merge(&mut stake_pool.active, coin::mint(fee_active, mint_cap));
+            };
+            if (fee_pending_inactive > 0) {
+                coin::merge(&mut stake_pool.pending_inactive, coin::mint(fee_pending_inactive, mint_cap));
+            };
+            let fee_amount = fee_active + fee_pending_inactive;
+            if (fee_amount > 0) {
+                event::emit(DistributeTransactionFee { pool_address, fee_amount });
+            };
+        };
+
         let rewards_amount = rewards_active + rewards_pending_inactive;
         // Pending active stake can now be active.
         coin::merge(&mut stake_pool.active, coin::extract_all(&mut stake_pool.pending_active));
@@ -1805,7 +1891,7 @@ module aptos_framework::stake {
         operator: &signer,
         pool_address: address,
         should_end_epoch: bool,
-    ) acquires AptosCoinCapabilities, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AptosCoinCapabilities, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         let pk_bytes = bls12381::public_key_to_bytes(pk);
         let pop_bytes = bls12381::proof_of_possession_to_bytes(pop);
         rotate_consensus_key(operator, pool_address, pk_bytes, pop_bytes);
@@ -1817,7 +1903,7 @@ module aptos_framework::stake {
 
     #[test_only]
     public fun fast_forward_to_unlock(pool_address: address)
-    acquires AptosCoinCapabilities, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    acquires AptosCoinCapabilities, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         let expiration_time = get_lockup_secs(pool_address);
         timestamp::update_global_time_for_test_secs(expiration_time);
         end_epoch();
@@ -1889,7 +1975,7 @@ module aptos_framework::stake {
         amount: u64,
         should_join_validator_set: bool,
         should_end_epoch: bool,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         let validator_address = signer::address_of(validator);
         account::create_account_for_test(validator_address);
 
@@ -1983,7 +2069,7 @@ module aptos_framework::stake {
     public entry fun test_inactive_validator_can_add_stake_if_exceeding_max_allowed(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, false, false);
@@ -1998,7 +2084,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 100000);
         // Have one validator join the set to ensure the validator set is not empty when main validator joins.
         let (_sk_1, pk_1, pop_1) = generate_identity();
@@ -2017,7 +2103,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_cannot_add_stake_if_exceeding_max_allowed(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         // Validator joins validator set and waits for epoch end so it's in the validator set.
         let (_sk, pk, pop) = generate_identity();
@@ -2032,7 +2118,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_with_pending_inactive_stake_cannot_add_stake_if_exceeding_max_allowed(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         // Validator joins validator set and waits for epoch end so it's in the validator set.
         let (_sk, pk, pop) = generate_identity();
@@ -2052,7 +2138,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk_1, pk_1, pop_1) = generate_identity();
         let (_sk_2, pk_2, pop_2) = generate_identity();
@@ -2070,7 +2156,7 @@ module aptos_framework::stake {
     public entry fun test_end_to_end(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, true, true);
@@ -2127,7 +2213,7 @@ module aptos_framework::stake {
     public entry fun test_inactive_validator_with_existing_lockup_join_validator_set(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, false, false);
@@ -2153,7 +2239,7 @@ module aptos_framework::stake {
     public entry fun test_cannot_reduce_lockup(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, false, false);
@@ -2172,7 +2258,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         // Only 50% voting power increase is allowed in each epoch.
         initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 50);
         let (_sk_1, pk_1, pop_1) = generate_identity();
@@ -2194,7 +2280,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 10000);
         // Need 1 validator to be in the active validator set so joining limit works.
         let (_sk_1, pk_1, pop_1) = generate_identity();
@@ -2216,7 +2302,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         // 100% voting power increase is allowed in each epoch.
         initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 100);
         // Need 1 validator to be in the active validator set so joining limit works.
@@ -2236,7 +2322,7 @@ module aptos_framework::stake {
     public entry fun test_pending_active_validator_leaves_validator_set(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         // Validator joins but epoch hasn't ended, so the validator is still pending_active.
         let (_sk, pk, pop) = generate_identity();
@@ -2260,7 +2346,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_cannot_add_more_stake_than_limit_in_multiple_epochs(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         // Only 50% voting power increase is allowed in each epoch.
         initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 50);
         // Add initial stake and join the validator set.
@@ -2282,7 +2368,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_cannot_add_more_stake_than_limit(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         // Only 50% voting power increase is allowed in each epoch.
         initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 50);
         let (_sk, pk, pop) = generate_identity();
@@ -2296,7 +2382,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_unlock_partial_stake(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         // Reward rate = 10%.
         initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 100);
         let (_sk, pk, pop) = generate_identity();
@@ -2322,7 +2408,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_can_withdraw_all_stake_and_rewards_at_once(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, true, true);
@@ -2359,7 +2445,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_unlocking_more_than_available_stake_should_cap(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, false, false);
@@ -2373,7 +2459,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_withdraw_should_cap_by_inactive_stake(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         // Initial balance = 900 (idle) + 100 (staked) = 1000.
         let (_sk, pk, pop) = generate_identity();
@@ -2398,7 +2484,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_can_reactivate_pending_inactive_stake(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, true, true);
@@ -2417,7 +2503,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_reactivate_more_than_available_pending_inactive_stake_should_cap(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, true, true);
@@ -2434,7 +2520,7 @@ module aptos_framework::stake {
     public entry fun test_active_validator_having_insufficient_remaining_stake_after_withdrawal_gets_kicked(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, true, true);
@@ -2462,7 +2548,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk_1, pk_1, pop_1) = generate_identity();
         let (_sk_2, pk_2, pop_2) = generate_identity();
@@ -2506,7 +2592,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk_1, pk_1, pop_1) = generate_identity();
         let (_sk_2, pk_2, pop_2) = generate_identity();
@@ -2537,7 +2623,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         // Only 50% voting power increase is allowed in each epoch.
         initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 50);
         let (_sk_1, pk_1, pop_1) = generate_identity();
@@ -2558,7 +2644,7 @@ module aptos_framework::stake {
         validator_1: &signer,
         validator_2: &signer,
         validator_3: &signer
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         let validator_1_address = signer::address_of(validator_1);
         let validator_2_address = signer::address_of(validator_2);
         let validator_3_address = signer::address_of(validator_3);
@@ -2652,7 +2738,7 @@ module aptos_framework::stake {
     public entry fun test_delegated_staking_with_owner_cap(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 100, 100);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 0, false, false);
@@ -2705,7 +2791,7 @@ module aptos_framework::stake {
     public entry fun test_validator_cannot_join_post_genesis(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, false, 1, 100, 100);
 
         // Joining the validator set should fail as post genesis validator set change is not allowed.
@@ -2718,7 +2804,7 @@ module aptos_framework::stake {
     public entry fun test_invalid_pool_address(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, true, true);
@@ -2730,7 +2816,7 @@ module aptos_framework::stake {
     public entry fun test_validator_cannot_leave_post_genesis(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, false, 1, 100, 100);
         let (_sk, pk, pop) = generate_identity();
         initialize_test_validator(&pk, &pop, validator, 100, false, false);
@@ -2759,7 +2845,7 @@ module aptos_framework::stake {
         validator_3: &signer,
         validator_4: &signer,
         validator_5: &signer,
-    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         let v1_addr = signer::address_of(validator_1);
         let v2_addr = signer::address_of(validator_2);
         let v3_addr = signer::address_of(validator_3);
@@ -2832,7 +2918,7 @@ module aptos_framework::stake {
         validator_3: &signer,
         validator_4: &signer,
         validator_5: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         let v1_addr = signer::address_of(validator_1);
         let v2_addr = signer::address_of(validator_2);
         let v3_addr = signer::address_of(validator_3);
@@ -2895,7 +2981,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
 
         let validator_1_address = signer::address_of(validator_1);
@@ -2943,7 +3029,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
 
         let genesis_time_in_secs = timestamp::now_seconds();
@@ -2999,7 +3085,7 @@ module aptos_framework::stake {
     public entry fun test_update_performance_statistics_should_not_fail_due_to_out_of_bounds(
         aptos_framework: &signer,
         validator: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
 
         let validator_address = signer::address_of(validator);
@@ -3122,7 +3208,7 @@ module aptos_framework::stake {
         aptos_framework: &signer,
         validator_1: &signer,
         validator_2: &signer,
-    ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         initialize_for_test(aptos_framework);
         let (_sk_1, pk_1, pop_1) = generate_identity();
         let (_sk_2, pk_2, pop_2) = generate_identity();
@@ -3137,9 +3223,95 @@ module aptos_framework::stake {
         assert!(get_validator_state(validator_to_remove) == VALIDATOR_STATUS_PENDING_INACTIVE, 1);
     }
 
+    #[test(vm = @0x0, aptos_framework = @0x1, validator_0 = @0x123, validator_1 = @0x234)]
+    public entry fun test_transaction_fee(
+        vm: &signer,
+        aptos_framework: &signer,
+        validator_0: &signer,
+        validator_1: &signer,
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+        initialize_for_test(aptos_framework);
+        initialize_pending_transaction_fee(aptos_framework);
+        features::change_feature_flags_for_testing(aptos_framework, vector[features::get_distribute_transaction_fee_feature()], vector[]);
+        let address_0 = signer::address_of(validator_0);
+        let address_1 = signer::address_of(validator_1);
+        let (_sk_0, pk_0, pop_0) = generate_identity();
+        let (_sk_1, pk_1, pop_1) = generate_identity();
+        initialize_test_validator(&pk_0, &pop_0, validator_0, 100, true, false);
+        initialize_test_validator(&pk_1, &pop_1, validator_1, 100, true, true);
+        assert!(vector::length(&borrow_global<ValidatorSet>(@aptos_framework).active_validators) == 2, 0);
+
+        let validator_to_remove = signer::address_of(validator_0);
+        remove_validators(aptos_framework, &vector[validator_to_remove]);
+        assert!(vector::length(&borrow_global<ValidatorSet>(@aptos_framework).active_validators) == 1, 0);
+
+        // Validator 0 is pending_inactive and validator 1 is active; both should receive fees.
+
+        {
+            let fee_table = &borrow_global<PendingTransactionFee>(@aptos_framework).pending_fee_by_validator;
+            assert!(fee_table.contains(&0), 0);
+            assert!(fee_table.contains(&1), 0);
+        };
+
+        record_fee(vm, vector[], vector[]);
+        record_fee(vm, vector[get_validator_index(address_0)], vector[1]);
+        record_fee(vm, vector[get_validator_index(address_1)], vector[2]);
+        record_fee(vm, vector[get_validator_index(address_0), get_validator_index(address_1)], vector[10, 220]);
+
+        {
+            let fee_table = &borrow_global<PendingTransactionFee>(@aptos_framework).pending_fee_by_validator;
+            assert!(fee_table.borrow(&get_validator_index(address_0)).read() == 11, 0);
+            assert!(fee_table.borrow(&get_validator_index(address_1)).read() == 222, 0);
+            end_epoch();
+
+            assert!(event::was_event_emitted(&DistributeTransactionFee { pool_address: address_0, fee_amount: 11 }), 0);
+            assert!(event::was_event_emitted(&DistributeTransactionFee { pool_address: address_1, fee_amount: 222 }), 0);
+        };
+
+        let fee_table = &borrow_global<PendingTransactionFee>(@aptos_framework).pending_fee_by_validator;
+        // validator 1 is at index 0 now.
+        assert!(fee_table.contains(&0), 0);
+        assert!(!fee_table.contains(&1), 0);
+
+        assert!(event::emitted_events<DistributeTransactionFee>().length() == 2, 0);
+        // No more fee events are emitted at this epoch ending.
+        end_epoch();
+        assert!(event::emitted_events<DistributeTransactionFee>().length() == 2, 0);
+    }
+
+    #[test(vm = @0x0, aptos_framework = @0x1, validator_0 = @0x123, validator_1 = @0x234)]
+    #[expected_failure(abort_code = 0x10002, location = 0x1::big_ordered_map)]
+    public entry fun test_transaction_fee_non_validator(
+        vm: &signer,
+        aptos_framework: &signer,
+        validator_0: &signer,
+        validator_1: &signer,
+    ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+        initialize_for_test(aptos_framework);
+        initialize_pending_transaction_fee(aptos_framework);
+        features::change_feature_flags_for_testing(aptos_framework, vector[features::get_distribute_transaction_fee_feature()], vector[]);
+        let (_sk_0, pk_0, pop_0) = generate_identity();
+        let (_sk_1, pk_1, pop_1) = generate_identity();
+        initialize_test_validator(&pk_0, &pop_0, validator_0, 100, true, false);
+        initialize_test_validator(&pk_1, &pop_1, validator_1, 100, true, true);
+        assert!(vector::length(&borrow_global<ValidatorSet>(@aptos_framework).active_validators) == 2, 0);
+
+        let validator_to_remove = signer::address_of(validator_0);
+        remove_validators(aptos_framework, &vector[validator_to_remove]);
+        assert!(vector::length(&borrow_global<ValidatorSet>(@aptos_framework).active_validators) == 1, 0);
+
+        // Validator 0 is pending_inactive and validator 1 is active.
+
+        end_epoch();
+        assert!(event::emitted_events<DistributeTransactionFee>().length() == 0, 0);
+
+        // Validator index is out of bounds, so record_fee aborts.
+        record_fee(vm, vector[1], vector[1]);
+    }
+
     #[test_only]
     public fun end_epoch(
-    ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
+    ) acquires AptosCoinCapabilities, PendingTransactionFee, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
         // Set the number of blocks to 1, to give out rewards to non-failing validators.
         set_validator_perf_at_least_one_block();
         timestamp::fast_forward_seconds(EPOCH_DURATION);
diff --git a/aptos-move/framework/aptos-framework/sources/transaction_validation.move b/aptos-move/framework/aptos-framework/sources/transaction_validation.move
index bb5daa6328d26..0f2df245d847b 100644
--- a/aptos-move/framework/aptos-framework/sources/transaction_validation.move
+++ b/aptos-move/framework/aptos-framework/sources/transaction_validation.move
@@ -147,10 +147,16 @@ module aptos_framework::transaction_validation {
         // Check if the authentication key is valid
         if (!skip_auth_key_check(is_simulation, &txn_authentication_key)) {
             if (option::is_some(&txn_authentication_key)) {
-                assert!(
-                    txn_authentication_key == option::some(account::get_authentication_key(sender_address)),
-                    error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
-                );
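+                // With sponsored automatic account creation, a fee payer may submit for a
+                // sender account that does not exist yet; the auth key check is skipped only
+                // in that case (sender differs from gas payer, account absent, feature on).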
+                if (
+                    sender_address == gas_payer_address ||
+                    account::exists_at(sender_address) ||
+                    !features::sponsored_automatic_account_creation_enabled()
+                ) {
+                    assert!(
+                        txn_authentication_key == option::some(account::get_authentication_key(sender_address)),
+                        error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+                    );
+                };
             } else {
                 assert!(
                     allow_missing_txn_authentication_key(sender_address),
diff --git a/aptos-move/framework/aptos-framework/sources/transaction_validation.spec.move b/aptos-move/framework/aptos-framework/sources/transaction_validation.spec.move
index 23bedb1640642..70fe400d7f802 100644
--- a/aptos-move/framework/aptos-framework/sources/transaction_validation.spec.move
+++ b/aptos-move/framework/aptos-framework/sources/transaction_validation.spec.move
@@ -257,7 +257,7 @@ spec aptos_framework::transaction_validation {
         pragma verify_duration_estimate = 120;
 
         aborts_if !features::spec_is_enabled(features::FEE_PAYER_ENABLED);
-        let gas_payer = create_signer::create_signer(fee_payer_address);
+        let gas_payer = create_signer::spec_create_signer(fee_payer_address);
         include PrologueCommonAbortsIf {
             gas_payer,
             replay_protector: ReplayProtector::SequenceNumber(txn_sequence_number),
diff --git a/aptos-move/framework/move-stdlib/doc/cmp.md b/aptos-move/framework/move-stdlib/doc/cmp.md
index f6f43999bfe93..e903b88ec007d 100644
--- a/aptos-move/framework/move-stdlib/doc/cmp.md
+++ b/aptos-move/framework/move-stdlib/doc/cmp.md
@@ -14,7 +14,14 @@
 -  [Function `is_gt`](#0x1_cmp_is_gt)
 -  [Function `is_ge`](#0x1_cmp_is_ge)
 -  [Specification](#@Specification_0)
+    -  [Enum `Ordering`](#@Specification_0_Ordering)
     -  [Function `compare`](#@Specification_0_compare)
+    -  [Function `is_eq`](#@Specification_0_is_eq)
+    -  [Function `is_ne`](#@Specification_0_is_ne)
+    -  [Function `is_lt`](#@Specification_0_is_lt)
+    -  [Function `is_le`](#@Specification_0_is_le)
+    -  [Function `is_gt`](#@Specification_0_is_gt)
+    -  [Function `is_ge`](#@Specification_0_is_ge)
 
 
 
@@ -263,6 +270,74 @@ and if equal we proceed to the next.
 ## Specification
 
+
+<a id="@Specification_0_Ordering"></a>
+
+### Enum `Ordering`
+
+
+<pre><code>enum Ordering has copy, drop
+</code></pre>
+
+Less
+
+Fields
+
+Equal
+
+Fields
+
+Greater
+
+Fields
+
+
+<pre><code>pragma intrinsic;
+</code></pre>
+
@@ -274,7 +349,115 @@ and if equal we proceed to the next.
 
 ### Function `compare`
 
 
-<pre><code>pragma opaque;
+<pre><code>pragma intrinsic;
 </code></pre>
+
+
+<a id="@Specification_0_is_eq"></a>
+
+### Function `is_eq`
+
+
+<pre><code>public fun is_eq(self: &cmp::Ordering): bool
+</code></pre>
+
+
+<pre><code>pragma intrinsic;
+pragma opaque;
+pragma verify = false;
+</code></pre>
+
+
+<a id="@Specification_0_is_ne"></a>
+
+### Function `is_ne`
+
+
+<pre><code>public fun is_ne(self: &cmp::Ordering): bool
+</code></pre>
+
+
+<pre><code>pragma intrinsic;
+pragma opaque;
+pragma verify = false;
+</code></pre>
+
+
+<a id="@Specification_0_is_lt"></a>
+
+### Function `is_lt`
+
+
+<pre><code>public fun is_lt(self: &cmp::Ordering): bool
+</code></pre>
+
+
+<pre><code>pragma intrinsic;
+pragma opaque;
+pragma verify = false;
+</code></pre>
+
+
+<a id="@Specification_0_is_le"></a>
+
+### Function `is_le`
+
+
+<pre><code>public fun is_le(self: &cmp::Ordering): bool
+</code></pre>
+
+
+<pre><code>pragma intrinsic;
+pragma opaque;
+pragma verify = false;
+</code></pre>
+
+
+<a id="@Specification_0_is_gt"></a>
+
+### Function `is_gt`
+
+
+<pre><code>public fun is_gt(self: &cmp::Ordering): bool
+</code></pre>
+
+
+<pre><code>pragma intrinsic;
+pragma opaque;
+pragma verify = false;
+</code></pre>
+
+
+<a id="@Specification_0_is_ge"></a>
+
+### Function `is_ge`
+
+
+<pre><code>public fun is_ge(self: &cmp::Ordering): bool
+</code></pre>
+
+
+<pre><code>pragma intrinsic;
+pragma opaque;
+pragma verify = false;
+</code></pre>
diff --git a/aptos-move/framework/move-stdlib/sources/cmp.move b/aptos-move/framework/move-stdlib/sources/cmp.move
index af90ff7b275a0..6356622367f20 100644
--- a/aptos-move/framework/move-stdlib/sources/cmp.move
+++ b/aptos-move/framework/move-stdlib/sources/cmp.move
@@ -41,8 +41,47 @@ module std::cmp {
     }
 
     spec compare {
-        // TODO: temporary mockup.
+        pragma intrinsic;
+    }
+
+    spec Ordering {
+        pragma intrinsic;
+    }
+
+    spec is_eq {
+        pragma intrinsic;
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec is_ne {
+        pragma intrinsic;
         pragma opaque;
+        pragma verify = false;
+    }
+
+    spec is_lt {
+        pragma intrinsic;
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec is_le {
+        pragma intrinsic;
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec is_gt {
+        pragma intrinsic;
+        pragma opaque;
+        pragma verify = false;
+    }
+
+    spec is_ge {
+        pragma intrinsic;
+        pragma opaque;
+        pragma verify = false;
     }
 
     #[test_only]
@@ -147,4 +186,128 @@ module std::cmp {
         assert!(compare(&SomeEnum::V5 { field_5: SimpleEnum::V { field: 5}}, &SomeEnum::V5 { field_5: SimpleEnum::V { field: 3}}) is Ordering::Greater, 13);
         assert!(compare(&SomeEnum::V5 { field_5: SimpleEnum::V { field: 3}}, &SomeEnum::V5 { field_5: SimpleEnum::V { field: 5}}) is Ordering::Less, 14);
     }
+
+    #[verify_only]
+    fun test_verify_compare_preliminary_types() {
+        spec {
+            assert compare(1, 5).is_ne();
+            assert !compare(1, 5).is_eq();
+            assert compare(1, 5).is_lt();
+            assert compare(1, 5).is_le();
+            assert compare(5, 5).is_eq();
+            assert !compare(5, 5).is_ne();
+            assert !compare(5, 5).is_lt();
+            assert compare(5, 5).is_le();
+            assert !compare(7, 5).is_eq();
+            assert compare(7, 5).is_ne();
+            assert !compare(7, 5).is_lt();
+            assert !compare(7, 5).is_le();
+            assert compare(false, true).is_ne();
+            assert compare(false, true).is_lt();
+            assert compare(true, false).is_ge();
+            assert compare(true, true).is_eq();
+        };
+    }
+
+    #[verify_only]
+    fun test_verify_compare_vectors() {
+        let empty: vector<u64> = vector[];
+        let v1 = vector[1 as u64];
+        let v8 = vector[1 as u8, 2];
+        let v32_1 = vector[1 as u32, 2, 3];
+        let v32_2 = vector[5 as u32];
+        spec {
+            assert compare(empty, v1) == Ordering::Less;
+            assert compare(empty, empty) == Ordering::Equal;
+            assert compare(v1, empty) == Ordering::Greater;
+            assert compare(v8, v8) == Ordering::Equal;
+            assert compare(v32_1, v32_2) is Ordering::Less;
+            assert compare(v32_2, v32_1) == Ordering::Greater;
+        };
+    }
+
+    #[verify_only]
+    struct SomeStruct has drop {
+        field_1: u64,
+        field_2: u64,
+    }
+
+    #[verify_only]
+    fun test_verify_compare_structs() {
+        let s1 = SomeStruct { field_1: 1, field_2: 2};
+        let s2 = SomeStruct { field_1: 1, field_2: 3};
+        let s3 = SomeStruct { field_1: 1, field_2: 1};
+        let s4 = SomeStruct { field_1: 2, field_2: 1};
+        spec {
+            assert compare(s1, s1) == Ordering::Equal;
+            assert compare(s1, s2) == Ordering::Less;
+            assert compare(s1, s3) == Ordering::Greater;
+            assert compare(s4, s1) == Ordering::Greater;
+        };
+    }
+
+    #[verify_only]
+    fun test_verify_compare_vector_of_structs() {
+        let v1 = vector[SomeStruct { field_1: 1, field_2: 2}];
+        let v2 = vector[SomeStruct { field_1: 1, field_2: 3}];
+        spec {
+            assert compare(v1, v2) == Ordering::Less;
+            assert compare(v1, v1) == Ordering::Equal;
+        };
+    }
+
+    #[verify_only]
+    enum SomeEnum has drop {
+        V1 { field_1: u64 },
+        V2 { field_2: u64 },
+        V3 { field_3: SomeStruct },
+        V4 { field_4: vector<u64> },
+        V5 { field_5: SimpleEnum },
+    }
+
+    #[verify_only]
+    enum SimpleEnum has drop {
+        V { field: u64 },
+    }
+
+    #[verify_only]
+    fun test_verify_compare_enums() {
+        let e1 = SomeEnum::V1 { field_1: 6};
+        let e2 = SomeEnum::V2 { field_2: 1};
+        let e3 = SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 2}};
+        let e4 = SomeEnum::V4 { field_4: vector[1, 2]};
+        let e5 = SomeEnum::V5 { field_5: SimpleEnum::V { field: 3}};
+        spec {
+            assert compare(e1, e1) == Ordering::Equal;
+            assert compare(e1, e2) == Ordering::Less;
+            assert compare(e2, e1) == Ordering::Greater;
+            assert compare(e3, e4) == Ordering::Less;
+            assert compare(e5, e4) == Ordering::Greater;
+        };
+    }
+
+    #[verify_only]
+    struct SomeStruct_BV has copy, drop {
+        field: u64
+    }
+
+    spec SomeStruct_BV {
+        pragma bv=b"0";
+    }
+
+    #[verify_only]
+    fun test_compare_bv() {
+        let a = 1;
+        let b = 5;
+        let se_a = SomeStruct_BV { field: a};
+        let se_b = SomeStruct_BV { field: b};
+        let v_a = vector[a];
+        let v_b = vector[b];
+        spec {
+            assert compare(a, b) == Ordering::Less;
+            assert compare(se_a, se_b) == Ordering::Less;
+            assert compare(v_a, v_b) == Ordering::Less;
+        };
+    }
 }
diff --git a/aptos-move/framework/move-stdlib/src/natives/bcs.rs b/aptos-move/framework/move-stdlib/src/natives/bcs.rs
index 8781b367ae46c..91c5616446a10 100644
--- a/aptos-move/framework/move-stdlib/src/natives/bcs.rs
+++ b/aptos-move/framework/move-stdlib/src/natives/bcs.rs
@@ -70,7 +70,8 @@ fn native_to_bytes(
     let val = ref_to_val.read_ref()?;
 
     let function_value_extension = context.function_value_extension();
-    let serialized_value = match ValueSerDeContext::new()
+    let max_value_nest_depth = context.max_value_nest_depth();
+    let serialized_value = match ValueSerDeContext::new(max_value_nest_depth)
         .with_legacy_signer()
         .with_func_args_deserialization(&function_value_extension)
         .serialize(&val, &layout)?
@@ -138,7 +139,8 @@ fn serialized_size_impl(
     let ty_layout = context.type_to_type_layout(ty)?;
 
     let function_value_extension = context.function_value_extension();
-    ValueSerDeContext::new()
+    let max_value_nest_depth = context.max_value_nest_depth();
+    ValueSerDeContext::new(max_value_nest_depth)
         .with_legacy_signer()
         .with_func_args_deserialization(&function_value_extension)
         .with_delayed_fields_serde()
diff --git a/aptos-move/framework/move-stdlib/src/natives/cmp.rs b/aptos-move/framework/move-stdlib/src/natives/cmp.rs
index d867e8a5dc0e4..e3950b81736c3 100644
--- a/aptos-move/framework/move-stdlib/src/natives/cmp.rs
+++ b/aptos-move/framework/move-stdlib/src/natives/cmp.rs
@@ -47,8 +47,8 @@ fn native_compare(
 
     let cost = CMP_COMPARE_BASE
         + CMP_COMPARE_PER_ABS_VAL_UNIT
-            * (context.abs_val_size_dereferenced(&args[0])
-                + context.abs_val_size_dereferenced(&args[1]));
+            * (context.abs_val_size_dereferenced(&args[0])?
+                + context.abs_val_size_dereferenced(&args[1])?);
     context.charge(cost)?;
 
     let ordering = args[0].compare(&args[1])?;
diff --git a/aptos-move/framework/src/aptos-natives.bpl b/aptos-move/framework/src/aptos-natives.bpl
index e2c34cf5f21a0..0e433ccb33d9a 100644
--- a/aptos-move/framework/src/aptos-natives.bpl
+++ b/aptos-move/framework/src/aptos-natives.bpl
@@ -17,9 +17,167 @@ procedure {:inline 1} $1_object_exists_at{{S}}(object: int) returns (res: bool)
 
 {%- endfor %}
 
+datatype $1_cmp_Ordering {
+    $1_cmp_Ordering_Less(),
+    $1_cmp_Ordering_Equal(),
+    $1_cmp_Ordering_Greater()
+}
+
+function $IsValid'$1_cmp_Ordering_Less'(s: $1_cmp_Ordering): bool {
+    true
+}
+
+function $IsValid'$1_cmp_Ordering_Equal'(s: $1_cmp_Ordering): bool {
+    true
+}
+
+function $IsValid'$1_cmp_Ordering_Greater'(s: $1_cmp_Ordering): bool {
+    true
+}
+
+function $IsValid'$1_cmp_Ordering'(s: $1_cmp_Ordering): bool {
+    true
+}
+
+function {:inline} $IsEqual'$1_cmp_Ordering'(s1: $1_cmp_Ordering, s2: $1_cmp_Ordering): bool {
+    s1 == s2
+}
+
+function $Arbitrary_value_of'$1_cmp_Ordering'(): $1_cmp_Ordering;
+
+function {:inline} $1_cmp_$compare'bool'(s1: bool, s2: bool): $1_cmp_Ordering {
+    if s1 == s2 then $1_cmp_Ordering_Equal()
+    else if s1 == true then $1_cmp_Ordering_Greater()
+    else $1_cmp_Ordering_Less()
+}
+
+procedure {:inline 1} $1_cmp_compare'bool'(s1: bool, s2: bool) returns ($ret0: $1_cmp_Ordering) {
+    $ret0 := $1_cmp_$compare'bool'(s1, s2);
+    return;
+}
+
+function {:inline} $1_cmp_$compare'signer'(s1: $signer, s2: $signer): $1_cmp_Ordering {
+    if s1 == s2 then $1_cmp_Ordering_Equal()
+    else if s1 is $signer && s2 is $permissioned_signer then $1_cmp_Ordering_Less()
+    else if s1 is $permissioned_signer && s2 is $signer then $1_cmp_Ordering_Greater()
+    else if s1 is $signer then
+        $compare_int(s1 -> $addr, s2 -> $addr)
+    else if s1 -> $addr == s2 -> $addr then
+        $compare_int(s1 -> $permission_addr, s2 -> $permission_addr)
+    else
+        $compare_int(s1 -> $addr, s2 -> $addr)
+}
+
+procedure {:inline 1} $1_cmp_compare'signer'(s1: $signer, s2: $signer) returns ($ret0: $1_cmp_Ordering) {
+    $ret0 := $1_cmp_$compare'signer'(s1, s2);
+    return;
+}
+
+function $compare_int(s1: int, s2: int): $1_cmp_Ordering {
+    if s1 == s2 then $1_cmp_Ordering_Equal()
+    else if s1 > s2 then $1_cmp_Ordering_Greater()
+    else $1_cmp_Ordering_Less()
+}
+
+function {:inline} $1_cmp_$compare'num'(s1: int, s2: int): $1_cmp_Ordering {
+    $compare_int(s1, s2)
+}
+
+procedure {:inline 1} $1_cmp_compare'num'(s1: int, s2: int) returns ($ret0: $1_cmp_Ordering) {
+    $ret0 := $compare_int(s1, s2);
+    return;
+}
+
+function {:inline} $1_cmp_$compare'int'(s1: int, s2: int): $1_cmp_Ordering {
+    $compare_int(s1, s2)
+}
+
+procedure {:inline 1} $1_cmp_compare'int'(s1: int, s2: int) returns ($ret0: $1_cmp_Ordering) {
+    $ret0 := $compare_int(s1, s2);
+    return;
+}
+
+{%- for impl in bv_instances %}
+
+function {:inline} $1_cmp_$compare'bv{{impl.base}}'(s1: bv{{impl.base}}, s2: bv{{impl.base}}): $1_cmp_Ordering {
+    if s1 == s2 then $1_cmp_Ordering_Equal()
+    else if $Gt'Bv{{impl.base}}'(s1,s2) then $1_cmp_Ordering_Greater()
+    else $1_cmp_Ordering_Less()
+}
+
+procedure {:inline 1} $1_cmp_compare'bv{{impl.base}}'(s1: bv{{impl.base}}, s2: bv{{impl.base}}) returns ($ret0: $1_cmp_Ordering) {
+    $ret0 := $1_cmp_$compare'bv{{impl.base}}'(s1, s2);
+    return;
+}
+
+{%- endfor %}
+
+{%- for instance in cmp_int_instances -%}
+{%- set S = instance.suffix -%}
+{%- set T = instance.name -%}
 
-{%- for instance in aggregator_v2_instances %}
+function {:inline} $1_cmp_$compare'{{S}}'(s1: {{T}}, s2: {{T}}): $1_cmp_Ordering {
+    $compare_int(s1, s2)
+}
+
+
+procedure {:inline 1} $1_cmp_compare'{{S}}'(s1: {{T}}, s2: {{T}}) returns ($ret0: $1_cmp_Ordering) {
+    $ret0 := $compare_int(s1, s2);
+    return;
+}
+
+{%- endfor %}
+
+{%- for instance in cmp_vector_instances -%}
+{%- set S = instance.suffix -%}
+{%- set T = instance.name -%}
+
+    {% set concat_s = "/" ~ S ~ "/" %}
+    {% set rest_s = concat_s | trim_start_matches(pat="/vec'") | trim_end_matches(pat="'/") %}
+
+    function {:inline} $1_cmp_$compare'{{S}}'(v1: {{T}}, v2: {{T}}): $1_cmp_Ordering {
+        if $IsEqual'{{S}}'(v1, v2) then $1_cmp_Ordering_Equal()
+        else if v1 -> l == 0 && v2 -> l != 0 then
+            $1_cmp_Ordering_Less()
+        else if v2 -> l == 0 && v1 -> l != 0 then
+            $1_cmp_Ordering_Greater()
+        else
+            $compare_vec'{{S}}'(v1, v2)
+    }
+
+    procedure {:inline 1} $1_cmp_compare'{{S}}'(v1: {{T}}, v2: {{T}}) returns ($ret0: $1_cmp_Ordering) {
+        $ret0 := $1_cmp_$compare'{{S}}'(v1, v2);
+        return;
+    }
+
+    function $compare_vec'{{S}}'(v1: {{T}}, v2: {{T}}): $1_cmp_Ordering;
+    axiom {:ctor "Vec"} (forall v1: {{T}}, v2: {{T}}, res: $1_cmp_Ordering ::
+        (var res := $compare_vec'{{S}}'(v1, v2);
+        if v1 -> l == 0 && v2 -> l != 0 then
+            res == $1_cmp_Ordering_Less()
+        else if v2 -> l == 0 && v1 -> l != 0 then
+            res == $1_cmp_Ordering_Greater()
+        else if ReadVec(v1, 0) == ReadVec(v2, 0) then
+            res == $compare_vec'{{S}}'(RemoveAtVec(v1, 0), RemoveAtVec(v2, 0))
+        else
+            res == $1_cmp_$compare'{{rest_s}}'(ReadVec(v1, 0), ReadVec(v2, 0))));
+
+{%- endfor %}
+
+{% for instance in cmp_table_instances -%}
+{%- set S = instance.suffix -%}
+{%- set T = instance.name -%}
+
+    function {:inline} $1_cmp_$compare'{{S}}'(v1: {{T}}, v2: {{T}}): $1_cmp_Ordering {
+        $Arbitrary_value_of'$1_cmp_Ordering'()
+    }
+
+    procedure {:inline 1} $1_cmp_compare'{{S}}'(v1: {{T}}, v2: {{T}}) returns ($ret0: $1_cmp_Ordering) {
+        $ret0 := $1_cmp_$compare'{{S}}'(v1, v2);
+        return;
+    }
+
+{%- endfor %}
+
+
+{% for instance in aggregator_v2_instances %}
 {%- set S = instance.suffix -%}
 {%- set T = instance.name -%}
 
@@ -234,6 +392,15 @@ function {:inline} $1_aggregator_v2_$read'{{S}}'(s: $1_aggregator_v2_Aggregator'
 function $1_aggregator_v2_$is_at_least_impl'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}', min_amount: {{T}}): bool;
 {% endif -%}
 
+function {:inline} $1_cmp_$compare'$1_aggregator_v2_Aggregator'{{S}}''(s1: $1_aggregator_v2_Aggregator'{{S}}', s2: $1_aggregator_v2_Aggregator'{{S}}'): $1_cmp_Ordering {
+    $Arbitrary_value_of'$1_cmp_Ordering'()
+}
+
+procedure {:inline 1} $1_cmp_compare'$1_aggregator_v2_Aggregator'{{S}}''(s1: $1_aggregator_v2_Aggregator'{{S}}', s2: $1_aggregator_v2_Aggregator'{{S}}') returns ($ret0: $1_cmp_Ordering) {
+    $ret0 := $1_cmp_$compare'$1_aggregator_v2_Aggregator'{{S}}''(s1, s2);
+    return;
+}
+
 {%- endfor %}
 
 // ==================================================================================
@@ -305,6 +472,17 @@ axiom (forall limit: int :: {$1_aggregator_factory_spec_new_aggregator(limit)}
     (var agg := $1_aggregator_factory_spec_new_aggregator(limit);
     $1_aggregator_spec_aggregator_get_val(agg) == 0));
 
+
+function {:inline} $1_cmp_$compare'$1_aggregator_Aggregator'(s1: $1_aggregator_Aggregator, s2: $1_aggregator_Aggregator): $1_cmp_Ordering {
+    $Arbitrary_value_of'$1_cmp_Ordering'()
+}
+
+procedure {:inline 1} $1_cmp_compare'$1_aggregator_Aggregator'(s1: $1_aggregator_Aggregator, s2: $1_aggregator_Aggregator) returns ($ret0: $1_cmp_Ordering) {
+    $ret0 := $1_cmp_$compare'$1_aggregator_Aggregator'(s1, s2);
+    return;
+}
+
+
 //
================================================================================== // Native for function_info diff --git a/aptos-move/framework/src/natives/account_abstraction.rs b/aptos-move/framework/src/natives/account_abstraction.rs index 7a14c5a3904a5..c236fafb9a2ef 100644 --- a/aptos-move/framework/src/natives/account_abstraction.rs +++ b/aptos-move/framework/src/natives/account_abstraction.rs @@ -32,9 +32,10 @@ pub(crate) fn native_dispatch( .check_is_special_or_visited(module_name.address(), module_name.name()) .map_err(|_| SafeNativeError::Abort { abort_code: 4 })?; + context.charge(DISPATCHABLE_AUTHENTICATE_DISPATCH_BASE)?; + // Use Error to instruct the VM to perform a function call dispatch. Err(SafeNativeError::FunctionDispatch { - cost: context.eval_gas(DISPATCHABLE_AUTHENTICATE_DISPATCH_BASE), module_name, func_name, ty_args, diff --git a/aptos-move/framework/src/natives/aggregator_natives/aggregator_v2.rs b/aptos-move/framework/src/natives/aggregator_natives/aggregator_v2.rs index 693ebdf9d71cc..6de952539f0c7 100644 --- a/aptos-move/framework/src/natives/aggregator_natives/aggregator_v2.rs +++ b/aptos-move/framework/src/natives/aggregator_natives/aggregator_v2.rs @@ -97,7 +97,7 @@ fn create_string_value(value: Vec) -> Value { } fn get_context_data<'t, 'b>( - context: &'t mut SafeNativeContext<'_, 'b, '_>, + context: &'t mut SafeNativeContext<'_, 'b, '_, '_>, ) -> Option<(&'b dyn DelayedFieldResolver, RefMut<'t, DelayedFieldData>)> { let aggregator_context = context.extensions().get::(); if aggregator_context.delayed_field_optimization_enabled { diff --git a/aptos-move/framework/src/natives/cryptography/algebra/mod.rs b/aptos-move/framework/src/natives/cryptography/algebra/mod.rs index 6a65be4c4e7eb..8de629b0d5e28 100644 --- a/aptos-move/framework/src/natives/cryptography/algebra/mod.rs +++ b/aptos-move/framework/src/natives/cryptography/algebra/mod.rs @@ -70,7 +70,7 @@ impl TryFrom for Structure { type Error = (); fn try_from(value: TypeTag) -> Result { - match value.to_string().as_str() { + match value.to_canonical_string().as_str() { "0x1::bls12381_algebra::Fr" => Ok(Structure::BLS12381Fr), "0x1::bls12381_algebra::Fq12" => Ok(Structure::BLS12381Fq12), "0x1::bls12381_algebra::G1" => Ok(Structure::BLS12381G1), @@ -124,7 +124,7 @@ impl TryFrom for SerializationFormat { type Error = (); fn try_from(value: TypeTag) -> Result { - match value.to_string().as_str() { + match value.to_canonical_string().as_str() { "0x1::bls12381_algebra::FormatFq12LscLsb" => { Ok(SerializationFormat::BLS12381Fq12LscLsb) }, @@ -166,7 +166,7 @@ impl TryFrom for HashToStructureSuite { type Error = (); fn try_from(value: TypeTag) -> Result { - match value.to_string().as_str() { + match value.to_canonical_string().as_str() { "0x1::bls12381_algebra::HashG1XmdSha256SswuRo" => { Ok(HashToStructureSuite::Bls12381g1XmdSha256SswuRo) }, diff --git a/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs b/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs index 1bbd16f456023..8d299545e655d 100644 --- a/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs +++ b/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs @@ -44,9 +44,10 @@ pub(crate) fn native_dispatch( check_visited(module_name.address(), module_name.name()) .map_err(|_| SafeNativeError::Abort { abort_code: 4 })?; + context.charge(DISPATCHABLE_FUNGIBLE_ASSET_DISPATCH_BASE)?; + // Use Error to instruct the VM to perform a function call dispatch. 
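+    // Note: the dispatch base cost is charged eagerly via `context.charge` above,
+    // so the `FunctionDispatch` error below no longer carries a cost for the VM
+    // to evaluate and apply after the fact.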
Err(SafeNativeError::FunctionDispatch { - cost: context.eval_gas(DISPATCHABLE_FUNGIBLE_ASSET_DISPATCH_BASE), module_name, func_name, ty_args, diff --git a/aptos-move/framework/src/natives/event.rs b/aptos-move/framework/src/natives/event.rs index 0aeb399ff439d..6bebf71618871 100644 --- a/aptos-move/framework/src/natives/event.rs +++ b/aptos-move/framework/src/natives/event.rs @@ -87,14 +87,15 @@ fn native_write_to_event_store( // TODO(Gas): Get rid of abstract memory size context.charge( EVENT_WRITE_TO_EVENT_STORE_BASE - + EVENT_WRITE_TO_EVENT_STORE_PER_ABSTRACT_VALUE_UNIT * context.abs_val_size(&msg), + + EVENT_WRITE_TO_EVENT_STORE_PER_ABSTRACT_VALUE_UNIT * context.abs_val_size(&msg)?, )?; let ty_tag = context.type_to_type_tag(&ty)?; let (layout, has_aggregator_lifting) = context.type_to_type_layout_with_identifier_mappings(&ty)?; let function_value_extension = context.function_value_extension(); - let blob = ValueSerDeContext::new() + let max_value_nest_depth = context.max_value_nest_depth(); + let blob = ValueSerDeContext::new(max_value_nest_depth) .with_delayed_fields_serde() .with_func_args_deserialization(&function_value_extension) .serialize(&msg, &layout)? @@ -161,7 +162,8 @@ fn native_emitted_events_by_handle( .into_iter() .map(|blob| { let function_value_extension = context.function_value_extension(); - ValueSerDeContext::new() + let max_value_nest_depth = context.max_value_nest_depth(); + ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(&function_value_extension) .deserialize(blob, &ty_layout) .ok_or_else(|| { @@ -194,7 +196,8 @@ fn native_emitted_events( .into_iter() .map(|blob| { let function_value_extension = context.function_value_extension(); - ValueSerDeContext::new() + let max_value_nest_depth = context.max_value_nest_depth(); + ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(&function_value_extension) .with_delayed_fields_serde() .deserialize(blob, &ty_layout) @@ -222,7 +225,7 @@ fn native_write_module_event_to_store( context.charge( EVENT_WRITE_TO_EVENT_STORE_BASE - + EVENT_WRITE_TO_EVENT_STORE_PER_ABSTRACT_VALUE_UNIT * context.abs_val_size(&msg), + + EVENT_WRITE_TO_EVENT_STORE_PER_ABSTRACT_VALUE_UNIT * context.abs_val_size(&msg)?, )?; let type_tag = context.type_to_type_tag(&ty)?; @@ -262,7 +265,8 @@ fn native_write_module_event_to_store( context.type_to_type_layout_with_identifier_mappings(&ty)?; let function_value_extension = context.function_value_extension(); - let blob = ValueSerDeContext::new() + let max_value_nest_depth = context.max_value_nest_depth(); + let blob = ValueSerDeContext::new(max_value_nest_depth) .with_delayed_fields_serde() .with_func_args_deserialization(&function_value_extension) .serialize(&msg, &layout)? diff --git a/aptos-move/framework/src/natives/function_info.rs b/aptos-move/framework/src/natives/function_info.rs index c342cf3c420f6..ef13782336098 100644 --- a/aptos-move/framework/src/natives/function_info.rs +++ b/aptos-move/framework/src/natives/function_info.rs @@ -182,7 +182,13 @@ fn native_load_function_impl( context.charge(FUNCTION_INFO_LOAD_FUNCTION_BASE)?; let (module_name, _) = extract_function_info(&mut arguments)?; - Err(SafeNativeError::LoadModule { module_name }) + if context.has_direct_gas_meter_access_in_native_context() { + context.charge_gas_for_dependencies(module_name)?; + Ok(smallvec![]) + } else { + // Legacy flow, VM will charge gas for module loading. 
+ Err(SafeNativeError::LoadModule { module_name }) + } } /*************************************************************************************************** diff --git a/aptos-move/framework/src/natives/string_utils.rs b/aptos-move/framework/src/natives/string_utils.rs index 8590d0b8f9fac..3475c06fa8ee2 100644 --- a/aptos-move/framework/src/natives/string_utils.rs +++ b/aptos-move/framework/src/natives/string_utils.rs @@ -9,8 +9,10 @@ use aptos_native_interface::{ }; use aptos_types::on_chain_config::FeatureFlag; use ark_std::iterable::Iterable; +use move_binary_format::errors::PartialVMError; use move_core_types::{ account_address::AccountAddress, + function::ClosureMask, language_storage::TypeTag, u256, value::{MoveFieldLayout, MoveStructLayout, MoveTypeLayout, MASTER_ADDRESS_FIELD_OFFSET}, @@ -18,7 +20,6 @@ use move_core_types::{ use move_vm_runtime::native_functions::NativeFunction; use move_vm_types::{ loaded_data::runtime_types::Type, - value_serde::FunctionValueExtension, values::{Closure, Reference, Struct, Value, Vector, VectorRef}, }; use smallvec::{smallvec, SmallVec}; @@ -29,8 +30,8 @@ const EARGS_MISMATCH: u64 = 1; const EINVALID_FORMAT: u64 = 2; const EUNABLE_TO_FORMAT_DELAYED_FIELD: u64 = 3; -struct FormatContext<'a, 'b, 'c, 'd> { - context: &'d mut SafeNativeContext<'a, 'b, 'c>, +struct FormatContext<'a, 'b, 'c, 'd, 'e> { + context: &'e mut SafeNativeContext<'a, 'b, 'c, 'd>, should_charge_gas: bool, max_depth: usize, max_len: usize, @@ -126,6 +127,59 @@ fn format_vector<'a>( Ok(()) } +fn format_closure_captured_arguments( + context: &mut FormatContext, + mask: ClosureMask, + mut captured_layouts: impl Iterator, + mut captured_arguments: impl Iterator, + depth: usize, + newline: bool, + out: &mut String, +) -> SafeNativeResult<()> { + if depth >= context.max_depth { + write!(out, " .. ").unwrap(); + return Ok(()); + } + + let mut i = 0; + let mut mask = mask.bits(); + + while mask != 0 { + if i > 0 { + out.push(','); + print_space_or_newline(newline, out, depth + 1); + } + if i >= context.max_len { + write!(out, "..").unwrap(); + break; + } + + if mask & 0x1 != 0 { + let layout = captured_layouts.next().ok_or_else(|| { + PartialVMError::new_invariant_violation("Captured layout must exist") + })?; + layout.write_name(out); + + let value = captured_arguments.next().ok_or_else(|| { + PartialVMError::new_invariant_violation("Captured argument must exist") + })?; + native_format_impl(context, layout.get_layout(), value, depth + 1, out)?; + } else { + write!(out, "_").unwrap(); + } + mask >>= 1; + i += 1; + } + + if i < context.max_len { + out.push(','); + print_space_or_newline(newline, out, depth + 1); + write!(out, "..").unwrap(); + } + + Ok(()) +} + fn native_format_impl( context: &mut FormatContext, layout: &MoveTypeLayout, @@ -279,7 +333,7 @@ fn native_format_impl( return Ok(()); } if context.type_tag { - write!(out, "{} {{", TypeTag::from(type_.clone())).unwrap(); + write!(out, "{} {{", type_.to_canonical_string()).unwrap(); } else { write!(out, "{} {{", type_.name.as_str()).unwrap(); }; @@ -363,19 +417,25 @@ fn native_format_impl( // avoiding potential loading of the function to get full // decorated type information. 
let (fun, args) = val.value_as::()?.unpack(); - let data = context + let captured_layouts = context .context - .function_value_extension() - .get_serialization_data(fun.as_ref())?; - out.push_str(&fun.to_stable_string()); - format_vector( - context, - data.captured_layouts.iter(), - args.collect(), - depth, - !context.single_line, - out, - )?; + .get_captured_layouts_for_string_utils(fun.as_ref())? + .ok_or_else(|| SafeNativeError::Abort { + abort_code: EUNABLE_TO_FORMAT_DELAYED_FIELD, + })?; + out.push_str(&fun.to_canonical_string()); + out.push('('); + if !captured_layouts.is_empty() { + format_closure_captured_arguments( + context, + fun.closure_mask(), + captured_layouts.into_iter(), + args, + depth, + !context.single_line, + out, + )?; + } out.push(')'); }, diff --git a/aptos-move/framework/src/natives/type_info.rs b/aptos-move/framework/src/natives/type_info.rs index ae8468db86ca4..b33e3271ee60a 100644 --- a/aptos-move/framework/src/natives/type_info.rs +++ b/aptos-move/framework/src/natives/type_info.rs @@ -21,9 +21,9 @@ fn type_of_internal(struct_tag: &StructTag) -> Result, std: let mut name = struct_tag.name.to_string(); if let Some(first_ty) = struct_tag.type_args.first() { write!(name, "<")?; - write!(name, "{}", first_ty)?; + write!(name, "{}", first_ty.to_canonical_string())?; for ty in struct_tag.type_args.iter().skip(1) { - write!(name, ", {}", ty)?; + write!(name, ", {}", ty.to_canonical_string())?; } write!(name, ">")?; } @@ -57,7 +57,7 @@ fn native_type_of( let type_tag = context.type_to_type_tag(&ty_args[0])?; if context.eval_gas(TYPE_INFO_TYPE_OF_PER_BYTE_IN_STR) > 0.into() { - let type_tag_str = type_tag.to_string(); + let type_tag_str = type_tag.to_canonical_string(); // Ideally, we would charge *before* the `type_to_type_tag()` and `type_tag.to_string()` calls above. // But there are other limits in place that prevent this native from being called with too much work. context @@ -92,7 +92,7 @@ fn native_type_name( context.charge(TYPE_INFO_TYPE_NAME_BASE)?; let type_tag = context.type_to_type_tag(&ty_args[0])?; - let type_name = type_tag.to_string(); + let type_name = type_tag.to_canonical_string(); // TODO: Ideally, we would charge *before* the `type_to_type_tag()` and `type_tag.to_string()` calls above. 
context.charge(TYPE_INFO_TYPE_NAME_PER_BYTE_IN_STR * NumBytes::new(type_name.len() as u64))?; @@ -159,7 +159,7 @@ mod tests { type_args: vec![TypeTag::Vector(Box::new(TypeTag::U8))], }; - let dummy_as_strings = dummy_st.to_string(); + let dummy_as_strings = dummy_st.to_canonical_string(); let mut dummy_as_strings = dummy_as_strings.split("::"); let dummy_as_type_of = type_of_internal(&dummy_st).unwrap().pop().unwrap(); let dummy_as_type_of: Struct = dummy_as_type_of.cast().unwrap(); diff --git a/aptos-move/framework/src/natives/util.rs b/aptos-move/framework/src/natives/util.rs index a10509eaee8d9..444cc143e3fa5 100644 --- a/aptos-move/framework/src/natives/util.rs +++ b/aptos-move/framework/src/natives/util.rs @@ -44,7 +44,8 @@ fn native_from_bytes( )?; let function_value_extension = context.function_value_extension(); - let val = match ValueSerDeContext::new() + let max_value_nest_depth = context.max_value_nest_depth(); + let val = match ValueSerDeContext::new(max_value_nest_depth) .with_legacy_signer() .with_func_args_deserialization(&function_value_extension) .deserialize(&bytes, &layout) diff --git a/aptos-move/framework/src/prover.rs b/aptos-move/framework/src/prover.rs index 44e437a517327..5e77792c72727 100644 --- a/aptos-move/framework/src/prover.rs +++ b/aptos-move/framework/src/prover.rs @@ -8,6 +8,7 @@ use codespan_reporting::{ term::termcolor::{ColorChoice, StandardStream}, }; use log::{info, LevelFilter}; +use move_compiler_v2::Experiment; use move_core_types::account_address::AccountAddress; use move_model::{ metadata::{CompilerVersion, LanguageVersion}, @@ -148,6 +149,12 @@ impl ProverOptions { let now = Instant::now(); let for_test = self.for_test; let benchmark = self.benchmark; + let mut experiments_vec = experiments.to_vec(); + // If `filter` is `some` then only the files filtered for are primary targets. + // This interferes with the package visibility check in the function checker. + if self.filter.is_some() { + experiments_vec.push(Experiment::UNSAFE_PACKAGE_VISIBILITY.to_string()); + }; let mut model = build_model( dev_mode, package_path, @@ -158,7 +165,7 @@ impl ProverOptions { language_version, skip_attribute_checks, known_attributes.clone(), - experiments.to_vec(), + experiments_vec, )?; let mut options = self.convert_options(package_path)?; options.language_version = language_version; diff --git a/aptos-move/framework/table-natives/src/lib.rs b/aptos-move/framework/table-natives/src/lib.rs index 77d0372fa81b7..b9c2a1265060a 100644 --- a/aptos-move/framework/table-natives/src/lib.rs +++ b/aptos-move/framework/table-natives/src/lib.rs @@ -380,13 +380,15 @@ fn native_add_box( let (gv, loaded) = table.get_or_create_global_value(&function_value_extension, table_context, key_bytes)?; - let mem_usage = gv.view().map(|val| { - u64::from( + let mem_usage = gv + .view() + .map(|val| { context .abs_val_gas_params() - .abstract_heap_size(&val, context.gas_feature_version()), - ) - }); + .abstract_heap_size(&val, context.gas_feature_version()) + .map(u64::from) + }) + .transpose()?; let res = match gv.move_to(val) { Ok(_) => Ok(smallvec![]), @@ -400,7 +402,7 @@ fn native_add_box( // TODO(Gas): Figure out a way to charge this earlier. 
context.charge(key_cost)?; if let Some(amount) = mem_usage { - context.use_heap_memory(amount); + context.use_heap_memory(amount)?; } charge_load_cost(context, loaded)?; @@ -431,13 +433,15 @@ fn native_borrow_box( let (gv, loaded) = table.get_or_create_global_value(&function_value_extension, table_context, key_bytes)?; - let mem_usage = gv.view().map(|val| { - u64::from( + let mem_usage = gv + .view() + .map(|val| { context .abs_val_gas_params() - .abstract_heap_size(&val, context.gas_feature_version()), - ) - }); + .abstract_heap_size(&val, context.gas_feature_version()) + .map(u64::from) + }) + .transpose()?; let res = match gv.borrow_global() { Ok(ref_val) => Ok(smallvec![ref_val]), @@ -451,7 +455,7 @@ fn native_borrow_box( // TODO(Gas): Figure out a way to charge this earlier. context.charge(key_cost)?; if let Some(amount) = mem_usage { - context.use_heap_memory(amount); + context.use_heap_memory(amount)?; } charge_load_cost(context, loaded)?; @@ -482,13 +486,15 @@ fn native_contains_box( let (gv, loaded) = table.get_or_create_global_value(&function_value_extension, table_context, key_bytes)?; - let mem_usage = gv.view().map(|val| { - u64::from( + let mem_usage = gv + .view() + .map(|val| { context .abs_val_gas_params() - .abstract_heap_size(&val, context.gas_feature_version()), - ) - }); + .abstract_heap_size(&val, context.gas_feature_version()) + .map(u64::from) + }) + .transpose()?; let exists = Value::bool(gv.exists()?); drop(table_data); @@ -496,7 +502,7 @@ fn native_contains_box( // TODO(Gas): Figure out a way to charge this earlier. context.charge(key_cost)?; if let Some(amount) = mem_usage { - context.use_heap_memory(amount); + context.use_heap_memory(amount)?; } charge_load_cost(context, loaded)?; @@ -527,13 +533,15 @@ fn native_remove_box( let (gv, loaded) = table.get_or_create_global_value(&function_value_extension, table_context, key_bytes)?; - let mem_usage = gv.view().map(|val| { - u64::from( + let mem_usage = gv + .view() + .map(|val| { context .abs_val_gas_params() - .abstract_heap_size(&val, context.gas_feature_version()), - ) - }); + .abstract_heap_size(&val, context.gas_feature_version()) + .map(u64::from) + }) + .transpose()?; let res = match gv.move_from() { Ok(val) => Ok(smallvec![val]), @@ -547,7 +555,7 @@ fn native_remove_box( // TODO(Gas): Figure out a way to charge this earlier. context.charge(key_cost)?; if let Some(amount) = mem_usage { - context.use_heap_memory(amount); + context.use_heap_memory(amount)?; } charge_load_cost(context, loaded)?; @@ -606,7 +614,7 @@ fn serialize_key( layout: &MoveTypeLayout, key: &Value, ) -> PartialVMResult> { - ValueSerDeContext::new() + ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) .with_func_args_deserialization(function_value_extension) .serialize(key, layout)? .ok_or_else(|| partial_extension_error("cannot serialize table key")) @@ -617,9 +625,10 @@ fn serialize_value( layout_info: &LayoutInfo, val: &Value, ) -> PartialVMResult<(Bytes, Option>)> { + let max_value_nest_depth = function_value_extension.max_value_nest_depth(); let serialization_result = if layout_info.has_identifier_mappings { // Value contains delayed fields, so we should be able to serialize it. - ValueSerDeContext::new() + ValueSerDeContext::new(max_value_nest_depth) .with_delayed_fields_serde() .with_func_args_deserialization(function_value_extension) .serialize(val, layout_info.layout.as_ref())? 
@@ -627,7 +636,7 @@ fn serialize_value( } else { // No delayed fields, make sure serialization fails if there are any // native values. - ValueSerDeContext::new() + ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(function_value_extension) .serialize(val, layout_info.layout.as_ref())? .map(|bytes| (bytes.into(), None)) @@ -642,12 +651,12 @@ fn deserialize_value( ) -> PartialVMResult { let layout = layout_info.layout.as_ref(); let deserialization_result = if layout_info.has_identifier_mappings { - ValueSerDeContext::new() + ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) .with_func_args_deserialization(function_value_extension) .with_delayed_fields_serde() .deserialize(bytes, layout) } else { - ValueSerDeContext::new() + ValueSerDeContext::new(function_value_extension.max_value_nest_depth()) .with_func_args_deserialization(function_value_extension) .deserialize(bytes, layout) }; diff --git a/aptos-move/framework/tests/move_prover_tests.rs b/aptos-move/framework/tests/move_prover_tests.rs index 99e54d3c6120f..f2e5dbbe0370b 100644 --- a/aptos-move/framework/tests/move_prover_tests.rs +++ b/aptos-move/framework/tests/move_prover_tests.rs @@ -78,28 +78,8 @@ pub fn run_prover_for_pkg( } #[test] -fn move_framework_prover_tests_shard1() { - run_prover_for_pkg("aptos-framework", 5, Some(1)); -} - -#[test] -fn move_framework_prover_tests_shard2() { - run_prover_for_pkg("aptos-framework", 5, Some(2)); -} - -#[test] -fn move_framework_prover_tests_shard3() { - run_prover_for_pkg("aptos-framework", 5, Some(3)); -} - -#[test] -fn move_framework_prover_tests_shard4() { - run_prover_for_pkg("aptos-framework", 5, Some(4)); -} - -#[test] -fn move_framework_prover_tests_shard5() { - run_prover_for_pkg("aptos-framework", 5, Some(5)); +fn move_framework_prover_tests() { + run_prover_for_pkg("aptos-framework", 1, None); } #[test] diff --git a/aptos-move/mvhashmap/Cargo.toml b/aptos-move/mvhashmap/Cargo.toml index 09a679ca6a48f..7cbc7c6f0f014 100644 --- a/aptos-move/mvhashmap/Cargo.toml +++ b/aptos-move/mvhashmap/Cargo.toml @@ -15,12 +15,15 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } aptos-aggregator = { workspace = true } +aptos-infallible = { workspace = true } aptos-types = { workspace = true } aptos-vm-types = { workspace = true } bytes = { workspace = true } claims = { workspace = true } crossbeam = { workspace = true } dashmap = { workspace = true } +equivalent = { workspace = true } +fail = { workspace = true } move-binary-format = { workspace = true } move-core-types = { workspace = true } move-vm-runtime = { workspace = true } @@ -29,7 +32,10 @@ serde = { workspace = true } [dev-dependencies] aptos-aggregator = { workspace = true, features = ["testing"] } +concurrent-queue = { workspace = true } +fail = { workspace = true, features = ["failpoints"] } move-vm-runtime = { workspace = true } +num_cpus = { workspace = true } proptest = { workspace = true } proptest-derive = { workspace = true } rayon = { workspace = true } diff --git a/aptos-move/mvhashmap/src/lib.rs b/aptos-move/mvhashmap/src/lib.rs index 5f26c1191f856..7edee967b7924 100644 --- a/aptos-move/mvhashmap/src/lib.rs +++ b/aptos-move/mvhashmap/src/lib.rs @@ -32,8 +32,12 @@ mod unit_tests; /// given key, it holds exclusive access and doesn't need to explicitly synchronize /// with other reader/writers. /// -/// TODO: separate V into different generic types for data and code modules with specialized -/// traits (currently both WriteOp for executor). 
+/// TODO(BlockSTMv2): consider handling the baseline retrieval inside MVHashMap, by +/// providing a lambda during construction. This would simplify the caller logic and +/// allow unifying initialization logic e.g. for resource groups that span two +/// different multi-version data-structures (MVData and MVGroupData). It would also +/// allow performing a check on the path once during initialization (to determine +/// if the path is for a resource or a group), and then checking invariants. pub struct MVHashMap { data: VersionedData, group_data: VersionedGroupData, @@ -48,7 +52,7 @@ impl MVHashMap where K: ModulePath + Hash + Clone + Eq + Debug, T: Hash + Clone + Eq + Debug + Serialize, - V: TransactionWrite, + V: TransactionWrite + PartialEq, I: Copy + Clone + Eq + Hash + Debug, { #[allow(clippy::new_without_default)] diff --git a/aptos-move/mvhashmap/src/types.rs b/aptos-move/mvhashmap/src/types.rs index 4e6d2459e3f6e..59721f0794644 100644 --- a/aptos-move/mvhashmap/src/types.rs +++ b/aptos-move/mvhashmap/src/types.rs @@ -6,6 +6,7 @@ use aptos_types::{ error::PanicOr, write_set::{TransactionWrite, WriteOpKind}, }; +use fail::fail_point; use move_core_types::value::MoveTypeLayout; use std::sync::{atomic::AtomicU32, Arc}; @@ -158,6 +159,7 @@ impl ValueWithLayout { } pub fn bytes_len(&self) -> Option { + fail_point!("value_with_layout_bytes_len", |_| { Some(10) }); match self { ValueWithLayout::RawFromStorage(value) | ValueWithLayout::Exchanged(value, _) => { value.bytes().map(|b| b.len()) diff --git a/aptos-move/mvhashmap/src/unit_tests/dependencies.rs b/aptos-move/mvhashmap/src/unit_tests/dependencies.rs new file mode 100644 index 0000000000000..e2ea63dff6f35 --- /dev/null +++ b/aptos-move/mvhashmap/src/unit_tests/dependencies.rs @@ -0,0 +1,265 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use super::{ + types::{test::KeyType, Incarnation, MVDataError, MVDataOutput, TxnIndex, ValueWithLayout}, + MVHashMap, +}; +use crate::{types::ShiftedTxnIndex, unit_tests::proptest_types::MockValue}; +use claims::{assert_matches, assert_ok}; +use concurrent_queue::ConcurrentQueue; +use proptest::{ + collection::vec, prelude::*, sample::Index, strategy::ValueTree, test_runner::TestRunner, +}; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + fmt::Debug, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + thread::sleep, + time::Duration, +}; +use test_case::test_case; + +#[derive(Debug, Clone)] +enum Operator { + Insert(V), + // InsertAndRemove transforms into two operations, with the second + // operation taking place after the first operation is completed. + InsertAndRemove(V), + Read, +} + +fn operator_strategy() -> impl Strategy> { + prop_oneof![ + 1 => any::().prop_map(Operator::Insert), + 1 => any::().prop_map(Operator::InsertAndRemove), + 1 => Just(Operator::Read), + ] +} + +/// This test works as follows: +/// 1. We generate a sequence of transactions based on the above Operator and +/// generate the expected baseline. +/// 2. The worker threads pick transactions from the queue and execute them. +/// 3. For the reads, the final result is recorded, and reads are rescheduled +/// when a write_v2 or remove_v2 invalidate the previous dependency record. +/// 4. In the end we simply ensure that the final result matches the expected +/// baseline, and is recorded once as a dependency in the corresponding entry. 
+#[test_case(30, 2, 1, 30, 5)]
+#[test_case(50, 4, 3, 20, 5)]
+#[test_case(100, 8, 3, 50, 0)]
+#[test_case(1000, 16, 10, 1, 1)]
+#[test_case(20, 6, 1, 100, 1)]
+fn test_dependencies(
+    num_txns: usize,
+    num_workers: usize,
+    universe_size: usize,
+    num_random_generations: usize,
+    sleep_millis: u64,
+) {
+    if num_workers > num_cpus::get() {
+        // Ideally, we would want:
+        // https://users.rust-lang.org/t/how-to-programatically-ignore-a-unit-test/64096/5
+        return;
+    }
+
+    let mut runner = TestRunner::default();
+
+    for _ in 0..num_random_generations {
+        // Generate universe & transactions.
+        let universe = vec(any::<[u8; 32]>(), universe_size)
+            .new_tree(&mut runner)
+            .expect("creating universe should succeed")
+            .current();
+        let transactions = vec((any::<Index>(), operator_strategy::<[u8; 32]>()), num_txns)
+            .new_tree(&mut runner)
+            .expect("creating transactions should succeed")
+            .current()
+            .into_iter()
+            .map(|(idx, op)| (*idx.get(&universe), op))
+            .collect::<Vec<_>>();
+
+        let mut baseline = universe
+            .iter()
+            .map(|key| (*key, BTreeMap::new()))
+            .collect::<HashMap<_, _>>();
+        for (idx, (key, op)) in transactions.iter().enumerate() {
+            if let Operator::Insert(v) = op {
+                baseline
+                    .entry(*key)
+                    .or_default()
+                    .insert(idx as TxnIndex, MockValue::new(Some(*v)));
+            }
+        }
+
+        let map = MVHashMap::<KeyType<[u8; 32]>, usize, MockValue<[u8; 32]>, ()>::new();
+
+        // Each read may get invalidated and be rescheduled, but since each original
+        // txn performs at most one read, the total number of rescheduled reads at any
+        // given time is bounded by num_txns.
+        let rescheduled_reads = ConcurrentQueue::<(TxnIndex, Incarnation)>::bounded(num_txns);
+        // When a read occurs, if it does not read the correct value, the corresponding
+        // correct_read flag is set to false (initialized to true since not all txns
+        // contain a read), otherwise to true (if the correct value is read). With each
+        // invalidation causing a rescheduling, eventually all correct_read flags should
+        // be set to true. The flag is stored in the least significant bit, while the
+        // prefix is incarnation * 2, so that (using fetch_max) the flag from the latest
+        // incarnation is recorded in the end.
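+        // Hypothetical worked example of this encoding: a correct read by
+        // incarnation 3 stores 3 * 2 + 1 = 7 and an incorrect one stores 3 * 2 = 6,
+        // while any flag from incarnations 0..=2 is at most 5, so fetch_max leaves
+        // the least significant bit reflecting the latest incarnation's outcome.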
+ let correct_read = (0..num_txns) + .map(|_| AtomicUsize::new(1)) + .collect::>(); + + let current_idx: AtomicUsize = AtomicUsize::new(0); + rayon::scope(|s| { + for _ in 0..num_workers { + s.spawn(|_| loop { + let process_deps = |invalidated_deps: BTreeSet<(TxnIndex, Incarnation)>| { + for (txn_idx, incarnation) in invalidated_deps { + assert_ok!(rescheduled_reads.push((txn_idx, incarnation + 1))); + } + }; + + let maybe_perform_read = if current_idx.load(Ordering::Relaxed) < num_txns { + let idx = current_idx.fetch_add(1, Ordering::Relaxed); + if idx < num_txns { + let key = KeyType(transactions[idx].0); + match &transactions[idx].1 { + Operator::Read => Some((transactions[idx].0, idx as TxnIndex, 0)), + Operator::Insert(v) => { + process_deps(map.data().write_v2::( + key, + idx as TxnIndex, + 0, + Arc::new(MockValue::new(Some(*v))), + None, + )); + None + }, + Operator::InsertAndRemove(v) => { + process_deps(map.data().write_v2::( + key.clone(), + idx as TxnIndex, + 0, + Arc::new(MockValue::new(Some(*v))), + None, + )); + sleep(Duration::from_millis(sleep_millis)); + process_deps( + map.data().remove_v2::<_, false>(&key, idx as TxnIndex), + ); + None + }, + } + } else { + None + } + } else { + let ret = rescheduled_reads.pop().ok().map(|(txn_idx, incarnation)| { + assert_matches!(&transactions[txn_idx as usize].1, Operator::Read); + (transactions[txn_idx as usize].0, txn_idx, incarnation) + }); + if ret.is_none() { + break; + } + ret + }; + + if let Some((key, txn_idx, incarnation)) = maybe_perform_read { + let speculative_read_value = + map.data() + .fetch_data_v2(&KeyType(key), txn_idx, incarnation); + let correct = match speculative_read_value { + Ok(MVDataOutput::Versioned(_version, value)) => { + let correct = baseline + .get(&key) + .expect("key should exist in baseline") + .range(..txn_idx) + .next_back() + .map_or_else( + || { + // Comparison ignores version since push invalidation + // is based only on values. + value + == ValueWithLayout::Exchanged( + Arc::new(MockValue::new(None)), + None, + ) + }, + |(_expected_txn_idx, expected_output)| { + // Comparison ignores expected_txn_idx since push + // validation is based only on values. 
+ value + == ValueWithLayout::Exchanged( + Arc::new(expected_output.clone()), + None, + ) + }, + ); + correct + }, + Err(MVDataError::Uninitialized) => { + map.data().set_base_value( + KeyType(key), + ValueWithLayout::Exchanged( + Arc::new(MockValue::new(None)), + None, + ), + ); + assert_ok!(rescheduled_reads.push((txn_idx, incarnation + 1))); + false + }, + _ => unreachable!("Should be versioned or uninitialized"), + }; + + correct_read[txn_idx as usize].fetch_max( + incarnation as usize * 2 + correct as usize, + Ordering::Relaxed, + ); + } + }); + } + }); + + assert_eq!(rescheduled_reads.len(), 0); + assert!(correct_read + .iter() + .all(|correct_flag| correct_flag.load(Ordering::Relaxed) & 1 == 1)); + + let mut expected_deps: HashMap< + KeyType<[u8; 32]>, + BTreeMap>, + > = HashMap::new(); + for (idx, txn) in transactions.iter().enumerate() { + if let Operator::Read = txn.1 { + let expected_idx = baseline + .get(&txn.0) + .expect("key should exist in baseline") + .range(..idx as TxnIndex) + .next_back() + .map_or(ShiftedTxnIndex::zero_idx(), |(txn_idx, _)| { + ShiftedTxnIndex::new(*txn_idx) + }); + + expected_deps + .entry(KeyType(txn.0)) + .or_default() + .entry(expected_idx.clone()) + .or_default() + .insert(( + idx as TxnIndex, + correct_read[idx].load(Ordering::Relaxed) as u32 / 2, + )); + } + } + + for (key, expected_deps) in expected_deps { + for (expected_idx, expected_deps) in expected_deps { + let recorded_deps = map.data().get_dependencies(&key, expected_idx.clone()); + assert_eq!(recorded_deps, expected_deps); + } + } + } +} diff --git a/aptos-move/mvhashmap/src/unit_tests/mod.rs b/aptos-move/mvhashmap/src/unit_tests/mod.rs index 03883f9fdd04c..b0676a180fc33 100644 --- a/aptos-move/mvhashmap/src/unit_tests/mod.rs +++ b/aptos-move/mvhashmap/src/unit_tests/mod.rs @@ -24,6 +24,8 @@ use aptos_types::{ use bytes::Bytes; use claims::{assert_err_eq, assert_none, assert_ok_eq, assert_some_eq}; use std::sync::Arc; + +mod dependencies; mod proptest_types; fn match_unresolved( diff --git a/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs b/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs index a7ad47c00e51e..b899f017af9a5 100644 --- a/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs +++ b/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs @@ -37,7 +37,7 @@ enum Operator { } #[derive(Debug, Clone, PartialEq, Eq)] -enum ExpectedOutput { +enum ExpectedOutput { NotInMap, Deleted, Value(V), @@ -46,14 +46,14 @@ enum ExpectedOutput { Failure, } -#[derive(Debug, Clone)] -struct Value { +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct MockValue { maybe_value: Option, maybe_bytes: Option, } -impl> + Clone> Value { - fn new(maybe_value: Option) -> Self { +impl> + Clone + Eq + PartialEq> MockValue { + pub(crate) fn new(maybe_value: Option) -> Self { let maybe_bytes = maybe_value.clone().map(|v| { let mut bytes = v.into(); bytes.resize(16, 0); @@ -66,7 +66,7 @@ impl> + Clone> Value { } } -impl> + Clone + Debug> TransactionWrite for Value { +impl> + Clone + Debug + Eq + PartialEq> TransactionWrite for MockValue { fn bytes(&self) -> Option<&Bytes> { self.maybe_bytes.as_ref() } @@ -88,23 +88,23 @@ impl> + Clone + Debug> TransactionWrite for Value { } } -enum Data { - Write(Value), +enum Data { + Write(MockValue), Delta(DeltaOp), } -struct Baseline(HashMap>>); +struct Baseline(HashMap>>); impl Baseline where K: Hash + Eq + Clone + Debug, - V: Clone + Into> + Debug + PartialEq, + V: Clone + Into> + Debug + Eq + PartialEq, { pub fn new(txns: &[(K, Operator)], ignore_updates: 
bool) -> Self { let mut baseline: HashMap>> = HashMap::new(); for (idx, (k, op)) in txns.iter().enumerate() { let value_to_update = match op { - Operator::Insert(v) => Data::Write(Value::new(Some(v.clone()))), - Operator::Remove => Data::Write(Value::new(None)), + Operator::Insert(v) => Data::Write(MockValue::new(Some(v.clone()))), + Operator::Remove => Data::Write(MockValue::new(None)), Operator::Update(d) => { if ignore_updates { continue; @@ -195,7 +195,7 @@ fn operator_strategy() -> impl Strategy Just(Operator::Remove), @@ -213,7 +213,7 @@ fn run_and_assert( ) -> Result<(), TestCaseError> where K: PartialOrd + Send + Clone + Hash + Eq + Sync + Debug, - V: Send + Into> + Debug + Clone + PartialEq + Sync, + V: Send + Into> + Debug + Clone + Eq + PartialEq + Sync, { let transactions: Vec<(K, Operator)> = transaction_gens .into_iter() @@ -221,7 +221,7 @@ where .collect::>(); let baseline = Baseline::new(transactions.as_slice(), test_group); - let map = MVHashMap::, usize, Value, ()>::new(); + let map = MVHashMap::, usize, MockValue, ()>::new(); // make ESTIMATE placeholders for all versions to be updated. // allows to test that correct values appear at the end of concurrent execution. @@ -236,7 +236,7 @@ where .collect::>(); for (key, idx) in versions_to_write { let key = KeyType(key); - let value = Value::new(None); + let value = MockValue::new(None); let idx = idx as TxnIndex; if test_group { map.group_data @@ -253,7 +253,7 @@ where ) .unwrap(); map.group_data() - .mark_estimate(&key, idx, [5usize].into_iter().collect()); + .mark_estimate(&key, idx, &[5usize].into_iter().collect()); } else { map.data().write(key.clone(), idx, 0, Arc::new(value), None); map.data().mark_estimate(&key, idx); @@ -279,7 +279,7 @@ where use MVDataOutput::*; let baseline = baseline.get(key, idx as TxnIndex); - let assert_value = |v: ValueWithLayout>| match v + let assert_value = |v: ValueWithLayout>| match v .extract_value_no_layout() .maybe_value .as_ref() @@ -358,7 +358,7 @@ where }, Operator::Remove => { let key = KeyType(key.clone()); - let value = Value::new(None); + let value = MockValue::new(None); if test_group { map.group_data() .write( @@ -377,7 +377,7 @@ where }, Operator::Insert(v) => { let key = KeyType(key.clone()); - let value = Value::new(Some(v.clone())); + let value = MockValue::new(Some(v.clone())); if test_group { map.group_data() .write( @@ -408,8 +408,6 @@ where Ok(()) } -// TODO: proptest MVHashMap delete and dependency handling! - proptest! 
 {
     #[test]
     fn single_key_proptest(
diff --git a/aptos-move/mvhashmap/src/unsync_map.rs b/aptos-move/mvhashmap/src/unsync_map.rs
index e8eafb81045ea..1f0fe20acd366 100644
--- a/aptos-move/mvhashmap/src/unsync_map.rs
+++ b/aptos-move/mvhashmap/src/unsync_map.rs
@@ -272,11 +272,18 @@ impl<
         self.resource_map.borrow().get(key).cloned()
     }

-    pub fn fetch_exchanged_data(&self, key: &K) -> Option<(Arc<V>, Arc<MoveTypeLayout>)> {
-        if let Some(ValueWithLayout::Exchanged(value, Some(layout))) = self.fetch_data(key) {
-            Some((value, layout))
+    pub fn fetch_exchanged_data(
+        &self,
+        key: &K,
+    ) -> Result<(Arc<V>, Arc<MoveTypeLayout>), PanicError> {
+        let data = self.fetch_data(key);
+        if let Some(ValueWithLayout::Exchanged(value, Some(layout))) = data {
+            Ok((value, layout))
         } else {
-            None
+            Err(code_invariant_error(format!(
+                "Read value needing exchange {:?} does not exist or not in Exchanged format",
+                data
+            )))
         }
     }
diff --git a/aptos-move/mvhashmap/src/versioned_data.rs b/aptos-move/mvhashmap/src/versioned_data.rs
index aed65b5a9bd40..3cda0d537e5db 100644
--- a/aptos-move/mvhashmap/src/versioned_data.rs
+++ b/aptos-move/mvhashmap/src/versioned_data.rs
@@ -7,13 +7,18 @@ use crate::types::{
 };
 use anyhow::Result;
 use aptos_aggregator::delta_change_set::DeltaOp;
+use aptos_infallible::Mutex;
 use aptos_types::write_set::TransactionWrite;
 use claims::assert_some;
 use crossbeam::utils::CachePadded;
 use dashmap::DashMap;
+use equivalent::Equivalent;
 use move_core_types::value::MoveTypeLayout;
 use std::{
-    collections::btree_map::{self, BTreeMap},
+    collections::{
+        btree_map::{self, BTreeMap},
+        BTreeSet,
+    },
     fmt::Debug,
     hash::Hash,
     sync::{
@@ -42,8 +47,14 @@ enum EntryCell<V> {
     /// Recorded in the shared multi-version data-structure for each write. It
     /// has: 1) Incarnation number of the transaction that wrote the entry (note
     /// that TxnIndex is part of the key and not recorded here), 2) actual data
-    /// stored in a shared pointer (to ensure ownership and avoid clones).
-    Write(Incarnation, ValueWithLayout<V>),
+    /// stored in a shared pointer (to ensure ownership and avoid clones), and
+    /// 3) a mutex-protected set of (txn_idx, incarnation) pairs that have
+    /// registered a read of this entry.
+    ResourceWrite {
+        incarnation: Incarnation,
+        value_with_layout: ValueWithLayout<V>,
+        dependencies: Mutex<BTreeSet<(TxnIndex, Incarnation)>>,
+    },

     /// Recorded in the shared multi-version data-structure for each delta.
     /// Option<u128> is a shortcut to aggregated value (to avoid traversing down
@@ -63,8 +74,16 @@ pub struct VersionedData<K, V> {
     total_base_value_size: AtomicU64,
 }

-fn new_write_entry<V>(incarnation: Incarnation, value: ValueWithLayout<V>) -> Entry<EntryCell<V>> {
-    Entry::new(EntryCell::Write(incarnation, value))
+fn new_write_entry<V>(
+    incarnation: Incarnation,
+    value: ValueWithLayout<V>,
+    dependencies: BTreeSet<(TxnIndex, Incarnation)>,
+) -> Entry<EntryCell<V>> {
+    Entry::new(EntryCell::ResourceWrite {
+        incarnation,
+        value_with_layout: value,
+        dependencies: Mutex::new(dependencies),
+    })
 }

 fn new_delta_entry<V>(data: DeltaOp) -> Entry<EntryCell<V>> {
@@ -115,14 +134,123 @@ impl<V: TransactionWrite> Default for VersionedValue<V> {
     }
 }

-impl<V: TransactionWrite> VersionedValue<V> {
-    fn read(&self, txn_idx: TxnIndex) -> anyhow::Result<MVDataOutput<V>, MVDataError> {
+// TODO(BlockSTMv2): remove the TransactionWrite trait requirement from V, even if
+// AggregatorV1 code is not removed, by defining a more specialized trait that can
+// runtime assert in other variants. Add other variants to stored cells that allow
+// storing group metadata and group size together at the group key (currently metadata
+// is stored in a fake write entry, and group size is stored in the MVGroupData map).
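+// A sketch of the push-validation flow below, with hypothetical indices: if txn 7
+// (incarnation 0) reads the entry written by txn 3, the pair (7, 0) is recorded in
+// that entry's dependency set. When txn 5 later calls write_v2 with an equal value
+// and no layouts on either side, (7, 0) is split off and relocated to txn 5's new
+// entry, staying valid; otherwise it is returned as invalidated so the caller can
+// reschedule incarnation 1 of txn 7.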
+impl VersionedValue { + // Extracts read dependencies from the data-structure that are affected by a write + // of 'data' at 'txn_idx'. Some of these dependencies that remain valid can be + // relocated by a caller to a different (new) entry in the data-structure. The + // boolean flag indicates whether the dependency is still valid after the write. + fn split_off_affected_read_dependencies( + &self, + txn_idx: TxnIndex, + new_data: &Arc, + new_maybe_layout: &Option>, + ) -> (BTreeSet<(TxnIndex, Incarnation)>, bool) { + let mut affected_deps = BTreeSet::new(); + let mut still_valid = false; + + // Look at entries at or below txn_idx, which is where all the affected + // dependencies may be stored. Here, for generality, we assume that there + // may also be an entry at txn_idx, which could be getting overwritten, + // in which case all of its dependencies would be considered affected. + if let Some((_, entry)) = self + .versioned_map + .range(..=ShiftedTxnIndex::new(txn_idx)) + .next_back() + { + // Non-exchanged format is default validation failure. + if let EntryCell::ResourceWrite { + incarnation: _, + value_with_layout, + dependencies, + } = &entry.value + { + // Take dependencies above txn_idx + affected_deps = dependencies.lock().split_off(&(txn_idx + 1, 0)); + if !affected_deps.is_empty() { + if let ValueWithLayout::Exchanged( + previous_entry_value, + previous_entry_maybe_layout, + ) = value_with_layout + { + still_valid = compare_values_and_layouts::( + previous_entry_value, + new_data, + previous_entry_maybe_layout.as_ref(), + new_maybe_layout.as_ref(), + ); + } + } + } + } + (affected_deps, still_valid) + } + + /// Handle dependencies from a removed entry by validating against the next (lower) entry. + /// The caller MUST ensure that the entry at txn_idx has been removed from versioned_map + /// before calling this method. If the lower entry does not exist, all dependencies are + /// considered invalidated. A re-execution of the read can then create a baseline sentinel + /// entry, if needed. Data is the value that was stored in the removed entry. + fn handle_removed_dependencies( + &mut self, + txn_idx: TxnIndex, + mut dependencies: BTreeSet<(TxnIndex, Incarnation)>, + removed_data: &Arc, + removed_maybe_layout: &Option>, + ) -> BTreeSet<(TxnIndex, Incarnation)> { + // If we have dependencies and a next (lower) entry exists, validate against it. + if !dependencies.is_empty() { + if let Some((idx, next_entry)) = self + .versioned_map + .range(..=ShiftedTxnIndex::new(txn_idx)) + .next_back() + { + assert_ne!( + idx.idx(), + Ok(txn_idx), + "Entry at txn_idx must be removed before calling handle_removed_dependencies" + ); + + // Non-exchanged format is default validation failure. + if let EntryCell::ResourceWrite { + incarnation: _, + value_with_layout: ValueWithLayout::Exchanged(entry_value, entry_maybe_layout), + dependencies: next_deps, + } = &next_entry.value + { + let still_valid = compare_values_and_layouts::( + entry_value, + removed_data, + entry_maybe_layout.as_ref(), + removed_maybe_layout.as_ref(), + ); + + if still_valid { + // If validation passed, add dependencies to next entry and clear them. + next_deps.lock().extend(std::mem::take(&mut dependencies)); + } + } + } + } + dependencies + } + + // 'maybe_reader_incarnation' is None for BlockSTMv1 and always set for BlockSTMv2. 
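+    // For example (hypothetical indices): a read by txn 9, incarnation 2, passes
+    // Some(2) and records (9, 2) in the entry the read resolves to, while the V1
+    // path passes None and performs the same lookup without touching the
+    // dependency set.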
+ fn read( + &self, + reader_txn_idx: TxnIndex, + maybe_reader_incarnation: Option, + ) -> Result, MVDataError> { use MVDataError::*; use MVDataOutput::*; let mut iter = self .versioned_map - .range(ShiftedTxnIndex::zero_idx()..ShiftedTxnIndex::new(txn_idx)); + .range(ShiftedTxnIndex::zero_idx()..ShiftedTxnIndex::new(reader_txn_idx)); // If read encounters a delta, it must traverse the block of transactions // (top-down) until it encounters a write or reaches the end of the block. @@ -137,17 +265,40 @@ impl VersionedValue { } match (&entry.value, accumulator.as_mut()) { - (EntryCell::Write(incarnation, data), None) => { + ( + EntryCell::ResourceWrite { + incarnation, + value_with_layout, + dependencies, + }, + None, + ) => { + // Record the read dependency (only in V2 case, not to add contention to V1). + if let Some(reader_incarnation) = maybe_reader_incarnation { + dependencies + .lock() + .insert((reader_txn_idx, reader_incarnation)); + } + // Resolve to the write if no deltas were applied in between. return Ok(Versioned( idx.idx().map(|idx| (idx, *incarnation)), - data.clone(), + value_with_layout.clone(), )); }, - (EntryCell::Write(incarnation, data), Some(accumulator)) => { + ( + EntryCell::ResourceWrite { + incarnation, + value_with_layout, + // We ignore dependencies here because accumulator is set, i.e. + // we are dealing with AggregatorV1 flow w.o. push validation. + dependencies: _, + }, + Some(accumulator), + ) => { // Deltas were applied. We must deserialize the value // of the write and apply the aggregated delta accumulator. - let value = data.extract_value_no_layout(); + let value = value_with_layout.extract_value_no_layout(); return match value .as_u128() .expect("Aggregator value must deserialize to u128") @@ -158,7 +309,7 @@ impl VersionedValue { // over any speculative delta accumulation errors on top. Ok(Versioned( idx.idx().map(|idx| (idx, *incarnation)), - data.clone(), + value_with_layout.clone(), )) }, Some(value) => { @@ -221,7 +372,34 @@ impl VersionedValue { } } -impl VersionedData { +// Helper function to perform push validation whereby a read of an entry containing +// prev_value with prev_maybe_layout would now be reading an entry containing new_value +// with new_maybe_layout. +fn compare_values_and_layouts< + const ONLY_COMPARE_METADATA: bool, + V: TransactionWrite + PartialEq, +>( + prev_value: &V, + new_value: &V, + prev_maybe_layout: Option<&Arc>, + new_maybe_layout: Option<&Arc>, +) -> bool { + // ONLY_COMPARE_METADATA is a const static flag that indicates that these entries are + // versioning metadata only, and not the actual value (Currently, only used for versioning + // resource group metadata). Hence, validation is only performed on the metadata. + if ONLY_COMPARE_METADATA { + prev_value.as_state_value_metadata() == new_value.as_state_value_metadata() + } else { + // Layouts pass validation only if they are both None. Otherwise, validation pessimistically + // fails. This is a simple logic that avoids potentially costly layout comparisons. + prev_maybe_layout.is_none() && new_maybe_layout.is_none() && prev_value == new_value + } + // TODO(BlockSTMv2): optimize layout validation (potentially based on size, or by having + // a more efficient representation. Optimizing value validation by having a configurable + // size threshold above which validation can automatically pessimistically fail. 
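+    // Illustrative cases (assumed, for exposition): with ONLY_COMPARE_METADATA set,
+    // entries whose state value metadata match pass even if their bytes differ;
+    // without it, equal values still fail whenever either side carries Some(layout),
+    // since layout comparison is skipped pessimistically.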
+} + +impl VersionedData { pub(crate) fn empty() -> Self { Self { values: DashMap::new(), @@ -247,7 +425,14 @@ impl VersionedData { /// Mark an entry from transaction 'txn_idx' at access path 'key' as an estimated write /// (for future incarnation). Will panic if the entry is not in the data-structure. - pub fn mark_estimate(&self, key: &K, txn_idx: TxnIndex) { + /// Use the Equivalent trait for key lookup. This avoids having to clone the key when + /// only a reference is needed. + pub fn mark_estimate(&self, key: &Q, txn_idx: TxnIndex) + where + Q: Equivalent + Hash, + { + // Use dashmap's get method which accepts a reference when Borrow is implemented + // The equivalent crate automatically implements the right traits. let v = self.values.get(key).expect("Path must exist"); v.versioned_map .get(&ShiftedTxnIndex::new(txn_idx)) @@ -257,7 +442,10 @@ impl VersionedData { /// Delete an entry from transaction 'txn_idx' at access path 'key'. Will panic /// if the corresponding entry does not exist. - pub fn remove(&self, key: &K, txn_idx: TxnIndex) { + pub fn remove(&self, key: &Q, txn_idx: TxnIndex) + where + Q: Equivalent + Hash, + { // TODO: investigate logical deletion. let mut v = self.values.get_mut(key).expect("Path must exist"); assert_some!( @@ -266,62 +454,141 @@ impl VersionedData { ); } - pub fn fetch_data( + /// Delete an entry from transaction 'txn_idx' at access path 'key' for BlockSTMv2. + /// Returns read dependencies from the entry that are no longer valid, panics if + /// the entry does not exist. + pub fn remove_v2( &self, - key: &K, + key: &Q, txn_idx: TxnIndex, - ) -> anyhow::Result, MVDataError> { + ) -> BTreeSet<(TxnIndex, Incarnation)> + where + Q: Equivalent + Hash, + { + let mut v = self.values.get_mut(key).expect("Path must exist"); + + // Get the entry to be removed + let removed_entry = v + .versioned_map + .remove(&ShiftedTxnIndex::new(txn_idx)) + .expect("Entry for key / idx must exist to be deleted"); + + if let EntryCell::ResourceWrite { + incarnation: _, + value_with_layout, + dependencies, + } = &removed_entry.value + { + match value_with_layout { + ValueWithLayout::RawFromStorage(_) => { + unreachable!( + "Removed value written by txn {txn_idx} may not be RawFromStorage" + ); + }, + ValueWithLayout::Exchanged(data, layout) => { + let removed_deps = std::mem::take(&mut *dependencies.lock()); + v.handle_removed_dependencies::( + txn_idx, + removed_deps, + data, + layout, + ) + }, + } + } else { + BTreeSet::new() + } + } + + // This method can also be used from BlockSTMv2 flow, e.g. during post-commit + // final validation for safety, as it avoids making any dependency records. + pub fn fetch_data( + &self, + key: &Q, + txn_idx: TxnIndex, + ) -> anyhow::Result, MVDataError> + where + Q: Equivalent + Hash, + { self.values .get(key) - .map(|v| v.read(txn_idx)) + .map(|v| v.read(txn_idx, None)) .unwrap_or(Err(MVDataError::Uninitialized)) } - pub fn fetch_exchanged_data( + // TODO(BlockSTMv2): Have a dispatch or dedicated interfaces for reading data, + // metadata, size, and exists predicate. Return the appropriately cast value, record + // the kind of each read dependency, then validate accordingly on write / removal. 
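+    // Usage sketch (hypothetical call sites): `fetch_data` reads without recording
+    // a dependency, which also suits BlockSTMv2 post-commit validation, while
+    // `fetch_data_v2` below registers (txn_idx, incarnation) so that later write_v2
+    // or remove_v2 calls can push-invalidate the read.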
+ pub fn fetch_data_v2<Q>( &self, - key: &K, + key: &Q, txn_idx: TxnIndex, - ) -> Option<(Arc<V>, Arc<MoveTypeLayout>)> { - if let Ok(MVDataOutput::Versioned(_, ValueWithLayout::Exchanged(value, Some(layout)))) = - self.fetch_data(key, txn_idx) - { - Some((value, layout)) - } else { - None - } + incarnation: Incarnation, + ) -> Result<MVDataOutput<V>, MVDataError> + where + Q: Equivalent<K> + Hash, + { + self.values + .get(key) + .map(|v| v.read(txn_idx, Some(incarnation))) + .unwrap_or(Err(MVDataError::Uninitialized)) } - pub fn set_base_value(&self, key: K, value: ValueWithLayout<V>) { + // The caller needs to repeat the read after set_base_value (a concurrent caller might + // have exchanged and stored a different delayed field ID). + pub fn set_base_value(&self, key: K, base_value_with_layout: ValueWithLayout<V>) { let mut v = self.values.entry(key).or_default(); // For base value, incarnation is irrelevant, and is always set to 0. use btree_map::Entry::*; use ValueWithLayout::*; match v.versioned_map.entry(ShiftedTxnIndex::zero_idx()) { - Vacant(v) => { - if let Some(base_size) = value.bytes_len() { + Vacant(vacant_entry) => { + if let Some(base_size) = base_value_with_layout.bytes_len() { self.total_base_value_size .fetch_add(base_size as u64, Ordering::Relaxed); } - v.insert(CachePadded::new(new_write_entry(0, value))); + vacant_entry.insert(CachePadded::new(new_write_entry( + 0, + base_value_with_layout, + BTreeSet::new(), + ))); }, Occupied(mut o) => { - if let EntryCell::Write(i, existing_value) = &o.get().value { - assert!(*i == 0); - match (existing_value, &value) { - (RawFromStorage(ev), RawFromStorage(v)) => { + if let EntryCell::ResourceWrite { + incarnation, + value_with_layout: existing_value_with_layout, + dependencies, + } = &o.get().value + { + assert!(*incarnation == 0); + match (existing_value_with_layout, &base_value_with_layout) { + (RawFromStorage(existing_value), RawFromStorage(base_value)) => { // Base value from storage needs to be identical // Assert the length of bytes for efficiency (instead of full equality) - assert!(v.bytes().map(|b| b.len()) == ev.bytes().map(|b| b.len())) + assert!( + base_value.bytes().map(|b| b.len()) + == existing_value.bytes().map(|b| b.len()) + ); }, (Exchanged(_, _), RawFromStorage(_)) => { // Stored value contains more info, nothing to do. }, (RawFromStorage(_), Exchanged(_, _)) => { - // Received more info, update. - o.insert(CachePadded::new(new_write_entry(0, value))); + let dependencies = std::mem::take(&mut *dependencies.lock()); + // Received more info, update, but keep the same dependencies. + // TODO(BlockSTMv2): Once we support dependency kinds, we could check here + // that carried-over dependencies can only be size & metadata ones. + o.insert(CachePadded::new(new_write_entry( + 0, + base_value_with_layout, + dependencies, + ))); }, - (Exchanged(ev, e_layout), Exchanged(v, layout)) => { + ( + Exchanged(existing_value, existing_layout), + Exchanged(base_value, base_layout), + ) => { // base value may have already been provided by another transaction // executed simultaneously and asking for the same resource.
// Value from storage must be identical, but then delayed field @@ -330,9 +597,12 @@ impl VersionedData { // If maybe_layout is None, they are required to be identical // If maybe_layout is Some, there might have been an exchange // Assert the length of bytes for efficiency (instead of full equality) - assert_eq!(e_layout.is_some(), layout.is_some()); - if layout.is_none() { - assert_eq!(v.bytes().map(|b| b.len()), ev.bytes().map(|b| b.len())); + assert_eq!(existing_layout.is_some(), base_layout.is_some()); + if existing_layout.is_none() { + assert_eq!( + existing_value.bytes().map(|b| b.len()), + base_value.bytes().map(|b| b.len()) + ); } }, } @@ -341,34 +611,88 @@ impl VersionedData { }; } - /// Versioned write of data at a given key (and version). - pub fn write( - &self, - key: K, + fn write_impl( + versioned_values: &mut VersionedValue<V>, txn_idx: TxnIndex, incarnation: Incarnation, - data: Arc<V>, - maybe_layout: Option<Arc<MoveTypeLayout>>, + value: ValueWithLayout<V>, + dependencies: BTreeSet<(TxnIndex, Incarnation)>, ) { - let mut v = self.values.entry(key).or_default(); - let prev_entry = v.versioned_map.insert( + let prev_entry = versioned_values.versioned_map.insert( ShiftedTxnIndex::new(txn_idx), - CachePadded::new(new_write_entry( - incarnation, - ValueWithLayout::Exchanged(data, maybe_layout), - )), + CachePadded::new(new_write_entry(incarnation, value, dependencies)), ); // Assert that the previous entry for txn_idx, if present, had lower incarnation. assert!(prev_entry.map_or(true, |entry| -> bool { - if let EntryCell::Write(i, _) = entry.value { - i < incarnation + if let EntryCell::ResourceWrite { + incarnation: prev_incarnation, + value_with_layout: _, + dependencies: _, + } = entry.value + { + prev_incarnation < incarnation } else { true } })); } + pub fn write( + &self, + key: K, + txn_idx: TxnIndex, + incarnation: Incarnation, + data: Arc<V>, + maybe_layout: Option<Arc<MoveTypeLayout>>, + ) { + let mut v = self.values.entry(key).or_default(); + Self::write_impl( + &mut v, + txn_idx, + incarnation, + ValueWithLayout::Exchanged(data, maybe_layout), + BTreeSet::new(), + ); + } + + /// Write a value at a given key (and version) for BlockSTMv2. + /// Returns the affected read dependencies that were invalidated (i.e. failed push validation). + pub fn write_v2<const ONLY_COMPARE_METADATA: bool>( + &self, + key: K, + txn_idx: TxnIndex, + incarnation: Incarnation, + data: Arc<V>, + maybe_layout: Option<Arc<MoveTypeLayout>>, + ) -> BTreeSet<(TxnIndex, Incarnation)> { + let mut v = self.values.entry(key).or_default(); + let (affected_dependencies, validation_passed) = v + .split_off_affected_read_dependencies::<ONLY_COMPARE_METADATA>( + txn_idx, + &data, + &maybe_layout, + ); + + // If validation passed, keep the dependencies (pass to write_impl); otherwise return + // them (as invalidated read dependencies) to the caller. + let (deps_to_retain, deps_to_return) = if validation_passed { + (affected_dependencies, BTreeSet::new()) + } else { + (BTreeSet::new(), affected_dependencies) + }; + + Self::write_impl( + &mut v, + txn_idx, + incarnation, + ValueWithLayout::Exchanged(data, maybe_layout), + deps_to_retain, + ); + + deps_to_return + } + /// Versioned write of metadata at a given resource group key (and version). Returns true /// if the previously stored metadata has changed as observed by later transactions (e.g. /// metadata of a deletion can never be observed by later transactions). @@ -387,14 +711,20 @@ impl VersionedData { CachePadded::new(new_write_entry( incarnation, ValueWithLayout::Exchanged(arc_data.clone(), None), + BTreeSet::new(), )), ); // Changes versioned metadata that was stored.
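// (On write_v2 above: a sketch of how a BlockSTMv2 caller might consume the returned set;
// `scheduler` and abort_and_reschedule are hypothetical stand-ins, not part of this diff:
//
//   let invalidated = versioned_data.write_v2::<false>(key, txn_idx, incarnation, data, layout);
//   for (dep_txn_idx, dep_incarnation) in invalidated {
//       // Each pair identifies a recorded read that no longer passes push validation.
//       scheduler.abort_and_reschedule(dep_txn_idx, dep_incarnation);
//   }
// )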
prev_entry.map_or(true, |entry| -> bool { - if let EntryCell::Write(_, existing_v) = &entry.value { + if let EntryCell::ResourceWrite { + incarnation: _, + value_with_layout: existing_value_with_layout, + dependencies: _, + } = &entry.value + { arc_data.as_state_value_metadata() - != existing_v + != existing_value_with_layout .extract_value_no_layout() .as_state_value_metadata() } else { @@ -414,7 +744,7 @@ impl VersionedData { let mut v = self.values.get_mut(key).expect("Path must exist"); // +1 makes sure we include the delta from txn_idx. - match v.read(txn_idx + 1) { + match v.read(txn_idx + 1, None) { Ok(MVDataOutput::Resolved(value)) => { v.versioned_map .get_mut(&ShiftedTxnIndex::new(txn_idx)) @@ -430,4 +760,584 @@ impl VersionedData { ), } } + + #[cfg(test)] + pub(crate) fn get_dependencies( + &self, + key: &K, + shifted_txn_idx: ShiftedTxnIndex, + ) -> BTreeSet<(TxnIndex, Incarnation)> { + match &self + .values + .get(key) + .expect("Entry must exist for the given key") + .versioned_map + .get(&shifted_txn_idx) + .expect("Entry must exist for the given txn_idx") + .value + { + EntryCell::ResourceWrite { + incarnation: _, + value_with_layout: _, + dependencies, + } => dependencies.lock().clone(), + _ => unreachable!("Dependencies can only be recorded for resource writes"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::StorageVersion; + use aptos_aggregator::{bounded_math::SignedU128, delta_math::DeltaHistory}; + use aptos_types::{ + on_chain_config::CurrentTimeMicroseconds, + state_store::state_value::{StateValue, StateValueMetadata}, + write_set::{TransactionWrite, WriteOpKind}, + }; + use bytes::Bytes; + use claims::assert_ok_eq; + use fail::FailScenario; + use test_case::test_case; + + #[derive(Debug, Clone, PartialEq, Eq)] + struct TestValueWithMetadata { + value: u64, + metadata: u64, + } + + impl TestValueWithMetadata { + fn new(value: u64, metadata: u64) -> Self { + Self { value, metadata } + } + } + + impl TransactionWrite for TestValueWithMetadata { + fn bytes(&self) -> Option<&Bytes> { + unimplemented!("Irrelevant for the test") + } + + fn write_op_kind(&self) -> WriteOpKind { + unimplemented!("Irrelevant for the test") + } + + fn from_state_value(_maybe_state_value: Option<StateValue>) -> Self { + unimplemented!("Irrelevant for the test") + } + + fn as_state_value(&self) -> Option<StateValue> { + unimplemented!("Irrelevant for the test") + } + + fn set_bytes(&mut self, _bytes: Bytes) { + unimplemented!("Irrelevant for the test") + } + + fn as_state_value_metadata(&self) -> Option<StateValueMetadata> { + Some(StateValueMetadata::legacy( + self.metadata, + &CurrentTimeMicroseconds { + microseconds: self.metadata, + }, + )) + } + } + + fn get_deps_from_entry( + entry: &Entry>, + ) -> BTreeSet<(TxnIndex, Incarnation)> { + if let EntryCell::ResourceWrite { + incarnation: _, + value_with_layout: _, + dependencies, + } = &entry.value + { + dependencies.lock().clone() + } else { + unreachable!() + } + } + + #[test_case(1, BTreeSet::from([(2, 0), (3, 0), (3, 1), (7, 1)]), true; "deps > 1 from idx 0 write, pass validation")] + #[test_case(7, BTreeSet::from([(8, 1), (9, 0), (10, 0), (10, 2)]), false; "deps > 7 from idx 7 write, fail validation")] + #[test_case(5, BTreeSet::from([(7, 1)]), true; "deps > 5 from write at idx 0, pass validation")] + #[test_case(0, BTreeSet::from([(1, 0), (2, 0), (3, 0), (3, 1), (7, 1)]), true; "all deps > 0 from idx 0 write, pass validation")] + #[test_case(9, BTreeSet::from([(10, 0), (10, 2)]), false; "deps > 9 from write at idx 7, fail validation")] + 
#[test_case(12, BTreeSet::from([]), false; "entries >= idx 12 - no deps, default fail validation")] + #[test_case(7, BTreeSet::from([(8, 1), (9, 0), (10, 0), (10, 2)]), false; "all deps from write at idx 7, fail validation")] + fn test_split_off_affected_read_dependencies( + idx: TxnIndex, + expected_deps: BTreeSet<(TxnIndex, Incarnation)>, + expected_validation_result: bool, + ) { + let mut v = VersionedValue::<TestValueWithMetadata>::default(); + + // Setup: Create some writes with dependencies. + let deps_idx0 = BTreeSet::from([(1, 0), (2, 0)]); + let deps_idx7 = BTreeSet::from([(8, 1), (9, 0), (10, 2)]); + + v.versioned_map.insert( + ShiftedTxnIndex::new(0), + CachePadded::new(new_write_entry( + 0, + ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(10, 100)), None), + deps_idx0, + )), + ); + v.versioned_map.insert( + ShiftedTxnIndex::new(7), + CachePadded::new(new_write_entry( + 0, + ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(20, 200)), None), + deps_idx7, + )), + ); + + // Add some dependencies via read() calls. + let _ = v.read(3, Some(0)); // This adds (3, 0) to latest write <= 3 (write at idx 0). + let _ = v.read(3, Some(1)); // Add another incarnation of txn 3. + let _ = v.read(7, Some(1)); // This adds (7, 1) to write at idx 0. + let _ = v.read(8, Some(1)); // This adds (8, 1) to write at idx 7 (duplicate with existing). + let _ = v.read(10, Some(0)); // Add the lower incarnation of txn 10 first... + let _ = v.read(10, Some(2)); // ...then a higher incarnation of the same txn. + + // Get pre-call state of dependencies. + let mut recorded_deps_idx0 = + get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(0)).unwrap()); + let mut recorded_deps_idx7 = + get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(7)).unwrap()); + + // Get the actual dependencies and verify they match expected. + let (affected_deps, validation_passed) = v.split_off_affected_read_dependencies::<false>( + idx, + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ); + assert_eq!( + affected_deps, expected_deps, + "Dependencies above idx don't match expected." + ); + assert_eq!( + validation_passed, expected_validation_result, + "Validation result doesn't match expected." + ); + + // Verify that the remaining dependencies in entries match what we expect. + if idx < 7 { + let (remaining_deps, _) = v.split_off_affected_read_dependencies::<false>( + 6, + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ); + assert!(remaining_deps.is_empty()); + recorded_deps_idx0.retain(|(txn_idx, _)| *txn_idx <= idx); + } else { + recorded_deps_idx7.retain(|(txn_idx, _)| *txn_idx <= idx); + } + + let final_deps_idx0 = + get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(0)).unwrap()); + assert_eq!( + final_deps_idx0, recorded_deps_idx0, + "Dependencies in write at idx 0 don't match expected." + ); + + let final_deps_idx7 = + get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(7)).unwrap()); + assert_eq!( + final_deps_idx7, recorded_deps_idx7, + "Dependencies in write at idx 7 don't match expected."
+ ); + } + + #[test] + fn test_split_off_affected_read_dependencies_delta_only() { + let mut v = VersionedValue::<TestValueWithMetadata>::default(); + v.versioned_map.insert( + ShiftedTxnIndex::new(0), + CachePadded::new(new_delta_entry(DeltaOp::new( + SignedU128::Positive(10), + 1000, + DeltaHistory { + max_achieved_positive_delta: 10, + min_achieved_negative_delta: 0, + min_overflow_positive_delta: None, + max_underflow_negative_delta: None, + }, + ))), + ); + v.versioned_map.insert( + ShiftedTxnIndex::new(5), + CachePadded::new(new_delta_entry(DeltaOp::new( + SignedU128::Positive(20), + 1000, + DeltaHistory { + max_achieved_positive_delta: 20, + min_achieved_negative_delta: 0, + min_overflow_positive_delta: None, + max_underflow_negative_delta: None, + }, + ))), + ); + let (deps, _) = v.split_off_affected_read_dependencies::<false>( + 3, + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ); + assert_eq!(deps, BTreeSet::new()); + } + + #[test] + fn test_value_metadata_layout_comparison() { + macro_rules! test_metadata_layout_case { + ($only_compare_metadata:expr) => { + // Test all combinations of value/metadata/layout comparison parameters + for same_value in [true, false] { + for same_metadata in [true, false] { + for same_layout in [true, false] { + let mut v = VersionedValue::<TestValueWithMetadata>::default(); + + // Setup: Create a write with value 10, metadata 100 and one dependency + let deps = BTreeSet::from([(1, 0)]); + let layout = if same_layout { None } else { Some(Arc::new(MoveTypeLayout::Bool)) }; + v.versioned_map.insert( + ShiftedTxnIndex::new(0), + CachePadded::new(new_write_entry(0, ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(10, 100)), layout), deps)), + ); + + // Create test value based on parameters + let test_value = TestValueWithMetadata::new( + if same_value { 10 } else { 20 }, + if same_metadata { 100 } else { 200 } + ); + + // Compute expected validation result + let expected_validation = if $only_compare_metadata { + same_metadata + } else { + same_value && same_metadata && same_layout + }; + + // Test split_off_affected_read_dependencies + let (deps, validation_passed) = v.split_off_affected_read_dependencies::<{ $only_compare_metadata }>( + 0, + &Arc::new(test_value.clone()), + &None, + ); + + // Verify results + assert_eq!( + validation_passed, + expected_validation, + "Validation failed for same_value={}, same_metadata={}, only_compare_metadata={}, same_layout={}", + same_value, same_metadata, $only_compare_metadata, same_layout + ); + assert_eq!( + deps, + BTreeSet::from([(1, 0)]), + "Dependencies don't match for same_value={}, same_metadata={}, only_compare_metadata={}, same_layout={}", + same_value, same_metadata, $only_compare_metadata, same_layout + ); + + // Test handle_removed_dependencies + let remaining_deps = v.handle_removed_dependencies::<{ $only_compare_metadata }>( + 1, + BTreeSet::from([(2, 0)]), + &Arc::new(test_value), + &None, + ); + + if expected_validation { + assert!(remaining_deps.is_empty()); + // Verify that (2,0) is recorded in 0-th entry + assert_eq!(get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(0)).unwrap()), BTreeSet::from([(2, 0)])); + } else { + assert_eq!(remaining_deps, BTreeSet::from([(2, 0)])); + // Verify that dependencies are empty in 0-th entry + assert_eq!(get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(0)).unwrap()).len(), 0); + } + } + } + } + }; + } + + // Test both cases + test_metadata_layout_case!(true); + test_metadata_layout_case!(false); + } + + #[test] + fn test_raw_from_storage_validation() { + 
macro_rules! test_raw_from_storage_case { + ($only_compare_metadata:expr) => { + let mut v = VersionedValue::<TestValueWithMetadata>::default(); + + // Setup: Create a write with RawFromStorage value and one dependency + let deps = BTreeSet::from([(1, 0)]); + v.versioned_map.insert( + ShiftedTxnIndex::new(0), + CachePadded::new(new_write_entry(0, ValueWithLayout::RawFromStorage(Arc::new(TestValueWithMetadata::new(10, 100))), deps)), + ); + + // Test split_off_affected_read_dependencies with Exchanged value + let (deps, validation_passed) = v.split_off_affected_read_dependencies::<{ $only_compare_metadata }>( + 0, + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ); + + // Verify results - validation should fail even with same value and metadata + assert!(!validation_passed, "Validation should fail when comparing with RawFromStorage (only_compare_metadata={})", $only_compare_metadata); + assert_eq!(deps, BTreeSet::from([(1, 0)]), "Dependencies should be returned even when validation fails (only_compare_metadata={})", $only_compare_metadata); + + // Test handle_removed_dependencies + let remaining_deps = v.handle_removed_dependencies::<{ $only_compare_metadata }>( + 1, + BTreeSet::from([(2, 0)]), + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ); + + // Verify that the dependencies are returned rather than passed on. + assert_eq!( + remaining_deps, + BTreeSet::from([(2, 0)]), + "Dependencies should be returned when validation fails (only_compare_metadata={})", + $only_compare_metadata + ); + assert_eq!( + get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(0)).unwrap()).len(), + 0, + "Dependencies should not be passed to next entry when validation fails (only_compare_metadata={})", + $only_compare_metadata + ); + }; + } + + // Test both cases + test_raw_from_storage_case!(true); + test_raw_from_storage_case!(false); + } + + #[test] + #[should_panic( + expected = "Entry at txn_idx must be removed before calling handle_removed_dependencies" + )] + fn test_handle_removed_dependencies_panic() { + let mut v = VersionedValue::<TestValueWithMetadata>::default(); + + // Setup: Create a write entry + v.versioned_map.insert( + ShiftedTxnIndex::new(0), + CachePadded::new(new_write_entry( + 0, + ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(10, 100)), None), + BTreeSet::new(), + )), + ); + + v.handle_removed_dependencies::<false>( + 0, + BTreeSet::from([(2, 0)]), + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ); + } + + #[test] + fn test_remove_v2_storage_version() { + let mut v = VersionedValue::<TestValueWithMetadata>::default(); + + // Setup: Create an irrelevant (higher index) write entry. + v.versioned_map.insert( + ShiftedTxnIndex::new(3), + CachePadded::new(new_write_entry( + 0, + ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(10, 100)), None), + BTreeSet::new(), + )), + ); + + let dependencies = BTreeSet::from([(2, 0)]); + assert_eq!( + dependencies, + v.handle_removed_dependencies::<false>( + 1, + dependencies.clone(), + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ) + ); + assert_eq!( + dependencies, + v.handle_removed_dependencies::<true>( + 1, + dependencies.clone(), + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ) + ); + + // Now insert a new entry at index 0 that will retain the dependencies.
+ v.versioned_map.insert( + ShiftedTxnIndex::new(0), + CachePadded::new(new_write_entry( + 0, + ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(10, 100)), None), + BTreeSet::new(), + )), + ); + assert_eq!( + v.handle_removed_dependencies::<false>( + 1, + dependencies.clone(), + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ) + .len(), + 0 + ); + assert_eq!( + get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(0)).unwrap()), + dependencies + ); + + let dependencies = BTreeSet::from([(2, 1)]); + assert_eq!( + v.handle_removed_dependencies::<false>( + 1, + dependencies.clone(), + &Arc::new(TestValueWithMetadata::new(10, 100)), + &None, + ) + .len(), + 0 + ); + assert_eq!( + get_deps_from_entry(v.versioned_map.get(&ShiftedTxnIndex::new(0)).unwrap()), + BTreeSet::from([(2, 0), (2, 1)]) + ); + } + + fn check_versioned_data_deps( + versioned_data: &VersionedData<(), TestValueWithMetadata>, + shifted_txn_idx: ShiftedTxnIndex, + expected_deps: BTreeSet<(TxnIndex, Incarnation)>, + ) { + assert_eq!( + get_deps_from_entry( + versioned_data + .values + .get(&()) + .unwrap() + .versioned_map + .get(&shifted_txn_idx) + .unwrap() + ), + expected_deps + ); + } + + #[test] + fn test_base_value_dep_transfer() { + let versioned_data = VersionedData::<(), TestValueWithMetadata>::empty(); + + let scenario = FailScenario::setup(); + assert!(fail::has_failpoints()); + // Failpoint returns 10 as bytes length. + fail::cfg("value_with_layout_bytes_len", "return").unwrap(); + assert!(!fail::list().is_empty()); + + versioned_data.set_base_value( + (), + ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(10, 100)), None), + ); + assert_eq!(versioned_data.total_base_value_size(), 10); + scenario.teardown(); + + assert_ok_eq!( + versioned_data.fetch_data_v2(&(), 5, 1), + MVDataOutput::Versioned( + Err(StorageVersion), + ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(10, 100)), None), + ), + ); + check_versioned_data_deps( + &versioned_data, + ShiftedTxnIndex::zero_idx(), + BTreeSet::from([(5, 1)]), + ); + + versioned_data.write_v2::<false>( + (), + 1, + 1, + Arc::new(TestValueWithMetadata::new(10, 100)), + None, + ); + check_versioned_data_deps( + &versioned_data, + ShiftedTxnIndex::zero_idx(), + BTreeSet::new(), + ); + check_versioned_data_deps( + &versioned_data, + ShiftedTxnIndex::new(1), + BTreeSet::from([(5, 1)]), + ); + + versioned_data.write_v2::<false>( + (), + 3, + 0, + Arc::new(TestValueWithMetadata::new(10, 100)), + None, + ); + check_versioned_data_deps( + &versioned_data, + ShiftedTxnIndex::zero_idx(), + BTreeSet::new(), + ); + check_versioned_data_deps(&versioned_data, ShiftedTxnIndex::new(1), BTreeSet::new()); + check_versioned_data_deps( + &versioned_data, + ShiftedTxnIndex::new(3), + BTreeSet::from([(5, 1)]), + ); + + assert_ok_eq!( + versioned_data.fetch_data_v2(&(), 2, 0), + MVDataOutput::Versioned( + Ok((1, 1)), + ValueWithLayout::Exchanged(Arc::new(TestValueWithMetadata::new(10, 100)), None), + ), + ); + assert_eq!(versioned_data.remove_v2::<_, false>(&(), 3).len(), 0); + assert_eq!(versioned_data.remove_v2::<_, true>(&(), 1).len(), 0); + check_versioned_data_deps( + &versioned_data, + ShiftedTxnIndex::zero_idx(), + BTreeSet::from([(2, 0), (5, 1)]), + ); + } + + #[test] + #[should_panic(expected = "Entry for key / idx must exist to be deleted")] + fn test_remove_v2_panic_no_entry() { + let versioned_data = VersionedData::<(), TestValueWithMetadata>::empty(); + + // Add an entry at index 0 + versioned_data.write( + (), + 0, + 0, + 
Arc::new(TestValueWithMetadata::new(10, 100)), + None, + ); + + // Try to remove a non-existent entry at index 1 + versioned_data.remove_v2::<_, false>(&(), 1); + } } diff --git a/aptos-move/mvhashmap/src/versioned_delayed_fields.rs b/aptos-move/mvhashmap/src/versioned_delayed_fields.rs index 042e0024c8dd0..3d8033bf18ceb 100644 --- a/aptos-move/mvhashmap/src/versioned_delayed_fields.rs +++ b/aptos-move/mvhashmap/src/versioned_delayed_fields.rs @@ -518,11 +518,17 @@ impl VersionedDelayedFields { /// before given idx are in Value state. /// /// Must be called for each transaction index, in order. - pub fn try_commit(&self, idx_to_commit: TxnIndex, ids: Vec) -> Result<(), CommitError> { + pub fn try_commit( + &self, + idx_to_commit: TxnIndex, + ids: Vec, + is_appended_epilogue: bool, + ) -> Result<(), CommitError> { // we may not need to return values here, we can just read them. use DelayedApplyEntry::*; - if idx_to_commit != self.next_idx_to_commit.load(Ordering::SeqCst) { + if !is_appended_epilogue && idx_to_commit != self.next_idx_to_commit.load(Ordering::SeqCst) + { return Err(CommitError::CodeInvariantError( "idx_to_commit must be next_idx_to_commit".to_string(), )); @@ -649,10 +655,12 @@ impl VersionedDelayedFields { // Should be guaranteed, as this is the only function modifying the idx, // and value is checked at the start. // Need to assert, because if not matching we are in an inconsistent state. - assert_eq!( - idx_to_commit, - self.next_idx_to_commit.fetch_add(1, Ordering::SeqCst) - ); + if !is_appended_epilogue { + assert_eq!( + idx_to_commit, + self.next_idx_to_commit.fetch_add(1, Ordering::SeqCst) + ); + } Ok(()) } diff --git a/aptos-move/mvhashmap/src/versioned_group_data.rs b/aptos-move/mvhashmap/src/versioned_group_data.rs index 70352fc8fdcab..741efd6ff6c58 100644 --- a/aptos-move/mvhashmap/src/versioned_group_data.rs +++ b/aptos-move/mvhashmap/src/versioned_group_data.rs @@ -17,6 +17,7 @@ use aptos_types::{ use aptos_vm_types::{resolver::ResourceGroupSize, resource_group_adapter::group_size_as_sum}; use claims::assert_some; use dashmap::DashMap; +use equivalent::Equivalent; use move_core_types::value::MoveTypeLayout; use serde::Serialize; use std::{ @@ -46,7 +47,7 @@ pub struct VersionedGroupData { // such as get, where only & of the key is needed. values: VersionedData<(K, T), V>, // TODO: Once AggregatorV1 is deprecated (no V: TransactionWrite trait bound), - // switch to VersionedVersionedData. + // switch to VersionedData. // If an entry exists for a group key in Dashmap, the group is considered initialized. 
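// (On try_commit's new is_appended_epilogue flag above: ordinary commits must both match and
// advance next_idx_to_commit, while an appended epilogue transaction is committed out of band
// and must do neither. A sketch of the resulting call shapes, with illustrative indices only:
//
//   delayed_fields.try_commit(0, ids, false)?; // in order; counter advances 0 -> 1
//   delayed_fields.try_commit(1, ids, false)?; // in order; counter advances 1 -> 2
//   delayed_fields.try_commit(9, ids, true)?;  // epilogue: no order check, counter stays at 2
// )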
group_sizes: DashMap, @@ -62,10 +63,47 @@ pub struct VersionedGroupData { group_tags: DashMap<K, HashSet<T>>, } +// This struct allows us to reference a group key and tag without cloning them. +#[derive(Clone)] +struct GroupKeyRef<'a, K, T> { + group_key: &'a K, + tag: &'a T, +} + +// Implement Equivalent for GroupKeyRef so it can be used to look up (K, T) keys +impl<'a, K, T> Equivalent<(K, T)> for GroupKeyRef<'a, K, T> +where + K: Eq, + T: Eq, +{ + fn equivalent(&self, key: &(K, T)) -> bool { + self.group_key == &key.0 && self.tag == &key.1 + } +} + +// Implement Hash for GroupKeyRef to satisfy dashmap's key requirements +impl<'a, K: Hash, T: Hash> std::hash::Hash for GroupKeyRef<'a, K, T> { + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { + // Hash the same way as (K, T) would hash: field by field, in order. + self.group_key.hash(state); + self.tag.hash(state); + } +} + +// Implement Debug for better error messages +impl<'a, K: Debug, T: Debug> Debug for GroupKeyRef<'a, K, T> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("GroupKeyRef") + .field("group_key", &self.group_key) + .field("tag", &self.tag) + .finish() + } +} + impl< K: Hash + Clone + Debug + Eq, T: Hash + Clone + Debug + Eq + Serialize, - V: TransactionWrite, + V: TransactionWrite + PartialEq, > VersionedGroupData<K, T, V> { pub(crate) fn empty() -> Self { @@ -222,10 +260,11 @@ impl< /// Mark all entries from transaction 'txn_idx' at access path 'key' as estimated writes /// (for future incarnation). Will panic if the entry is not in the data-structure. - pub fn mark_estimate(&self, group_key: &K, txn_idx: TxnIndex, tags: HashSet<T>) { - for tag in tags { - let key = (group_key.clone(), tag); - self.values.mark_estimate(&key, txn_idx); + pub fn mark_estimate(&self, group_key: &K, txn_idx: TxnIndex, tags: &HashSet<T>) { + for tag in tags.iter() { + // Use GroupKeyRef to avoid cloning the group_key + let key_ref = GroupKeyRef { group_key, tag }; + self.values.mark_estimate(&key_ref, txn_idx); } self.group_sizes @@ -239,9 +278,9 @@ impl< /// Remove all entries from transaction 'txn_idx' at access path 'key'. pub fn remove(&self, group_key: &K, txn_idx: TxnIndex, tags: HashSet<T>) { - for tag in tags { - let key = (group_key.clone(), tag); - self.values.remove(&key, txn_idx); + for tag in tags.iter() { + let key_ref = GroupKeyRef { group_key, tag }; + self.values.remove(&key_ref, txn_idx); } // TODO: consider setting size_has_changed flag if e.g.
the size observed @@ -267,10 +306,10 @@ impl< tag: &T, txn_idx: TxnIndex, ) -> Result<(Version, ValueWithLayout<V>), MVGroupError> { - let key = (group_key.clone(), tag.clone()); + let key_ref = GroupKeyRef { group_key, tag }; let initialized = self.group_sizes.contains_key(group_key); - match self.values.fetch_data(&key, txn_idx) { + match self.values.fetch_data(&key_ref, txn_idx) { Ok(MVDataOutput::Versioned(version, value)) => Ok((version, value)), Err(MVDataError::Uninitialized) => Err(if initialized { MVGroupError::TagNotFound @@ -395,7 +434,7 @@ mod test { match test_idx { 0 => { - map.mark_estimate(&ap, 1, HashSet::new()); + map.mark_estimate(&ap, 1, &HashSet::new()); }, 1 => { map.remove(&ap, 2, HashSet::new()); @@ -673,7 +712,7 @@ mod test { ) ); - map.mark_estimate(&ap, 10, (1..3).collect()); + map.mark_estimate(&ap, 10, &(1..3).collect()); assert_matches!(map.fetch_tagged_data(&ap, &1, 12), Err(Dependency(10))); assert_matches!(map.fetch_tagged_data(&ap, &2, 12), Err(Dependency(10))); assert_matches!(map.fetch_tagged_data(&ap, &3, 12), Err(TagNotFound)); @@ -752,7 +791,7 @@ mod test { assert_ok_eq!(map.get_group_size(&ap, 6), idx_5_size); // Despite estimates, should still return size. - map.mark_estimate(&ap, 5, (0..2).collect()); + map.mark_estimate(&ap, 5, &(0..2).collect()); assert_ok_eq!(map.get_group_size(&ap, 12), idx_5_size); assert!(map.validate_group_size(&ap, 12, idx_5_size)); assert!(!map.validate_group_size(&ap, 12, ResourceGroupSize::zero_combined())); @@ -770,7 +809,7 @@ mod test { true ); assert!(!map.group_sizes.get(&ap).unwrap().size_has_changed); - map.mark_estimate(&ap, 5, (0..2).collect()); + map.mark_estimate(&ap, 5, &(0..2).collect()); assert_ok_eq!(map.get_group_size(&ap, 12), idx_5_size); assert!(map.validate_group_size(&ap, 12, idx_5_size)); assert!(!map.validate_group_size(&ap, 12, ResourceGroupSize::zero_concrete())); @@ -796,7 +835,7 @@ mod test { assert!(!map.validate_group_size(&ap, 10, idx_5_size)); assert_ok_eq!(map.get_group_size(&ap, 3), base_size); - map.mark_estimate(&ap, 5, (0..3).collect()); + map.mark_estimate(&ap, 5, &(0..3).collect()); assert_matches!( map.get_group_size(&ap, 12), Err(MVGroupError::Dependency(5)) @@ -1057,4 +1096,110 @@ mod test { ) ); } + + #[test] + fn group_key_ref_equivalence_and_hashing() { + use std::hash::{DefaultHasher, Hash, Hasher}; + + let dashmap: DashMap<(u32, u32), String> = DashMap::new(); + + // Test with a range of keys and tags (1..50 x 1..50 = 2401 combinations) + for k in 1u32..50u32 { + for t in 1u32..50u32 { + let tuple_key = (k, t); + let ref_key = GroupKeyRef { + group_key: &k, + tag: &t, + }; + let expected_value = format!("value_{}_{}", k, t); + + // Test 1: Verify that (K, T) and GroupKeyRef hash to the same value + let mut hasher1 = DefaultHasher::new(); + tuple_key.hash(&mut hasher1); + let tuple_hash = hasher1.finish(); + + let mut hasher2 = DefaultHasher::new(); + ref_key.hash(&mut hasher2); + let ref_hash = hasher2.finish(); + + assert_eq!( + tuple_hash, ref_hash, + "Tuple ({}, {}) and GroupKeyRef should hash to the same value", + k, t + ); + + // Test 2: Test equivalence trait directly + assert!( + ref_key.equivalent(&tuple_key), + "GroupKeyRef should be equivalent to corresponding tuple ({}, {})", + k, + t + ); + // Test with different values to ensure non-equivalence + let different_tuple = (k, t + 1000); + assert!( + !ref_key.equivalent(&different_tuple), + "GroupKeyRef should not be equivalent to different tuple ({}, {})", + k, + t + 1000 + ); + + // Test 3: Insert using tuple key + 
dashmap.insert(tuple_key, expected_value.clone()); + + // Test 4: Access using GroupKeyRef - should find the same entry + let retrieved = dashmap.get(&ref_key); + assert!( + retrieved.is_some(), + "Should be able to access entry ({}, {}) using GroupKeyRef", + k, + t + ); + assert_eq!( + retrieved.unwrap().as_str(), + expected_value, + "Retrieved value should match expected value for ({}, {})", + k, + t + ); + + // Test 5: Remove using GroupKeyRef and verify it's the correct entry + let removed = dashmap.remove(&ref_key); + assert!( + removed.is_some(), + "Should be able to remove entry ({}, {}) using GroupKeyRef", + k, + t + ); + let (removed_key, removed_value) = removed.unwrap(); + assert_eq!( + removed_key, tuple_key, + "Removed key should match original tuple key ({}, {})", + k, t + ); + assert_eq!( + removed_value, expected_value, + "Removed value should match expected value for ({}, {})", + k, t + ); + + // Verify entry is actually removed + assert!( + dashmap.get(&ref_key).is_none(), + "Entry ({}, {}) should be removed and not accessible", + k, + t + ); + assert!( + dashmap.get(&tuple_key).is_none(), + "Entry ({}, {}) should be removed and not accessible via tuple key", + k, + t + ); + } + } + + // Verify all entries are removed + assert_eq!(dashmap.len(), 0, "All entries should be removed"); + } } diff --git a/aptos-move/replay-benchmark/src/overrides.rs b/aptos-move/replay-benchmark/src/overrides.rs index 3837920cd4b7b..203c5dd36b80c 100644 --- a/aptos-move/replay-benchmark/src/overrides.rs +++ b/aptos-move/replay-benchmark/src/overrides.rs @@ -12,7 +12,7 @@ use anyhow::bail; use aptos_framework::{natives::code::PackageRegistry, BuildOptions, BuiltPackage}; use aptos_gas_schedule::LATEST_GAS_FEATURE_VERSION; -use aptos_logger::error; +use aptos_logger::{error, warn}; use aptos_types::{ on_chain_config::{FeatureFlag, Features, GasScheduleV2, OnChainConfig}, state_store::{state_key::StateKey, state_value::StateValue, StateView}, @@ -74,8 +74,8 @@ impl OverrideConfig { bail!("Enabled and disabled feature flags cannot overlap") } if matches!(gas_feature_version, Some(v) if v > LATEST_GAS_FEATURE_VERSION) { - bail!( - "Gas feature version must be at most the latest one: {}", + warn!( + "Gas feature version is greater than the latest one: {}", LATEST_GAS_FEATURE_VERSION ); } diff --git a/aptos-move/replay-benchmark/src/workload.rs b/aptos-move/replay-benchmark/src/workload.rs index f94089450457c..f0e164e09c664 100644 --- a/aptos-move/replay-benchmark/src/workload.rs +++ b/aptos-move/replay-benchmark/src/workload.rs @@ -41,7 +41,7 @@ impl From for Workload { TransactionSliceMetadata::chunk(txn_block.begin_version, end); let signature_verified_txns = into_signature_verified_block(txn_block.transactions); - let txn_provider = DefaultTxnProvider::new(signature_verified_txns); + let txn_provider = DefaultTxnProvider::new_without_info(signature_verified_txns); Self { txn_provider, diff --git a/aptos-move/script-composer/src/helpers.rs b/aptos-move/script-composer/src/helpers.rs index adb27fe8bea59..b40c8d0a27469 100644 --- a/aptos-move/script-composer/src/helpers.rs +++ b/aptos-move/script-composer/src/helpers.rs @@ -9,7 +9,7 @@ use move_binary_format::{ }; use move_core_types::{ identifier::IdentStr, - language_storage::{ModuleId, TypeTag}, + language_storage::{FunctionParamOrReturnTag, ModuleId, TypeTag}, transaction_argument::TransactionArgument, vm_status::StatusCode, }; @@ -29,11 +29,6 @@ pub(crate) fn import_type_tag( type_tag: &TypeTag, module_resolver: &BTreeMap, ) -> PartialVMResult 
{ - let to_list = |script_builder: &mut CompiledScriptBuilder, ts: &[TypeTag]| { - ts.iter() - .map(|t| import_type_tag(script_builder, t, module_resolver)) - .collect::>>() - }; Ok(match type_tag { TypeTag::Address => SignatureToken::Address, TypeTag::U8 => SignatureToken::U8, @@ -56,17 +51,44 @@ pub(crate) fn import_type_tag( if s.type_args.is_empty() { SignatureToken::Struct(struct_idx) } else { - SignatureToken::StructInstantiation( - struct_idx, - to_list(script_builder, &s.type_args)?, - ) + let type_args = s + .type_args + .iter() + .map(|t| import_type_tag(script_builder, t, module_resolver)) + .collect::>>()?; + SignatureToken::StructInstantiation(struct_idx, type_args) } }, - TypeTag::Function(f) => SignatureToken::Function( - to_list(script_builder, &f.args)?, - to_list(script_builder, &f.results)?, - f.abilities, - ), + TypeTag::Function(f) => { + let to_list = |script_builder: &mut CompiledScriptBuilder, + ts: &[FunctionParamOrReturnTag]| { + ts.iter() + .map(|t| { + Ok(match t { + FunctionParamOrReturnTag::Reference(t) => SignatureToken::Reference( + Box::new(import_type_tag(script_builder, t, module_resolver)?), + ), + FunctionParamOrReturnTag::MutableReference(t) => { + SignatureToken::MutableReference(Box::new(import_type_tag( + script_builder, + t, + module_resolver, + )?)) + }, + FunctionParamOrReturnTag::Value(t) => { + import_type_tag(script_builder, t, module_resolver)? + }, + }) + }) + .collect::>>() + }; + + SignatureToken::Function( + to_list(script_builder, &f.args)?, + to_list(script_builder, &f.results)?, + f.abilities, + ) + }, }) } diff --git a/aptos-move/vm-genesis/src/lib.rs b/aptos-move/vm-genesis/src/lib.rs index 1fcca982da74a..8d5e1770667c3 100644 --- a/aptos-move/vm-genesis/src/lib.rs +++ b/aptos-move/vm-genesis/src/lib.rs @@ -17,7 +17,10 @@ use aptos_gas_schedule::{ AptosGasParameters, InitialGasSchedule, ToOnChainGasSchedule, LATEST_GAS_FEATURE_VERSION, }; use aptos_types::{ - account_config::{self, aptos_test_root_address, events::NewEpochEvent, CORE_CODE_ADDRESS}, + account_config::{ + self, aptos_test_root_address, events::NewEpochEvent, CORE_CODE_ADDRESS, + EXPERIMENTAL_CODE_ADDRESS, + }, chain_id::ChainId, contract_event::{ContractEvent, ContractEventV1}, executable::ModulePath, @@ -156,6 +159,9 @@ pub fn encode_aptos_mainnet_genesis_transaction( let genesis_change_set_configs = genesis_vm.genesis_change_set_configs(); let mut session = genesis_vm.new_genesis_session(&resolver, HashValue::zero()); + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + // On-chain genesis process. 
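// (The traversal_storage / traversal_context pair created above is threaded through every
// genesis helper below, instead of each exec_function call constructing its own. The storage
// arena must be declared first so that it outlives the context that borrows it; each helper
// then reborrows the same &mut traversal_context, sharing module-traversal state across the
// whole genesis session. A shape sketch, with helper_a / helper_b as illustrative stand-ins
// for the initialize_* calls:
//
//   let traversal_storage = TraversalStorage::new();                      // arena, declared first
//   let mut traversal_context = TraversalContext::new(&traversal_storage);
//   helper_a(&mut session, &module_storage, &mut traversal_context);
//   helper_b(&mut session, &module_storage, &mut traversal_context);
// )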
let consensus_config = OnChainConsensusConfig::default_for_genesis(); let execution_config = OnChainExecutionConfig::default_for_genesis(); @@ -163,6 +169,7 @@ pub fn encode_aptos_mainnet_genesis_transaction( initialize( &mut session, &module_storage, + &mut traversal_context, chain_id, genesis_config, &consensus_config, @@ -172,23 +179,45 @@ pub fn encode_aptos_mainnet_genesis_transaction( initialize_features( &mut session, &module_storage, + &mut traversal_context, genesis_config .initial_features_override .clone() .map(Features::into_flag_vec), ); - initialize_aptos_coin(&mut session, &module_storage); - initialize_on_chain_governance(&mut session, &module_storage, genesis_config); - create_accounts(&mut session, &module_storage, accounts); - create_employee_validators(&mut session, &module_storage, employees, genesis_config); - create_and_initialize_validators_with_commission(&mut session, &module_storage, validators); - set_genesis_end(&mut session, &module_storage); + initialize_aptos_coin(&mut session, &module_storage, &mut traversal_context); + initialize_on_chain_governance( + &mut session, + &module_storage, + &mut traversal_context, + genesis_config, + ); + create_accounts( + &mut session, + &module_storage, + &mut traversal_context, + accounts, + ); + create_employee_validators( + &mut session, + &module_storage, + &mut traversal_context, + employees, + genesis_config, + ); + create_and_initialize_validators_with_commission( + &mut session, + &module_storage, + &mut traversal_context, + validators, + ); + set_genesis_end(&mut session, &module_storage, &mut traversal_context); // Reconfiguration should happen after all on-chain invocations. - emit_new_block_and_epoch_event(&mut session, &module_storage); + emit_new_block_and_epoch_event(&mut session, &module_storage, &mut traversal_context); // Create a change set with all initialized resources. - let mut change_set = assert_ok!(session.finish(&genesis_change_set_configs, &module_storage)); + let mut change_set = assert_ok!(session.finish(&genesis_change_set_configs, &module_storage,)); // Publish the framework, using a different session id, in case both sessions create tables. let mut new_id = [0u8; 32]; @@ -258,10 +287,14 @@ pub fn encode_genesis_change_set( let genesis_change_set_configs = genesis_vm.genesis_change_set_configs(); let mut session = genesis_vm.new_genesis_session(&resolver, HashValue::zero()); + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + // On-chain genesis process. 
initialize( &mut session, &module_storage, + &mut traversal_context, chain_id, genesis_config, consensus_config, @@ -271,52 +304,85 @@ pub fn encode_genesis_change_set( initialize_features( &mut session, &module_storage, + &mut traversal_context, genesis_config .initial_features_override .clone() .map(Features::into_flag_vec), ); if genesis_config.is_test { - initialize_core_resources_and_aptos_coin(&mut session, &module_storage, core_resources_key); + initialize_core_resources_and_aptos_coin( + &mut session, + &module_storage, + &mut traversal_context, + core_resources_key, + ); } else { - initialize_aptos_coin(&mut session, &module_storage); + initialize_aptos_coin(&mut session, &module_storage, &mut traversal_context); } - initialize_config_buffer(&mut session, &module_storage); - initialize_dkg(&mut session, &module_storage); - initialize_reconfiguration_state(&mut session, &module_storage); + initialize_config_buffer(&mut session, &module_storage, &mut traversal_context); + initialize_dkg(&mut session, &module_storage, &mut traversal_context); + initialize_reconfiguration_state(&mut session, &module_storage, &mut traversal_context); let randomness_config = genesis_config .randomness_config_override .clone() .unwrap_or_else(OnChainRandomnessConfig::default_for_genesis); - initialize_randomness_api_v0_config(&mut session, &module_storage); - initialize_randomness_config_seqnum(&mut session, &module_storage); - initialize_randomness_config(&mut session, &module_storage, randomness_config); - initialize_randomness_resources(&mut session, &module_storage); - initialize_on_chain_governance(&mut session, &module_storage, genesis_config); - initialize_account_abstraction(&mut session, &module_storage); - create_and_initialize_validators(&mut session, &module_storage, validators); + initialize_randomness_api_v0_config(&mut session, &module_storage, &mut traversal_context); + initialize_randomness_config_seqnum(&mut session, &module_storage, &mut traversal_context); + initialize_randomness_config( + &mut session, + &module_storage, + &mut traversal_context, + randomness_config, + ); + initialize_randomness_resources(&mut session, &module_storage, &mut traversal_context); + initialize_on_chain_governance( + &mut session, + &module_storage, + &mut traversal_context, + genesis_config, + ); + initialize_account_abstraction(&mut session, &module_storage, &mut traversal_context); + create_and_initialize_validators( + &mut session, + &module_storage, + &mut traversal_context, + validators, + ); if genesis_config.is_test { - allow_core_resources_to_set_version(&mut session, &module_storage); + allow_core_resources_to_set_version(&mut session, &module_storage, &mut traversal_context); } let jwk_consensus_config = genesis_config .jwk_consensus_config_override .clone() .unwrap_or_else(OnChainJWKConsensusConfig::default_for_genesis); - initialize_jwk_consensus_config(&mut session, &module_storage, &jwk_consensus_config); - initialize_jwks_resources(&mut session, &module_storage); + initialize_jwk_consensus_config( + &mut session, + &module_storage, + &mut traversal_context, + &jwk_consensus_config, + ); + initialize_jwks_resources(&mut session, &module_storage, &mut traversal_context); initialize_keyless_accounts( &mut session, &module_storage, + &mut traversal_context, chain_id, genesis_config.initial_jwks.clone(), genesis_config.keyless_groth16_vk.clone(), ); - set_genesis_end(&mut session, &module_storage); + initialize_confidential_asset( + &mut session, + &module_storage, + chain_id, + &mut 
traversal_context, + ); + set_genesis_end(&mut session, &module_storage, &mut traversal_context); // Reconfiguration should happen after all on-chain invocations. - emit_new_block_and_epoch_event(&mut session, &module_storage); + emit_new_block_and_epoch_event(&mut session, &module_storage, &mut traversal_context); - let mut change_set = assert_ok!(session.finish(&genesis_change_set_configs, &module_storage)); + let mut change_set = assert_ok!(session.finish(&genesis_change_set_configs, &module_storage,)); // Publish the framework, using a different id, in case both sessions create tables. let mut new_id = [0u8; 32]; @@ -373,28 +439,30 @@ fn validate_genesis_config(genesis_config: &GenesisConfiguration) { ); } -fn exec_function( +fn exec_function_internal( session: &mut SessionExt, module_storage: &impl ModuleStorage, + traversal_context: &mut TraversalContext, module_name: &str, function_name: &str, ty_args: Vec, args: Vec>, + address: AccountAddress, ) { - let storage = TraversalStorage::new(); session .execute_function_bypass_visibility( - &ModuleId::new(CORE_CODE_ADDRESS, Identifier::new(module_name).unwrap()), + &ModuleId::new(address, Identifier::new(module_name).unwrap()), &Identifier::new(function_name).unwrap(), ty_args, args, &mut UnmeteredGasMeter, - &mut TraversalContext::new(&storage), + traversal_context, module_storage, ) .unwrap_or_else(|e| { panic!( - "Error calling {}.{}: ({:#x}) {}", + "Error calling {}.{}.{}: ({:#x}) {}", + address, module_name, function_name, e.sub_status().unwrap_or_default(), @@ -403,9 +471,52 @@ fn exec_function( }); } +fn exec_function( + session: &mut SessionExt, + module_storage: &impl ModuleStorage, + traversal_context: &mut TraversalContext, + module_name: &str, + function_name: &str, + ty_args: Vec, + args: Vec>, +) { + exec_function_internal( + session, + module_storage, + traversal_context, + module_name, + function_name, + ty_args, + args, + CORE_CODE_ADDRESS, + ); +} + +fn exec_experimental_function( + session: &mut SessionExt, + module_storage: &impl ModuleStorage, + traversal_context: &mut TraversalContext, + module_name: &str, + function_name: &str, + ty_args: Vec, + args: Vec>, +) { + exec_function_internal( + session, + module_storage, + traversal_context, + module_name, + function_name, + ty_args, + args, + EXPERIMENTAL_CODE_ADDRESS, + ); +} + fn initialize( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, chain_id: ChainId, genesis_config: &GenesisConfiguration, consensus_config: &OnChainConsensusConfig, @@ -436,6 +547,7 @@ fn initialize( exec_function( session, module_storage, + traversal_context, GENESIS_MODULE_NAME, "initialize", vec![], @@ -460,6 +572,7 @@ fn initialize( fn initialize_features( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, features_override: Option>, ) { let features: Vec = features_override @@ -475,6 +588,7 @@ fn initialize_features( exec_function( session, module_storage, + traversal_context, "features", "change_feature_flags_internal", vec![], @@ -485,10 +599,12 @@ fn initialize_features( fn initialize_aptos_coin( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, GENESIS_MODULE_NAME, "initialize_aptos_coin", vec![], @@ -499,10 +615,12 @@ fn initialize_aptos_coin( fn initialize_config_buffer( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, 
+ traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, CONFIG_BUFFER_MODULE_NAME, "initialize", vec![], @@ -513,10 +631,12 @@ fn initialize_config_buffer( fn initialize_dkg( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, DKG_MODULE_NAME, "initialize", vec![], @@ -527,10 +647,12 @@ fn initialize_dkg( fn initialize_randomness_config_seqnum( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, RANDOMNESS_CONFIG_SEQNUM_MODULE_NAME, "initialize", vec![], @@ -541,10 +663,12 @@ fn initialize_randomness_config_seqnum( fn initialize_randomness_api_v0_config( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, RANDOMNESS_API_V0_CONFIG_MODULE_NAME, "initialize", vec![], @@ -559,11 +683,13 @@ fn initialize_randomness_api_v0_config( fn initialize_randomness_config( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, randomness_config: OnChainRandomnessConfig, ) { exec_function( session, module_storage, + traversal_context, RANDOMNESS_CONFIG_MODULE_NAME, "initialize", vec![], @@ -577,10 +703,12 @@ fn initialize_randomness_config( fn initialize_randomness_resources( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, RANDOMNESS_MODULE_NAME, "initialize", vec![], @@ -591,10 +719,12 @@ fn initialize_randomness_resources( fn initialize_account_abstraction( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, ACCOUNT_ABSTRACTION_MODULE_NAME, "initialize", vec![], @@ -604,6 +734,7 @@ fn initialize_account_abstraction( exec_function( session, module_storage, + traversal_context, ACCOUNT_ABSTRACTION_MODULE_NAME, "register_derivable_authentication_function", vec![], @@ -620,6 +751,7 @@ fn initialize_account_abstraction( exec_function( session, module_storage, + traversal_context, ACCOUNT_ABSTRACTION_MODULE_NAME, "register_derivable_authentication_function", vec![], @@ -634,6 +766,7 @@ fn initialize_account_abstraction( exec_function( session, module_storage, + traversal_context, ACCOUNT_ABSTRACTION_MODULE_NAME, "register_derivable_authentication_function", vec![], @@ -649,10 +782,12 @@ fn initialize_account_abstraction( fn initialize_reconfiguration_state( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, RECONFIGURATION_STATE_MODULE_NAME, "initialize", vec![], @@ -663,11 +798,13 @@ fn initialize_reconfiguration_state( fn initialize_jwk_consensus_config( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, jwk_consensus_config: &OnChainJWKConsensusConfig, ) { exec_function( session, module_storage, + traversal_context, JWK_CONSENSUS_CONFIG_MODULE_NAME, "initialize", vec![], @@ -681,10 +818,12 @@ fn initialize_jwk_consensus_config( fn initialize_jwks_resources( session: &mut SessionExt, module_storage: 
&impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, JWKS_MODULE_NAME, "initialize", vec![], @@ -695,10 +834,12 @@ fn initialize_jwks_resources( fn set_genesis_end( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, GENESIS_MODULE_NAME, "set_genesis_end", vec![], @@ -709,12 +850,14 @@ fn set_genesis_end( fn initialize_core_resources_and_aptos_coin( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, core_resources_key: &Ed25519PublicKey, ) { let core_resources_auth_key = AuthenticationKey::ed25519(core_resources_key); exec_function( session, module_storage, + traversal_context, GENESIS_MODULE_NAME, "initialize_core_resources_and_aptos_coin", vec![], @@ -729,11 +872,13 @@ fn initialize_core_resources_and_aptos_coin( fn initialize_on_chain_governance( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, genesis_config: &GenesisConfiguration, ) { exec_function( session, module_storage, + traversal_context, GOVERNANCE_MODULE_NAME, "initialize", vec![], @@ -749,6 +894,7 @@ fn initialize_on_chain_governance( fn initialize_keyless_accounts( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, chain_id: ChainId, mut initial_jwks: Vec, vk: Option, @@ -757,6 +903,7 @@ fn initialize_keyless_accounts( exec_function( session, module_storage, + traversal_context, KEYLESS_ACCOUNT_MODULE_NAME, "update_configuration", vec![], @@ -770,6 +917,7 @@ fn initialize_keyless_accounts( exec_function( session, module_storage, + traversal_context, KEYLESS_ACCOUNT_MODULE_NAME, "update_groth16_verification_key", vec![], @@ -801,6 +949,7 @@ fn initialize_keyless_accounts( exec_function( session, module_storage, + traversal_context, JWKS_MODULE_NAME, "set_patches", vec![], @@ -812,9 +961,29 @@ fn initialize_keyless_accounts( } } +fn initialize_confidential_asset( + session: &mut SessionExt, + module_storage: &impl AptosModuleStorage, + chain_id: ChainId, + traversal_context: &mut TraversalContext, +) { + if !chain_id.is_mainnet() && !chain_id.is_testnet() { + exec_experimental_function( + session, + module_storage, + traversal_context, + "confidential_asset", + "init_module_for_genesis", + vec![], + serialize_values(&vec![MoveValue::Signer(EXPERIMENTAL_CODE_ADDRESS)]), + ); + } +} + fn create_accounts( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, accounts: &[AccountBalance], ) { let accounts_bytes = bcs::to_bytes(accounts).expect("AccountMaps can be serialized"); @@ -823,6 +992,7 @@ fn create_accounts( exec_function( session, module_storage, + traversal_context, GENESIS_MODULE_NAME, "create_accounts", vec![], @@ -833,6 +1003,7 @@ fn create_accounts( fn create_employee_validators( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, employees: &[EmployeePool], genesis_config: &GenesisConfiguration, ) { @@ -846,6 +1017,7 @@ fn create_employee_validators( exec_function( session, module_storage, + traversal_context, GENESIS_MODULE_NAME, "create_employee_validators", vec![], @@ -859,6 +1031,7 @@ fn create_employee_validators( fn create_and_initialize_validators( session: &mut SessionExt, module_storage: &impl 
AptosModuleStorage, + traversal_context: &mut TraversalContext, validators: &[Validator], ) { let validators_bytes = bcs::to_bytes(validators).expect("Validators can be serialized"); @@ -867,6 +1040,7 @@ fn create_and_initialize_validators( exec_function( session, module_storage, + traversal_context, GENESIS_MODULE_NAME, "create_initialize_validators", vec![], @@ -877,6 +1051,7 @@ fn create_and_initialize_validators( fn create_and_initialize_validators_with_commission( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, validators: &[ValidatorWithCommissionRate], ) { let validators_bytes = bcs::to_bytes(validators).expect("Validators can be serialized"); @@ -888,6 +1063,7 @@ fn create_and_initialize_validators_with_commission( exec_function( session, module_storage, + traversal_context, GENESIS_MODULE_NAME, "create_initialize_validators_with_commission", vec![], @@ -898,10 +1074,12 @@ fn create_and_initialize_validators_with_commission( fn allow_core_resources_to_set_version( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, VERSION_MODULE_NAME, "initialize_for_test", vec![], @@ -912,12 +1090,14 @@ fn allow_core_resources_to_set_version( fn initialize_package( session: &mut SessionExt, module_storage: &impl ModuleStorage, + traversal_context: &mut TraversalContext, addr: AccountAddress, package: &ReleasePackage, ) { exec_function( session, module_storage, + traversal_context, CODE_MODULE_NAME, "initialize", vec![], @@ -1007,6 +1187,9 @@ fn publish_framework( let resolver = state_view.as_move_resolver(); let mut session = genesis_vm.new_genesis_session(&resolver, hash_value); + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + for pack in &framework.packages { // Unfortunately, package does not contain address information, so we have to access its // modules to extract the destination address. 
@@ -1017,11 +1200,17 @@ fn publish_framework( .1 .self_id() .address(); - initialize_package(&mut session, &module_storage, addr, pack); + initialize_package( + &mut session, + &module_storage, + &mut traversal_context, + addr, + pack, + ); } let change_set = - assert_ok!(session.finish(&genesis_vm.genesis_change_set_configs(), &module_storage)); + assert_ok!(session.finish(&genesis_vm.genesis_change_set_configs(), &module_storage,)); (change_set, module_write_set) } @@ -1029,10 +1218,12 @@ fn publish_framework( fn emit_new_block_and_epoch_event( session: &mut SessionExt, module_storage: &impl AptosModuleStorage, + traversal_context: &mut TraversalContext, ) { exec_function( session, module_storage, + traversal_context, "block", "emit_genesis_block_event", vec![], @@ -1043,6 +1234,7 @@ fn emit_new_block_and_epoch_event( exec_function( session, module_storage, + traversal_context, "reconfiguration", "emit_genesis_reconfiguration_event", vec![], diff --git a/config/Cargo.toml b/config/Cargo.toml index b9c45b0436ef6..808dbe9624b8b 100644 --- a/config/Cargo.toml +++ b/config/Cargo.toml @@ -20,6 +20,7 @@ aptos-logger = { workspace = true } aptos-secure-storage = { workspace = true } aptos-short-hex-str = { workspace = true } aptos-temppath = { workspace = true } +aptos-transaction-filters = { workspace = true } aptos-types = { workspace = true } arr_macro = { workspace = true } bcs = { workspace = true } diff --git a/config/src/config/api_config.rs b/config/src/config/api_config.rs index a70d17411def4..d736dad227e6c 100644 --- a/config/src/config/api_config.rs +++ b/config/src/config/api_config.rs @@ -2,7 +2,6 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::transaction_filter_type::{Filter, Matcher}; use crate::{ config::{ config_sanitizer::ConfigSanitizer, gas_estimation_config::GasEstimationConfig, @@ -80,8 +79,6 @@ pub struct ApiConfig { pub gas_estimation: GasEstimationConfig, /// Periodically call gas estimation pub periodic_gas_estimation_ms: Option, - /// Configuration to filter simulation requests. - pub simulation_filter: Filter, /// Configuration to filter view function requests. pub view_filter: ViewFilter, /// Periodically log stats for view function and simulate transaction usage @@ -139,7 +136,6 @@ impl Default for ApiConfig { runtime_worker_multiplier: 2, gas_estimation: GasEstimationConfig::default(), periodic_gas_estimation_ms: Some(30_000), - simulation_filter: Filter::default(), view_filter: ViewFilter::default(), periodic_function_stats_sec: Some(60), wait_by_hash_timeout_ms: 1_000, @@ -194,16 +190,6 @@ impl ConfigSanitizer for ApiConfig { )); } - // We don't support Block ID based simulation filters. 
- for rule in api_config.simulation_filter.rules() { - if let Matcher::BlockId(_) = rule.matcher() { - return Err(Error::ConfigSanitizerFailed( - sanitizer_name, - "Block ID based simulation filters are not supported!".into(), - )); - } - } - // Sanitize the gas estimation config GasEstimationConfig::sanitize(node_config, node_type, chain_id)?; diff --git a/config/src/config/config_optimizer.rs b/config/src/config/config_optimizer.rs index 0165cbadb975e..bbcdcb8bf03f5 100644 --- a/config/src/config/config_optimizer.rs +++ b/config/src/config/config_optimizer.rs @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use super::{ - ConsensusObserverConfig, Identity, IdentityFromConfig, IdentitySource, IndexerGrpcConfig, - StorageConfig, + ConsensusConfig, ConsensusObserverConfig, Identity, IdentityFromConfig, IdentitySource, + IndexerGrpcConfig, StorageConfig, }; use crate::{ config::{ @@ -111,6 +111,9 @@ impl ConfigOptimizer for NodeConfig { if AdminServiceConfig::optimize(node_config, local_config_yaml, node_type, chain_id)? { optimizers_with_modifications.push(AdminServiceConfig::get_optimizer_name()); } + if ConsensusConfig::optimize(node_config, local_config_yaml, node_type, chain_id)? { + optimizers_with_modifications.push(ConsensusConfig::get_optimizer_name()); + } if ConsensusObserverConfig::optimize(node_config, local_config_yaml, node_type, chain_id)? { optimizers_with_modifications.push(ConsensusObserverConfig::get_optimizer_name()); } diff --git a/config/src/config/consensus_config.rs b/config/src/config/consensus_config.rs index 04c4c05847036..b8884e75bebf5 100644 --- a/config/src/config/consensus_config.rs +++ b/config/src/config/consensus_config.rs @@ -6,17 +6,20 @@ use super::DEFEAULT_MAX_BATCH_TXNS; use crate::config::{ - config_sanitizer::ConfigSanitizer, node_config_loader::NodeType, Error, NodeConfig, - QuorumStoreConfig, ReliableBroadcastConfig, SafetyRulesConfig, BATCH_PADDING_BYTES, + config_optimizer::ConfigOptimizer, config_sanitizer::ConfigSanitizer, + node_config_loader::NodeType, Error, NodeConfig, QuorumStoreConfig, ReliableBroadcastConfig, + SafetyRulesConfig, BATCH_PADDING_BYTES, }; use aptos_crypto::_once_cell::sync::Lazy; use aptos_types::chain_id::ChainId; use cfg_if::cfg_if; use serde::{Deserialize, Serialize}; +use serde_yaml::Value; use std::path::PathBuf; // NOTE: when changing, make sure to update QuorumStoreBackPressureConfig::backlog_txn_limit_count as well. 
const MAX_SENDING_BLOCK_TXNS_AFTER_FILTERING: u64 = 1800; +const MAX_SENDING_OPT_BLOCK_TXNS_AFTER_FILTERING: u64 = 1000; const MAX_SENDING_BLOCK_TXNS: u64 = 5000; pub(crate) static MAX_RECEIVING_BLOCK_TXNS: Lazy = Lazy::new(|| 10000.max(2 * MAX_SENDING_BLOCK_TXNS)); @@ -32,6 +35,7 @@ pub struct ConsensusConfig { pub max_network_channel_size: usize, pub max_sending_block_txns: u64, pub max_sending_block_txns_after_filtering: u64, + pub max_sending_opt_block_txns_after_filtering: u64, pub max_sending_block_bytes: u64, pub max_sending_inline_txns: u64, pub max_sending_inline_bytes: u64, @@ -95,7 +99,8 @@ pub struct ConsensusConfig { pub max_pending_rounds_in_commit_vote_cache: u64, pub optimistic_sig_verification: bool, pub enable_round_timeout_msg: bool, - pub enable_pipeline: bool, + pub enable_optimistic_proposal_rx: bool, + pub enable_optimistic_proposal_tx: bool, } /// Deprecated @@ -187,6 +192,7 @@ impl Default for ConsensusConfig { max_network_channel_size: 1024, max_sending_block_txns: MAX_SENDING_BLOCK_TXNS, max_sending_block_txns_after_filtering: MAX_SENDING_BLOCK_TXNS_AFTER_FILTERING, + max_sending_opt_block_txns_after_filtering: MAX_SENDING_OPT_BLOCK_TXNS_AFTER_FILTERING, max_sending_block_bytes: 3 * 1024 * 1024, // 3MB max_receiving_block_txns: *MAX_RECEIVING_BLOCK_TXNS, max_sending_inline_txns: 100, @@ -217,16 +223,16 @@ impl Default for ConsensusConfig { // increase uncontrollably, and we know when to go to state sync. // Considering block gas limit and pipeline backpressure should keep number of blocks // in the pipline very low, we can keep this limit pretty low, too. - vote_back_pressure_limit: 7, + vote_back_pressure_limit: 12, min_max_txns_in_block_after_filtering_from_backpressure: MIN_BLOCK_TXNS_AFTER_FILTERING, execution_backpressure: Some(ExecutionBackpressureConfig { txn_limit: Some(ExecutionBackpressureTxnLimitConfig { lookback_config: ExecutionBackpressureLookbackConfig { num_blocks_to_look_at: 18, - min_block_time_ms_to_activate: 100, + min_block_time_ms_to_activate: 50, min_blocks_to_activate: 4, metric: ExecutionBackpressureMetric::Percentile(0.5), - target_block_time_ms: 120, + target_block_time_ms: 60, }, min_calibrated_txns_per_block: 8, }), @@ -236,7 +242,7 @@ impl Default for ConsensusConfig { min_block_time_ms_to_activate: 10, min_blocks_to_activate: 4, metric: ExecutionBackpressureMetric::Mean, - target_block_time_ms: 120, + target_block_time_ms: 60, }, block_execution_overhead_ms: 10, min_calibrated_block_gas_limit: 2000, @@ -363,7 +369,8 @@ impl Default for ConsensusConfig { max_pending_rounds_in_commit_vote_cache: 100, optimistic_sig_verification: true, enable_round_timeout_msg: true, - enable_pipeline: true, + enable_optimistic_proposal_rx: true, + enable_optimistic_proposal_tx: false, } } } @@ -513,6 +520,29 @@ impl ConfigSanitizer for ConsensusConfig { } } +// TODO: Re-enable pre-commit for VFNs and PFNs once the feature supports +// a rollback mechanism (to tolerate execution divergence in fullnodes). 
+impl ConfigOptimizer for ConsensusConfig { + fn optimize( + node_config: &mut NodeConfig, + local_config_yaml: &Value, + node_type: NodeType, + _chain_id: Option<ChainId>, + ) -> Result<bool, Error> { + let consensus_config = &mut node_config.consensus; + let local_consensus_config_yaml = &local_config_yaml["consensus"]; + + // Disable pre-commit for VFNs and PFNs (if they are not manually set) + let mut modified_config = false; + if local_consensus_config_yaml["enable_pre_commit"].is_null() && !node_type.is_validator() { + consensus_config.enable_pre_commit = false; + modified_config = true; + } + + Ok(modified_config) + } +} + /// Returns true iff consensus-only-perf-test is enabled fn is_consensus_only_perf_test_enabled() -> bool { cfg_if! { @@ -773,4 +803,72 @@ mod test { ConsensusConfig::sanitize(&node_config, NodeType::ValidatorFullnode, None).unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } + + #[test] + fn test_optimize_pre_commit() { + // Create a node config with pre-commit enabled + let mut node_config = create_config_with_pre_commit_enabled(); + + // Optimize the config for a validator + let modified_config = ConsensusConfig::optimize( + &mut node_config, + &serde_yaml::from_str("{}").unwrap(), // An empty local config + NodeType::Validator, + None, + ) + .unwrap(); + + // Verify that the config was not modified, and that pre-commit is still enabled + assert!(!modified_config); + assert!(node_config.consensus.enable_pre_commit); + + // Next, optimize the config for a validator fullnode + let modified_config = ConsensusConfig::optimize( + &mut node_config, + &serde_yaml::from_str("{}").unwrap(), // An empty local config + NodeType::ValidatorFullnode, + None, + ) + .unwrap(); + + // Verify that the config was modified, and that pre-commit is disabled + assert!(modified_config); + assert!(!node_config.consensus.enable_pre_commit); + + // Create a node config with pre-commit enabled + let mut node_config = create_config_with_pre_commit_enabled(); + + // Create a local config with pre-commit manually disabled + let local_config_yaml = serde_yaml::from_str( + r#" + consensus: + enable_pre_commit: false + "#, + ) + .unwrap(); + + // Optimize the config for a public fullnode (using the local config) + let modified_config = ConsensusConfig::optimize( + &mut node_config, + &local_config_yaml, + NodeType::PublicFullnode, + None, + ) + .unwrap(); + + // Verify that the config was not modified (manually set values are respected), + // and that pre-commit is still enabled in the node config + assert!(!modified_config); + assert!(node_config.consensus.enable_pre_commit); + } + + /// Creates a node config with pre-commit enabled + fn create_config_with_pre_commit_enabled() -> NodeConfig { + NodeConfig { + consensus: ConsensusConfig { + enable_pre_commit: true, + ..Default::default() + }, + ..Default::default() + } + } } diff --git a/config/src/config/consensus_observer_config.rs b/config/src/config/consensus_observer_config.rs index 0cbd8dd9d0189..da2965f3c5882 100644 --- a/config/src/config/consensus_observer_config.rs +++ b/config/src/config/consensus_observer_config.rs @@ -20,8 +20,6 @@ pub struct ConsensusObserverConfig { pub observer_enabled: bool, /// Whether the consensus publisher is enabled pub publisher_enabled: bool, - /// Whether to use new pipeline - pub enable_pipeline: bool, /// Maximum number of pending network messages pub max_network_channel_size: u64, @@ -64,7 +62,6 @@ impl Default for ConsensusObserverConfig { Self { observer_enabled: false, publisher_enabled: false, - enable_pipeline: true, max_network_channel_size: 1000,
max_parallel_serialization_tasks: num_cpus::get(), // Default to the number of CPUs network_request_timeout_ms: 5_000, // 5 seconds diff --git a/config/src/config/execution_config.rs b/config/src/config/execution_config.rs index 0959ba8aa9d6d..1374df5788d2e 100644 --- a/config/src/config/execution_config.rs +++ b/config/src/config/execution_config.rs @@ -5,8 +5,7 @@ use super::WaypointConfig; use crate::config::{ config_optimizer::ConfigOptimizer, config_sanitizer::ConfigSanitizer, - node_config_loader::NodeType, transaction_filter_type::Filter, utils::RootPath, Error, - NodeConfig, + node_config_loader::NodeType, utils::RootPath, Error, NodeConfig, }; use aptos_types::{chain_id::ChainId, transaction::Transaction, waypoint::Waypoint}; use serde::{Deserialize, Serialize}; @@ -50,8 +49,6 @@ pub struct ExecutionConfig { pub paranoid_hot_potato_verification: bool, /// Enables enhanced metrics around processed transactions pub processed_transactions_detailed_counters: bool, - /// Enables filtering of transactions before they are sent to execution - pub transaction_filter: Filter, /// Used during DB bootstrapping pub genesis_waypoint: Option, } @@ -84,7 +81,6 @@ impl Default for ExecutionConfig { paranoid_hot_potato_verification: true, discard_failed_blocks: false, processed_transactions_detailed_counters: false, - transaction_filter: Filter::empty(), genesis_waypoint: None, } } diff --git a/config/src/config/mod.rs b/config/src/config/mod.rs index bc67df4381f09..a11e9e87d43a0 100644 --- a/config/src/config/mod.rs +++ b/config/src/config/mod.rs @@ -37,7 +37,7 @@ mod safety_rules_config; mod secure_backend_config; mod state_sync_config; mod storage_config; -pub mod transaction_filter_type; +mod transaction_filters_config; mod utils; // All public usage statements should be declared below @@ -69,3 +69,4 @@ pub use safety_rules_config::*; pub use secure_backend_config::*; pub use state_sync_config::*; pub use storage_config::*; +pub use transaction_filters_config::*; diff --git a/config/src/config/node_config.rs b/config/src/config/node_config.rs index f124dee3b2a33..f1991e384009e 100644 --- a/config/src/config/node_config.rs +++ b/config/src/config/node_config.rs @@ -8,9 +8,10 @@ use crate::{ internal_indexer_db_config::InternalIndexerDBConfig, jwk_consensus_config::JWKConsensusConfig, netbench_config::NetbenchConfig, node_config_loader::NodeConfigLoader, node_startup_config::NodeStartupConfig, - persistable_config::PersistableConfig, utils::RootPath, AdminServiceConfig, ApiConfig, - BaseConfig, ConsensusConfig, Error, ExecutionConfig, IndexerConfig, IndexerGrpcConfig, - InspectionServiceConfig, LoggerConfig, MempoolConfig, NetworkConfig, + persistable_config::PersistableConfig, + transaction_filters_config::TransactionFiltersConfig, utils::RootPath, AdminServiceConfig, + ApiConfig, BaseConfig, ConsensusConfig, Error, ExecutionConfig, IndexerConfig, + IndexerGrpcConfig, InspectionServiceConfig, LoggerConfig, MempoolConfig, NetworkConfig, PeerMonitoringServiceConfig, SafetyRulesTestConfig, StateSyncConfig, StorageConfig, }, network_id::NetworkId, @@ -83,6 +84,8 @@ pub struct NodeConfig { #[serde(default)] pub storage: StorageConfig, #[serde(default)] + pub transaction_filters: TransactionFiltersConfig, + #[serde(default)] pub validator_network: Option, #[serde(default)] pub indexer_db_config: InternalIndexerDBConfig, diff --git a/config/src/config/quorum_store_config.rs b/config/src/config/quorum_store_config.rs index e4c87be9e2ee1..aff47ab637e19 100644 --- 
a/config/src/config/quorum_store_config.rs +++ b/config/src/config/quorum_store_config.rs @@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize}; use std::time::Duration; pub const BATCH_PADDING_BYTES: usize = 160; -pub const DEFEAULT_MAX_BATCH_TXNS: usize = 250; +pub const DEFEAULT_MAX_BATCH_TXNS: usize = 100; const DEFAULT_MAX_NUM_BATCHES: usize = 10; #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)] @@ -107,7 +107,7 @@ impl Default for QuorumStoreConfig { channel_size: 1000, proof_timeout_ms: 10000, batch_generation_poll_interval_ms: 25, - batch_generation_min_non_empty_interval_ms: 100, + batch_generation_min_non_empty_interval_ms: 50, batch_generation_max_interval_ms: 250, sender_max_batch_txns: DEFEAULT_MAX_BATCH_TXNS, // TODO: on next release, remove BATCH_PADDING_BYTES @@ -116,7 +116,7 @@ impl Default for QuorumStoreConfig { sender_max_total_txns: 1500, // TODO: on next release, remove DEFAULT_MAX_NUM_BATCHES * BATCH_PADDING_BYTES sender_max_total_bytes: 4 * 1024 * 1024 - DEFAULT_MAX_NUM_BATCHES * BATCH_PADDING_BYTES, - receiver_max_batch_txns: 250, + receiver_max_batch_txns: 150, receiver_max_batch_bytes: 1024 * 1024 + BATCH_PADDING_BYTES, receiver_max_num_batches: 20, receiver_max_total_txns: 2000, @@ -125,7 +125,7 @@ impl Default for QuorumStoreConfig { + BATCH_PADDING_BYTES, batch_request_num_peers: 5, batch_request_retry_limit: 10, - batch_request_retry_interval_ms: 1000, + batch_request_retry_interval_ms: 500, batch_request_rpc_timeout_ms: 5000, batch_expiry_gap_when_init_usecs: Duration::from_secs(60).as_micros() as u64, remote_batch_expiry_gap_when_init_usecs: Duration::from_millis(500).as_micros() as u64, diff --git a/config/src/config/storage_config.rs b/config/src/config/storage_config.rs index a737bf71393cb..a38f904c01cda 100644 --- a/config/src/config/storage_config.rs +++ b/config/src/config/storage_config.rs @@ -152,10 +152,14 @@ pub struct RocksdbConfigs { pub state_merkle_db_config: RocksdbConfig, pub state_kv_db_config: RocksdbConfig, pub index_db_config: RocksdbConfig, - // Note: Not ready for production use yet. 
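Editorial note on the storage_config.rs hunk continuing just below: it drops the "not ready for production" caveat and flips the default, using a field-level serde default function so configs that omit `enable_storage_sharding` now deserialize to true. A minimal sketch of that serde pattern (the struct name is a stand-in; assumes the serde and serde_yaml crates):

    use serde::Deserialize;

    fn default_to_true() -> bool {
        true
    }

    #[derive(Debug, Deserialize)]
    struct RocksdbConfigsSketch {
        // An absent field falls back to the named function, not bool::default().
        #[serde(default = "default_to_true")]
        enable_storage_sharding: bool,
    }

    fn main() {
        // An older config file that never mentions the field...
        let cfg: RocksdbConfigsSketch = serde_yaml::from_str("{}").unwrap();
        // ...now opts into storage sharding by default.
        assert!(cfg.enable_storage_sharding);
    }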
+ #[serde(default = "default_to_true")] pub enable_storage_sharding: bool, } +fn default_to_true() -> bool { + true +} + impl Default for RocksdbConfigs { fn default() -> Self { Self { @@ -166,7 +170,7 @@ impl Default for RocksdbConfigs { max_open_files: 1000, ..Default::default() }, - enable_storage_sharding: false, + enable_storage_sharding: true, } } } diff --git a/config/src/config/transaction_filter_type.rs b/config/src/config/transaction_filter_type.rs deleted file mode 100644 index 8804d6295352a..0000000000000 --- a/config/src/config/transaction_filter_type.rs +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use aptos_crypto::HashValue; -use aptos_types::{ - account_address::AccountAddress, - transaction::{SignedTransaction, TransactionExecutableRef}, -}; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum Matcher { - All, // Matches any transactions - BlockId(HashValue), // Matches transactions in a specific block (identified by block ID) - BlockTimeStampGreaterThan(u64), // Matches transactions in blocks with timestamps greater than the specified value - BlockTimeStampLessThan(u64), // Matches transactions in blocks with timestamps less than the specified value - TransactionId(HashValue), // Matches a specific transaction by its ID - Sender(AccountAddress), // Matches transactions sent by a specific account address - ModuleAddress(AccountAddress), // Matches transactions that call a module at a specific address - EntryFunction(AccountAddress, String, String), // Matches transactions that call a specific entry function in a module - BlockEpochGreaterThan(u64), // Matches transactions in blocks with epochs greater than the specified value - BlockEpochLessThan(u64), // Matches transactions in blocks with epochs less than the specified value - MatchesAllOf(Vec), // Matches transactions that satisfy all the provided conditions (i.e., logical AND) -} - -impl Matcher { - fn matches( - &self, - block_id: HashValue, - block_epoch: u64, - block_timestamp: u64, - txn: &SignedTransaction, - ) -> bool { - match self { - Matcher::All => true, - Matcher::BlockId(id) => block_id == *id, - Matcher::BlockTimeStampGreaterThan(timestamp) => block_timestamp > *timestamp, - Matcher::BlockTimeStampLessThan(timestamp) => block_timestamp < *timestamp, - Matcher::TransactionId(id) => txn.committed_hash() == *id, - Matcher::Sender(sender) => txn.sender() == *sender, - Matcher::ModuleAddress(address) => match txn.payload().executable_ref() { - Ok(TransactionExecutableRef::EntryFunction(entry_function)) - if !txn.payload().is_multisig() => - { - *entry_function.module().address() == *address - }, - _ => false, - }, - Matcher::EntryFunction(address, module_name, function) => { - match txn.payload().executable_ref() { - Ok(TransactionExecutableRef::EntryFunction(entry_function)) - if !txn.payload().is_multisig() => - { - *entry_function.module().address() == *address - && entry_function.module().name().to_string() == *module_name - && entry_function.function().to_string() == *function - }, - _ => false, - } - }, - Matcher::BlockEpochGreaterThan(epoch) => block_epoch > *epoch, - Matcher::BlockEpochLessThan(epoch) => block_epoch < *epoch, - Matcher::MatchesAllOf(matchers) => matchers - .iter() - .all(|matcher| matcher.matches(block_id, block_epoch, block_timestamp, txn)), - } - } -} - -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum Rule { - Allow(Matcher), - 
Deny(Matcher), -} - -impl Rule { - pub fn matcher(&self) -> &Matcher { - match self { - Rule::Allow(matcher) => matcher, - Rule::Deny(matcher) => matcher, - } - } -} - -enum EvalResult { - Allow, - Deny, - NoMatch, -} - -impl Rule { - fn eval( - &self, - block_id: HashValue, - block_epoch: u64, - block_timestamp: u64, - txn: &SignedTransaction, - ) -> EvalResult { - match self { - Rule::Allow(matcher) => { - if matcher.matches(block_id, block_epoch, block_timestamp, txn) { - EvalResult::Allow - } else { - EvalResult::NoMatch - } - }, - Rule::Deny(matcher) => { - if matcher.matches(block_id, block_epoch, block_timestamp, txn) { - EvalResult::Deny - } else { - EvalResult::NoMatch - } - }, - } - } -} - -/// A filter that can be used to allow or deny transactions from being executed. It contains a -/// set of rules that are evaluated one by one in the order of declaration. If a rule matches, -/// the transaction is either allowed or denied depending on the rule. If no rule matches, -/// the transaction is allowed. -/// -/// For example, a filter might look like this: -/// rules: -/// - Allow: -/// Sender: f8871acf2c827d40e23b71f6ff2b9accef8dbb17709b88bd9eb95e6bb748c25a -/// - Allow: -/// MatchesAllOf: -/// - Sender: 0xcd3357a925307983f7fbf1a433e87e49eda93fbb94d0d31974e68b5d60e09f3a -/// - BlockEpochGreaterThan: 10 -/// - Allow: -/// ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000001" -/// - Allow: -/// EntryFunction: -/// - "0000000000000000000000000000000000000000000000000000000000000002" -/// - test -/// - check -/// - Allow: -/// EntryFunction: -/// - "0000000000000000000000000000000000000000000000000000000000000002" -/// - test -/// - new -/// - Deny: All -/// This filter allows transactions with the following properties: -/// - Sender with address f8871acf2c827d40e23b71f6ff2b9accef8dbb17709b88bd9eb95e6bb748c25a. -/// - Sender with address cd3357a925307983f7fbf1a433e87e49eda93fbb94d0d31974e68b5d60e09f3a, and -/// block epoch greater than 10. -/// - Transactions for the module with address 0000000000000000000000000000000000000000000000000000000000000001. -/// - Transactions that call the entry function test::check or test::new from the module with -/// address 0000000000000000000000000000000000000000000000000000000000000002. -/// All other transactions are denied. 
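Editorial note: the doc comment above describes first-match-wins semantics, which the removed Filter implements below. A self-contained sketch of those semantics, with stand-in types (`u8` stands in for AccountAddress; the real Matcher/Rule are the ones being deleted from this file):

    enum Matcher {
        All,
        Sender(u8), // stand-in for AccountAddress
    }

    enum Rule {
        Allow(Matcher),
        Deny(Matcher),
    }

    fn matches(matcher: &Matcher, sender: u8) -> bool {
        match matcher {
            Matcher::All => true,
            Matcher::Sender(s) => *s == sender,
        }
    }

    fn allows(rules: &[Rule], sender: u8) -> bool {
        for rule in rules {
            match rule {
                Rule::Allow(m) if matches(m, sender) => return true,
                Rule::Deny(m) if matches(m, sender) => return false,
                _ => continue, // NoMatch: fall through to the next rule
            }
        }
        true // default-allow when no rule matched
    }

    fn main() {
        let rules = vec![Rule::Allow(Matcher::Sender(7)), Rule::Deny(Matcher::All)];
        assert!(allows(&rules, 7)); // explicitly allowed sender
        assert!(!allows(&rules, 9)); // everything else hits Deny(All)
    }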
-#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] -pub struct Filter { - rules: Vec, -} - -impl Filter { - pub fn empty() -> Self { - Self { rules: vec![] } - } - - pub fn is_empty(&self) -> bool { - self.rules.is_empty() - } - - fn add_match_rule(mut self, allow: bool, matcher: Matcher) -> Self { - if allow { - self.rules.push(Rule::Allow(matcher)); - } else { - self.rules.push(Rule::Deny(matcher)); - } - self - } - - pub fn add_all_filter(self, allow: bool) -> Self { - let matcher = Matcher::All; - self.add_match_rule(allow, matcher) - } - - pub fn add_block_id_filter(self, allow: bool, block_id: HashValue) -> Self { - let matcher = Matcher::BlockId(block_id); - self.add_match_rule(allow, matcher) - } - - pub fn add_block_timestamp_greater_than_filter(self, allow: bool, timestamp: u64) -> Self { - let matcher = Matcher::BlockTimeStampGreaterThan(timestamp); - self.add_match_rule(allow, matcher) - } - - pub fn add_block_timestamp_less_than_filter(self, allow: bool, timestamp: u64) -> Self { - let matcher = Matcher::BlockTimeStampLessThan(timestamp); - self.add_match_rule(allow, matcher) - } - - pub fn add_transaction_id_filter(self, allow: bool, txn_id: HashValue) -> Self { - let matcher = Matcher::TransactionId(txn_id); - self.add_match_rule(allow, matcher) - } - - pub fn add_sender_filter(self, allow: bool, sender: AccountAddress) -> Self { - let matcher = Matcher::Sender(sender); - self.add_match_rule(allow, matcher) - } - - pub fn add_module_address_filter(self, allow: bool, address: AccountAddress) -> Self { - let matcher = Matcher::ModuleAddress(address); - self.add_match_rule(allow, matcher) - } - - pub fn add_entry_function_filter( - self, - allow: bool, - address: AccountAddress, - module_name: String, - function: String, - ) -> Self { - let matcher = Matcher::EntryFunction(address, module_name, function); - self.add_match_rule(allow, matcher) - } - - pub fn add_block_epoch_greater_than_filter(self, allow: bool, epoch: u64) -> Self { - let matcher = Matcher::BlockEpochGreaterThan(epoch); - self.add_match_rule(allow, matcher) - } - - pub fn add_block_epoch_less_than_filter(self, allow: bool, epoch: u64) -> Self { - let matcher = Matcher::BlockEpochLessThan(epoch); - self.add_match_rule(allow, matcher) - } - - pub fn add_matches_all_of_filter(self, allow: bool, matchers: Vec) -> Self { - let matcher = Matcher::MatchesAllOf(matchers); - self.add_match_rule(allow, matcher) - } - - pub fn rules(&self) -> &[Rule] { - &self.rules - } - - pub fn allows( - &self, - block_id: HashValue, - block_epoch: u64, - block_timestamp: u64, - txn: &SignedTransaction, - ) -> bool { - for rule in &self.rules { - // Rules are evaluated in the order and the first rule that matches is used. If no rule - // matches, the transaction is allowed. 
- match rule.eval(block_id, block_epoch, block_timestamp, txn) { - EvalResult::Allow => return true, - EvalResult::Deny => return false, - EvalResult::NoMatch => continue, - } - } - true - } -} diff --git a/config/src/config/transaction_filters_config.rs b/config/src/config/transaction_filters_config.rs new file mode 100644 index 0000000000000..08065260520a6 --- /dev/null +++ b/config/src/config/transaction_filters_config.rs @@ -0,0 +1,123 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_transaction_filters::{ + batch_transaction_filter::BatchTransactionFilter, + block_transaction_filter::BlockTransactionFilter, transaction_filter::TransactionFilter, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)] +#[serde(default, deny_unknown_fields)] +pub struct TransactionFiltersConfig { + pub api_filter: TransactionFilterConfig, // Filter configuration for the API (e.g., transaction simulation) + pub consensus_filter: BlockTransactionFilterConfig, // Filter configuration for consensus + pub execution_filter: BlockTransactionFilterConfig, // Filter configuration for execution + pub mempool_filter: TransactionFilterConfig, // Filter configuration for mempool + pub quorum_store_filter: BatchTransactionFilterConfig, // Filter configuration for quorum store +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +#[serde(default, deny_unknown_fields)] +pub struct TransactionFilterConfig { + filter_enabled: bool, // Whether the filter is enabled + transaction_filter: TransactionFilter, // The transaction filter to apply +} + +impl TransactionFilterConfig { + pub fn new(filter_enabled: bool, transaction_filter: TransactionFilter) -> Self { + Self { + filter_enabled, + transaction_filter, + } + } + + /// Returns true iff the filter is enabled and not empty + pub fn is_enabled(&self) -> bool { + self.filter_enabled && !self.transaction_filter.is_empty() + } + + /// Returns a reference to the transaction filter + pub fn transaction_filter(&self) -> &TransactionFilter { + &self.transaction_filter + } +} + +impl Default for TransactionFilterConfig { + fn default() -> Self { + Self { + filter_enabled: false, // Disable the filter + transaction_filter: TransactionFilter::empty(), // Use an empty filter + } + } +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +#[serde(default, deny_unknown_fields)] +pub struct BatchTransactionFilterConfig { + filter_enabled: bool, // Whether the filter is enabled + batch_transaction_filter: BatchTransactionFilter, // The batch transaction filter to apply +} + +impl BatchTransactionFilterConfig { + pub fn new(filter_enabled: bool, batch_transaction_filter: BatchTransactionFilter) -> Self { + Self { + filter_enabled, + batch_transaction_filter, + } + } + + /// Returns true iff the filter is enabled and not empty + pub fn is_enabled(&self) -> bool { + self.filter_enabled && !self.batch_transaction_filter.is_empty() + } + + /// Returns a reference to the batch transaction filter + pub fn batch_transaction_filter(&self) -> &BatchTransactionFilter { + &self.batch_transaction_filter + } +} + +impl Default for BatchTransactionFilterConfig { + fn default() -> Self { + Self { + filter_enabled: false, // Disable the filter + batch_transaction_filter: BatchTransactionFilter::empty(), // Use an empty filter + } + } +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +#[serde(default, deny_unknown_fields)] +pub struct 
BlockTransactionFilterConfig { + filter_enabled: bool, // Whether the filter is enabled + block_transaction_filter: BlockTransactionFilter, // The block transaction filter to apply +} + +impl BlockTransactionFilterConfig { + pub fn new(filter_enabled: bool, block_transaction_filter: BlockTransactionFilter) -> Self { + Self { + filter_enabled, + block_transaction_filter, + } + } + + /// Returns true iff the filter is enabled and not empty + pub fn is_enabled(&self) -> bool { + self.filter_enabled && !self.block_transaction_filter.is_empty() + } + + /// Returns a reference to the block transaction filter + pub fn block_transaction_filter(&self) -> &BlockTransactionFilter { + &self.block_transaction_filter + } +} + +impl Default for BlockTransactionFilterConfig { + fn default() -> Self { + Self { + filter_enabled: false, // Disable the filter + block_transaction_filter: BlockTransactionFilter::empty(), // Use an empty filter + } + } +} diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 84eac7943ac72..7b6183aab16b1 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -45,6 +45,7 @@ aptos-short-hex-str = { workspace = true } aptos-storage-interface = { workspace = true } aptos-temppath = { workspace = true } aptos-time-service = { workspace = true } +aptos-transaction-filters = { workspace = true } aptos-types = { workspace = true } aptos-validator-transaction-pool = { workspace = true } aptos-vm = { workspace = true } @@ -77,7 +78,6 @@ scopeguard = { workspace = true } serde = { workspace = true } serde_bytes = { workspace = true } serde_json = { workspace = true } -serde_yaml = { workspace = true } sha3 = { workspace = true } strum = { workspace = true } strum_macros = { workspace = true } diff --git a/consensus/consensus-types/src/block.rs b/consensus/consensus-types/src/block.rs index 3af591dbbb7ef..e807c563aef5c 100644 --- a/consensus/consensus-types/src/block.rs +++ b/consensus/consensus-types/src/block.rs @@ -5,9 +5,10 @@ use crate::{ block_data::{BlockData, BlockType}, common::{Author, Payload, Round}, + opt_block_data::OptBlockData, quorum_cert::QuorumCert, }; -use anyhow::{bail, ensure, format_err}; +use anyhow::{bail, ensure, format_err, Result}; use aptos_bitvec::BitVec; use aptos_crypto::{bls12381, hash::CryptoHash, HashValue}; use aptos_infallible::duration_since_epoch; @@ -124,6 +125,83 @@ impl Block { } } + /// Returns the number of proofs, the number of txns in the proofs, and the bytes of txns in the proofs + pub fn proof_stats(&self) -> (usize, usize, usize) { + match self.block_data.payload() { + None => (0, 0, 0), + Some(payload) => match payload { + Payload::InQuorumStore(pos) => (pos.num_proofs(), pos.num_txns(), pos.num_bytes()), + Payload::DirectMempool(_txns) => (0, 0, 0), + Payload::InQuorumStoreWithLimit(pos) => ( + pos.proof_with_data.num_proofs(), + pos.proof_with_data.num_txns(), + pos.proof_with_data.num_bytes(), + ), + Payload::QuorumStoreInlineHybrid(_inline_batches, proof_with_data, _) + | Payload::QuorumStoreInlineHybridV2(_inline_batches, proof_with_data, _) => ( + proof_with_data.num_proofs(), + proof_with_data.num_txns(), + proof_with_data.num_bytes(), + ), + Payload::OptQuorumStore(opt_quorum_store_payload) => ( + opt_quorum_store_payload.proof_with_data().num_proofs(), + opt_quorum_store_payload.proof_with_data().num_txns(), + opt_quorum_store_payload.proof_with_data().num_bytes(), + ), + }, + } + } + + /// Returns the number of inline batches, the number of txns in the inline batches, and the bytes of txns in the inline batches + 
pub fn inline_batch_stats(&self) -> (usize, usize, usize) { + match self.block_data.payload() { + None => (0, 0, 0), + Some(payload) => match payload { + Payload::QuorumStoreInlineHybrid(inline_batches, _proof_with_data, _) + | Payload::QuorumStoreInlineHybridV2(inline_batches, _proof_with_data, _) => ( + inline_batches.len(), + inline_batches + .iter() + .map(|(b, _)| b.num_txns() as usize) + .sum(), + inline_batches + .iter() + .map(|(b, _)| b.num_bytes() as usize) + .sum(), + ), + Payload::OptQuorumStore(opt_quorum_store_payload) => ( + opt_quorum_store_payload.inline_batches().num_batches(), + opt_quorum_store_payload.inline_batches().num_txns(), + opt_quorum_store_payload.inline_batches().num_bytes(), + ), + _ => (0, 0, 0), + }, + } + } + + /// Returns the number of opt batches, the number of txns in the opt batches, and the bytes of txns in the opt batches + pub fn opt_batch_stats(&self) -> (usize, usize, usize) { + match self.block_data.payload() { + None => (0, 0, 0), + Some(payload) => match payload { + Payload::OptQuorumStore(opt_quorum_store_payload) => ( + opt_quorum_store_payload.opt_batches().len(), + opt_quorum_store_payload + .opt_batches() + .iter() + .map(|b| b.num_txns() as usize) + .sum(), + opt_quorum_store_payload + .opt_batches() + .iter() + .map(|b| b.num_bytes() as usize) + .sum(), + ), + _ => (0, 0, 0), + }, + } + } + pub fn quorum_cert(&self) -> &QuorumCert { self.block_data.quorum_cert() } @@ -169,6 +247,10 @@ impl Block { self.block_data.is_nil_block() } + pub fn is_opt_block(&self) -> bool { + self.block_data.is_opt_block() + } + #[cfg(any(test, feature = "fuzzing"))] pub fn make_genesis_block() -> Self { Self::make_genesis_block_from_ledger_info(&LedgerInfo::mock_genesis(None)) @@ -309,6 +391,15 @@ impl Block { } } + pub fn new_from_opt(opt_block_data: OptBlockData, quorum_cert: QuorumCert) -> Self { + let block_data = BlockData::new_from_opt(opt_block_data, quorum_cert); + Block { + id: block_data.hash(), + block_data, + signature: None, + } + } + pub fn validator_txns(&self) -> Option<&Vec> { self.block_data.validator_txns() } @@ -324,16 +415,33 @@ impl Block { .signature .as_ref() .ok_or_else(|| format_err!("Missing signature in Proposal"))?; - validator.verify(*author, &self.block_data, signature)?; - self.quorum_cert().verify(validator) + let (res1, res2) = rayon::join( + || validator.verify(*author, &self.block_data, signature), + || self.quorum_cert().verify(validator), + ); + res1?; + res2 }, BlockType::ProposalExt(proposal_ext) => { let signature = self .signature .as_ref() .ok_or_else(|| format_err!("Missing signature in Proposal"))?; - validator.verify(*proposal_ext.author(), &self.block_data, signature)?; - self.quorum_cert().verify(validator) + let (res1, res2) = rayon::join( + || validator.verify(*proposal_ext.author(), &self.block_data, signature), + || self.quorum_cert().verify(validator), + ); + res1?; + res2 + }, + BlockType::OptimisticProposal(p) => { + // Note: Optimistic proposal is not signed by proposer unlike normal proposal + let (res1, res2) = rayon::join( + || p.grandparent_qc().verify(validator), + || self.quorum_cert().verify(validator), + ); + res1?; + res2 }, BlockType::DAGBlock { .. } => bail!("We should not accept DAG block from others"), } @@ -441,11 +549,15 @@ impl Block { .collect() } + /// Returns the voters, as BitVec, of the parent block for a normal proposal or + /// the grandparent block for an optimistic proposal. fn previous_bitvec(&self) -> BitVec { - if let BlockType::DAGBlock { parents_bitvec, .. 
} = self.block_data.block_type() { - parents_bitvec.clone() - } else { - self.quorum_cert().ledger_info().get_voters_bitvec().clone() + match self.block_data.block_type() { + BlockType::DAGBlock { parents_bitvec, .. } => parents_bitvec.clone(), + BlockType::OptimisticProposal(p) => { + p.grandparent_qc().ledger_info().get_voters_bitvec().clone() + }, + _ => self.quorum_cert().ledger_info().get_voters_bitvec().clone(), } } diff --git a/consensus/consensus-types/src/block_data.rs b/consensus/consensus-types/src/block_data.rs index a7e923c80e0d4..b88fffc11a5d8 100644 --- a/consensus/consensus-types/src/block_data.rs +++ b/consensus/consensus-types/src/block_data.rs @@ -4,13 +4,17 @@ use crate::{ common::{Author, Payload, Round}, - proposal_ext::ProposalExt, + opt_block_data::OptBlockData, + proposal_ext::{OptBlockBody, ProposalExt}, quorum_cert::QuorumCert, vote_data::VoteData, }; use aptos_bitvec::BitVec; -use aptos_crypto::hash::HashValue; -use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; +use aptos_crypto::{ + hash::{CryptoHash, CryptoHasher}, + HashValue, +}; +use aptos_crypto_derive::CryptoHasher; use aptos_types::{ aggregate_signature::AggregateSignature, block_info::BlockInfo, @@ -49,6 +53,9 @@ pub enum BlockType { /// Proposal with extensions (e.g. system transactions). ProposalExt(ProposalExt), + /// Optimistic proposal. + OptimisticProposal(OptBlockBody), + /// A virtual block that's constructed by nodes from DAG, this is purely a local thing so /// we hide it from serde #[serde(skip_deserializing)] @@ -63,7 +70,7 @@ pub enum BlockType { }, } -#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, CryptoHasher, BCSCryptoHash)] +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, CryptoHasher)] /// Block has the core data of a consensus block that should be persistent when necessary. /// Each block must know the id of its parent and keep the QuorurmCertificate to that parent. pub struct BlockData { @@ -96,6 +103,37 @@ pub struct BlockData { block_type: BlockType, } +impl CryptoHash for BlockData { + type Hasher = BlockDataHasher; + + fn hash(&self) -> HashValue { + let mut state = Self::Hasher::default(); + if self.is_opt_block() { + #[derive(Serialize)] + struct OptBlockDataForHash<'a> { + epoch: u64, + round: Round, + timestamp_usecs: u64, + quorum_cert_vote_data: &'a VoteData, + block_type: &'a BlockType, + } + + let opt_block_data_for_hash = OptBlockDataForHash { + epoch: self.epoch, + round: self.round, + timestamp_usecs: self.timestamp_usecs, + quorum_cert_vote_data: self.quorum_cert.vote_data(), + block_type: &self.block_type, + }; + bcs::serialize_into(&mut state, &opt_block_data_for_hash) + .expect("OptBlockDataForHash must be serializable"); + } else { + bcs::serialize_into(&mut state, &self).expect("BlockData must be serializable"); + } + state.finish() + } +} + impl BlockData { pub fn author(&self) -> Option { match &self.block_type { @@ -103,6 +141,7 @@ impl BlockData { Some(*author) }, BlockType::ProposalExt(p) => Some(*p.author()), + BlockType::OptimisticProposal(p) => Some(*p.author()), _ => None, } } @@ -132,13 +171,15 @@ impl BlockData { Some(payload) }, BlockType::ProposalExt(p) => p.payload(), + BlockType::OptimisticProposal(p) => Some(p.payload()), _ => None, } } pub fn validator_txns(&self) -> Option<&Vec> { match &self.block_type { - BlockType::ProposalExt(proposal_ext) => proposal_ext.validator_txns(), + BlockType::ProposalExt(p) => p.validator_txns(), + BlockType::OptimisticProposal(p) => p.validator_txns(), BlockType::Proposal { .. 
} | BlockType::NilBlock { .. } | BlockType::Genesis => None, BlockType::DAGBlock { validator_txns, .. } => Some(validator_txns), } @@ -176,6 +217,10 @@ impl BlockData { matches!(self.block_type, BlockType::NilBlock { .. }) } + pub fn is_opt_block(&self) -> bool { + matches!(self.block_type, BlockType::OptimisticProposal { .. }) + } + /// the list of consecutive proposers from the immediately preceeding /// rounds that didn't produce a successful block pub fn failed_authors(&self) -> Option<&Vec<(Round, Author)>> { @@ -184,7 +229,7 @@ impl BlockData { | BlockType::NilBlock { failed_authors, .. } | BlockType::DAGBlock { failed_authors, .. } => Some(failed_authors), BlockType::ProposalExt(p) => Some(p.failed_authors()), - BlockType::Genesis => None, + BlockType::OptimisticProposal(_) | BlockType::Genesis => None, } } @@ -355,6 +400,25 @@ impl BlockData { } } + /// Returns an instance of BlockData by converting the OptBlockData to BlockData + /// and adding QC and failed_authors + pub fn new_from_opt(opt_block_data: OptBlockData, quorum_cert: QuorumCert) -> Self { + let OptBlockData { + epoch, + round, + timestamp_usecs, + block_body: proposal_body, + .. + } = opt_block_data; + Self { + epoch, + round, + timestamp_usecs, + quorum_cert, + block_type: BlockType::OptimisticProposal(proposal_body), + } + } + /// It's a reconfiguration suffix block if the parent block's executed state indicates next epoch. pub fn is_reconfiguration_suffix(&self) -> bool { self.quorum_cert.certified_block().has_reconfiguration() diff --git a/consensus/consensus-types/src/common.rs b/consensus/consensus-types/src/common.rs index 9acd527ac8251..9099748c82355 100644 --- a/consensus/consensus-types/src/common.rs +++ b/consensus/consensus-types/src/common.rs @@ -143,7 +143,11 @@ impl ProofWithData { self.proofs.extend(other.proofs); } - pub fn len(&self) -> usize { + pub fn num_proofs(&self) -> usize { + self.proofs.len() + } + + pub fn num_txns(&self) -> usize { self.proofs .iter() .map(|proof| proof.num_txns() as usize) @@ -280,15 +284,15 @@ impl Payload { pub fn len(&self) -> usize { match self { Payload::DirectMempool(txns) => txns.len(), - Payload::InQuorumStore(proof_with_status) => proof_with_status.len(), + Payload::InQuorumStore(proof_with_status) => proof_with_status.num_txns(), Payload::InQuorumStoreWithLimit(proof_with_status) => { // here we return the actual length of the payload; limit is considered at the stage // where we prepare the block from the payload - proof_with_status.proof_with_data.len() + proof_with_status.proof_with_data.num_txns() }, Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) | Payload::QuorumStoreInlineHybridV2(inline_batches, proof_with_data, _) => { - proof_with_data.len() + proof_with_data.num_txns() + inline_batches .iter() .map(|(_, txns)| txns.len()) @@ -301,18 +305,18 @@ impl Payload { pub fn len_for_execution(&self) -> u64 { match self { Payload::DirectMempool(txns) => txns.len() as u64, - Payload::InQuorumStore(proof_with_status) => proof_with_status.len() as u64, + Payload::InQuorumStore(proof_with_status) => proof_with_status.num_txns() as u64, Payload::InQuorumStoreWithLimit(proof_with_status) => { // here we return the actual length of the payload; limit is considered at the stage // where we prepare the block from the payload - (proof_with_status.proof_with_data.len() as u64) + (proof_with_status.proof_with_data.num_txns() as u64) .min(proof_with_status.max_txns_to_execute.unwrap_or(u64::MAX)) }, Payload::QuorumStoreInlineHybrid( inline_batches, 
proof_with_data, max_txns_to_execute, - ) => ((proof_with_data.len() + ) => ((proof_with_data.num_txns() + inline_batches .iter() .map(|(_, txns)| txns.len()) @@ -327,7 +331,7 @@ impl Payload { inline_batches, proof_with_data, execution_limit, - ) => ((proof_with_data.len() + ) => ((proof_with_data.num_txns() + inline_batches .iter() .map(|(_, txns)| txns.len()) diff --git a/consensus/consensus-types/src/lib.rs b/consensus/consensus-types/src/lib.rs index 27ca8b6f92874..216fa059c49ba 100644 --- a/consensus/consensus-types/src/lib.rs +++ b/consensus/consensus-types/src/lib.rs @@ -9,13 +9,14 @@ pub mod block_data; pub mod block_retrieval; pub mod common; pub mod epoch_retrieval; +pub mod opt_block_data; +pub mod opt_proposal_msg; pub mod order_vote; pub mod order_vote_msg; pub mod order_vote_proposal; pub mod payload; pub mod payload_pull_params; pub mod pipeline; -pub mod pipeline_execution_result; pub mod pipelined_block; pub mod proof_of_store; pub mod proposal_ext; diff --git a/consensus/consensus-types/src/opt_block_data.rs b/consensus/consensus-types/src/opt_block_data.rs new file mode 100644 index 0000000000000..99e48bfd24544 --- /dev/null +++ b/consensus/consensus-types/src/opt_block_data.rs @@ -0,0 +1,140 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + common::{Author, Payload, Round}, + proposal_ext::OptBlockBody, + quorum_cert::QuorumCert, +}; +use anyhow::ensure; +use aptos_crypto::HashValue; +use aptos_crypto_derive::CryptoHasher; +use aptos_infallible::duration_since_epoch; +use aptos_types::{block_info::BlockInfo, validator_txn::ValidatorTransaction}; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Display, Formatter}, + ops::Deref, +}; + +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, CryptoHasher)] +/// Same as BlockData, without QC and with parent id +pub struct OptBlockData { + pub epoch: u64, + pub round: Round, + pub timestamp_usecs: u64, + pub parent: BlockInfo, + pub block_body: OptBlockBody, +} + +impl OptBlockData { + pub fn new( + validator_txns: Vec<ValidatorTransaction>, + payload: Payload, + author: Author, + epoch: u64, + round: Round, + timestamp_usecs: u64, + parent: BlockInfo, + grandparent_qc: QuorumCert, + ) -> Self { + Self { + epoch, + round, + timestamp_usecs, + parent, + block_body: OptBlockBody::V0 { + validator_txns, + payload, + author, + grandparent_qc, + }, + } + } + + pub fn epoch(&self) -> u64 { + self.epoch + } + + pub fn parent_id(&self) -> HashValue { + self.parent.id() + } + + pub fn parent(&self) -> &BlockInfo { + &self.parent + } + + pub fn timestamp_usecs(&self) -> u64 { + self.timestamp_usecs + } + + pub fn round(&self) -> Round { + self.round + } + + pub fn verify_well_formed(&self) -> anyhow::Result<()> { + let parent = self.parent(); + let grandparent_qc = self.grandparent_qc().certified_block(); + ensure!( + grandparent_qc.round() + 1 == parent.round(), + "Block's parent's round {} must be one more than grandparent's round {}", + parent.round(), + grandparent_qc.round(), + ); + ensure!( + parent.round() + 1 == self.round(), + "Block's round {} must be one more than parent's round {}", + self.round(), + parent.round(), + ); + ensure!( + grandparent_qc.epoch() == self.epoch() && parent.epoch() == self.epoch(), + "Block's parent and grandparent should be in the same epoch" + ); + ensure!( + !grandparent_qc.has_reconfiguration(), + "Optimistic proposals are disallowed after the reconfiguration block" + ); + 
self.payload().verify_epoch(self.epoch())?; + + ensure!( + self.timestamp_usecs() > parent.timestamp_usecs() + && parent.timestamp_usecs() > grandparent_qc.timestamp_usecs(), + "Blocks must have strictly increasing timestamps" + ); + + let current_ts = duration_since_epoch(); + + // we can say that too far is 5 minutes in the future + const TIMEBOUND: u64 = 300_000_000; + ensure!( + self.timestamp_usecs() <= (current_ts.as_micros() as u64).saturating_add(TIMEBOUND), + "Blocks must not be too far in the future" + ); + Ok(()) + } +} + +impl Deref for OptBlockData { + type Target = OptBlockBody; + + fn deref(&self) -> &Self::Target { + &self.block_body + } +} + +impl Display for OptBlockData { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!( + f, + "[author: {}, epoch: {}, round: {:02}, parent_id: {}, timestamp: {}]", + self.author(), + self.epoch(), + self.round(), + self.parent_id(), + self.timestamp_usecs(), + ) + } +} diff --git a/consensus/consensus-types/src/opt_proposal_msg.rs b/consensus/consensus-types/src/opt_proposal_msg.rs new file mode 100644 index 0000000000000..4bcfab14b6f46 --- /dev/null +++ b/consensus/consensus-types/src/opt_proposal_msg.rs @@ -0,0 +1,120 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + common::Author, opt_block_data::OptBlockData, proof_of_store::ProofCache, sync_info::SyncInfo, +}; +use anyhow::{ensure, Context, Result}; +use aptos_types::validator_verifier::ValidatorVerifier; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct OptProposalMsg { + block_data: OptBlockData, + sync_info: SyncInfo, +} + +impl OptProposalMsg { + pub fn new(block_data: OptBlockData, sync_info: SyncInfo) -> Self { + Self { + block_data, + sync_info, + } + } + + pub fn block_data(&self) -> &OptBlockData { + &self.block_data + } + + pub fn take_block_data(self) -> OptBlockData { + self.block_data + } + + pub fn epoch(&self) -> u64 { + self.block_data.epoch() + } + + pub fn round(&self) -> u64 { + self.block_data.round() + } + + pub fn timestamp_usecs(&self) -> u64 { + self.block_data.timestamp_usecs() + } + + pub fn proposer(&self) -> Author { + *self.block_data.author() + } + + pub fn sync_info(&self) -> &SyncInfo { + &self.sync_info + } + + /// Verifies that the ProposalMsg is well-formed. 
+ pub fn verify_well_formed(&self) -> Result<()> { + self.block_data + .verify_well_formed() + .context("Fail to verify OptProposalMsg's data")?; + ensure!( + self.block_data.round() > 1, + "Proposal for {} has round <= 1", + self.block_data, + ); + ensure!( + self.block_data.epoch() == self.sync_info.epoch(), + "ProposalMsg has different epoch number from SyncInfo" + ); + // Ensure the sync info has the grandparent QC + ensure!( + self.block_data.grandparent_qc().certified_block().id() + == self.sync_info.highest_quorum_cert().certified_block().id(), + "Proposal HQC in SyncInfo certifies {}, but block grandparent id is {}", + self.sync_info.highest_quorum_cert().certified_block().id(), + self.block_data.grandparent_qc().certified_block().id(), + ); + let grandparent_round = self + .block_data + .round() + .checked_sub(2) + .ok_or_else(|| anyhow::anyhow!("proposal round overflowed!"))?; + + let highest_certified_round = self.block_data.grandparent_qc().certified_block().round(); + ensure!( + grandparent_round == highest_certified_round, + "Proposal {} does not have a certified round {}", + self.block_data, + grandparent_round + ); + // Optimistic proposal shouldn't have a timeout certificate + ensure!( + self.sync_info.highest_2chain_timeout_cert().is_none(), + "Optimistic proposal shouldn't have a timeout certificate" + ); + Ok(()) + } + + pub fn verify( + &self, + sender: Author, + validator: &ValidatorVerifier, + proof_cache: &ProofCache, + quorum_store_enabled: bool, + ) -> Result<()> { + ensure!( + self.proposer() == sender, + "OptProposal author {:?} doesn't match sender {:?}", + self.proposer(), + sender + ); + + self.block_data() + .payload() + .verify(validator, proof_cache, quorum_store_enabled)?; + + self.block_data().grandparent_qc().verify(validator)?; + + // Note that we postpone the verification of SyncInfo until it's being used. 
+ self.block_data.verify_well_formed() + } +} diff --git a/consensus/consensus-types/src/payload.rs b/consensus/consensus-types/src/payload.rs index 1c3ae2f4f14d2..8e7a82cbb1bb0 100644 --- a/consensus/consensus-types/src/payload.rs +++ b/consensus/consensus-types/src/payload.rs @@ -44,6 +44,10 @@ where self.batch_summary.extend(other.batch_summary); } + pub fn num_proofs(&self) -> usize { + self.batch_summary.len() + } + pub fn num_txns(&self) -> usize { self.batch_summary .iter() @@ -210,14 +214,18 @@ impl InlineBatch { pub struct InlineBatches(Vec); impl InlineBatches { - fn num_txns(&self) -> usize { + pub fn num_batches(&self) -> usize { + self.0.len() + } + + pub fn num_txns(&self) -> usize { self.0 .iter() .map(|batch| batch.batch_info.num_txns() as usize) .sum() } - fn num_bytes(&self) -> usize { + pub fn num_bytes(&self) -> usize { self.0 .iter() .map(|batch| batch.batch_info.num_bytes() as usize) diff --git a/consensus/consensus-types/src/pipeline_execution_result.rs b/consensus/consensus-types/src/pipeline_execution_result.rs deleted file mode 100644 index 813a105c09753..0000000000000 --- a/consensus/consensus-types/src/pipeline_execution_result.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use aptos_executor_types::{state_compute_result::StateComputeResult, ExecutorResult}; -use aptos_types::transaction::SignedTransaction; -use derivative::Derivative; -use futures::future::BoxFuture; -use std::time::Duration; - -#[derive(Derivative)] -#[derivative(Debug)] -pub struct PipelineExecutionResult { - pub input_txns: Vec, - pub result: StateComputeResult, - pub execution_time: Duration, - #[derivative(Debug = "ignore")] - pub pre_commit_fut: BoxFuture<'static, ExecutorResult<()>>, -} - -impl PipelineExecutionResult { - pub fn new( - input_txns: Vec, - result: StateComputeResult, - execution_time: Duration, - pre_commit_fut: BoxFuture<'static, ExecutorResult<()>>, - ) -> Self { - Self { - input_txns, - result, - execution_time, - pre_commit_fut, - } - } -} diff --git a/consensus/consensus-types/src/pipelined_block.rs b/consensus/consensus-types/src/pipelined_block.rs index b23b50b1f4753..e2a1d56e23085 100644 --- a/consensus/consensus-types/src/pipelined_block.rs +++ b/consensus/consensus-types/src/pipelined_block.rs @@ -7,7 +7,6 @@ use crate::{ common::{Payload, Round}, order_vote_proposal::OrderVoteProposal, pipeline::commit_vote::CommitVote, - pipeline_execution_result::PipelineExecutionResult, quorum_cert::QuorumCert, vote_proposal::VoteProposal, wrapped_ledger_info::WrappedLedgerInfo, @@ -200,7 +199,6 @@ pub struct PipelinedBlock { randomness: OnceCell, pipeline_insertion_time: OnceCell, execution_summary: OnceCell, - pre_commit_fut: Mutex>>>, /// pipeline related fields pipeline_futs: Mutex>, pipeline_tx: Mutex>, @@ -321,24 +319,6 @@ impl PipelinedBlock { } } - pub fn set_execution_result(&self, pipeline_execution_result: PipelineExecutionResult) { - let PipelineExecutionResult { - input_txns: _, - result, - execution_time, - pre_commit_fut, - } = pipeline_execution_result; - - *self.pre_commit_fut.lock() = Some(pre_commit_fut); - - self.set_compute_result(result, execution_time); - } - - #[cfg(any(test, feature = "fuzzing"))] - pub fn mark_successful_pre_commit_for_test(&self) { - *self.pre_commit_fut.lock() = Some(Box::pin(async { Ok(()) })); - } - pub fn set_randomness(&self, randomness: Randomness) { assert!(self.randomness.set(randomness.clone()).is_ok()); } @@ -347,13 +327,6 @@ impl PipelinedBlock { 
assert!(self.pipeline_insertion_time.set(Instant::now()).is_ok()); } - pub fn take_pre_commit_fut(&self) -> BoxFuture<'static, ExecutorResult<()>> { - self.pre_commit_fut - .lock() - .take() - .expect("pre_commit_result_rx missing.") - } - pub fn set_qc(&self, qc: Arc) { *self.block_qc.lock() = Some(qc.clone()); if let Some(tx) = self.pipeline_tx().lock().as_mut() { @@ -395,7 +368,6 @@ impl PipelinedBlock { randomness: OnceCell::new(), pipeline_insertion_time: OnceCell::new(), execution_summary: OnceCell::new(), - pre_commit_fut: Mutex::new(None), pipeline_futs: Mutex::new(None), pipeline_tx: Mutex::new(None), pipeline_abort_handle: Mutex::new(None), @@ -523,12 +495,6 @@ impl PipelinedBlock { /// Pipeline related functions impl PipelinedBlock { - pub fn pipeline_enabled(&self) -> bool { - // if the pipeline_tx is set, the pipeline is enabled, - // we don't use pipeline fut here because it can't be taken when abort - self.pipeline_tx.lock().is_some() - } - pub fn pipeline_futs(&self) -> Option { self.pipeline_futs.lock().clone() } diff --git a/consensus/consensus-types/src/proof_of_store.rs b/consensus/consensus-types/src/proof_of_store.rs index e895244695073..9a912046a5226 100644 --- a/consensus/consensus-types/src/proof_of_store.rs +++ b/consensus/consensus-types/src/proof_of_store.rs @@ -7,65 +7,18 @@ use aptos_crypto::{bls12381, CryptoMaterialError, HashValue}; use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; use aptos_types::{ aggregate_signature::AggregateSignature, ledger_info::SignatureWithStatus, - validator_signer::ValidatorSigner, validator_verifier::ValidatorVerifier, PeerId, + quorum_store::BatchId, validator_signer::ValidatorSigner, + validator_verifier::ValidatorVerifier, PeerId, }; use mini_moka::sync::Cache; use rand::{seq::SliceRandom, thread_rng}; use serde::{Deserialize, Serialize}; use std::{ - cmp::Ordering, fmt::{Display, Formatter}, hash::Hash, ops::Deref, }; -#[derive( - Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash, CryptoHasher, BCSCryptoHash, -)] -pub struct BatchId { - pub id: u64, - /// A number that is stored in the DB and updated only if the value does not exist in - /// the DB: (a) at the start of an epoch, or (b) the DB was wiped. When the nonce is updated, - /// id starts again at 0. Using the current system time allows the nonce to be ordering. 
- pub nonce: u64, -} - -impl BatchId { - pub fn new(nonce: u64) -> Self { - Self { id: 0, nonce } - } - - pub fn new_for_test(id: u64) -> Self { - Self { id, nonce: 0 } - } - - pub fn increment(&mut self) { - self.id += 1; - } -} - -impl PartialOrd for BatchId { - fn partial_cmp(&self, other: &Self) -> Option<Ordering> { - Some(self.cmp(other)) - } -} - -impl Ord for BatchId { - fn cmp(&self, other: &Self) -> Ordering { - match self.nonce.cmp(&other.nonce) { - Ordering::Equal => {}, - ordering => return ordering, - } - self.id.cmp(&other.id) - } -} - -impl Display for BatchId { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!(f, "({}, {})", self.id, self.nonce) - } -} - #[derive( Clone, Debug, Deserialize, Serialize, CryptoHasher, BCSCryptoHash, PartialEq, Eq, Hash, )] diff --git a/consensus/consensus-types/src/proposal_ext.rs b/consensus/consensus-types/src/proposal_ext.rs index a61aa94010c60..e7dbbaa7c3f3b 100644 --- a/consensus/consensus-types/src/proposal_ext.rs +++ b/consensus/consensus-types/src/proposal_ext.rs @@ -1,10 +1,52 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::common::{Author, Payload, Round}; +use crate::{ + common::{Author, Payload, Round}, + quorum_cert::QuorumCert, +}; use aptos_types::validator_txn::ValidatorTransaction; use serde::{Deserialize, Serialize}; +#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] +pub enum OptBlockBody { + V0 { + validator_txns: Vec<ValidatorTransaction>, + // T of the block (e.g., one or more transactions) + payload: Payload, + // Author of the block that can be validated by the author's public key and the signature + author: Author, + // QC of the grandparent block + grandparent_qc: QuorumCert, + }, +} + +impl OptBlockBody { + pub fn author(&self) -> &Author { + match self { + OptBlockBody::V0 { author, .. } => author, + } + } + + pub fn validator_txns(&self) -> Option<&Vec<ValidatorTransaction>> { + match self { + OptBlockBody::V0 { validator_txns, .. } => Some(validator_txns), + } + } + + pub fn payload(&self) -> &Payload { + match self { + OptBlockBody::V0 { payload, .. } => payload, + } + } + + pub fn grandparent_qc(&self) -> &QuorumCert { + match self { + OptBlockBody::V0 { grandparent_qc, ..
} => grandparent_qc, + } + } +} + #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] pub enum ProposalExt { V0 { diff --git a/consensus/consensus-types/src/proposal_msg.rs b/consensus/consensus-types/src/proposal_msg.rs index 5382cfd6e6271..3784651ac88c9 100644 --- a/consensus/consensus-types/src/proposal_msg.rs +++ b/consensus/consensus-types/src/proposal_msg.rs @@ -95,13 +95,21 @@ impl ProposalMsg { sender ); } - self.proposal().payload().map_or(Ok(()), |p| { - p.verify(validator, proof_cache, quorum_store_enabled) - })?; + let (payload_result, sig_result) = rayon::join( + || { + self.proposal().payload().map_or(Ok(()), |p| { + p.verify(validator, proof_cache, quorum_store_enabled) + }) + }, + || { + self.proposal() + .validate_signature(validator) + .map_err(|e| format_err!("{:?}", e)) + }, + ); + payload_result?; + sig_result?; - self.proposal() - .validate_signature(validator) - .map_err(|e| format_err!("{:?}", e))?; // if there is a timeout certificate, verify its signatures if let Some(tc) = self.sync_info.highest_2chain_timeout_cert() { tc.verify(validator).map_err(|e| format_err!("{:?}", e))?; diff --git a/consensus/consensus-types/src/timeout_2chain.rs b/consensus/consensus-types/src/timeout_2chain.rs index 2e9315416d938..7fdb0d8c3a60e 100644 --- a/consensus/consensus-types/src/timeout_2chain.rs +++ b/consensus/consensus-types/src/timeout_2chain.rs @@ -140,28 +140,34 @@ impl TwoChainTimeoutCertificate { /// 2. all signatures are properly formed (timeout.epoch, timeout.round, round) /// 3. timeout.hqc_round == max(signed round) pub fn verify(&self, validators: &ValidatorVerifier) -> anyhow::Result<()> { - // Verify the highest timeout validity. - self.timeout.verify(validators)?; let hqc_round = self.timeout.hqc_round(); - let timeout_messages: Vec<_> = self - .signatures_with_rounds - .get_voters_and_rounds( - &validators - .get_ordered_account_addresses_iter() - .collect_vec(), - ) - .into_iter() - .map(|(_, round)| TimeoutSigningRepr { - epoch: self.timeout.epoch(), - round: self.timeout.round(), - hqc_round: round, - }) - .collect(); - let timeout_messages_ref: Vec<_> = timeout_messages.iter().collect(); - validators.verify_aggregate_signatures( - &timeout_messages_ref, - self.signatures_with_rounds.sig(), - )?; + // Verify the highest timeout validity. 
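Both this hunk and the ProposalMsg::verify hunk above move to the same pattern: two independent, CPU-bound verifications run on Rayon via rayon::join, and errors are propagated only after both sides complete. A minimal sketch of the pattern, with check_signature and check_payload as hypothetical stand-ins:

// Hypothetical stand-ins for the two independent checks.
fn check_signature() -> anyhow::Result<()> {
    Ok(())
}

fn check_payload() -> anyhow::Result<()> {
    Ok(())
}

fn verify_both() -> anyhow::Result<()> {
    // rayon::join runs both closures (potentially in parallel) and returns
    // both results; `?` is applied only afterwards, so a failure in one
    // branch never short-circuits the other.
    let (sig_result, payload_result) = rayon::join(check_signature, check_payload);
    sig_result?;
    payload_result?;
    Ok(())
}

fn main() {
    verify_both().expect("both checks pass");
}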
+ let (timeout_result, sig_result) = rayon::join( + || self.timeout.verify(validators), + || { + let timeout_messages: Vec<_> = self + .signatures_with_rounds + .get_voters_and_rounds( + &validators + .get_ordered_account_addresses_iter() + .collect_vec(), + ) + .into_iter() + .map(|(_, round)| TimeoutSigningRepr { + epoch: self.timeout.epoch(), + round: self.timeout.round(), + hqc_round: round, + }) + .collect(); + let timeout_messages_ref: Vec<_> = timeout_messages.iter().collect(); + validators.verify_aggregate_signatures( + &timeout_messages_ref, + self.signatures_with_rounds.sig(), + ) + }, + ); + timeout_result?; + sig_result?; let signed_hqc = self .signatures_with_rounds .rounds() diff --git a/consensus/safety-rules/src/remote_service.rs b/consensus/safety-rules/src/remote_service.rs index fc7ec1b944f2e..4d9cad3e70bf3 100644 --- a/consensus/safety-rules/src/remote_service.rs +++ b/consensus/safety-rules/src/remote_service.rs @@ -29,7 +29,7 @@ pub trait RemoteService { } pub fn execute(storage: PersistentSafetyStorage, listen_addr: SocketAddr, network_timeout_ms: u64) { - let mut safety_rules = SafetyRules::new(storage); + let mut safety_rules = SafetyRules::new(storage, false); if let Err(e) = safety_rules.consensus_state() { warn!("Unable to print consensus state: {}", e); } diff --git a/consensus/safety-rules/src/safety_rules.rs b/consensus/safety-rules/src/safety_rules.rs index d7ddaa5444180..0e339312571e7 100644 --- a/consensus/safety-rules/src/safety_rules.rs +++ b/consensus/safety-rules/src/safety_rules.rs @@ -43,16 +43,20 @@ pub struct SafetyRules { pub(crate) persistent_storage: PersistentSafetyStorage, pub(crate) validator_signer: Option<ValidatorSigner>, pub(crate) epoch_state: Option<EpochState>, + // Skip signature verification; this can be set when safety rules run in local mode, // where consensus has already verified the messages.
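The hunks below gate each signature check behind this new flag while leaving structural well-formedness checks unconditional. A minimal sketch of the gating pattern, using simplified stand-in types (Verifier here is not the real ValidatorVerifier):

// Simplified stand-ins; Verifier is not the real ValidatorVerifier.
struct Verifier;

impl Verifier {
    fn verify(&self) -> Result<(), String> {
        Ok(())
    }
}

struct SafetyRules {
    skip_sig_verify: bool,
    verifier: Verifier,
}

impl SafetyRules {
    fn verify_qc(&self) -> Result<(), String> {
        // In local mode the flag is true: consensus verified the certificate
        // already, so the redundant signature check is skipped. Structural
        // (well-formedness) checks elsewhere still run unconditionally.
        if !self.skip_sig_verify {
            self.verifier.verify()?;
        }
        Ok(())
    }
}

fn main() {
    let local = SafetyRules { skip_sig_verify: true, verifier: Verifier };
    assert!(local.verify_qc().is_ok());
}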
+ pub(crate) skip_sig_verify: bool, } impl SafetyRules { /// Constructs a new instance of SafetyRules with the given persistent storage and the /// consensus private keys - pub fn new(persistent_storage: PersistentSafetyStorage) -> Self { + pub fn new(persistent_storage: PersistentSafetyStorage, skip_sig_verify: bool) -> Self { Self { persistent_storage, validator_signer: None, epoch_state: None, + skip_sig_verify, } } @@ -67,9 +71,11 @@ impl SafetyRules { self.verify_epoch(proposed_block.epoch(), &safety_data)?; self.verify_qc(proposed_block.quorum_cert())?; - proposed_block - .validate_signature(&self.epoch_state()?.verifier) - .map_err(|error| Error::InvalidProposal(error.to_string()))?; + if !self.skip_sig_verify { + proposed_block + .validate_signature(&self.epoch_state()?.verifier) + .map_err(|error| Error::InvalidProposal(error.to_string()))?; + } proposed_block .verify_well_formed() .map_err(|error| Error::InvalidProposal(error.to_string()))?; @@ -230,8 +236,10 @@ impl SafetyRules { pub(crate) fn verify_qc(&self, qc: &QuorumCert) -> Result<(), Error> { let epoch_state = self.epoch_state()?; - qc.verify(&epoch_state.verifier) - .map_err(|e| Error::InvalidQuorumCertificate(e.to_string()))?; + if !self.skip_sig_verify { + qc.verify(&epoch_state.verifier) + .map_err(|e| Error::InvalidQuorumCertificate(e.to_string()))?; + } Ok(()) } @@ -396,9 +404,11 @@ impl SafetyRules { } // Verify that ledger_info contains at least 2f + 1 distinct signatures - ledger_info - .verify_signatures(&self.epoch_state()?.verifier) - .map_err(|error| Error::InvalidQuorumCertificate(error.to_string()))?; + if !self.skip_sig_verify { + ledger_info + .verify_signatures(&self.epoch_state()?.verifier) + .map_err(|error| Error::InvalidQuorumCertificate(error.to_string()))?; + } // TODO: add guarding rules in unhappy path // TODO: add extension check diff --git a/consensus/safety-rules/src/safety_rules_2chain.rs b/consensus/safety-rules/src/safety_rules_2chain.rs index e93d2cd187405..7218165c22e24 100644 --- a/consensus/safety-rules/src/safety_rules_2chain.rs +++ b/consensus/safety-rules/src/safety_rules_2chain.rs @@ -25,9 +25,11 @@ impl SafetyRules { self.signer()?; let mut safety_data = self.persistent_storage.safety_data()?; self.verify_epoch(timeout.epoch(), &safety_data)?; - timeout - .verify(&self.epoch_state()?.verifier) - .map_err(|e| Error::InvalidTimeout(e.to_string()))?; + if !self.skip_sig_verify { + timeout + .verify(&self.epoch_state()?.verifier) + .map_err(|e| Error::InvalidTimeout(e.to_string()))?; + } if let Some(tc) = timeout_cert { self.verify_tc(tc)?; } @@ -179,8 +181,10 @@ impl SafetyRules { fn verify_tc(&self, tc: &TwoChainTimeoutCertificate) -> Result<(), Error> { let epoch_state = self.epoch_state()?; - tc.verify(&epoch_state.verifier) - .map_err(|e| Error::InvalidTimeoutCertificate(e.to_string()))?; + if !self.skip_sig_verify { + tc.verify(&epoch_state.verifier) + .map_err(|e| Error::InvalidTimeoutCertificate(e.to_string()))?; + } Ok(()) } diff --git a/consensus/safety-rules/src/safety_rules_manager.rs b/consensus/safety-rules/src/safety_rules_manager.rs index 7d87297789d2b..9b2eb1c7656be 100644 --- a/consensus/safety-rules/src/safety_rules_manager.rs +++ b/consensus/safety-rules/src/safety_rules_manager.rs @@ -130,7 +130,7 @@ impl SafetyRulesManager { } pub fn new_local(storage: PersistentSafetyStorage) -> Self { - let safety_rules = SafetyRules::new(storage); + let safety_rules = SafetyRules::new(storage, true); Self { internal_safety_rules:
SafetyRulesWrapper::Local(Arc::new(RwLock::new(safety_rules))), } @@ -144,7 +144,7 @@ impl SafetyRulesManager { } pub fn new_serializer(storage: PersistentSafetyStorage) -> Self { - let safety_rules = SafetyRules::new(storage); + let safety_rules = SafetyRules::new(storage, false); let serializer_service = SerializerService::new(safety_rules); Self { internal_safety_rules: SafetyRulesWrapper::Serializer(Arc::new(RwLock::new( diff --git a/consensus/safety-rules/src/test_utils.rs b/consensus/safety-rules/src/test_utils.rs index ce161c0a5fb14..6f09a8057e8c2 100644 --- a/consensus/safety-rules/src/test_utils.rs +++ b/consensus/safety-rules/src/test_utils.rs @@ -248,7 +248,7 @@ pub fn test_safety_rules() -> SafetyRules { let storage = test_storage(&signer); let (epoch_change_proof, _) = make_genesis(&signer); - let mut safety_rules = SafetyRules::new(storage); + let mut safety_rules = SafetyRules::new(storage, false); safety_rules.initialize(&epoch_change_proof).unwrap(); safety_rules } @@ -257,7 +257,7 @@ pub fn test_safety_rules() -> SafetyRules { pub fn test_safety_rules_uninitialized() -> SafetyRules { let signer = ValidatorSigner::from_int(0); let storage = test_storage(&signer); - SafetyRules::new(storage) + SafetyRules::new(storage, false) } /// Returns a simple serializer for testing purposes. diff --git a/consensus/safety-rules/src/tests/local.rs b/consensus/safety-rules/src/tests/local.rs index 14fa87849378c..f5b7363c88eea 100644 --- a/consensus/safety-rules/src/tests/local.rs +++ b/consensus/safety-rules/src/tests/local.rs @@ -7,7 +7,7 @@ use aptos_types::validator_signer::ValidatorSigner; #[test] fn test() { - suite::run_test_suite(&safety_rules()); + suite::run_test_suite_without_sig_check(&safety_rules()); } fn safety_rules() -> suite::Callback { diff --git a/consensus/safety-rules/src/tests/safety_rules.rs b/consensus/safety-rules/src/tests/safety_rules.rs index a392ab0cbe20e..bae6ddda69d0b 100644 --- a/consensus/safety-rules/src/tests/safety_rules.rs +++ b/consensus/safety-rules/src/tests/safety_rules.rs @@ -14,7 +14,7 @@ fn safety_rules() -> suite::Callback { Box::new(move || { let signer = ValidatorSigner::from_int(0); let storage = test_utils::test_storage(&signer); - let safety_rules = Box::new(SafetyRules::new(storage)); + let safety_rules = Box::new(SafetyRules::new(storage, false)); (safety_rules, signer) }) } diff --git a/consensus/safety-rules/src/tests/suite.rs b/consensus/safety-rules/src/tests/suite.rs index e8f96e69c6554..c8bd53d0fc8a6 100644 --- a/consensus/safety-rules/src/tests/suite.rs +++ b/consensus/safety-rules/src/tests/suite.rs @@ -81,6 +81,27 @@ pub fn run_test_suite(safety_rules: &Callback) { test_order_votes_with_timeout(safety_rules); } +pub fn run_test_suite_without_sig_check(safety_rules: &Callback) { + test_end_to_end(safety_rules); + test_initialize(safety_rules); + test_voting_bad_epoch(safety_rules); + test_sign_old_proposal(safety_rules); + test_sign_proposal_with_bad_signer(safety_rules); + // test_sign_proposal_with_invalid_qc(safety_rules); + test_sign_proposal_with_early_preferred_round(safety_rules); + test_uninitialized_signer(safety_rules); + test_validator_not_in_set(safety_rules); + test_key_not_in_store(safety_rules); + test_2chain_rules(safety_rules); + // test_2chain_timeout(safety_rules); + // test_sign_commit_vote(safety_rules); + test_bad_execution_output(safety_rules); + test_order_votes_correct_execution(safety_rules); + test_order_votes_out_of_order_execution(safety_rules); + test_order_votes_incorrect_qc(safety_rules); + 
test_order_votes_with_timeout(safety_rules); +} + fn test_order_votes_correct_execution(safety_rules: &Callback) { let (mut safety_rules, signer) = safety_rules(); diff --git a/consensus/src/block_preparer.rs b/consensus/src/block_preparer.rs index f911659ab9f74..105128213e0b4 100644 --- a/consensus/src/block_preparer.rs +++ b/consensus/src/block_preparer.rs @@ -5,19 +5,21 @@ use crate::{ counters::{self, MAX_TXNS_FROM_BLOCK_TO_EXECUTE, TXN_SHUFFLE_SECONDS}, payload_manager::TPayloadManager, transaction_deduper::TransactionDeduper, - transaction_filter::TransactionFilter, transaction_shuffler::TransactionShuffler, }; +use aptos_config::config::BlockTransactionFilterConfig; use aptos_consensus_types::{block::Block, quorum_cert::QuorumCert}; +use aptos_crypto::HashValue; use aptos_executor_types::ExecutorResult; use aptos_types::transaction::SignedTransaction; use fail::fail_point; use futures::future::Shared; +use move_core_types::account_address::AccountAddress; use std::{future::Future, sync::Arc, time::Instant}; pub struct BlockPreparer { payload_manager: Arc, - txn_filter: Arc, + txn_filter_config: Arc, txn_deduper: Arc, txn_shuffler: Arc, } @@ -25,13 +27,13 @@ pub struct BlockPreparer { impl BlockPreparer { pub fn new( payload_manager: Arc, - txn_filter: Arc, + txn_filter_config: Arc, txn_deduper: Arc, txn_shuffler: Arc, ) -> Self { Self { payload_manager, - txn_filter, + txn_filter_config, txn_deduper, txn_shuffler, } @@ -60,16 +62,26 @@ impl BlockPreparer { result } }?; - let txn_filter = self.txn_filter.clone(); + + let txn_filter_config = self.txn_filter_config.clone(); let txn_deduper = self.txn_deduper.clone(); let txn_shuffler = self.txn_shuffler.clone(); + let block_id = block.id(); + let block_author = block.author(); let block_epoch = block.epoch(); let block_timestamp_usecs = block.timestamp_usecs(); + // Transaction filtering, deduplication and shuffling are CPU intensive tasks, so we run them in a blocking task. 
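A minimal sketch of that spawn_blocking pattern, assuming a tokio runtime; expensive_filter is a hypothetical stand-in for the filter/dedup/shuffle chain:

// Hypothetical stand-in for the filter/dedup/shuffle chain.
fn expensive_filter(txns: Vec<u64>) -> Vec<u64> {
    txns.into_iter().filter(|t| t % 2 == 0).collect()
}

#[tokio::main]
async fn main() {
    let txns = vec![1, 2, 3, 4];
    // spawn_blocking moves the closure onto Tokio's blocking thread pool so
    // CPU-heavy work cannot stall the async executor; awaiting the returned
    // JoinHandle yields the closure's result.
    let filtered = tokio::task::spawn_blocking(move || expensive_filter(txns))
        .await
        .expect("blocking task panicked");
    assert_eq!(filtered, vec![2, 4]);
}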
let result = tokio::task::spawn_blocking(move || { - let filtered_txns = - txn_filter.filter(block_id, block_epoch, block_timestamp_usecs, txns); + let filtered_txns = filter_block_transactions( + txn_filter_config, + block_id, + block_author, + block_epoch, + block_timestamp_usecs, + txns, + ); let deduped_txns = txn_deduper.dedup(filtered_txns); let mut shuffled_txns = { let _timer = TXN_SHUFFLE_SECONDS.start_timer(); @@ -89,3 +101,29 @@ impl BlockPreparer { result.map(|result| (result, block_gas_limit)) } } + +/// Filters transactions in a block based on the filter configuration +fn filter_block_transactions( + txn_filter_config: Arc, + block_id: HashValue, + block_author: Option, + block_epoch: u64, + block_timestamp_usecs: u64, + txns: Vec, +) -> Vec { + // If the transaction filter is disabled, return early + if !txn_filter_config.is_enabled() { + return txns; + } + + // Otherwise, filter the transactions + txn_filter_config + .block_transaction_filter() + .filter_block_transactions( + block_id, + block_author, + block_epoch, + block_timestamp_usecs, + txns, + ) +} diff --git a/consensus/src/block_storage/block_store.rs b/consensus/src/block_storage/block_store.rs index a91e547d508c3..9fa734d429227 100644 --- a/consensus/src/block_storage/block_store.rs +++ b/consensus/src/block_storage/block_store.rs @@ -57,6 +57,12 @@ pub mod sync_manager; fn update_counters_for_ordered_blocks(ordered_blocks: &[Arc]) { for block in ordered_blocks { observe_block(block.block().timestamp_usecs(), BlockStage::ORDERED); + if block.block().is_opt_block() { + observe_block( + block.block().timestamp_usecs(), + BlockStage::ORDERED_OPT_BLOCK, + ); + } } } @@ -324,8 +330,6 @@ impl BlockStore { assert!(!blocks_to_commit.is_empty()); - let block_tree = self.inner.clone(); - let storage = self.storage.clone(); let finality_proof_clone = finality_proof.clone(); self.pending_blocks .lock() @@ -337,25 +341,8 @@ impl BlockStore { .insert_ordered_cert(finality_proof_clone.clone()); update_counters_for_ordered_blocks(&blocks_to_commit); - let window_size = self.window_size; - // This callback is invoked synchronously with and could be used for multiple batches of blocks. 
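This hunk drops the synchronous commit callback from finalize_order: commit-time bookkeeping is now attached when each block's pipeline is built, so callers pass only the blocks and the proof. A hedged before/after sketch with deliberately simplified signatures (the real trait operates on Arc<PipelinedBlock> slices and WrappedLedgerInfo, not u64s):

// Before: callers had to thread a commit callback through finalize_order.
type CommitCallback = Box<dyn FnOnce(&[u64]) + Send>;

trait ExecutionClientOld {
    fn finalize_order(&self, blocks: Vec<u64>, proof: u64, callback: CommitCallback);
}

// After: the callback parameter is gone; commit-time bookkeeping is registered
// with the pipeline builder when each block is constructed instead.
trait ExecutionClientNew {
    fn finalize_order(&self, blocks: Vec<u64>, proof: u64);
}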
self.execution_client - .finalize_order( - blocks_to_commit, - finality_proof.clone(), - Box::new( - move |committed_blocks: &[Arc], - commit_decision: LedgerInfoWithSignatures| { - block_tree.write().commit_callback_deprecated( - storage, - committed_blocks, - finality_proof, - commit_decision, - window_size, - ); - }, - ), - ) + .finalize_order(blocks_to_commit, finality_proof.clone()) .await .expect("Failed to persist commit"); @@ -551,6 +538,12 @@ impl BlockStore { pipelined_block.block().timestamp_usecs(), BlockStage::QC_ADDED, ); + if pipelined_block.block().is_opt_block() { + observe_block( + pipelined_block.block().timestamp_usecs(), + BlockStage::QC_ADDED_OPT_BLOCK, + ); + } pipelined_block.set_qc(Arc::new(qc.clone())); }, None => bail!("Insert {} without having the block in store first", qc), diff --git a/consensus/src/block_storage/block_tree.rs b/consensus/src/block_storage/block_tree.rs index 945c352ea41d3..e6d7410423fe3 100644 --- a/consensus/src/block_storage/block_tree.rs +++ b/consensus/src/block_storage/block_tree.rs @@ -4,7 +4,6 @@ use crate::{ counters, - counters::update_counters_for_committed_blocks, logging::{LogEvent, LogSchema}, persistent_liveness_storage::PersistentLivenessStorage, util::calculate_window_start_round, @@ -566,29 +565,6 @@ impl BlockTree { } /// Update the counters for committed blocks and prune them from the in-memory and persisted store. - pub fn commit_callback_deprecated( - &mut self, - storage: Arc, - blocks_to_commit: &[Arc], - finality_proof: WrappedLedgerInfo, - commit_decision: LedgerInfoWithSignatures, - window_size: Option, - ) { - update_counters_for_committed_blocks(blocks_to_commit); - - let last_block = blocks_to_commit.last().expect("pipeline is empty").clone(); - let (block_id, block_round) = (last_block.id(), last_block.round()); - - self.commit_callback( - storage, - block_id, - block_round, - finality_proof, - commit_decision, - window_size, - ); - } - pub fn commit_callback( &mut self, storage: Arc, diff --git a/consensus/src/block_storage/tracing.rs b/consensus/src/block_storage/tracing.rs index 7e8f819ac546a..396a12600dda4 100644 --- a/consensus/src/block_storage/tracing.rs +++ b/consensus/src/block_storage/tracing.rs @@ -10,16 +10,27 @@ pub struct BlockStage; impl BlockStage { pub const COMMITTED: &'static str = "committed"; + pub const COMMITTED_OPT_BLOCK: &'static str = "committed_opt_block"; pub const COMMIT_CERTIFIED: &'static str = "commit_certified"; pub const EPOCH_MANAGER_RECEIVED: &'static str = "epoch_manager_received"; + pub const EPOCH_MANAGER_RECEIVED_OPT_PROPOSAL: &'static str = + "epoch_manager_received_opt_proposal"; pub const EPOCH_MANAGER_VERIFIED: &'static str = "epoch_manager_verified"; + pub const EPOCH_MANAGER_VERIFIED_OPT_PROPOSAL: &'static str = + "epoch_manager_verified_opt_proposal"; pub const EXECUTED: &'static str = "executed"; - pub const EXECUTION_PIPELINE_INSERTED: &'static str = "execution_pipeline_inserted"; pub const NETWORK_RECEIVED: &'static str = "network_received"; + pub const NETWORK_RECEIVED_OPT_PROPOSAL: &'static str = "network_received_opt_proposal"; pub const OC_ADDED: &'static str = "ordered_cert_created"; + // Optimistic Proposal + pub const OPT_PROPOSED: &'static str = "opt_proposed"; pub const ORDERED: &'static str = "ordered"; + pub const ORDERED_OPT_BLOCK: &'static str = "ordered_opt_block"; pub const ORDER_VOTED: &'static str = "order_voted"; + pub const ORDER_VOTED_OPT_BLOCK: &'static str = "order_voted_opt_block"; + pub const PROCESS_OPT_PROPOSAL: &'static str = 
"process_opt_proposal"; pub const QC_ADDED: &'static str = "qc_added"; + pub const QC_ADDED_OPT_BLOCK: &'static str = "qc_added_opt_block"; pub const QC_AGGREGATED: &'static str = "qc_aggregated"; pub const RAND_ADD_DECISION: &'static str = "rand_add_decision"; pub const RAND_ADD_ENOUGH_SHARE_FAST: &'static str = "rand_add_enough_share_fast"; @@ -27,9 +38,13 @@ impl BlockStage { pub const RAND_ENTER: &'static str = "rand_enter"; pub const RAND_READY: &'static str = "rand_ready"; pub const ROUND_MANAGER_RECEIVED: &'static str = "round_manager_received"; + pub const ROUND_MANAGER_RECEIVED_OPT_PROPOSAL: &'static str = + "round_manager_received_opt_proposal"; pub const SIGNED: &'static str = "signed"; pub const SYNCED: &'static str = "synced"; + pub const SYNCED_OPT_BLOCK: &'static str = "synced_opt_block"; pub const VOTED: &'static str = "voted"; + pub const VOTED_OPT_BLOCK: &'static str = "voted_opt_block"; } /// Record the time during each stage of a block. diff --git a/consensus/src/consensus_observer/network/observer_message.rs b/consensus/src/consensus_observer/network/observer_message.rs index 19618b34bce63..967acc1f1c4de 100644 --- a/consensus/src/consensus_observer/network/observer_message.rs +++ b/consensus/src/consensus_observer/network/observer_message.rs @@ -1047,7 +1047,6 @@ mod test { ProofBatches, }, pipelined_block::OrderedBlockWindow, - proof_of_store::BatchId, quorum_cert::QuorumCert, }; use aptos_crypto::{ed25519::Ed25519PrivateKey, HashValue, PrivateKey, SigningKey, Uniform}; @@ -1055,6 +1054,7 @@ mod test { aggregate_signature::AggregateSignature, chain_id::ChainId, ledger_info::LedgerInfo, + quorum_store::BatchId, transaction::{RawTransaction, Script, TransactionPayload}, validator_signer::ValidatorSigner, validator_verifier::{ValidatorConsensusInfo, ValidatorVerifier}, diff --git a/consensus/src/consensus_observer/observer/consensus_observer.rs b/consensus/src/consensus_observer/observer/consensus_observer.rs index 0a90bf96d0315..7950f6949ac59 100644 --- a/consensus/src/consensus_observer/observer/consensus_observer.rs +++ b/consensus/src/consensus_observer/observer/consensus_observer.rs @@ -255,41 +255,34 @@ impl ConsensusObserver { )) ); - // If the new pipeline is enabled, build the pipeline for the ordered blocks - if self.pipeline_enabled() { - let block = ordered_block.first_block(); - let get_parent_pipeline_futs = self - .observer_block_data - .lock() - .get_parent_pipeline_futs(&block, self.pipeline_builder()); + let block = ordered_block.first_block(); + let get_parent_pipeline_futs = self + .observer_block_data + .lock() + .get_parent_pipeline_futs(&block, self.pipeline_builder()); - let mut parent_fut = if let Some(futs) = get_parent_pipeline_futs { - Some(futs) - } else { - warn!( - LogSchema::new(LogEntry::ConsensusObserver).message(&format!( - "Parent block's pipeline futures for ordered block is missing! Ignoring: {:?}", - ordered_block.proof_block_info() - )) - ); - return; - }; - - for block in ordered_block.blocks() { - let commit_callback = - block_data::create_commit_callback(self.observer_block_data.clone()); - self.pipeline_builder().build( - block, - parent_fut.take().expect("future should be set"), - commit_callback, - ); - parent_fut = Some(block.pipeline_futs().expect("pipeline futures just built")); - } - } + let mut parent_fut = if let Some(futs) = get_parent_pipeline_futs { + Some(futs) + } else { + warn!( + LogSchema::new(LogEntry::ConsensusObserver).message(&format!( + "Parent block's pipeline futures for ordered block is missing! 
Ignoring: {:?}", + ordered_block.proof_block_info() + )) + ); + return; + }; - // Create the commit callback (to be called after the execution pipeline) - let commit_callback = - block_data::create_commit_callback_deprecated(self.observer_block_data.clone()); + for block in ordered_block.blocks() { + let commit_callback = + block_data::create_commit_callback(self.observer_block_data.clone()); + self.pipeline_builder().build( + block, + parent_fut.take().expect("future should be set"), + commit_callback, + ); + parent_fut = Some(block.pipeline_futs().expect("pipeline futures just built")); + } // Send the ordered block to the execution pipeline if let Err(error) = self @@ -297,7 +290,6 @@ impl ConsensusObserver { .finalize_order( ordered_block.blocks().clone(), WrappedLedgerInfo::new(VoteData::dummy(), ordered_block.ordered_proof().clone()), - commit_callback, ) .await { @@ -1086,12 +1078,9 @@ impl ConsensusObserver { None, rand_msg_rx, 0, - self.pipeline_enabled(), ) .await; - if self.pipeline_enabled() { - self.pipeline_builder = Some(self.execution_client.pipeline_builder(signer)) - } + self.pipeline_builder = Some(self.execution_client.pipeline_builder(signer)); } /// Starts the consensus observer loop that processes incoming @@ -1138,11 +1127,6 @@ impl ConsensusObserver { .message("The consensus observer loop exited unexpectedly!")); } - /// Returns whether the pipeline is enabled - pub fn pipeline_enabled(&self) -> bool { - self.observer_epoch_state.pipeline_enabled() - } - /// Returns the builder, should only be called if pipeline is enabled pub fn pipeline_builder(&self) -> &PipelineBuilder { self.pipeline_builder diff --git a/consensus/src/consensus_observer/observer/epoch_state.rs b/consensus/src/consensus_observer/observer/epoch_state.rs index f42cb2d06b9e0..2c2e1232817a7 100644 --- a/consensus/src/consensus_observer/observer/epoch_state.rs +++ b/consensus/src/consensus_observer/observer/epoch_state.rs @@ -125,11 +125,6 @@ impl ObserverEpochState { randomness_config, ) } - - /// Returns whether the pipeline is enabled - pub fn pipeline_enabled(&self) -> bool { - self.node_config.consensus_observer.enable_pipeline - } } /// A simple helper function that extracts the on-chain configs from the reconfig events diff --git a/consensus/src/consensus_observer/observer/payload_store.rs b/consensus/src/consensus_observer/observer/payload_store.rs index b87b94eec975b..9ef2866769bd5 100644 --- a/consensus/src/consensus_observer/observer/payload_store.rs +++ b/consensus/src/consensus_observer/observer/payload_store.rs @@ -284,7 +284,7 @@ mod test { block_data::{BlockData, BlockType}, common::{Author, Payload, ProofWithData}, pipelined_block::OrderedBlockWindow, - proof_of_store::{BatchId, BatchInfo, ProofOfStore}, + proof_of_store::{BatchInfo, ProofOfStore}, quorum_cert::QuorumCert, }; use aptos_crypto::HashValue; @@ -292,6 +292,7 @@ mod test { aggregate_signature::AggregateSignature, block_info::{BlockInfo, Round}, ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + quorum_store::BatchId, transaction::Version, validator_signer::ValidatorSigner, validator_verifier::{ValidatorConsensusInfo, ValidatorVerifier}, diff --git a/consensus/src/consensus_provider.rs b/consensus/src/consensus_provider.rs index 1343f58b426e1..4699baf8039ad 100644 --- a/consensus/src/consensus_provider.rs +++ b/consensus/src/consensus_provider.rs @@ -20,7 +20,6 @@ use crate::{ quorum_store::quorum_store_db::QuorumStoreDB, rand::rand_gen::storage::db::RandDb, state_computer::ExecutionProxy, - 
transaction_filter::TransactionFilter, txn_notifier::MempoolNotifier, util::time_service::ClockTimeService, }; @@ -68,8 +67,7 @@ pub fn start_consensus( Arc::new(BlockExecutor::::new(aptos_db)), txn_notifier, state_sync_notifier, - runtime.handle(), - TransactionFilter::new(node_config.execution.transaction_filter.clone()), + node_config.transaction_filters.execution_filter.clone(), node_config.consensus.enable_pre_commit, ); @@ -161,8 +159,7 @@ pub fn start_consensus_observer( Arc::new(BlockExecutor::::new(aptos_db.clone())), txn_notifier, state_sync_notifier, - consensus_observer_runtime.handle(), - TransactionFilter::new(node_config.execution.transaction_filter.clone()), + node_config.transaction_filters.execution_filter.clone(), node_config.consensus.enable_pre_commit, ); diff --git a/consensus/src/consensusdb/mod.rs b/consensus/src/consensusdb/mod.rs index d9b80a9c948a0..b40b0828d00ad 100644 --- a/consensus/src/consensusdb/mod.rs +++ b/consensus/src/consensusdb/mod.rs @@ -155,7 +155,7 @@ impl ConsensusDB { /// Write the whole schema batch including all data necessary to mutate the ledger /// state of some transaction by leveraging rocksdb atomicity support. fn commit(&self, batch: SchemaBatch) -> Result<(), DbError> { - self.db.write_schemas(batch)?; + self.db.write_schemas_relaxed(batch)?; Ok(()) } diff --git a/consensus/src/counters.rs b/consensus/src/counters.rs index 944a1fc758d3a..c03325ac146b1 100644 --- a/consensus/src/counters.rs +++ b/consensus/src/counters.rs @@ -11,7 +11,7 @@ use crate::{ use aptos_consensus_types::{block::Block, pipelined_block::PipelinedBlock}; use aptos_crypto::HashValue; use aptos_executor_types::{state_compute_result::StateComputeResult, ExecutorError}; -use aptos_logger::prelude::{error, warn}; +use aptos_logger::prelude::warn; use aptos_metrics_core::{ exponential_buckets, op_counters::DurationHistogram, register_avg_counter, register_counter, register_gauge, register_gauge_vec, register_histogram, register_histogram_vec, @@ -68,6 +68,15 @@ pub static LAST_COMMITTED_ROUND: Lazy = Lazy::new(|| { .unwrap() }); +/// The counter corresponds to the round of the highest committed opt block. +pub static LAST_COMMITTED_OPT_BLOCK_ROUND: Lazy = Lazy::new(|| { + register_int_gauge!( + "aptos_consensus_last_committed_opt_block_round", + "The counter corresponds to the round of the highest committed opt block." + ) + .unwrap() +}); + /// The counter corresponds to the version of the last committed ledger info. pub static LAST_COMMITTED_VERSION: Lazy = Lazy::new(|| { register_int_gauge!( @@ -95,6 +104,15 @@ pub static COMMITTED_BLOCKS_COUNT: Lazy = Lazy::new(|| { .unwrap() }); +/// Count of the committed opt blocks since last restart. +pub static COMMITTED_OPT_BLOCKS_COUNT: Lazy = Lazy::new(|| { + register_int_counter!( + "aptos_consensus_committed_opt_blocks_count", + "Count of the committed opt blocks since last restart." + ) + .unwrap() +}); + /// Count of the committed transactions since last restart. 
pub static COMMITTED_TXNS_COUNT: Lazy = Lazy::new(|| { register_int_counter_vec!( @@ -1151,7 +1169,6 @@ pub fn log_executor_error_occurred( e: ExecutorError, counter: &Lazy, block_id: HashValue, - new_pipeline_enabled: bool, ) { match e { ExecutorError::CouldNotGetData => { @@ -1170,17 +1187,10 @@ pub fn log_executor_error_occurred( }, e => { counter.with_label_values(&["UnexpectedError"]).inc(); - if new_pipeline_enabled { - warn!( - block_id = block_id, - "Execution error {:?} for {}", e, block_id - ); - } else { - error!( - block_id = block_id, - "Execution error {:?} for {}", e, block_id - ); - } + warn!( + block_id = block_id, + "Execution error {:?} for {}", e, block_id + ); }, } } @@ -1280,6 +1290,11 @@ pub fn update_counters_for_block(block: &Block) { NUM_BYTES_PER_BLOCK.observe(block.payload().map_or(0, |payload| payload.size()) as f64); COMMITTED_BLOCKS_COUNT.inc(); LAST_COMMITTED_ROUND.set(block.round() as i64); + if block.is_opt_block() { + observe_block(block.timestamp_usecs(), BlockStage::COMMITTED_OPT_BLOCK); + COMMITTED_OPT_BLOCKS_COUNT.inc(); + LAST_COMMITTED_OPT_BLOCK_ROUND.set(block.round() as i64); + } let failed_rounds = block .block_data() .failed_authors() @@ -1288,7 +1303,7 @@ pub fn update_counters_for_block(block: &Block) { if failed_rounds > 0 { COMMITTED_FAILED_ROUNDS_COUNT.inc_by(failed_rounds as u64); } - quorum_store::counters::NUM_BATCH_PER_BLOCK.observe(block.payload_size() as f64); + quorum_store::counters::update_batch_stats(block); } pub fn update_counters_for_compute_result(compute_result: &StateComputeResult) { diff --git a/consensus/src/dag/adapter.rs b/consensus/src/dag/adapter.rs index d04e02efda9af..1f72197dbdcd1 100644 --- a/consensus/src/dag/adapter.rs +++ b/consensus/src/dag/adapter.rs @@ -9,7 +9,6 @@ use crate::{ block_storage::tracing::{observe_block, BlockStage}, consensusdb::{CertifiedNodeSchema, ConsensusDB, DagVoteSchema, NodeSchema}, counters, - counters::update_counters_for_committed_blocks, dag::{ storage::{CommitEvent, DAGStorage}, CertifiedNode, Node, NodeId, Vote, @@ -199,14 +198,11 @@ impl OrderedNotifier for OrderedNotifierAdapter { StateComputeResult::new_dummy(), )); let block_info = block.block_info(); - let ledger_info_provider = self.ledger_info_provider.clone(); - let dag = self.dag.clone(); *self.parent_block_info.write() = block_info.clone(); self.block_ordered_ts .write() .insert(block_info.round(), Instant::now()); - let block_created_ts = self.block_ordered_ts.clone(); observe_block(block.block().timestamp_usecs(), BlockStage::ORDERED); @@ -216,20 +212,22 @@ impl OrderedNotifier for OrderedNotifierAdapter { LedgerInfo::new(block_info, anchor.digest()), AggregateSignature::empty(), ), - callback: Box::new( - move |committed_blocks: &[Arc], - commit_decision: LedgerInfoWithSignatures| { - block_created_ts - .write() - .retain(|&round, _| round > commit_decision.commit_info().round()); - dag.commit_callback(commit_decision.commit_info().round()); - ledger_info_provider - .write() - .notify_commit_proof(commit_decision); - update_counters_for_committed_blocks(committed_blocks); - }, - ), + // TODO: this needs to be properly integrated with pipeline_builder + // callback: Box::new( + // move |committed_blocks: &[Arc], + // commit_decision: LedgerInfoWithSignatures| { + // block_created_ts + // .write() + // .retain(|&round, _| round > commit_decision.commit_info().round()); + // dag.commit_callback(commit_decision.commit_info().round()); + // ledger_info_provider + // .write() + // .notify_commit_proof(commit_decision); + // 
update_counters_for_committed_blocks(committed_blocks); + // }, + // ), }; + // if self .executor_channel .unbounded_send(blocks_to_send) diff --git a/consensus/src/dag/dag_driver.rs b/consensus/src/dag/dag_driver.rs index fa0caee1faa8a..aaa8c9ddb2d04 100644 --- a/consensus/src/dag/dag_driver.rs +++ b/consensus/src/dag/dag_driver.rs @@ -275,7 +275,6 @@ impl DagDriver { block_timestamp: self.time_service.now_unix_time(), }, sys_payload_filter, - Box::pin(async {}), ) .await { diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index c5673730609d9..6e6777627ca85 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -58,7 +58,9 @@ use crate::{ use anyhow::{anyhow, bail, ensure, Context}; use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; -use aptos_config::config::{ConsensusConfig, DagConsensusConfig, ExecutionConfig, NodeConfig}; +use aptos_config::config::{ + BatchTransactionFilterConfig, ConsensusConfig, DagConsensusConfig, NodeConfig, +}; use aptos_consensus_types::{ block_retrieval::BlockRetrievalRequest, common::{Author, Round}, @@ -131,8 +133,6 @@ pub enum LivenessStorageData { pub struct EpochManager { author: Author, config: ConsensusConfig, - #[allow(unused)] - execution_config: ExecutionConfig, randomness_override_seq_num: u64, time_service: Arc, self_sender: aptos_channels::UnboundedSender>, @@ -175,6 +175,8 @@ pub struct EpochManager { consensus_publisher: Option>, pending_blocks: Arc>, key_storage: PersistentSafetyStorage, + + quorum_store_txn_filter_config: BatchTransactionFilterConfig, } impl EpochManager

{ @@ -198,15 +200,16 @@ impl EpochManager

{ ) -> Self { let author = node_config.validator_network.as_ref().unwrap().peer_id(); let config = node_config.consensus.clone(); - let execution_config = node_config.execution.clone(); let dag_config = node_config.dag_consensus.clone(); let sr_config = &node_config.consensus.safety_rules; let safety_rules_manager = SafetyRulesManager::new(sr_config); let key_storage = safety_rules_manager::storage(sr_config); + let quorum_store_txn_filter_config = + node_config.transaction_filters.quorum_store_filter.clone(); + Self { author, config, - execution_config, randomness_override_seq_num: node_config.randomness_override_seq_num, time_service, self_sender, @@ -246,6 +249,7 @@ impl EpochManager

{ consensus_publisher, pending_blocks: Arc::new(Mutex::new(PendingBlocks::new())), key_storage, + quorum_store_txn_filter_config, } } @@ -727,6 +731,7 @@ impl EpochManager

{ self.author, epoch_state.verifier.len() as u64, quorum_store_config, + self.quorum_store_txn_filter_config.clone(), consensus_to_quorum_store_rx, self.quorum_store_to_mempool_sender.clone(), self.config.mempool_txn_pull_timeout_ms, @@ -857,17 +862,12 @@ impl EpochManager

{ fast_rand_config.clone(), rand_msg_rx, recovery_data.commit_root_block().round(), - self.config.enable_pipeline, ) .await; let consensus_sk = consensus_key; - let maybe_pipeline_builder = if self.config.enable_pipeline { - let signer = Arc::new(ValidatorSigner::new(self.author, consensus_sk)); - Some(self.execution_client.pipeline_builder(signer)) - } else { - None - }; + let signer = Arc::new(ValidatorSigner::new(self.author, consensus_sk)); + let pipeline_builder = self.execution_client.pipeline_builder(signer); info!(epoch = epoch, "Create BlockStore"); // Read the last vote, before "moving" `recovery_data` let last_vote = recovery_data.last_vote(); @@ -882,7 +882,7 @@ impl EpochManager

{ onchain_consensus_config.order_vote_enabled(), onchain_consensus_config.window_size(), self.pending_blocks.clone(), - maybe_pipeline_builder, + Some(pipeline_builder), )); let failures_tracker = Arc::new(Mutex::new(ExponentialWindowFailureTracker::new( @@ -896,6 +896,11 @@ impl EpochManager

{ )); info!(epoch = epoch, "Create ProposalGenerator"); + let max_sending_block_txns_after_filtering = if self.config.enable_optimistic_proposal_tx { + self.config.max_sending_opt_block_txns_after_filtering + } else { + self.config.max_sending_block_txns_after_filtering + }; // txn manager is required both by proposal generator (to pull the proposers) // and by event processor (to update their status). let proposal_generator = ProposalGenerator::new( @@ -908,7 +913,7 @@ impl EpochManager

{ self.config.max_sending_block_txns, self.config.max_sending_block_bytes, ), - self.config.max_sending_block_txns_after_filtering, + max_sending_block_txns_after_filtering, PayloadTxnsSize::new( self.config.max_sending_inline_txns, self.config.max_sending_inline_bytes, @@ -940,6 +945,10 @@ impl EpochManager

{ 10, Some(&counters::ROUND_MANAGER_CHANNEL_MSGS), ); + + let (opt_proposal_loopback_tx, opt_proposal_loopback_rx) = + aptos_channels::new_unbounded(&counters::OP_COUNTERS.gauge("opt_proposal_queue")); + self.round_manager_tx = Some(round_manager_tx.clone()); self.buffered_proposal_tx = Some(buffered_proposal_tx.clone()); let max_blocks_allowed = self @@ -962,13 +971,19 @@ impl EpochManager

{ onchain_jwk_consensus_config, fast_rand_config, failures_tracker, + opt_proposal_loopback_tx, ); round_manager.init(last_vote).await; let (close_tx, close_rx) = oneshot::channel(); self.round_manager_close_tx = Some(close_tx); - tokio::spawn(round_manager.start(round_manager_rx, buffered_proposal_rx, close_rx)); + tokio::spawn(round_manager.start( + round_manager_rx, + buffered_proposal_rx, + opt_proposal_loopback_rx, + close_rx, + )); self.spawn_block_retrieval_task(epoch, block_store, max_blocks_allowed); } @@ -1421,7 +1436,6 @@ impl EpochManager

{ fast_rand_config, rand_msg_rx, highest_committed_round, - self.config.enable_pipeline, ) .await; @@ -1496,6 +1510,24 @@ impl EpochManager

{ BlockStage::EPOCH_MANAGER_RECEIVED, ); } + if let ConsensusMsg::OptProposalMsg(proposal) = &consensus_msg { + if !self.config.enable_optimistic_proposal_rx { + bail!( + "Unexpected OptProposalMsg. Feature is disabled. Author: {}, Epoch: {}, Round: {}", + proposal.block_data().author(), + proposal.epoch(), + proposal.round() + ) + } + observe_block( + proposal.timestamp_usecs(), + BlockStage::EPOCH_MANAGER_RECEIVED, + ); + observe_block( + proposal.timestamp_usecs(), + BlockStage::EPOCH_MANAGER_RECEIVED_OPT_PROPOSAL, + ); + } // we can't verify signatures from a different epoch let maybe_unverified_event = self.check_epoch(peer_id, consensus_msg).await?; @@ -1569,6 +1601,7 @@ impl EpochManager

{ ) -> anyhow::Result> { match msg { ConsensusMsg::ProposalMsg(_) + | ConsensusMsg::OptProposalMsg(_) | ConsensusMsg::SyncInfo(_) | ConsensusMsg::VoteMsg(_) | ConsensusMsg::RoundTimeoutMsg(_) @@ -1681,6 +1714,16 @@ impl EpochManager

{ BlockStage::EPOCH_MANAGER_VERIFIED, ); } + if let VerifiedEvent::OptProposalMsg(proposal) = &event { + observe_block( + proposal.timestamp_usecs(), + BlockStage::EPOCH_MANAGER_VERIFIED, + ); + observe_block( + proposal.timestamp_usecs(), + BlockStage::EPOCH_MANAGER_VERIFIED_OPT_PROPOSAL, + ); + } if let Err(e) = match event { quorum_store_event @ (VerifiedEvent::SignedBatchInfo(_) | VerifiedEvent::ProofOfStoreMsg(_) @@ -1703,6 +1746,18 @@ impl EpochManager

{ Self::forward_event_to(buffered_proposal_tx, peer_id, proposal_event) .context("proposal precheck sender") }, + opt_proposal_event @ VerifiedEvent::OptProposalMsg(_) => { + if let VerifiedEvent::OptProposalMsg(p) = &opt_proposal_event { + payload_manager.prefetch_payload_data( + p.block_data().payload(), + p.proposer(), + p.timestamp_usecs(), + ); + } + + Self::forward_event_to(buffered_proposal_tx, peer_id, opt_proposal_event) + .context("proposal precheck sender") + }, round_manager_event => Self::forward_event_to( round_manager_tx, (peer_id, discriminant(&round_manager_event)), diff --git a/consensus/src/execution_pipeline.rs b/consensus/src/execution_pipeline.rs deleted file mode 100644 index f37c37fd45eed..0000000000000 --- a/consensus/src/execution_pipeline.rs +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -#![forbid(unsafe_code)] - -use crate::{ - block_preparer::BlockPreparer, - counters::{self, log_executor_error_occurred}, - monitor, - pipeline::pipeline_phase::CountedRequest, - state_computer::StateComputeResultFut, - transaction_shuffler::TransactionShuffler, -}; -use aptos_consensus_types::{ - block::Block, pipeline_execution_result::PipelineExecutionResult, quorum_cert::QuorumCert, -}; -use aptos_crypto::HashValue; -use aptos_executor_types::{ - state_compute_result::StateComputeResult, BlockExecutorTrait, ExecutorError, ExecutorResult, -}; -use aptos_experimental_runtimes::thread_manager::optimal_min_len; -use aptos_logger::{debug, warn}; -use aptos_types::{ - block_executor::{config::BlockExecutorConfigFromOnchain, partitioner::ExecutableBlock}, - block_metadata_ext::BlockMetadataExt, - transaction::{ - signature_verified_transaction::SignatureVerifiedTransaction, SignedTransaction, - }, -}; -use fail::fail_point; -use futures::{future::BoxFuture, FutureExt}; -use once_cell::sync::Lazy; -use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::sync::{mpsc, oneshot}; - -/// Smallest number of transactions Rayon should put into a single worker task. 
-/// Same as in execution/executor-benchmark/src/block_preparation.rs -pub const SIG_VERIFY_RAYON_MIN_THRESHOLD: usize = 32; - -pub type PreCommitHook = - Box BoxFuture<'static, ()> + Send>; - -#[allow(clippy::unwrap_used)] -pub static SIG_VERIFY_POOL: Lazy> = Lazy::new(|| { - Arc::new( - rayon::ThreadPoolBuilder::new() - .num_threads(8) // More than 8 threads doesn't seem to help much - .thread_name(|index| format!("signature-checker-{}", index)) - .build() - .unwrap(), - ) -}); - -pub struct ExecutionPipeline { - prepare_block_tx: mpsc::UnboundedSender, -} - -impl ExecutionPipeline { - pub fn spawn( - executor: Arc, - runtime: &tokio::runtime::Handle, - enable_pre_commit: bool, - ) -> Self { - let (prepare_block_tx, prepare_block_rx) = mpsc::unbounded_channel(); - let (execute_block_tx, execute_block_rx) = mpsc::unbounded_channel(); - let (ledger_apply_tx, ledger_apply_rx) = mpsc::unbounded_channel(); - let (pre_commit_tx, pre_commit_rx) = mpsc::unbounded_channel(); - - runtime.spawn(Self::prepare_block_stage( - prepare_block_rx, - execute_block_tx, - )); - runtime.spawn(Self::execute_stage( - execute_block_rx, - ledger_apply_tx, - executor.clone(), - )); - runtime.spawn(Self::ledger_apply_stage( - ledger_apply_rx, - pre_commit_tx, - executor.clone(), - enable_pre_commit, - )); - runtime.spawn(Self::pre_commit_stage(pre_commit_rx, executor)); - - Self { prepare_block_tx } - } - - pub async fn queue( - &self, - block: Block, - metadata: BlockMetadataExt, - parent_block_id: HashValue, - block_qc: Option>, - txn_generator: BlockPreparer, - block_executor_onchain_config: BlockExecutorConfigFromOnchain, - pre_commit_hook: PreCommitHook, - lifetime_guard: CountedRequest<()>, - shuffler: Arc, - ) -> StateComputeResultFut { - let (result_tx, result_rx) = oneshot::channel(); - let block_id = block.id(); - self.prepare_block_tx - .send(PrepareBlockCommand { - block, - metadata, - block_executor_onchain_config, - parent_block_id, - block_preparer: txn_generator, - result_tx, - command_creation_time: Instant::now(), - pre_commit_hook, - lifetime_guard, - block_qc, - shuffler, - }) - .expect("Failed to send block to execution pipeline."); - - Box::pin(async move { - result_rx - .await - .map_err(|err| ExecutorError::InternalError { - error: format!( - "Failed to receive execution result for block {}: {:?}.", - block_id, err - ), - })? 
- }) - } - - async fn prepare_block( - execute_block_tx: mpsc::UnboundedSender, - command: PrepareBlockCommand, - ) { - let PrepareBlockCommand { - block, - metadata, - block_executor_onchain_config, - parent_block_id, - block_preparer, - pre_commit_hook, - result_tx, - command_creation_time, - lifetime_guard, - block_qc, - shuffler, - } = command; - counters::PREPARE_BLOCK_WAIT_TIME.observe_duration(command_creation_time.elapsed()); - debug!("prepare_block received block {}.", block.id()); - let prepare_block_result = block_preparer - .prepare_block(&block, async { block_qc }.shared()) - .await; - if let Err(e) = prepare_block_result { - result_tx - .send(Err(e)) - .unwrap_or_else(log_failed_to_send_result("prepare_block", block.id())); - return; - } - let validator_txns = block.validator_txns().cloned().unwrap_or_default(); - let (input_txns, block_gas_limit) = - prepare_block_result.expect("prepare_block must return Ok"); - let block_executor_onchain_config = - block_executor_onchain_config.with_block_gas_limit_override(block_gas_limit); - tokio::task::spawn_blocking(move || { - let txns_to_execute = - Block::combine_to_input_transactions(validator_txns, input_txns.clone(), metadata); - let sig_verification_start = Instant::now(); - let sig_verified_txns: Vec = - SIG_VERIFY_POOL.install(|| { - let num_txns = txns_to_execute.len(); - txns_to_execute - .into_par_iter() - .with_min_len(optimal_min_len(num_txns, SIG_VERIFY_RAYON_MIN_THRESHOLD)) - .map(|t| t.into()) - .collect::>() - }); - counters::PREPARE_BLOCK_SIG_VERIFICATION_TIME - .observe_duration(sig_verification_start.elapsed()); - execute_block_tx - .send(ExecuteBlockCommand { - input_txns, - block: (block.id(), sig_verified_txns).into(), - parent_block_id, - block_executor_onchain_config, - pre_commit_hook, - result_tx, - command_creation_time: Instant::now(), - lifetime_guard, - shuffler, - }) - .expect("Failed to send block to execution pipeline."); - }) - .await - .expect("Failed to spawn_blocking."); - } - - async fn prepare_block_stage( - mut prepare_block_rx: mpsc::UnboundedReceiver, - execute_block_tx: mpsc::UnboundedSender, - ) { - while let Some(command) = prepare_block_rx.recv().await { - monitor!( - "prepare_block", - Self::prepare_block(execute_block_tx.clone(), command).await - ); - } - debug!("prepare_block_stage quitting."); - } - - async fn execute_stage( - mut block_rx: mpsc::UnboundedReceiver, - ledger_apply_tx: mpsc::UnboundedSender, - executor: Arc, - ) { - while let Some(ExecuteBlockCommand { - input_txns, - block, - parent_block_id, - block_executor_onchain_config, - pre_commit_hook, - result_tx, - command_creation_time, - lifetime_guard, - shuffler: _, - }) = block_rx.recv().await - { - counters::EXECUTE_BLOCK_WAIT_TIME.observe_duration(command_creation_time.elapsed()); - let block_id = block.block_id; - debug!("execute_stage received block {}.", block_id); - let executor = executor.clone(); - let execution_time = monitor!( - "execute_block", - tokio::task::spawn_blocking(move || { - fail_point!("consensus::compute", |_| { - Err(ExecutorError::InternalError { - error: "Injected error in compute".into(), - }) - }); - let start = Instant::now(); - executor - .execute_and_update_state( - block, - parent_block_id, - block_executor_onchain_config, - ) - .map(|_| start.elapsed()) - }) - .await - ) - .expect("Failed to spawn_blocking."); - - ledger_apply_tx - .send(LedgerApplyCommand { - input_txns, - block_id, - parent_block_id, - execution_time, - pre_commit_hook, - result_tx, - command_creation_time: 
Instant::now(), - lifetime_guard, - }) - .expect("Failed to send block to ledger_apply stage."); - } - debug!("execute_stage quitting."); - } - - async fn ledger_apply_stage( - mut block_rx: mpsc::UnboundedReceiver, - pre_commit_tx: mpsc::UnboundedSender, - executor: Arc, - enable_pre_commit: bool, - ) { - while let Some(LedgerApplyCommand { - input_txns, - block_id, - parent_block_id, - execution_time, - pre_commit_hook, - result_tx, - command_creation_time, - lifetime_guard, - }) = block_rx.recv().await - { - counters::APPLY_LEDGER_WAIT_TIME.observe_duration(command_creation_time.elapsed()); - debug!("ledger_apply stage received block {}.", block_id); - let res = async { - let execution_duration = execution_time?; - let executor = executor.clone(); - monitor!( - "ledger_apply", - tokio::task::spawn_blocking(move || { - executor.ledger_update(block_id, parent_block_id) - }) - .await - ) - .expect("Failed to spawn_blocking().") - .map(|output| (output, execution_duration)) - } - .await; - let pipeline_res = res.map(|(output, execution_duration)| { - let pre_commit_hook_fut = pre_commit_hook(&output); - let pre_commit_fut: BoxFuture<'static, ExecutorResult<()>> = - if output.epoch_state().is_some() || !enable_pre_commit { - // hack: it causes issue if pre-commit is finished at an epoch ending, and - // we switch to state sync, so we do the pre-commit only after we actually - // decide to commit (in the commit phase) - let executor = executor.clone(); - Box::pin(async move { - tokio::task::spawn_blocking(move || { - executor.pre_commit_block(block_id) - }) - .await - .expect("failed to spawn_blocking")?; - pre_commit_hook_fut.await; - Ok(()) - }) - } else { - // kick off pre-commit right away - let (pre_commit_result_tx, pre_commit_result_rx) = oneshot::channel(); - // schedule pre-commit - pre_commit_tx - .send(PreCommitCommand { - block_id, - pre_commit_hook_fut, - result_tx: pre_commit_result_tx, - lifetime_guard, - }) - .expect("Failed to send block to pre_commit stage."); - Box::pin(async { - pre_commit_result_rx - .await - .map_err(ExecutorError::internal_err)? - }) - }; - - PipelineExecutionResult::new(input_txns, output, execution_duration, pre_commit_fut) - }); - result_tx - .send(pipeline_res) - .unwrap_or_else(log_failed_to_send_result("ledger_apply", block_id)); - } - debug!("ledger_apply stage quitting."); - } - - async fn pre_commit_stage( - mut block_rx: mpsc::UnboundedReceiver, - executor: Arc, - ) { - while let Some(PreCommitCommand { - block_id, - pre_commit_hook_fut, - result_tx, - lifetime_guard, - }) = block_rx.recv().await - { - debug!("pre_commit stage received block {}.", block_id); - let res = async { - let executor = executor.clone(); - monitor!( - "pre_commit", - tokio::task::spawn_blocking(move || { executor.pre_commit_block(block_id) }) - ) - .await - .expect("Failed to spawn_blocking().")?; - pre_commit_hook_fut.await; - Ok(()) - } - .await; - result_tx - .send(res) - .unwrap_or_else(log_failed_to_send_result("pre_commit", block_id)); - drop(lifetime_guard); - } - debug!("pre_commit stage quitting."); - } -} - -struct PrepareBlockCommand { - block: Block, - metadata: BlockMetadataExt, - block_executor_onchain_config: BlockExecutorConfigFromOnchain, - // The parent block id. 
- parent_block_id: HashValue, - block_preparer: BlockPreparer, - pre_commit_hook: PreCommitHook, - result_tx: oneshot::Sender>, - command_creation_time: Instant, - lifetime_guard: CountedRequest<()>, - block_qc: Option>, - shuffler: Arc, -} - -struct ExecuteBlockCommand { - input_txns: Vec, - block: ExecutableBlock, - parent_block_id: HashValue, - block_executor_onchain_config: BlockExecutorConfigFromOnchain, - pre_commit_hook: PreCommitHook, - result_tx: oneshot::Sender>, - command_creation_time: Instant, - lifetime_guard: CountedRequest<()>, - #[allow(dead_code)] - shuffler: Arc, -} - -struct LedgerApplyCommand { - input_txns: Vec, - block_id: HashValue, - parent_block_id: HashValue, - execution_time: ExecutorResult, - pre_commit_hook: PreCommitHook, - result_tx: oneshot::Sender>, - command_creation_time: Instant, - lifetime_guard: CountedRequest<()>, -} - -struct PreCommitCommand { - block_id: HashValue, - pre_commit_hook_fut: BoxFuture<'static, ()>, - result_tx: oneshot::Sender>, - lifetime_guard: CountedRequest<()>, -} - -fn log_failed_to_send_result( - from_stage: &'static str, - block_id: HashValue, -) -> impl FnOnce(ExecutorResult) { - move |value| { - warn!( - from_stage = from_stage, - block_id = block_id, - is_err = value.is_err(), - "Failed to send back execution/pre_commit result. (rx dropped)", - ); - if let Err(e) = value { - // receive channel discarding error, log for debugging. - log_executor_error_occurred( - e, - &counters::PIPELINE_DISCARDED_EXECUTOR_ERROR_COUNT, - block_id, - false, - ); - } - } -} diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index a38b66912e191..2c831d4bc3ebb 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -40,8 +40,6 @@ mod rand; mod recovery_manager; mod round_manager; mod state_computer; -#[cfg(test)] -mod state_computer_tests; mod state_replication; #[cfg(any(test, feature = "fuzzing"))] mod test_utils; @@ -56,12 +54,10 @@ pub mod consensus_observer; pub mod consensus_provider; /// Required by the telemetry service pub mod counters; -mod execution_pipeline; /// AptosNet interface. 
pub mod network_interface; mod payload_manager; mod transaction_deduper; -mod transaction_filter; mod transaction_shuffler; #[cfg(feature = "fuzzing")] pub use transaction_shuffler::transaction_shuffler_fuzzing; diff --git a/consensus/src/liveness/proposal_generator.rs b/consensus/src/liveness/proposal_generator.rs index 355f0a18ce3c9..ac8cf21bdc1a0 100644 --- a/consensus/src/liveness/proposal_generator.rs +++ b/consensus/src/liveness/proposal_generator.rs @@ -26,7 +26,8 @@ use aptos_consensus_types::{ block::Block, block_data::BlockData, common::{Author, Payload, PayloadFilter, Round}, - payload_pull_params::PayloadPullParameters, + opt_block_data::OptBlockData, + payload_pull_params::{OptQSPayloadPullParams, PayloadPullParameters}, pipelined_block::ExecutionSummary, quorum_cert::QuorumCert, utils::PayloadTxnsSize, @@ -34,9 +35,10 @@ use aptos_consensus_types::{ use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_infallible::Mutex; use aptos_logger::{error, sample, sample::SampleRate, warn}; -use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction}; +use aptos_types::{ + block_info::BlockInfo, on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction, +}; use aptos_validator_transaction_pool as vtxn_pool; -use futures::future::BoxFuture; use itertools::Itertools; use std::{ collections::{BTreeMap, HashSet}, @@ -471,6 +473,10 @@ impl ProposalGenerator { Ok(Block::new_nil(round, quorum_cert, failed_authors)) } + pub fn can_propose_in_round(&self, round: Round) -> bool { + *self.last_round_generated.lock() < round + } + /// The function generates a new proposal block: the returned future is fulfilled when the /// payload is delivered by the PayloadClient implementation. At most one proposal can be /// generated per round (no proposal equivocation allowed). @@ -485,16 +491,7 @@ impl ProposalGenerator { &self, round: Round, proposer_election: Arc, - wait_callback: BoxFuture<'static, ()>, ) -> anyhow::Result { - { - let mut last_round_generated = self.last_round_generated.lock(); - if *last_round_generated < round { - *last_round_generated = round; - } else { - bail!("Already proposed in the round {}", round); - } - } let maybe_optqs_payload_pull_params = self.opt_qs_payload_param_provider.get_params(); let hqc = self.ensure_highest_quorum_cert(round)?; @@ -511,120 +508,13 @@ impl ProposalGenerator { hqc.certified_block().timestamp_usecs(), ) } else { - // One needs to hold the blocks with the references to the payloads while get_block is - // being executed: pending blocks vector keeps all the pending ancestors of the extended branch. - let mut pending_blocks = self - .block_store - .path_from_commit_root(hqc.certified_block().id()) - .ok_or_else(|| format_err!("HQC {} already pruned", hqc.certified_block().id()))?; - // Avoid txn manager long poll if the root block has txns, so that the leader can - // deliver the commit proof to others without delay. - pending_blocks.push(self.block_store.commit_root()); - - // Exclude all the pending transactions: these are all the ancestors of - // parent (including) up to the root (including). - let exclude_payload: Vec<_> = pending_blocks - .iter() - .flat_map(|block| block.payload()) - .collect(); - let payload_filter = PayloadFilter::from(&exclude_payload); - - let pending_ordering = self - .block_store - .path_from_ordered_root(hqc.certified_block().id()) - .ok_or_else(|| format_err!("HQC {} already pruned", hqc.certified_block().id()))? 
- .iter() - .any(|block| !block.payload().map_or(true, |txns| txns.is_empty())); - - // All proposed blocks in a branch are guaranteed to have increasing timestamps - // since their predecessor block will not be added to the BlockStore until - // the local time exceeds it. - let timestamp = self.time_service.get_current_timestamp(); - - let voting_power_ratio = proposer_election.get_voting_power_participation_ratio(round); - - let ( - max_block_txns, - max_block_txns_after_filtering, - max_txns_from_block_to_execute, - block_gas_limit_override, - proposal_delay, - ) = self - .calculate_max_block_sizes(voting_power_ratio, timestamp, round) - .await; - - PROPOSER_MAX_BLOCK_TXNS_AFTER_FILTERING.observe(max_block_txns_after_filtering as f64); - if let Some(max_to_execute) = max_txns_from_block_to_execute { - PROPOSER_MAX_BLOCK_TXNS_TO_EXECUTE.observe(max_to_execute as f64); - } - - PROPOSER_DELAY_PROPOSAL.observe(proposal_delay.as_secs_f64()); - if !proposal_delay.is_zero() { - tokio::time::sleep(proposal_delay).await; - } - - let max_pending_block_size = pending_blocks - .iter() - .map(|block| { - block.payload().map_or(PayloadTxnsSize::zero(), |p| { - PayloadTxnsSize::new(p.len() as u64, p.size() as u64) - }) - }) - .reduce(PayloadTxnsSize::maximum) - .unwrap_or_default(); - // Use non-backpressure reduced values for computing fill_fraction - let max_fill_fraction = - (max_pending_block_size.count() as f32 / self.max_block_txns.count() as f32).max( - max_pending_block_size.size_in_bytes() as f32 - / self.max_block_txns.size_in_bytes() as f32, - ); - PROPOSER_PENDING_BLOCKS_COUNT.set(pending_blocks.len() as i64); - PROPOSER_PENDING_BLOCKS_FILL_FRACTION.set(max_fill_fraction as f64); - - let pending_validator_txn_hashes: HashSet = pending_blocks - .iter() - .filter_map(|block| block.validator_txns()) - .flatten() - .map(ValidatorTransaction::hash) - .collect(); - let validator_txn_filter = - vtxn_pool::TransactionFilter::PendingTxnHashSet(pending_validator_txn_hashes); - - let (validator_txns, mut payload) = self - .payload_client - .pull_payload( - PayloadPullParameters { - max_poll_time: self.quorum_store_poll_time.saturating_sub(proposal_delay), - max_txns: max_block_txns, - max_txns_after_filtering: max_block_txns_after_filtering, - soft_max_txns_after_filtering: max_txns_from_block_to_execute - .unwrap_or(max_block_txns_after_filtering), - max_inline_txns: self.max_inline_txns, - maybe_optqs_payload_pull_params, - user_txn_filter: payload_filter, - pending_ordering, - pending_uncommitted_blocks: pending_blocks.len(), - recent_max_fill_fraction: max_fill_fraction, - block_timestamp: timestamp, - }, - validator_txn_filter, - wait_callback, - ) - .await - .context("Fail to retrieve payload")?; - - if !payload.is_direct() - && max_txns_from_block_to_execute.is_some() - && max_txns_from_block_to_execute.is_some_and(|v| payload.len() as u64 > v) - { - payload = payload.transform_to_quorum_store_v2( - max_txns_from_block_to_execute, - block_gas_limit_override, - ); - } else if block_gas_limit_override.is_some() { - payload = payload.transform_to_quorum_store_v2(None, block_gas_limit_override); - } - (validator_txns, payload, timestamp.as_micros() as u64) + self.generate_proposal_inner( + round, + hqc.certified_block().id(), + proposer_election.clone(), + maybe_optqs_payload_pull_params, + ) + .await? 
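The regular path above and the optimistic path added later in this file now both funnel into generate_proposal_inner, and the at-most-one-proposal-per-round check moves inside it. A minimal sketch of that equivocation guard, using std::sync::Mutex where the real code uses aptos_infallible::Mutex; the struct and method names are illustrative:

    use std::sync::Mutex;

    type Round = u64;

    /// Tracks the highest round this node has proposed in.
    struct RoundGuard {
        last_round_generated: Mutex<Round>,
    }

    impl RoundGuard {
        /// Claims `round` exactly once; a second call for the same (or an
        /// older) round fails, which is what rules out proposal equivocation.
        fn try_claim(&self, round: Round) -> Result<(), String> {
            let mut last = self.last_round_generated.lock().unwrap();
            if *last < round {
                *last = round;
                Ok(())
            } else {
                Err(format!("Already proposed in the round {}", round))
            }
        }
    }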
}; let quorum_cert = hqc.as_ref().clone(); @@ -659,6 +549,187 @@ impl ProposalGenerator { Ok(block) } + async fn generate_proposal_inner( + &self, + round: Round, + parent_id: HashValue, + proposer_election: Arc, + maybe_optqs_payload_pull_params: Option, + ) -> anyhow::Result<(Vec, Payload, u64)> { + { + let mut last_round_generated = self.last_round_generated.lock(); + if *last_round_generated < round { + *last_round_generated = round; + } else { + bail!("Already proposed in the round {}", round); + } + } + // One needs to hold the blocks with the references to the payloads while get_block is + // being executed: pending blocks vector keeps all the pending ancestors of the extended branch. + let mut pending_blocks = self + .block_store + .path_from_commit_root(parent_id) + .ok_or_else(|| format_err!("Parent block {} already pruned", parent_id))?; + // Avoid txn manager long poll if the root block has txns, so that the leader can + // deliver the commit proof to others without delay. + pending_blocks.push(self.block_store.commit_root()); + + // Exclude all the pending transactions: these are all the ancestors of + // parent (including) up to the root (including). + let exclude_payload: Vec<_> = pending_blocks + .iter() + .flat_map(|block| block.payload()) + .collect(); + let payload_filter = PayloadFilter::from(&exclude_payload); + + let pending_ordering = self + .block_store + .path_from_ordered_root(parent_id) + .ok_or_else(|| format_err!("Parent block {} already pruned", parent_id))? + .iter() + .any(|block| !block.payload().map_or(true, |txns| txns.is_empty())); + + // All proposed blocks in a branch are guaranteed to have increasing timestamps + // since their predecessor block will not be added to the BlockStore until + // the local time exceeds it. 
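The comment above states the branch-timestamp invariant the code below relies on: timestamps strictly increase along a branch because a block is only admitted to the BlockStore once local time has passed its timestamp. A sketch of the two rules; function names are hypothetical:

    use std::time::{SystemTime, UNIX_EPOCH};

    /// Child blocks must carry strictly larger timestamps than their parent
    /// (microseconds since the epoch, as in BlockData).
    fn timestamps_monotonic(parent_ts_usecs: u64, child_ts_usecs: u64) -> bool {
        child_ts_usecs > parent_ts_usecs
    }

    /// A block is only inserted once local time exceeds its timestamp, which
    /// is what makes the monotonicity above hold branch-wide.
    fn can_insert_now(block_ts_usecs: u64) -> bool {
        let now_usecs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock before unix epoch")
            .as_micros() as u64;
        now_usecs > block_ts_usecs
    }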
+ let timestamp = self.time_service.get_current_timestamp(); + + let voting_power_ratio = proposer_election.get_voting_power_participation_ratio(round); + + let ( + max_block_txns, + max_block_txns_after_filtering, + max_txns_from_block_to_execute, + block_gas_limit_override, + proposal_delay, + ) = self + .calculate_max_block_sizes(voting_power_ratio, timestamp, round) + .await; + + PROPOSER_MAX_BLOCK_TXNS_AFTER_FILTERING.observe(max_block_txns_after_filtering as f64); + if let Some(max_to_execute) = max_txns_from_block_to_execute { + PROPOSER_MAX_BLOCK_TXNS_TO_EXECUTE.observe(max_to_execute as f64); + } + + PROPOSER_DELAY_PROPOSAL.observe(proposal_delay.as_secs_f64()); + if !proposal_delay.is_zero() { + tokio::time::sleep(proposal_delay).await; + } + + let max_pending_block_size = pending_blocks + .iter() + .map(|block| { + block.payload().map_or(PayloadTxnsSize::zero(), |p| { + PayloadTxnsSize::new(p.len() as u64, p.size() as u64) + }) + }) + .reduce(PayloadTxnsSize::maximum) + .unwrap_or_default(); + // Use non-backpressure reduced values for computing fill_fraction + let max_fill_fraction = + (max_pending_block_size.count() as f32 / self.max_block_txns.count() as f32).max( + max_pending_block_size.size_in_bytes() as f32 + / self.max_block_txns.size_in_bytes() as f32, + ); + PROPOSER_PENDING_BLOCKS_COUNT.set(pending_blocks.len() as i64); + PROPOSER_PENDING_BLOCKS_FILL_FRACTION.set(max_fill_fraction as f64); + + let pending_validator_txn_hashes: HashSet = pending_blocks + .iter() + .filter_map(|block| block.validator_txns()) + .flatten() + .map(ValidatorTransaction::hash) + .collect(); + let validator_txn_filter = + vtxn_pool::TransactionFilter::PendingTxnHashSet(pending_validator_txn_hashes); + + let (validator_txns, mut payload) = self + .payload_client + .pull_payload( + PayloadPullParameters { + max_poll_time: self.quorum_store_poll_time.saturating_sub(proposal_delay), + max_txns: max_block_txns, + max_txns_after_filtering: max_block_txns_after_filtering, + soft_max_txns_after_filtering: max_txns_from_block_to_execute + .unwrap_or(max_block_txns_after_filtering), + max_inline_txns: self.max_inline_txns, + maybe_optqs_payload_pull_params, + user_txn_filter: payload_filter, + pending_ordering, + pending_uncommitted_blocks: pending_blocks.len(), + recent_max_fill_fraction: max_fill_fraction, + block_timestamp: timestamp, + }, + validator_txn_filter, + ) + .await + .context("Fail to retrieve payload")?; + + if !payload.is_direct() + && max_txns_from_block_to_execute.is_some() + && max_txns_from_block_to_execute.is_some_and(|v| payload.len() as u64 > v) + { + payload = payload.transform_to_quorum_store_v2( + max_txns_from_block_to_execute, + block_gas_limit_override, + ); + } else if block_gas_limit_override.is_some() { + payload = payload.transform_to_quorum_store_v2(None, block_gas_limit_override); + } + Ok((validator_txns, payload, timestamp.as_micros() as u64)) + } + + pub async fn generate_opt_proposal( + &self, + epoch: u64, + round: Round, + parent: BlockInfo, + grandparent_qc: QuorumCert, + proposer_election: Arc, + ) -> anyhow::Result { + let maybe_optqs_payload_pull_params = self.opt_qs_payload_param_provider.get_params(); + + let hqc = self.ensure_highest_quorum_cert(round)?; + + ensure!( + hqc.certified_block().round() + 2 == round, + "[OptProposal] Given round {} is not equal to hqc round {} + 2, should generate regular proposal instead of optimistic", + round, + hqc.certified_block().round() + ); + + let (validator_txns, payload, timestamp) = if 
hqc.certified_block().has_reconfiguration() { + bail!("[OptProposal] HQC has reconfiguration!"); + } else { + self.generate_proposal_inner( + round, + parent.id(), + proposer_election, + maybe_optqs_payload_pull_params, + ) + .await? + }; + + let validator_txns = if self.vtxn_config.enabled() { + validator_txns + } else { + vec![] + }; + + let block = OptBlockData::new( + validator_txns, + payload, + self.author, + epoch, + round, + timestamp, + parent, + grandparent_qc, + ); + + Ok(block) + } + async fn calculate_max_block_sizes( &self, voting_power_ratio: f64, diff --git a/consensus/src/liveness/proposal_generator_test.rs b/consensus/src/liveness/proposal_generator_test.rs index d4099030572e1..934061b9ff0eb 100644 --- a/consensus/src/liveness/proposal_generator_test.rs +++ b/consensus/src/liveness/proposal_generator_test.rs @@ -22,15 +22,10 @@ use aptos_consensus_types::{ utils::PayloadTxnsSize, }; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_signer::ValidatorSigner}; -use futures::{future::BoxFuture, FutureExt}; use std::{sync::Arc, time::Duration}; const MAX_BLOCK_GAS_LIMIT: u64 = 30_000; -fn empty_callback() -> BoxFuture<'static, ()> { - async move {}.boxed() -} - struct MockOptQSPayloadProvider {} impl TOptQSPullParamsProvider for MockOptQSPayloadProvider { @@ -69,7 +64,7 @@ async fn test_proposal_generation_empty_tree() { // Generate proposals for an empty tree. let proposal_data = proposal_generator - .generate_proposal(1, proposer_election.clone(), empty_callback()) + .generate_proposal(1, proposer_election.clone()) .await .unwrap(); let proposal = Block::new_proposal_from_block_data(proposal_data, &signer).unwrap(); @@ -80,7 +75,7 @@ async fn test_proposal_generation_empty_tree() { // Duplicate proposals on the same round are not allowed let proposal_err = proposal_generator - .generate_proposal(1, proposer_election.clone(), empty_callback()) + .generate_proposal(1, proposer_election.clone()) .await .err(); assert!(proposal_err.is_some()); @@ -121,7 +116,7 @@ async fn test_proposal_generation_parent() { .await; let original_res = proposal_generator - .generate_proposal(10, proposer_election.clone(), empty_callback()) + .generate_proposal(10, proposer_election.clone()) .await .unwrap(); // With no certifications the parent is genesis @@ -135,7 +130,7 @@ async fn test_proposal_generation_parent() { // Once a1 is certified, it should be the one to choose from inserter.insert_qc_for_block(a1.as_ref(), None); let a1_child_res = proposal_generator - .generate_proposal(11, proposer_election.clone(), empty_callback()) + .generate_proposal(11, proposer_election.clone()) .await .unwrap(); assert_eq!(a1_child_res.parent_id(), a1.id()); @@ -150,7 +145,7 @@ async fn test_proposal_generation_parent() { // Once b1 is certified, it should be the one to choose from inserter.insert_qc_for_block(b1.as_ref(), None); let b1_child_res = proposal_generator - .generate_proposal(15, proposer_election.clone(), empty_callback()) + .generate_proposal(15, proposer_election.clone()) .await .unwrap(); assert_eq!(b1_child_res.parent_id(), b1.id()); @@ -196,7 +191,7 @@ async fn test_old_proposal_generation() { inserter.insert_qc_for_block(a1.as_ref(), None); let proposal_err = proposal_generator - .generate_proposal(1, proposer_election.clone(), empty_callback()) + .generate_proposal(1, proposer_election.clone()) .await .err(); assert!(proposal_err.is_some()); @@ -234,7 +229,7 @@ async fn test_correct_failed_authors() { let genesis = block_store.ordered_root(); let result = proposal_generator 
- .generate_proposal(6, proposer_election.clone(), empty_callback()) + .generate_proposal(6, proposer_election.clone()) .await .unwrap(); // With no certifications the parent is genesis diff --git a/consensus/src/logging.rs b/consensus/src/logging.rs index a6eb19cfc658b..e8308eabcd759 100644 --- a/consensus/src/logging.rs +++ b/consensus/src/logging.rs @@ -58,6 +58,11 @@ pub enum LogEvent { // randomness fast path BroadcastRandShareFastPath, ReceiveRandShareFastPath, + // optimistic proposal + OptPropose, + NetworkReceiveOptProposal, + ReceiveOptProposal, + ProcessOptProposal, } impl LogSchema { diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 3a57d55b6d368..c9b90459f8b56 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -25,6 +25,7 @@ use aptos_config::network_id::NetworkId; use aptos_consensus_types::{ block_retrieval::{BlockRetrievalRequest, BlockRetrievalRequestV1, BlockRetrievalResponse}, common::Author, + opt_proposal_msg::OptProposalMsg, order_vote_msg::OrderVoteMsg, pipeline::{commit_decision::CommitDecision, commit_vote::CommitVote}, proof_of_store::{ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, SignedBatchInfoMsg}, @@ -406,6 +407,12 @@ impl NetworkSender { self.broadcast(msg).await } + pub async fn broadcast_opt_proposal(&self, proposal_msg: OptProposalMsg) { + fail_point!("consensus::send::broadcast_opt_proposal", |_| ()); + let msg = ConsensusMsg::OptProposalMsg(Box::new(proposal_msg)); + self.broadcast(msg).await + } + pub async fn broadcast_sync_info(&self, sync_info_msg: SyncInfo) { fail_point!("consensus::send::broadcast_sync_info", |_| ()); let msg = ConsensusMsg::SyncInfo(Box::new(sync_info_msg)); @@ -793,6 +800,7 @@ impl NetworkTask { }; }, consensus_msg @ (ConsensusMsg::ProposalMsg(_) + | ConsensusMsg::OptProposalMsg(_) | ConsensusMsg::VoteMsg(_) | ConsensusMsg::RoundTimeoutMsg(_) | ConsensusMsg::OrderVoteMsg(_) @@ -811,6 +819,23 @@ impl NetworkTask { block_hash = proposal.proposal().id(), ); } + if let ConsensusMsg::OptProposalMsg(proposal) = &consensus_msg { + observe_block( + proposal.timestamp_usecs(), + BlockStage::NETWORK_RECEIVED, + ); + observe_block( + proposal.timestamp_usecs(), + BlockStage::NETWORK_RECEIVED_OPT_PROPOSAL, + ); + info!( + LogSchema::new(LogEvent::NetworkReceiveOptProposal) + .remote_peer(peer_id), + block_author = proposal.proposer(), + block_epoch = proposal.epoch(), + block_round = proposal.round(), + ); + } Self::push_msg(peer_id, consensus_msg, &self.consensus_messages_tx); }, // TODO: get rid of the rpc dummy value diff --git a/consensus/src/network_interface.rs b/consensus/src/network_interface.rs index e7d351895bd13..aef081a8273a3 100644 --- a/consensus/src/network_interface.rs +++ b/consensus/src/network_interface.rs @@ -14,6 +14,7 @@ use aptos_config::network_id::{NetworkId, PeerNetworkId}; use aptos_consensus_types::{ block_retrieval::{BlockRetrievalRequest, BlockRetrievalRequestV1, BlockRetrievalResponse}, epoch_retrieval::EpochRetrievalRequest, + opt_proposal_msg::OptProposalMsg, order_vote_msg::OrderVoteMsg, pipeline::{commit_decision::CommitDecision, commit_vote::CommitVote}, proof_of_store::{ProofOfStoreMsg, SignedBatchInfoMsg}, @@ -88,6 +89,8 @@ pub enum ConsensusMsg { RoundTimeoutMsg(Box), /// RPC to get a chain of block of the given length starting from the given block id, using epoch and round. BlockRetrievalRequest(Box), + /// OptProposalMsg contains the optimistic proposal and sync info. 
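Optimistic proposals, introduced across the hunks above, let the round-r leader propose before the round r-1 parent is certified: generate_opt_proposal requires the highest QC to sit exactly two rounds back and bails out on reconfiguration. A runnable sketch of that precondition, with illustrative names:

    type Round = u64;

    /// Mirrors the ensure!() in generate_opt_proposal: an optimistic
    /// proposal for `round` is only sensible when the highest QC certifies
    /// `round - 2` and the chain is not reconfiguring.
    fn can_propose_optimistically(round: Round, hqc_round: Round, reconfiguring: bool) -> bool {
        hqc_round + 2 == round && !reconfiguring
    }

    fn main() {
        assert!(can_propose_optimistically(10, 8, false));
        // Parent already certified: fall back to a regular proposal.
        assert!(!can_propose_optimistically(10, 9, false));
        // HQC carries a reconfiguration: the optimistic path bails out.
        assert!(!can_propose_optimistically(10, 8, true));
    }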
+ OptProposalMsg(Box), } /// Network type for consensus @@ -100,6 +103,7 @@ impl ConsensusMsg { ConsensusMsg::BlockRetrievalResponse(_) => "BlockRetrievalResponse", ConsensusMsg::EpochRetrievalRequest(_) => "EpochRetrievalRequest", ConsensusMsg::ProposalMsg(_) => "ProposalMsg", + ConsensusMsg::OptProposalMsg(_) => "OptProposalMsg", ConsensusMsg::SyncInfo(_) => "SyncInfo", ConsensusMsg::EpochChangeProof(_) => "EpochChangeProof", ConsensusMsg::VoteMsg(_) => "VoteMsg", diff --git a/consensus/src/payload_client/mixed.rs b/consensus/src/payload_client/mixed.rs index afc981ab4a3b8..bb9c38c80cc25 100644 --- a/consensus/src/payload_client/mixed.rs +++ b/consensus/src/payload_client/mixed.rs @@ -12,7 +12,6 @@ use aptos_logger::debug; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction}; use aptos_validator_transaction_pool::TransactionFilter; use fail::fail_point; -use futures::future::BoxFuture; use std::{cmp::min, sync::Arc, time::Instant}; pub struct MixedPayloadClient { @@ -60,7 +59,6 @@ impl PayloadClient for MixedPayloadClient { &self, params: PayloadPullParameters, validator_txn_filter: TransactionFilter, - wait_callback: BoxFuture<'static, ()>, ) -> anyhow::Result<(Vec, Payload), QuorumStoreError> { // Pull validator txns first. let validator_txn_pull_timer = Instant::now(); @@ -100,10 +98,7 @@ impl PayloadClient for MixedPayloadClient { .saturating_sub(validator_txn_pull_timer.elapsed()); // Pull user payload. - let user_payload = self - .user_payload_client - .pull(user_txn_pull_params, wait_callback) - .await?; + let user_payload = self.user_payload_client.pull(user_txn_pull_params).await?; Ok((validator_txns, user_payload)) } @@ -159,7 +154,6 @@ mod tests { aptos_infallible::duration_since_epoch(), ), vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - Box::pin(async {}), ) .await .unwrap() @@ -187,7 +181,6 @@ mod tests { aptos_infallible::duration_since_epoch(), ), vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - Box::pin(async {}), ) .await .unwrap() @@ -215,7 +208,6 @@ mod tests { aptos_infallible::duration_since_epoch(), ), vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - Box::pin(async {}), ) .await .unwrap() @@ -243,7 +235,6 @@ mod tests { aptos_infallible::duration_since_epoch(), ), vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - Box::pin(async {}), ) .await .unwrap() @@ -289,7 +280,6 @@ mod tests { aptos_infallible::duration_since_epoch(), ), vtxn_pool::TransactionFilter::PendingTxnHashSet(HashSet::new()), - Box::pin(async {}), ) .await .unwrap() diff --git a/consensus/src/payload_client/mod.rs b/consensus/src/payload_client/mod.rs index e38ba3194329f..716687f689737 100644 --- a/consensus/src/payload_client/mod.rs +++ b/consensus/src/payload_client/mod.rs @@ -5,7 +5,6 @@ use crate::error::QuorumStoreError; use aptos_consensus_types::{common::Payload, payload_pull_params::PayloadPullParameters}; use aptos_types::validator_txn::ValidatorTransaction; use aptos_validator_transaction_pool::TransactionFilter; -use futures::future::BoxFuture; pub mod mixed; pub mod user; @@ -17,6 +16,5 @@ pub trait PayloadClient: Send + Sync { &self, config: PayloadPullParameters, validator_txn_filter: TransactionFilter, - wait_callback: BoxFuture<'static, ()>, ) -> anyhow::Result<(Vec, Payload), QuorumStoreError>; } diff --git a/consensus/src/payload_client/user/mod.rs b/consensus/src/payload_client/user/mod.rs index e3f2ca8acba43..57fd99a174bee 100644 --- 
a/consensus/src/payload_client/user/mod.rs +++ b/consensus/src/payload_client/user/mod.rs @@ -5,7 +5,6 @@ use crate::error::QuorumStoreError; use aptos_consensus_types::{common::Payload, payload_pull_params::PayloadPullParameters}; #[cfg(test)] use aptos_types::transaction::SignedTransaction; -use futures::future::BoxFuture; #[cfg(test)] use std::time::Duration; #[cfg(test)] @@ -18,7 +17,6 @@ pub trait UserPayloadClient: Send + Sync { async fn pull( &self, params: PayloadPullParameters, - wait_callback: BoxFuture<'static, ()>, ) -> anyhow::Result; } @@ -41,7 +39,6 @@ impl UserPayloadClient for DummyClient { async fn pull( &self, mut params: PayloadPullParameters, - _wait_callback: BoxFuture<'static, ()>, ) -> anyhow::Result { use aptos_consensus_types::utils::PayloadTxnsSize; diff --git a/consensus/src/payload_client/user/quorum_store_client.rs b/consensus/src/payload_client/user/quorum_store_client.rs index c8c541208c863..74a91fbf9b1de 100644 --- a/consensus/src/payload_client/user/quorum_store_client.rs +++ b/consensus/src/payload_client/user/quorum_store_client.rs @@ -13,7 +13,6 @@ use aptos_consensus_types::{ }; use aptos_logger::info; use fail::fail_point; -use futures::future::BoxFuture; use futures_channel::{mpsc, oneshot}; use std::time::{Duration, Instant}; use tokio::time::{sleep, timeout}; @@ -93,7 +92,6 @@ impl UserPayloadClient for QuorumStoreClient { async fn pull( &self, params: PayloadPullParameters, - wait_callback: BoxFuture<'static, ()>, ) -> anyhow::Result { let return_non_full = params.recent_max_fill_fraction < self.wait_for_full_blocks_above_recent_fill_threshold @@ -105,7 +103,6 @@ impl UserPayloadClient for QuorumStoreClient { fail_point!("consensus::pull_payload", |_| { Err(anyhow::anyhow!("Injected error in pull_payload").into()) }); - let mut callback_wrapper = Some(wait_callback); // keep polling QuorumStore until there's payloads available or there's still pending payloads let start_time = Instant::now(); @@ -125,9 +122,6 @@ impl UserPayloadClient for QuorumStoreClient { ) .await?; if payload.is_empty() && !return_empty && !done { - if let Some(callback) = callback_wrapper.take() { - callback.await; - } sleep(Duration::from_millis(NO_TXN_DELAY)).await; continue; } diff --git a/consensus/src/pipeline/buffer_item.rs b/consensus/src/pipeline/buffer_item.rs index 6840b26e43dc4..bb263f737b641 100644 --- a/consensus/src/pipeline/buffer_item.rs +++ b/consensus/src/pipeline/buffer_item.rs @@ -2,9 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{ - counters, pipeline::hashable::Hashable, state_replication::StateComputerCommitCallBackType, -}; +use crate::{counters, pipeline::hashable::Hashable}; use anyhow::anyhow; use aptos_consensus_types::{ common::{Author, Round}, @@ -61,7 +59,6 @@ pub struct OrderedItem { // This can happen in the fast forward sync path, where we can receive the commit proof // from peers. 
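With the wait callback gone, QuorumStoreClient::pull above reduces to a plain poll-and-back-off loop: keep asking Quorum Store until a payload arrives or the deadline passes. A self-contained sketch of that control flow, with a hypothetical fetch closure standing in for the channel round-trip and an assumed back-off value:

    use std::time::{Duration, Instant};

    const NO_TXN_DELAY_MS: u64 = 30; // assumed value; mirrors NO_TXN_DELAY's role

    async fn poll_payload<F, Fut>(mut fetch: F, max_poll_time: Duration) -> Vec<u8>
    where
        F: FnMut() -> Fut,
        Fut: std::future::Future<Output = Vec<u8>>,
    {
        let start = Instant::now();
        loop {
            let payload = fetch().await;
            let deadline_passed = start.elapsed() >= max_poll_time;
            if payload.is_empty() && !deadline_passed {
                // Back off briefly instead of busy-polling Quorum Store.
                tokio::time::sleep(Duration::from_millis(NO_TXN_DELAY_MS)).await;
                continue;
            }
            return payload;
        }
    }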
pub commit_proof: Option, - pub callback: StateComputerCommitCallBackType, pub ordered_blocks: Vec>, pub ordered_proof: LedgerInfoWithSignatures, } @@ -69,7 +66,6 @@ pub struct OrderedItem { pub struct ExecutedItem { pub executed_blocks: Vec>, pub partial_commit_proof: SignatureAggregator, - pub callback: StateComputerCommitCallBackType, pub commit_info: BlockInfo, pub ordered_proof: LedgerInfoWithSignatures, } @@ -77,7 +73,6 @@ pub struct ExecutedItem { pub struct SignedItem { pub executed_blocks: Vec>, pub partial_commit_proof: SignatureAggregator, - pub callback: StateComputerCommitCallBackType, pub commit_vote: CommitVote, pub rb_handle: Option<(Instant, DropGuard)>, } @@ -85,7 +80,6 @@ pub struct SignedItem { pub struct AggregatedItem { pub executed_blocks: Vec>, pub commit_proof: LedgerInfoWithSignatures, - pub callback: StateComputerCommitCallBackType, } pub enum BufferItem { @@ -107,13 +101,11 @@ impl BufferItem { pub fn new_ordered( ordered_blocks: Vec>, ordered_proof: LedgerInfoWithSignatures, - callback: StateComputerCommitCallBackType, unverified_votes: HashMap, ) -> Self { Self::Ordered(Box::new(OrderedItem { unverified_votes, commit_proof: None, - callback, ordered_blocks, ordered_proof, })) @@ -133,7 +125,6 @@ impl BufferItem { ordered_blocks, commit_proof, unverified_votes, - callback, ordered_proof, } = *ordered_item; for (b1, b2) in zip_eq(ordered_blocks.iter(), executed_blocks.iter()) { @@ -164,7 +155,6 @@ impl BufferItem { Self::Aggregated(Box::new(AggregatedItem { executed_blocks, commit_proof, - callback, })) } else { let commit_ledger_info = generate_commit_ledger_info( @@ -188,13 +178,11 @@ impl BufferItem { Self::Aggregated(Box::new(AggregatedItem { executed_blocks, commit_proof, - callback, })) } else { Self::Executed(Box::new(ExecutedItem { executed_blocks, partial_commit_proof, - callback, commit_info, ordered_proof, })) @@ -212,7 +200,6 @@ impl BufferItem { Self::Executed(executed_item) => { let ExecutedItem { executed_blocks, - callback, partial_commit_proof, .. } = *executed_item; @@ -230,7 +217,6 @@ impl BufferItem { Self::Signed(Box::new(SignedItem { executed_blocks, - callback, partial_commit_proof, commit_vote, rb_handle: None, @@ -252,7 +238,6 @@ impl BufferItem { Self::Signed(signed_item) => { let SignedItem { executed_blocks, - callback, partial_commit_proof: local_commit_proof, .. } = *signed_item; @@ -266,14 +251,12 @@ impl BufferItem { ); Self::Aggregated(Box::new(AggregatedItem { executed_blocks, - callback, commit_proof, })) }, Self::Executed(executed_item) => { let ExecutedItem { executed_blocks, - callback, commit_info, .. 
} = *executed_item; @@ -284,7 +267,6 @@ impl BufferItem { ); Self::Aggregated(Box::new(AggregatedItem { executed_blocks, - callback, commit_proof, })) }, @@ -332,7 +314,6 @@ impl BufferItem { return Self::Aggregated(Box::new(AggregatedItem { executed_blocks: signed_item.executed_blocks, commit_proof, - callback: signed_item.callback, })); } } @@ -358,7 +339,6 @@ impl BufferItem { return Self::Aggregated(Box::new(AggregatedItem { executed_blocks: executed_item.executed_blocks, commit_proof, - callback: executed_item.callback, })); } } @@ -577,7 +557,6 @@ mod test { let mut ordered_item = BufferItem::new_ordered( vec![pipelined_block.clone()], ordered_proof.clone(), - Box::new(move |_, _| {}), cached_commit_votes, ); @@ -682,7 +661,6 @@ mod test { let mut ordered_item = BufferItem::new_ordered( vec![pipelined_block.clone()], ordered_proof.clone(), - Box::new(move |_, _| {}), cached_commit_votes, ); diff --git a/consensus/src/pipeline/buffer_manager.rs b/consensus/src/pipeline/buffer_manager.rs index 9576105a374c7..0c0a6ebb4f8e9 100644 --- a/consensus/src/pipeline/buffer_manager.rs +++ b/consensus/src/pipeline/buffer_manager.rs @@ -22,7 +22,6 @@ use crate::{ pipeline_phase::CountedRequest, signing_phase::{SigningRequest, SigningResponse}, }, - state_replication::StateComputerCommitCallBackType, }; use aptos_bounded_executor::BoundedExecutor; use aptos_config::config::ConsensusObserverConfig; @@ -81,7 +80,6 @@ pub struct ResetRequest { pub struct OrderedBlocks { pub ordered_blocks: Vec>, pub ordered_proof: LedgerInfoWithSignatures, - pub callback: StateComputerCommitCallBackType, } impl OrderedBlocks { @@ -139,10 +137,6 @@ pub struct BufferManager { block_rx: UnboundedReceiver, reset_rx: UnboundedReceiver, - // self channel to retry execution schedule phase - execution_schedule_retry_tx: UnboundedSender<()>, - execution_schedule_retry_rx: UnboundedReceiver<()>, - stop: bool, epoch_state: Arc, @@ -177,7 +171,6 @@ pub struct BufferManager { // Items are popped from the buffer when sending to the persisting phase since callback is not clonable. // but we need to keep the pending blocks for reset. 
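With the callbacks stripped out above, a BufferItem is a pure state machine: each phase consumes the previous state and produces the next, and Aggregated is what reaches the persisting phase. A compressed sketch of the lifecycle (the real transitions also allow jumping straight to Aggregated when a commit proof arrives early; payloads are reduced to block ids here):

    /// Ordered -> Executed -> Signed -> Aggregated, driven by the pipeline
    /// phases; Aggregated is terminal and is handed to persisting.
    enum Item {
        Ordered { block_ids: Vec<u64> },
        Executed { block_ids: Vec<u64> },
        Signed { block_ids: Vec<u64> },
        Aggregated { block_ids: Vec<u64> },
    }

    impl Item {
        fn advance(self) -> Item {
            match self {
                Item::Ordered { block_ids } => Item::Executed { block_ids },
                Item::Executed { block_ids } => Item::Signed { block_ids },
                Item::Signed { block_ids } | Item::Aggregated { block_ids } => {
                    Item::Aggregated { block_ids }
                },
            }
        }
    }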
pending_commit_blocks: BTreeMap>, - new_pipeline_enabled: bool, } impl BufferManager { @@ -209,7 +202,6 @@ impl BufferManager { consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, max_pending_rounds_in_commit_vote_cache: u64, - new_pipeline_enabled: bool, ) -> Self { let buffer = Buffer::::new(); @@ -217,7 +209,6 @@ impl BufferManager { .factor(50) .max_delay(Duration::from_secs(5)); - let (tx, rx) = unbounded(); Self { author, @@ -252,9 +243,6 @@ impl BufferManager { block_rx, reset_rx, - execution_schedule_retry_tx: tx, - execution_schedule_retry_rx: rx, - stop: false, epoch_state, @@ -275,7 +263,6 @@ impl BufferManager { max_pending_rounds_in_commit_vote_cache, pending_commit_votes: BTreeMap::new(), pending_commit_blocks: BTreeMap::new(), - new_pipeline_enabled, } } @@ -399,7 +386,6 @@ impl BufferManager { let OrderedBlocks { ordered_blocks, ordered_proof, - callback, } = ordered_blocks; info!( @@ -411,7 +397,6 @@ impl BufferManager { let request = self.create_new_request(ExecutionRequest { ordered_blocks: ordered_blocks.clone(), - lifetime_guard: self.create_new_request(()), }); if let Some(consensus_publisher) = &self.consensus_publisher { let message = ConsensusObserverMessage::new_ordered_block_message( @@ -435,8 +420,7 @@ impl BufferManager { } } } - let item = - BufferItem::new_ordered(ordered_blocks, ordered_proof, callback, unverified_votes); + let item = BufferItem::new_ordered(ordered_blocks, ordered_proof, unverified_votes); self.buffer.push_back(item); } @@ -540,13 +524,6 @@ impl BufferManager { .send(self.create_new_request(PersistingRequest { blocks: blocks_to_persist, commit_ledger_info: aggregated_item.commit_proof, - // we use the last callback - // this is okay because the callback function (from BlockStore::commit) - // takes in the actual blocks and ledger info from the state computer - // the encoded values are references to the block_tree, storage, and a commit root - // the block_tree and storage are the same for all the callbacks in the current epoch - // the commit root is used in logging only. - callback: aggregated_item.callback, })) .await .expect("Failed to send persist request"); @@ -591,9 +568,6 @@ impl BufferManager { async fn process_reset_request(&mut self, request: ResetRequest) { let ResetRequest { tx, signal } = request; info!("Receive reset"); - if !self.new_pipeline_enabled { - self.reset_flag.store(true, Ordering::SeqCst); - } match signal { ResetSignal::Stop => self.stop = true, @@ -607,9 +581,6 @@ impl BufferManager { self.reset().await; let _ = tx.send(ResetAck::default()); - if !self.new_pipeline_enabled { - self.reset_flag.store(false, Ordering::SeqCst); - } info!("Reset finishes"); } @@ -622,29 +593,7 @@ impl BufferManager { .expect("Failed to send execution wait request."); } - async fn retry_schedule_phase(&mut self) { - let mut cursor = self.execution_root; - let mut count = 0; - while cursor.is_some() { - let ordered_blocks = self.buffer.get(&cursor).get_blocks().clone(); - let request = self.create_new_request(ExecutionRequest { - ordered_blocks, - lifetime_guard: self.create_new_request(()), - }); - count += 1; - self.execution_schedule_phase_tx - .send(request) - .await - .expect("Failed to send execution schedule request."); - cursor = self.buffer.get_next(&cursor); - } - info!( - "Reschedule {} execution requests from {:?}", - count, self.execution_root - ); - } - - /// If the response is successful, advance the item to Executed, otherwise panic (TODO fix). 
+ /// If the response is successful, advance the item to Executed. #[allow(clippy::unwrap_used)] async fn process_execution_response(&mut self, response: ExecutionResponse) { let ExecutionResponse { block_id, inner } = response; @@ -661,7 +610,6 @@ impl BufferManager { e, &counters::BUFFER_MANAGER_RECEIVED_EXECUTOR_ERROR_COUNT, block_id, - self.new_pipeline_enabled, ); return; }, @@ -994,29 +942,12 @@ impl BufferManager { })}, Some(response) = self.execution_wait_phase_rx.next() => { monitor!("buffer_manager_process_execution_wait_response", { - let response_block_id = response.block_id; self.process_execution_response(response).await; - if let Some(block_id) = self.advance_execution_root() { - // if the response is for the current execution root, retry the schedule phase - if response_block_id == block_id { - let mut tx = self.execution_schedule_retry_tx.clone(); - tokio::spawn(async move { - tokio::time::sleep(Duration::from_millis(100)).await; - // buffer manager can be dropped at the point of sending retry - let _ = tx.send(()).await; - }); - } - } + self.advance_execution_root(); if self.signing_root.is_none() { self.advance_signing_root().await; }}); }, - _ = self.execution_schedule_retry_rx.next() => { - if !self.new_pipeline_enabled { - monitor!("buffer_manager_process_execution_schedule_retry", - self.retry_schedule_phase().await); - } - }, Some(response) = self.signing_phase_rx.next() => { monitor!("buffer_manager_process_signing_response", { self.process_signing_response(response).await; diff --git a/consensus/src/pipeline/decoupled_execution_utils.rs b/consensus/src/pipeline/decoupled_execution_utils.rs index 1f3488031f284..e7356767e914e 100644 --- a/consensus/src/pipeline/decoupled_execution_utils.rs +++ b/consensus/src/pipeline/decoupled_execution_utils.rs @@ -13,7 +13,6 @@ use crate::{ pipeline_phase::{CountedRequest, PipelinePhase}, signing_phase::{CommitSignerProvider, SigningPhase, SigningRequest, SigningResponse}, }, - state_replication::StateComputer, }; use aptos_bounded_executor::BoundedExecutor; use aptos_channels::aptos_channel::Receiver; @@ -30,11 +29,9 @@ use std::sync::{ #[allow(clippy::too_many_arguments)] pub fn prepare_phases_and_buffer_manager( author: Author, - execution_proxy: Arc, safety_rules: Arc, commit_msg_tx: NetworkSender, commit_msg_rx: Receiver, - persisting_proxy: Arc, block_rx: UnboundedReceiver, sync_rx: UnboundedReceiver, epoch_state: Arc, @@ -45,7 +42,6 @@ pub fn prepare_phases_and_buffer_manager( consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, max_pending_rounds_in_commit_vote_cache: u64, - new_pipeline_enabled: bool, ) -> ( PipelinePhase, PipelinePhase, @@ -61,7 +57,7 @@ pub fn prepare_phases_and_buffer_manager( create_channel::>(); let (execution_schedule_phase_response_tx, execution_schedule_phase_response_rx) = create_channel::(); - let execution_schedule_phase_processor = ExecutionSchedulePhase::new(execution_proxy); + let execution_schedule_phase_processor = ExecutionSchedulePhase::new(); let execution_schedule_phase = PipelinePhase::new( execution_schedule_phase_request_rx, Some(execution_schedule_phase_response_tx), @@ -101,7 +97,7 @@ pub fn prepare_phases_and_buffer_manager( let (persisting_phase_response_tx, persisting_phase_response_rx) = create_channel(); let commit_msg_tx = Arc::new(commit_msg_tx); - let persisting_phase_processor = PersistingPhase::new(persisting_proxy, commit_msg_tx.clone()); + let persisting_phase_processor = PersistingPhase::new(commit_msg_tx.clone()); let persisting_phase = 
PipelinePhase::new( persisting_phase_request_rx, Some(persisting_phase_response_tx), @@ -138,7 +134,6 @@ pub fn prepare_phases_and_buffer_manager( consensus_observer_config, consensus_publisher, max_pending_rounds_in_commit_vote_cache, - new_pipeline_enabled, ), ) } diff --git a/consensus/src/pipeline/execution_client.rs b/consensus/src/pipeline/execution_client.rs index b1f107d04af19..7d631820de61d 100644 --- a/consensus/src/pipeline/execution_client.rs +++ b/consensus/src/pipeline/execution_client.rs @@ -22,7 +22,7 @@ use crate::{ types::{AugmentedData, RandConfig, Share}, }, state_computer::ExecutionProxy, - state_replication::{StateComputer, StateComputerCommitCallBackType}, + state_replication::StateComputer, transaction_deduper::create_transaction_deduper, transaction_shuffler::create_transaction_shuffler, }; @@ -71,7 +71,6 @@ pub trait TExecutionClient: Send + Sync { fast_rand_config: Option, rand_msg_rx: aptos_channel::Receiver, highest_committed_round: Round, - new_pipeline_enabled: bool, ); /// This is needed for some DAG tests. Clean this up as a TODO. @@ -82,7 +81,6 @@ pub trait TExecutionClient: Send + Sync { &self, blocks: Vec>, ordered_proof: WrappedLedgerInfo, - callback: StateComputerCommitCallBackType, ) -> ExecutorResult<()>; fn send_commit_msg( @@ -210,7 +208,6 @@ impl ExecutionProxyClient { buffer_manager_back_pressure_enabled: bool, consensus_observer_config: ConsensusObserverConfig, consensus_publisher: Option>, - new_pipeline_enabled: bool, ) { let network_sender = NetworkSender::new( self.author, @@ -282,11 +279,9 @@ impl ExecutionProxyClient { buffer_manager, ) = prepare_phases_and_buffer_manager( self.author, - self.execution_proxy.clone(), commit_signer_provider, network_sender, commit_msg_rx, - self.execution_proxy.clone(), execution_ready_block_rx, reset_buffer_manager_rx, epoch_state, @@ -298,7 +293,6 @@ impl ExecutionProxyClient { consensus_publisher, self.consensus_config .max_pending_rounds_in_commit_vote_cache, - new_pipeline_enabled, ); tokio::spawn(execution_schedule_phase.start()); @@ -324,7 +318,6 @@ impl TExecutionClient for ExecutionProxyClient { fast_rand_config: Option, rand_msg_rx: aptos_channel::Receiver, highest_committed_round: Round, - new_pipeline_enabled: bool, ) { let maybe_rand_msg_tx = self.spawn_decoupled_execution( maybe_consensus_key, @@ -338,7 +331,6 @@ impl TExecutionClient for ExecutionProxyClient { self.consensus_config.enable_pre_commit, self.consensus_observer_config, self.consensus_publisher.clone(), - new_pipeline_enabled, ); let transaction_shuffler = @@ -370,7 +362,6 @@ impl TExecutionClient for ExecutionProxyClient { &self, blocks: Vec>, ordered_proof: WrappedLedgerInfo, - callback: StateComputerCommitCallBackType, ) -> ExecutorResult<()> { assert!(!blocks.is_empty()); let mut execute_tx = match self.handle.read().execute_tx.clone() { @@ -394,7 +385,6 @@ impl TExecutionClient for ExecutionProxyClient { .send(OrderedBlocks { ordered_blocks: blocks, ordered_proof: ordered_proof.ledger_info().clone(), - callback, }) .await .is_err() @@ -545,7 +535,6 @@ impl TExecutionClient for DummyExecutionClient { _fast_rand_config: Option, _rand_msg_rx: aptos_channel::Receiver, _highest_committed_round: Round, - _new_pipeline_enabled: bool, ) { } @@ -557,7 +546,6 @@ impl TExecutionClient for DummyExecutionClient { &self, _: Vec>, _: WrappedLedgerInfo, - _: StateComputerCommitCallBackType, ) -> ExecutorResult<()> { Ok(()) } diff --git a/consensus/src/pipeline/execution_schedule_phase.rs b/consensus/src/pipeline/execution_schedule_phase.rs 
index 4c3882290e45c..702956760d21b 100644 --- a/consensus/src/pipeline/execution_schedule_phase.rs +++ b/consensus/src/pipeline/execution_schedule_phase.rs @@ -1,19 +1,13 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{ - pipeline::{ - execution_wait_phase::ExecutionWaitRequest, - pipeline_phase::{CountedRequest, StatelessPipeline}, - }, - state_replication::StateComputer, +use crate::pipeline::{ + execution_wait_phase::ExecutionWaitRequest, pipeline_phase::StatelessPipeline, }; use aptos_consensus_types::pipelined_block::PipelinedBlock; use aptos_crypto::HashValue; -use aptos_executor_types::ExecutorError; -use aptos_logger::debug; use async_trait::async_trait; -use futures::{FutureExt, TryFutureExt}; +use futures::FutureExt; use std::{ fmt::{Debug, Display, Formatter}, sync::Arc, @@ -25,9 +19,6 @@ use std::{ pub struct ExecutionRequest { pub ordered_blocks: Vec>, - // Pass down a CountedRequest to the ExecutionPipeline stages in order to guarantee the executor - // doesn't get reset with pending tasks stuck in the pipeline. - pub lifetime_guard: CountedRequest<()>, } impl Debug for ExecutionRequest { @@ -42,13 +33,11 @@ impl Display for ExecutionRequest { } } -pub struct ExecutionSchedulePhase { - execution_proxy: Arc, -} +pub struct ExecutionSchedulePhase; impl ExecutionSchedulePhase { - pub fn new(execution_proxy: Arc) -> Self { - Self { execution_proxy } + pub fn new() -> Self { + Self } } @@ -60,13 +49,10 @@ impl StatelessPipeline for ExecutionSchedulePhase { const NAME: &'static str = "execution_schedule"; async fn process(&self, req: ExecutionRequest) -> ExecutionWaitRequest { - let ExecutionRequest { - mut ordered_blocks, - lifetime_guard, - } = req; + let ExecutionRequest { mut ordered_blocks } = req; - let (block_id, pipeline_enabled) = match ordered_blocks.last() { - Some(block) => (block.id(), block.pipeline_enabled()), + let block_id = match ordered_blocks.last() { + Some(block) => block.id(), None => { return ExecutionWaitRequest { block_id: HashValue::zero(), @@ -75,53 +61,20 @@ impl StatelessPipeline for ExecutionSchedulePhase { }, }; - let fut = if pipeline_enabled { - for b in &ordered_blocks { - if let Some(tx) = b.pipeline_tx().lock().as_mut() { - tx.rand_tx.take().map(|tx| tx.send(b.randomness().cloned())); - } + for b in &ordered_blocks { + if let Some(tx) = b.pipeline_tx().lock().as_mut() { + tx.rand_tx.take().map(|tx| tx.send(b.randomness().cloned())); } + } - async move { - for b in ordered_blocks.iter_mut() { - let (compute_result, execution_time) = b.wait_for_compute_result().await?; - b.set_compute_result(compute_result, execution_time); - } - Ok(ordered_blocks) - } - .boxed() - } else { - // Call schedule_compute() for each block here (not in the fut being returned) to - // make sure they are scheduled in order. - let mut futs = vec![]; - for b in &ordered_blocks { - let fut = self - .execution_proxy - .schedule_compute( - b.block(), - b.parent_id(), - b.randomness().cloned(), - b.qc(), - lifetime_guard.spawn(()), - ) - .await; - futs.push(fut) + let fut = async move { + for b in ordered_blocks.iter_mut() { + let (compute_result, execution_time) = b.wait_for_compute_result().await?; + b.set_compute_result(compute_result, execution_time); } - - // In the future being returned, wait for the compute results in order. 
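The ordering contract spelled out in the old comment above survives the rewrite: whether through the deleted spawn-based path or the new in-place future, per-block compute results are consumed strictly in block order, so a child block's result never surfaces before its parent's. A minimal sketch of that contract, with oneshot receivers standing in for the per-block compute futures:

    /// Awaits per-block results strictly in submission order, even if the
    /// underlying work was kicked off concurrently.
    async fn wait_in_order(
        pending: Vec<tokio::sync::oneshot::Receiver<u64>>,
    ) -> Result<Vec<u64>, tokio::sync::oneshot::error::RecvError> {
        let mut results = Vec::with_capacity(pending.len());
        for rx in pending {
            results.push(rx.await?);
        }
        Ok(results)
    }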
- tokio::task::spawn(async move { - let mut results = vec![]; - for (block, fut) in itertools::zip_eq(ordered_blocks, futs) { - debug!("try to receive compute result for block {}", block.id()); - block.set_execution_result(fut.await?); - results.push(block); - } - Ok(results) - }) - .map_err(ExecutorError::internal_err) - .and_then(|res| async { res }) - .boxed() - }; + Ok(ordered_blocks) + } + .boxed(); ExecutionWaitRequest { block_id, fut } } diff --git a/consensus/src/pipeline/persisting_phase.rs b/consensus/src/pipeline/persisting_phase.rs index e6c4d70713684..f8c9f9a50c560 100644 --- a/consensus/src/pipeline/persisting_phase.rs +++ b/consensus/src/pipeline/persisting_phase.rs @@ -2,11 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{ - network::NetworkSender, - pipeline::pipeline_phase::StatelessPipeline, - state_replication::{StateComputer, StateComputerCommitCallBackType}, -}; +use crate::{network::NetworkSender, pipeline::pipeline_phase::StatelessPipeline}; use aptos_consensus_types::{common::Round, pipelined_block::PipelinedBlock}; use aptos_executor_types::ExecutorResult; use aptos_types::{epoch_change::EpochChangeProof, ledger_info::LedgerInfoWithSignatures}; @@ -24,7 +20,6 @@ use std::{ pub struct PersistingRequest { pub blocks: Vec>, pub commit_ledger_info: LedgerInfoWithSignatures, - pub callback: StateComputerCommitCallBackType, } impl Debug for PersistingRequest { @@ -46,19 +41,12 @@ impl Display for PersistingRequest { pub type PersistingResponse = ExecutorResult; pub struct PersistingPhase { - persisting_handle: Arc, commit_msg_tx: Arc, } impl PersistingPhase { - pub fn new( - persisting_handle: Arc, - commit_msg_tx: Arc, - ) -> Self { - Self { - persisting_handle, - commit_msg_tx, - } + pub fn new(commit_msg_tx: Arc) -> Self { + Self { commit_msg_tx } } } @@ -73,31 +61,18 @@ impl StatelessPipeline for PersistingPhase { let PersistingRequest { blocks, commit_ledger_info, - callback, } = req; - let response = if blocks - .last() - .expect("Blocks can't be empty") - .pipeline_enabled() - { - for b in &blocks { - if let Some(tx) = b.pipeline_tx().lock().as_mut() { - tx.commit_proof_tx - .take() - .map(|tx| tx.send(commit_ledger_info.clone())); - } - b.wait_for_commit_ledger().await; + for b in &blocks { + if let Some(tx) = b.pipeline_tx().lock().as_mut() { + tx.commit_proof_tx + .take() + .map(|tx| tx.send(commit_ledger_info.clone())); } + b.wait_for_commit_ledger().await; + } - Ok(blocks.last().expect("Blocks can't be empty").round()) - } else { - let round = commit_ledger_info.ledger_info().round(); - self.persisting_handle - .commit(blocks, commit_ledger_info.clone(), callback) - .await - .map(|_| round) - }; + let response = Ok(blocks.last().expect("Blocks can't be empty").round()); if commit_ledger_info.ledger_info().ends_epoch() { self.commit_msg_tx .send_epoch_change(EpochChangeProof::new(vec![commit_ledger_info], false)) diff --git a/consensus/src/pipeline/pipeline_builder.rs b/consensus/src/pipeline/pipeline_builder.rs index 749698f469793..4255267a45e25 100644 --- a/consensus/src/pipeline/pipeline_builder.rs +++ b/consensus/src/pipeline/pipeline_builder.rs @@ -5,7 +5,6 @@ use crate::{ block_preparer::BlockPreparer, block_storage::tracing::{observe_block, BlockStage}, counters::{self, update_counters_for_block, update_counters_for_compute_result}, - execution_pipeline::SIG_VERIFY_POOL, monitor, payload_manager::TPayloadManager, txn_notifier::TxnNotifier, @@ -37,12 +36,14 @@ use 
aptos_types::{ randomness::Randomness, transaction::{ signature_verified_transaction::{SignatureVerifiedTransaction, TransactionProvider}, - SignedTransaction, Transaction, + AuxiliaryInfo, EphemeralAuxiliaryInfo, PersistedAuxiliaryInfo, SignedTransaction, + Transaction, }, validator_signer::ValidatorSigner, }; use futures::FutureExt; use move_core_types::account_address::AccountAddress; +use once_cell::sync::Lazy; use rayon::prelude::*; use std::{ future::Future, @@ -50,6 +51,15 @@ use std::{ time::{Duration, Instant}, }; use tokio::{select, sync::oneshot, task::AbortHandle}; +static SIG_VERIFY_POOL: Lazy> = Lazy::new(|| { + Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(16) + .thread_name(|index| format!("signature-checker-{}", index)) + .build() + .expect("Failed to create signature verification thread pool"), + ) +}); /// Status to help synchornize the pipeline and sync_manager /// It is used to track the round of the block that could be pre-committed and sync manager decides @@ -564,11 +574,30 @@ impl PipelineBuilder { user_txns.as_ref().clone(), ] .concat(); + let proposer_index = block + .author() + .and_then(|proposer| validator.iter().position(|&v| v == proposer)); + let auxiliary_info = txns + .iter() + .map(|txn| { + txn.borrow_into_inner().try_as_signed_user_txn().map_or( + AuxiliaryInfo::new_empty(), + |_| { + AuxiliaryInfo::new( + PersistedAuxiliaryInfo::None, + proposer_index.map(|index| EphemeralAuxiliaryInfo { + proposer_index: index as u64, + }), + ) + }, + ) + }) + .collect(); let start = Instant::now(); tokio::task::spawn_blocking(move || { executor .execute_and_update_state( - (block.id(), txns).into(), + (block.id(), txns, auxiliary_info).into(), block.parent_id(), onchain_execution_config, ) diff --git a/consensus/src/pipeline/tests/buffer_manager_tests.rs b/consensus/src/pipeline/tests/buffer_manager_tests.rs index df8b01ba3e569..29868cf17f687 100644 --- a/consensus/src/pipeline/tests/buffer_manager_tests.rs +++ b/consensus/src/pipeline/tests/buffer_manager_tests.rs @@ -20,8 +20,7 @@ use crate::{ tests::test_utils::prepare_executed_blocks_with_ledger_info, }, test_utils::{ - consensus_runtime, timed_block_on, EmptyStateComputer, MockStorage, - RandomComputeResultStateComputer, + consensus_runtime, timed_block_on, MockStorage, RandomComputeResultStateComputer, }, }; use aptos_bounded_executor::BoundedExecutor; @@ -127,8 +126,7 @@ pub fn prepare_buffer_manager( (AccountAddress, IncomingCommitRequest), >(QueueStyle::FIFO, channel_size, None); - let (result_tx, result_rx) = create_channel::(); - let state_computer = Arc::new(EmptyStateComputer::new(result_tx)); + let (_result_tx, result_rx) = create_channel::(); let (block_tx, block_rx) = create_channel::(); let (buffer_reset_tx, buffer_reset_rx) = create_channel::(); @@ -144,11 +142,9 @@ pub fn prepare_buffer_manager( buffer_manager, ) = prepare_phases_and_buffer_manager( author, - mocked_execution_proxy, Arc::new(Mutex::new(safety_rules)), network, msg_rx, - state_computer, block_rx, buffer_reset_rx, Arc::new(EpochState { @@ -162,7 +158,6 @@ pub fn prepare_buffer_manager( ConsensusObserverConfig::default(), None, 100, - true, ); ( @@ -281,6 +276,7 @@ async fn assert_results( } #[test] +#[ignore] fn buffer_manager_happy_path_test() { // happy path let ( @@ -326,7 +322,6 @@ fn buffer_manager_happy_path_test() { .send(OrderedBlocks { ordered_blocks: batches[i].clone(), ordered_proof: proofs[i].clone(), - callback: Box::new(move |_, _| {}), }) .await .ok(); @@ -346,6 +341,7 @@ fn 
buffer_manager_happy_path_test() { } #[test] +#[ignore] fn buffer_manager_sync_test() { // happy path let ( @@ -393,7 +389,6 @@ fn buffer_manager_sync_test() { .send(OrderedBlocks { ordered_blocks: batches[i].clone(), ordered_proof: proofs[i].clone(), - callback: Box::new(move |_, _| {}), }) .await .ok(); @@ -423,7 +418,6 @@ fn buffer_manager_sync_test() { .send(OrderedBlocks { ordered_blocks: batches[i].clone(), ordered_proof: proofs[i].clone(), - callback: Box::new(move |_, _| {}), }) .await .ok(); diff --git a/consensus/src/pipeline/tests/execution_phase_tests.rs b/consensus/src/pipeline/tests/execution_phase_tests.rs deleted file mode 100644 index 807a22ca10cb7..0000000000000 --- a/consensus/src/pipeline/tests/execution_phase_tests.rs +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - pipeline::{ - buffer_manager::create_channel, - execution_schedule_phase::{ExecutionRequest, ExecutionSchedulePhase}, - execution_wait_phase::{ExecutionResponse, ExecutionWaitPhase}, - pipeline_phase::{CountedRequest, PipelinePhase, StatelessPipeline}, - tests::phase_tester::PhaseTester, - }, - state_replication::StateComputer, - test_utils::{consensus_runtime, RandomComputeResultStateComputer}, -}; -use aptos_consensus_types::{ - block::{block_test_utils::certificate_for_genesis, Block}, - common::Payload, - pipelined_block::PipelinedBlock, - quorum_cert::QuorumCert, -}; -use aptos_crypto::HashValue; -use aptos_executor_types::{state_compute_result::StateComputeResult, ExecutorError}; -use aptos_types::{ledger_info::LedgerInfo, validator_verifier::random_validator_verifier}; -use async_trait::async_trait; -use std::sync::{ - atomic::{AtomicBool, AtomicU64}, - Arc, -}; - -// ExecutionSchedulePhase and ExecutionWaitPhase chained together. -// In BufferManager they are chained through the main loop. 
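Back in the pipeline_builder.rs hunk above, every transaction now gets an AuxiliaryInfo: signed user transactions carry the proposer's index in the validator set, while everything else (block metadata, validator transactions) gets an empty record. The shape of that mapping, with simplified stand-in types:

    #[derive(Debug, PartialEq)]
    enum Aux {
        Empty,
        ProposerIndex(u64),
    }

    fn auxiliary_info(
        txn_is_user: &[bool],
        validators: &[&str],
        proposer: Option<&str>,
    ) -> Vec<Aux> {
        // position() recovers the proposer's index in the validator set, as
        // the real code does with `validator.iter().position(...)`.
        let proposer_index = proposer
            .and_then(|p| validators.iter().position(|v| *v == p))
            .map(|i| i as u64);
        txn_is_user
            .iter()
            .map(|is_user| match (*is_user, proposer_index) {
                (true, Some(i)) => Aux::ProposerIndex(i),
                _ => Aux::Empty,
            })
            .collect()
    }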
-pub struct ExecutionPhaseForTest { - schedule_phase: ExecutionSchedulePhase, - wait_phase: ExecutionWaitPhase, -} - -impl ExecutionPhaseForTest { - pub fn new(execution_proxy: Arc) -> Self { - let schedule_phase = ExecutionSchedulePhase::new(execution_proxy); - let wait_phase = ExecutionWaitPhase; - Self { - schedule_phase, - wait_phase, - } - } -} - -#[async_trait] -impl StatelessPipeline for ExecutionPhaseForTest { - type Request = ExecutionRequest; - type Response = ExecutionResponse; - - const NAME: &'static str = "execution"; - - async fn process(&self, req: ExecutionRequest) -> ExecutionResponse { - let wait_req = self.schedule_phase.process(req).await; - self.wait_phase.process(wait_req).await - } -} - -pub fn prepare_execution_phase() -> (HashValue, ExecutionPhaseForTest) { - let execution_proxy = Arc::new(RandomComputeResultStateComputer::new()); - let random_hash_value = execution_proxy.get_root_hash(); - let execution_phase = ExecutionPhaseForTest::new(execution_proxy); - - (random_hash_value, execution_phase) -} - -fn dummy_guard() -> CountedRequest<()> { - CountedRequest::new((), Arc::new(AtomicU64::new(0))) -} - -fn add_execution_phase_test_cases( - phase_tester: &mut PhaseTester, - random_hash_value: HashValue, -) { - let genesis_qc = certificate_for_genesis(); - let (signers, _validators) = random_validator_verifier(1, None, false); - let block = Block::new_proposal( - Payload::empty(false, true), - 1, - 1, - genesis_qc, - &signers[0], - Vec::new(), - ) - .unwrap(); - - // happy path - phase_tester.add_test_case( - ExecutionRequest { - ordered_blocks: vec![Arc::new(PipelinedBlock::new( - block, - vec![], - StateComputeResult::new_dummy(), - ))], - lifetime_guard: dummy_guard(), - }, - Box::new(move |resp| { - assert_eq!( - resp.inner.unwrap()[0].compute_result().root_hash(), - random_hash_value - ); - }), - ); - - // empty block - phase_tester.add_test_case( - ExecutionRequest { - ordered_blocks: vec![], - lifetime_guard: dummy_guard(), - }, - Box::new(move |resp| assert!(matches!(resp.inner, Err(ExecutorError::EmptyBlocks)))), - ); - - // bad parent id - let bad_qc = QuorumCert::certificate_for_genesis_from_ledger_info( - &LedgerInfo::mock_genesis(None), - random_hash_value, - ); - let bad_block = Block::new_proposal( - Payload::empty(false, true), - 1, - 1, - bad_qc, - &signers[0], - Vec::new(), - ) - .unwrap(); - phase_tester.add_test_case( - ExecutionRequest { - ordered_blocks: vec![Arc::new(PipelinedBlock::new( - bad_block, - vec![], - StateComputeResult::new_dummy(), - ))], - lifetime_guard: dummy_guard(), - }, - Box::new(move |resp| assert!(matches!(resp.inner, Err(ExecutorError::BlockNotFound(_))))), - ); -} - -#[test] -fn execution_phase_tests() { - let runtime = consensus_runtime(); - - // unit tests - let (random_hash_value, execution_phase) = prepare_execution_phase(); - let mut unit_phase_tester = PhaseTester::::new(); - add_execution_phase_test_cases(&mut unit_phase_tester, random_hash_value); - unit_phase_tester.unit_test(&execution_phase); - - // e2e tests - let (in_channel_tx, in_channel_rx) = create_channel::>(); - let (out_channel_tx, out_channel_rx) = create_channel::(); - let reset_flag = Arc::new(AtomicBool::new(false)); - - let execution_phase_pipeline = PipelinePhase::new( - in_channel_rx, - Some(out_channel_tx), - Box::new(execution_phase), - reset_flag, - ); - - runtime.spawn(execution_phase_pipeline.start()); - - let mut e2e_phase_tester = PhaseTester::::new(); - add_execution_phase_test_cases(&mut e2e_phase_tester, random_hash_value); - 
e2e_phase_tester.e2e_test(in_channel_tx, out_channel_rx); -} diff --git a/consensus/src/pipeline/tests/mod.rs b/consensus/src/pipeline/tests/mod.rs index 2d68165141586..9f5bdee653e14 100644 --- a/consensus/src/pipeline/tests/mod.rs +++ b/consensus/src/pipeline/tests/mod.rs @@ -3,7 +3,6 @@ // SPDX-License-Identifier: Apache-2.0 mod buffer_manager_tests; -mod execution_phase_tests; mod integration_tests; mod ordering_state_computer_tests; mod phase_tester; diff --git a/consensus/src/pipeline/tests/test_utils.rs b/consensus/src/pipeline/tests/test_utils.rs index add86853cf67a..de19bc58fa7fe 100644 --- a/consensus/src/pipeline/tests/test_utils.rs +++ b/consensus/src/pipeline/tests/test_utils.rs @@ -46,7 +46,7 @@ pub fn prepare_safety_rules() -> (Arc>, Vec Self { Self { my_peer_id, @@ -68,6 +71,7 @@ impl BatchCoordinator { max_total_txns, max_total_bytes, batch_expiry_gap_when_init_usecs, + transaction_filter_config, } } @@ -145,7 +149,7 @@ impl BatchCoordinator { Ok(()) } - async fn handle_batches_msg(&mut self, author: PeerId, batches: Vec) { + pub(crate) async fn handle_batches_msg(&mut self, author: PeerId, batches: Vec) { if let Err(e) = self.ensure_max_limits(&batches) { error!("Batch from {}: {}", author, e); counters::RECEIVED_BATCH_MAX_LIMIT_FAILED.inc(); @@ -157,6 +161,32 @@ impl BatchCoordinator { return; }; + // Filter the transactions in the batches. If any transaction is rejected, + // the message will be dropped, and all batches will be rejected. + if self.transaction_filter_config.is_enabled() { + let transaction_filter = &self.transaction_filter_config.batch_transaction_filter(); + for batch in batches.iter() { + for transaction in batch.txns() { + if !transaction_filter.allows_transaction( + batch.batch_info().batch_id(), + batch.author(), + batch.digest(), + transaction, + ) { + error!( + "Transaction {}, in batch {}, from {}, was rejected by the filter. 
Dropping {} batches!", + transaction.committed_hash(), + batch.batch_info().batch_id(), + author.short_str().as_str(), + batches.len() + ); + counters::RECEIVED_BATCH_REJECTED_BY_FILTER.inc(); + return; + } + } + } + } + let approx_created_ts_usecs = batch .info() .expiration() diff --git a/consensus/src/quorum_store/batch_generator.rs b/consensus/src/quorum_store/batch_generator.rs index 22041256998e4..cba8e670d97e3 100644 --- a/consensus/src/quorum_store/batch_generator.rs +++ b/consensus/src/quorum_store/batch_generator.rs @@ -14,12 +14,12 @@ use crate::{ use aptos_config::config::QuorumStoreConfig; use aptos_consensus_types::{ common::{TransactionInProgress, TransactionSummary}, - proof_of_store::{BatchId, BatchInfo}, + proof_of_store::BatchInfo, }; use aptos_experimental_runtimes::thread_manager::optimal_min_len; use aptos_logger::prelude::*; use aptos_mempool::QuorumStoreRequest; -use aptos_types::{transaction::SignedTransaction, PeerId}; +use aptos_types::{quorum_store::BatchId, transaction::SignedTransaction, PeerId}; use futures_channel::mpsc::Sender; use rayon::prelude::*; use std::{ diff --git a/consensus/src/quorum_store/counters.rs b/consensus/src/quorum_store/counters.rs index cf33841f6afd4..d626f4930f55d 100644 --- a/consensus/src/quorum_store/counters.rs +++ b/consensus/src/quorum_store/counters.rs @@ -3,6 +3,7 @@ #![allow(clippy::unwrap_used)] +use aptos_consensus_types::block::Block; use aptos_metrics_core::{ exponential_buckets, op_counters::DurationHistogram, register_avg_counter, register_histogram, register_histogram_vec, register_int_counter, register_int_counter_vec, Histogram, @@ -150,15 +151,75 @@ pub static BATCH_GENERATOR_MAIN_LOOP: Lazy = Lazy::new(|| { /// Histograms /// Histogram for the number of batches per (committed) blocks. -pub static NUM_BATCH_PER_BLOCK: Lazy = Lazy::new(|| { - register_histogram!( - "quorum_store_num_batch_per_block", +/// types: proof, inline_batch, opt_batch +pub static BATCH_NUM_PER_BLOCK: Lazy = Lazy::new(|| { + register_histogram_vec!( + "quorum_store_batch_num_per_block", "Histogram for the number of batches per (committed) blocks.", + &["type"], TRANSACTION_COUNT_BUCKETS.clone(), ) .unwrap() }); +/// Histogram for the number of txns per batch type in (committed) blocks. +/// types: proof, inline_batch, opt_batch +pub static TXN_NUM_PER_BATCH_TYPE_PER_BLOCK: Lazy = Lazy::new(|| { + register_histogram_vec!( + "quorum_store_txn_num_per_batch_type_per_block", + "Histogram for the number of txns per batch type in (committed) blocks.", + &["type"], + TRANSACTION_COUNT_BUCKETS.clone(), + ) + .unwrap() +}); + +/// Histogram for the txn bytes per batch type in (committed) blocks. 
+/// types: proof, inline_batch, opt_batch +pub static TXN_BYTES_PER_BATCH_TYPE_PER_BLOCK: Lazy = Lazy::new(|| { + register_histogram_vec!( + "quorum_store_txn_bytes_per_batch_type_per_block", + "Histogram for the txn bytes per batch type in (committed) blocks.", + &["type"], + BYTE_BUCKETS.clone(), + ) + .unwrap() +}); + +pub fn update_batch_stats(block: &Block) { + let (proof_num, proof_txn_num, proof_txn_bytes) = block.proof_stats(); + BATCH_NUM_PER_BLOCK + .with_label_values(&["proof"]) + .observe(proof_num as f64); + TXN_NUM_PER_BATCH_TYPE_PER_BLOCK + .with_label_values(&["proof"]) + .observe(proof_txn_num as f64); + TXN_BYTES_PER_BATCH_TYPE_PER_BLOCK + .with_label_values(&["proof"]) + .observe(proof_txn_bytes as f64); + let (inline_batch_num, inline_batch_txn_num, inline_batch_txn_bytes) = + block.inline_batch_stats(); + BATCH_NUM_PER_BLOCK + .with_label_values(&["inline_batch"]) + .observe(inline_batch_num as f64); + TXN_NUM_PER_BATCH_TYPE_PER_BLOCK + .with_label_values(&["inline_batch"]) + .observe(inline_batch_txn_num as f64); + TXN_BYTES_PER_BATCH_TYPE_PER_BLOCK + .with_label_values(&["inline_batch"]) + .observe(inline_batch_txn_bytes as f64); + let (opt_batch_num, opt_batch_txn_num, opt_batch_txn_bytes) = block.opt_batch_stats(); + BATCH_NUM_PER_BLOCK + .with_label_values(&["opt_batch"]) + .observe(opt_batch_num as f64); + TXN_NUM_PER_BATCH_TYPE_PER_BLOCK + .with_label_values(&["opt_batch"]) + .observe(opt_batch_txn_num as f64); + TXN_BYTES_PER_BATCH_TYPE_PER_BLOCK + .with_label_values(&["opt_batch"]) + .observe(opt_batch_txn_bytes as f64); +} + /// Histogram for the number of transactions per batch. static NUM_TXN_PER_BATCH: Lazy = Lazy::new(|| { register_histogram_vec!( @@ -652,6 +713,15 @@ pub static RECEIVED_BATCH_MAX_LIMIT_FAILED: Lazy = Lazy::new(|| { .unwrap() }); +/// Count of the batch messages that contained transactions rejected by the filter +pub static RECEIVED_BATCH_REJECTED_BY_FILTER: Lazy = Lazy::new(|| { + register_int_counter!( + "quorum_store_received_batch_rejected_by_filter", + "Count of the batch messages that contained transactions rejected by the filter" + ) + .unwrap() +}); + /// Count of the missed batches when execute. 
pub static MISSED_BATCHES_COUNT: Lazy = Lazy::new(|| { register_int_counter!( diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index 33ec29b08a499..77e78de8b002d 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -28,7 +28,7 @@ use crate::{ round_manager::VerifiedEvent, }; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; -use aptos_config::config::QuorumStoreConfig; +use aptos_config::config::{BatchTransactionFilterConfig, QuorumStoreConfig}; use aptos_consensus_types::{ common::Author, proof_of_store::ProofCache, request_response::GetPayloadCommand, }; @@ -125,6 +125,7 @@ pub struct InnerBuilder { author: Author, num_validators: u64, config: QuorumStoreConfig, + transaction_filter_config: BatchTransactionFilterConfig, consensus_to_quorum_store_receiver: Receiver, quorum_store_to_mempool_sender: Sender, mempool_txn_pull_timeout_ms: u64, @@ -160,6 +161,7 @@ impl InnerBuilder { author: Author, num_validators: u64, config: QuorumStoreConfig, + transaction_filter_config: BatchTransactionFilterConfig, consensus_to_quorum_store_receiver: Receiver, quorum_store_to_mempool_sender: Sender, mempool_txn_pull_timeout_ms: u64, @@ -199,6 +201,7 @@ impl InnerBuilder { author, num_validators, config, + transaction_filter_config, consensus_to_quorum_store_receiver, quorum_store_to_mempool_sender, mempool_txn_pull_timeout_ms, @@ -327,6 +330,7 @@ impl InnerBuilder { self.config.receiver_max_total_txns as u64, self.config.receiver_max_total_bytes as u64, self.config.batch_expiry_gap_when_init_usecs, + self.transaction_filter_config.clone(), ); #[allow(unused_variables)] let name = format!("batch_coordinator-{}", i); diff --git a/consensus/src/quorum_store/quorum_store_db.rs b/consensus/src/quorum_store/quorum_store_db.rs index bc78d6ed456bb..38c81a5c3dee5 100644 --- a/consensus/src/quorum_store/quorum_store_db.rs +++ b/consensus/src/quorum_store/quorum_store_db.rs @@ -9,10 +9,14 @@ use crate::{ }, }; use anyhow::Result; -use aptos_consensus_types::proof_of_store::BatchId; use aptos_crypto::HashValue; use aptos_logger::prelude::*; -use aptos_schemadb::{batch::SchemaBatch, Options, DB}; +use aptos_schemadb::{ + batch::{SchemaBatch, WriteBatch}, + schema::Schema, + Options, DB, +}; +use aptos_types::quorum_store::BatchId; use std::{collections::HashMap, path::Path, time::Instant}; pub trait QuorumStoreStorage: Sync + Send { @@ -59,6 +63,15 @@ impl QuorumStoreDB { Self { db } } + + /// Relaxed writes instead of sync writes. + pub fn put(&self, key: &S::Key, value: &S::Value) -> Result<(), DbError> { + // Not necessary to use a batch, but we'd like a central place to bump counters. + let mut batch = self.db.new_native_batch(); + batch.put::(key, value)?; + self.db.write_schemas_relaxed(batch)?; + Ok(()) + } } impl QuorumStoreStorage for QuorumStoreDB { @@ -68,7 +81,7 @@ impl QuorumStoreStorage for QuorumStoreDB { trace!("QS: db delete digest {}", digest); batch.delete::(digest)?; } - self.db.write_schemas(batch)?; + self.db.write_schemas_relaxed(batch)?; Ok(()) } @@ -85,7 +98,7 @@ impl QuorumStoreStorage for QuorumStoreDB { batch.digest(), batch.expiration() ); - Ok(self.db.put::(batch.digest(), &batch)?) 
+ self.put::(batch.digest(), &batch) } fn get_batch(&self, digest: &HashValue) -> Result, DbError> { @@ -95,7 +108,7 @@ impl QuorumStoreStorage for QuorumStoreDB { fn delete_batch_id(&self, epoch: u64) -> Result<(), DbError> { let mut batch = SchemaBatch::new(); batch.delete::(&epoch)?; - self.db.write_schemas(batch)?; + self.db.write_schemas_relaxed(batch)?; Ok(()) } @@ -118,7 +131,7 @@ impl QuorumStoreStorage for QuorumStoreDB { } fn save_batch_id(&self, epoch: u64, batch_id: BatchId) -> Result<(), DbError> { - Ok(self.db.put::(&epoch, &batch_id)?) + self.put::(&epoch, &batch_id) } } diff --git a/consensus/src/quorum_store/schema.rs b/consensus/src/quorum_store/schema.rs index f6213c463c9c0..4de503c9cc3c5 100644 --- a/consensus/src/quorum_store/schema.rs +++ b/consensus/src/quorum_store/schema.rs @@ -3,12 +3,12 @@ use crate::quorum_store::types::PersistedValue; use anyhow::Result; -use aptos_consensus_types::proof_of_store::BatchId; use aptos_crypto::HashValue; use aptos_schemadb::{ schema::{KeyCodec, Schema, ValueCodec}, ColumnFamilyName, }; +use aptos_types::quorum_store::BatchId; pub(crate) const BATCH_CF_NAME: ColumnFamilyName = "batch"; pub(crate) const BATCH_ID_CF_NAME: ColumnFamilyName = "batch_ID"; diff --git a/consensus/src/quorum_store/tests/batch_coordinator_test.rs b/consensus/src/quorum_store/tests/batch_coordinator_test.rs new file mode 100644 index 0000000000000..999e689c8cf39 --- /dev/null +++ b/consensus/src/quorum_store/tests/batch_coordinator_test.rs @@ -0,0 +1,207 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + network::NetworkSender, + network_interface::ConsensusNetworkClient, + quorum_store::{ + batch_coordinator::BatchCoordinator, batch_generator::BatchGeneratorCommand, + batch_store::BatchStore, proof_manager::ProofManagerCommand, + quorum_store_db::MockQuorumStoreDB, types::Batch, + }, +}; +use aptos_config::config::BatchTransactionFilterConfig; +use aptos_consensus_types::common::Author; +use aptos_crypto::{ed25519::Ed25519PrivateKey, PrivateKey, SigningKey, Uniform}; +use aptos_network::application::{interface::NetworkClient, storage::PeersAndMetadata}; +use aptos_transaction_filters::batch_transaction_filter::BatchTransactionFilter; +use aptos_types::{ + chain_id::ChainId, + quorum_store::BatchId, + transaction::{RawTransaction, Script, SignedTransaction, TransactionPayload}, + validator_signer::ValidatorSigner, + validator_verifier::ValidatorVerifier, + PeerId, +}; +use futures::FutureExt; +use move_core_types::account_address::AccountAddress; +use std::{collections::HashMap, sync::Arc, time::Duration}; +use tokio::{ + sync::mpsc::{channel, Sender}, + time::timeout, +}; + +#[tokio::test(flavor = "multi_thread")] +async fn test_handle_batches_msg_filter_disabled() { + // Create the message channels + let (sender_to_proof_manager, _receiver_for_proof_manager) = channel(100); + let (sender_to_batch_generator, mut receiver_for_batch_generator) = channel(100); + + // Create a filtering config with filtering disabled + let transaction_filter = BatchTransactionFilter::empty(); + let transaction_filter_config = BatchTransactionFilterConfig::new(false, transaction_filter); + + // Create a batch coordinator + let mut batch_coordinator = create_batch_coordinator( + sender_to_proof_manager, + sender_to_batch_generator, + transaction_filter_config, + ); + + // Create a single batch with some transactions + let transactions = create_signed_transactions(10); + let account_address = AccountAddress::random(); + let batch = 
Batch::new( + BatchId::new_for_test(100), + transactions.clone(), + 1, + 1, + account_address, + 0, + ); + + // Handle a batches message + batch_coordinator + .handle_batches_msg(account_address, vec![batch.clone()]) + .await; + + // Verify that the receiver for the batch generator received the batch + let received_message = timeout(Duration::from_secs(10), receiver_for_batch_generator.recv()) + .await + .unwrap() + .unwrap(); + if let BatchGeneratorCommand::RemoteBatch(remote_batch) = received_message { + assert_eq!(remote_batch.batch_info(), batch.batch_info()); + } else { + panic!( + "Expected a RemoteBatch command! Received: {:?}", + received_message + ); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_handle_batches_msg_filter_enabled() { + // Create the message channels + let (sender_to_proof_manager, _receiver_for_proof_manager) = channel(100); + let (sender_to_batch_generator, mut receiver_for_batch_generator) = channel(100); + + // Create a filtering config with filtering enabled (the first transaction sender is rejected) + let transactions = create_signed_transactions(10); + let transaction_filter = + BatchTransactionFilter::empty().add_sender_filter(false, transactions[0].sender()); + let transaction_filter_config = BatchTransactionFilterConfig::new(true, transaction_filter); + + // Create a batch coordinator + let mut batch_coordinator = create_batch_coordinator( + sender_to_proof_manager, + sender_to_batch_generator, + transaction_filter_config, + ); + + // Create a single batch + let account_address = AccountAddress::random(); + let batch = Batch::new( + BatchId::new_for_test(109), + transactions.clone(), + 1, + 1, + account_address, + 0, + ); + + // Handle a batches message + batch_coordinator + .handle_batches_msg(account_address, vec![batch]) + .await; + + // Verify that the receiver for the batch generator does not receive the batch + assert!(receiver_for_batch_generator.recv().now_or_never().is_none()); +} + +/// Creates and returns a new batch coordinator with the specified parameters +fn create_batch_coordinator( + sender_to_proof_manager: Sender, + sender_to_batch_generator: Sender, + transaction_filter_config: BatchTransactionFilterConfig, +) -> BatchCoordinator { + // Create the consensus network sender and batch store + let consensus_network_sender = create_consensus_network_sender(); + let batch_store = create_batch_store(); + + // Create the batch coordinator + BatchCoordinator::new( + PeerId::random(), + consensus_network_sender, + sender_to_proof_manager, + sender_to_batch_generator, + Arc::new(batch_store), + 10_000, + 10_000, + 10_000, + 10_000, + 10_000, + transaction_filter_config, + ) +} + +/// Creates and returns a mock batch store +fn create_batch_store() -> BatchStore { + let qs_storage = Arc::new(MockQuorumStoreDB::new()); + let validator_signer = ValidatorSigner::random(None); + BatchStore::new(0, false, 0, qs_storage, 0, 0, 0, validator_signer, 0) +} + +/// Creates and returns a mock consensus network sender +fn create_consensus_network_sender() -> NetworkSender { + // Create the consensus network client + let peers_and_metadata = PeersAndMetadata::new(&[]); + let network_client = + NetworkClient::new(vec![], vec![], HashMap::new(), peers_and_metadata.clone()); + let consensus_network_client = ConsensusNetworkClient::new(network_client.clone()); + + // Create the self sender and validator verifier + let (self_sender, _self_receiver) = aptos_channels::new_unbounded_test(); + let validator_verifier = 
Arc::new(ValidatorVerifier::new(vec![])); + + // Create a network sender + NetworkSender::new( + Author::random(), + consensus_network_client, + self_sender, + validator_verifier, + ) +} + +/// Creates and returns a raw transaction +fn create_raw_transaction() -> RawTransaction { + RawTransaction::new( + AccountAddress::random(), + 0, + TransactionPayload::Script(Script::new(vec![], vec![], vec![])), + 0, + 0, + 0, + ChainId::new(10), + ) +} + +/// Creates and returns the specified number of signed transactions +fn create_signed_transactions(num_transactions: u64) -> Vec { + let mut signed_transactions = Vec::new(); + + for _ in 0..num_transactions { + let raw_transaction = create_raw_transaction(); + let private_key_1 = Ed25519PrivateKey::generate_for_testing(); + let signature = private_key_1.sign(&raw_transaction).unwrap(); + + let signed_transaction = SignedTransaction::new( + raw_transaction.clone(), + private_key_1.public_key(), + signature.clone(), + ); + signed_transactions.push(signed_transaction); + } + + signed_transactions +} diff --git a/consensus/src/quorum_store/tests/batch_generator_test.rs b/consensus/src/quorum_store/tests/batch_generator_test.rs index 5ac4cda03fec3..a29d48226b246 100644 --- a/consensus/src/quorum_store/tests/batch_generator_test.rs +++ b/consensus/src/quorum_store/tests/batch_generator_test.rs @@ -14,10 +14,10 @@ use crate::{ use aptos_config::config::QuorumStoreConfig; use aptos_consensus_types::{ common::{TransactionInProgress, TransactionSummary}, - proof_of_store::{BatchId, SignedBatchInfo}, + proof_of_store::SignedBatchInfo, }; use aptos_mempool::{QuorumStoreRequest, QuorumStoreResponse}; -use aptos_types::transaction::SignedTransaction; +use aptos_types::{quorum_store::BatchId, transaction::SignedTransaction}; use futures::{ channel::mpsc::{channel, Receiver}, StreamExt, diff --git a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs index 04fd143a0c948..731aa730bedad 100644 --- a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs +++ b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs @@ -6,11 +6,14 @@ use crate::quorum_store::{ }; use aptos_consensus_types::{ common::TxnSummaryWithExpiration, - proof_of_store::{BatchId, BatchInfo, ProofOfStore}, + proof_of_store::{BatchInfo, ProofOfStore}, utils::PayloadTxnsSize, }; use aptos_crypto::HashValue; -use aptos_types::{aggregate_signature::AggregateSignature, transaction::ReplayProtector, PeerId}; +use aptos_types::{ + aggregate_signature::AggregateSignature, quorum_store::BatchId, transaction::ReplayProtector, + PeerId, +}; use maplit::hashset; use std::{collections::HashSet, time::Duration}; diff --git a/consensus/src/quorum_store/tests/batch_requester_test.rs b/consensus/src/quorum_store/tests/batch_requester_test.rs index 578d232e15110..1f171ce722bca 100644 --- a/consensus/src/quorum_store/tests/batch_requester_test.rs +++ b/consensus/src/quorum_store/tests/batch_requester_test.rs @@ -11,7 +11,7 @@ use crate::{ }; use aptos_consensus_types::{ common::Author, - proof_of_store::{BatchId, ProofOfStore, SignedBatchInfo}, + proof_of_store::{ProofOfStore, SignedBatchInfo}, }; use aptos_crypto::HashValue; use aptos_infallible::Mutex; @@ -19,6 +19,7 @@ use aptos_types::{ aggregate_signature::PartialSignatures, block_info::BlockInfo, ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + quorum_store::BatchId, validator_signer::ValidatorSigner, validator_verifier::{ValidatorConsensusInfo, ValidatorVerifier}, }; diff --git 
a/consensus/src/quorum_store/tests/batch_store_test.rs b/consensus/src/quorum_store/tests/batch_store_test.rs index 2992700b05dd2..a2704c2bf80c1 100644 --- a/consensus/src/quorum_store/tests/batch_store_test.rs +++ b/consensus/src/quorum_store/tests/batch_store_test.rs @@ -6,11 +6,11 @@ use crate::quorum_store::{ quorum_store_db::QuorumStoreDB, types::{PersistedValue, StorageMode}, }; -use aptos_consensus_types::proof_of_store::{BatchId, BatchInfo}; +use aptos_consensus_types::proof_of_store::BatchInfo; use aptos_crypto::HashValue; use aptos_temppath::TempPath; use aptos_types::{ - account_address::AccountAddress, transaction::SignedTransaction, + account_address::AccountAddress, quorum_store::BatchId, transaction::SignedTransaction, validator_verifier::random_validator_verifier, }; use claims::{assert_err, assert_ok, assert_ok_eq}; diff --git a/consensus/src/quorum_store/tests/mod.rs b/consensus/src/quorum_store/tests/mod.rs index fc7c97f6de9ab..00cb5daf8d6a2 100644 --- a/consensus/src/quorum_store/tests/mod.rs +++ b/consensus/src/quorum_store/tests/mod.rs @@ -1,6 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +mod batch_coordinator_test; mod batch_generator_test; mod batch_proof_queue_test; mod batch_requester_test; diff --git a/consensus/src/quorum_store/tests/proof_coordinator_test.rs b/consensus/src/quorum_store/tests/proof_coordinator_test.rs index 4bd4277f3efcc..efb2e40136dac 100644 --- a/consensus/src/quorum_store/tests/proof_coordinator_test.rs +++ b/consensus/src/quorum_store/tests/proof_coordinator_test.rs @@ -10,13 +10,12 @@ use crate::{ }, test_utils::{create_vec_signed_transactions, mock_quorum_store_sender::MockQuorumStoreSender}, }; -use aptos_consensus_types::proof_of_store::{ - BatchId, BatchInfo, SignedBatchInfo, SignedBatchInfoMsg, -}; +use aptos_consensus_types::proof_of_store::{BatchInfo, SignedBatchInfo, SignedBatchInfoMsg}; use aptos_crypto::HashValue; use aptos_executor_types::ExecutorResult; use aptos_types::{ - transaction::SignedTransaction, validator_verifier::random_validator_verifier, PeerId, + quorum_store::BatchId, transaction::SignedTransaction, + validator_verifier::random_validator_verifier, PeerId, }; use futures::future::Shared; use mini_moka::sync::Cache; diff --git a/consensus/src/quorum_store/tests/proof_manager_test.rs b/consensus/src/quorum_store/tests/proof_manager_test.rs index 1c10027d53ff0..4f3ba16eaec9b 100644 --- a/consensus/src/quorum_store/tests/proof_manager_test.rs +++ b/consensus/src/quorum_store/tests/proof_manager_test.rs @@ -6,12 +6,12 @@ use crate::quorum_store::{ }; use aptos_consensus_types::{ common::{Payload, PayloadFilter}, - proof_of_store::{BatchId, BatchInfo, ProofOfStore}, + proof_of_store::{BatchInfo, ProofOfStore}, request_response::{GetPayloadCommand, GetPayloadRequest, GetPayloadResponse}, utils::PayloadTxnsSize, }; use aptos_crypto::HashValue; -use aptos_types::{aggregate_signature::AggregateSignature, PeerId}; +use aptos_types::{aggregate_signature::AggregateSignature, quorum_store::BatchId, PeerId}; use futures::channel::oneshot; use std::{cmp::max, collections::HashSet}; diff --git a/consensus/src/quorum_store/tests/quorum_store_db_test.rs b/consensus/src/quorum_store/tests/quorum_store_db_test.rs index 2bdc060abc4a8..9d526499e976b 100644 --- a/consensus/src/quorum_store/tests/quorum_store_db_test.rs +++ b/consensus/src/quorum_store/tests/quorum_store_db_test.rs @@ -8,9 +8,8 @@ use crate::{ }, test_utils::create_vec_signed_transactions, }; -use 
aptos_consensus_types::proof_of_store::BatchId; use aptos_temppath::TempPath; -use aptos_types::account_address::AccountAddress; +use aptos_types::{account_address::AccountAddress, quorum_store::BatchId}; use claims::assert_ok; #[test] diff --git a/consensus/src/quorum_store/tests/types_test.rs b/consensus/src/quorum_store/tests/types_test.rs index 81bafe86959fd..7ec22efc24dd1 100644 --- a/consensus/src/quorum_store/tests/types_test.rs +++ b/consensus/src/quorum_store/tests/types_test.rs @@ -5,9 +5,9 @@ use crate::{ quorum_store::types::{Batch, BatchRequest}, test_utils::create_vec_signed_transactions, }; -use aptos_consensus_types::{common::BatchPayload, proof_of_store::BatchId}; +use aptos_consensus_types::common::BatchPayload; use aptos_crypto::{hash::CryptoHash, HashValue}; -use aptos_types::account_address::AccountAddress; +use aptos_types::{account_address::AccountAddress, quorum_store::BatchId}; use claims::{assert_err, assert_ok}; #[test] diff --git a/consensus/src/quorum_store/types.rs b/consensus/src/quorum_store/types.rs index 33f6f5b526de5..86ee45e8e695c 100644 --- a/consensus/src/quorum_store/types.rs +++ b/consensus/src/quorum_store/types.rs @@ -4,10 +4,13 @@ use anyhow::ensure; use aptos_consensus_types::{ common::{BatchPayload, TxnSummaryWithExpiration}, - proof_of_store::{BatchId, BatchInfo}, + proof_of_store::BatchInfo, }; use aptos_crypto::{hash::CryptoHash, HashValue}; -use aptos_types::{ledger_info::LedgerInfoWithSignatures, transaction::SignedTransaction, PeerId}; +use aptos_types::{ + ledger_info::LedgerInfoWithSignatures, quorum_store::BatchId, transaction::SignedTransaction, + PeerId, +}; use serde::{Deserialize, Serialize}; use std::{ fmt::{Display, Formatter}, diff --git a/consensus/src/quorum_store/utils.rs b/consensus/src/quorum_store/utils.rs index 74dcc0fdac05b..91e8d61d9e338 100644 --- a/consensus/src/quorum_store/utils.rs +++ b/consensus/src/quorum_store/utils.rs @@ -4,11 +4,11 @@ use crate::monitor; use aptos_consensus_types::{ common::{TransactionInProgress, TransactionSummary}, - proof_of_store::{BatchId, BatchInfo}, + proof_of_store::BatchInfo, }; use aptos_logger::prelude::*; use aptos_mempool::{QuorumStoreRequest, QuorumStoreResponse}; -use aptos_types::{transaction::SignedTransaction, PeerId}; +use aptos_types::{quorum_store::BatchId, transaction::SignedTransaction, PeerId}; use chrono::Utc; use futures::channel::{mpsc::Sender, oneshot}; use std::{ diff --git a/consensus/src/rand/rand_gen/test_utils.rs b/consensus/src/rand/rand_gen/test_utils.rs index 29b9f4aa1a1b9..6323fa70fa738 100644 --- a/consensus/src/rand/rand_gen/test_utils.rs +++ b/consensus/src/rand/rand_gen/test_utils.rs @@ -48,7 +48,6 @@ pub fn create_ordered_blocks(rounds: Vec) -> OrderedBlocks { LedgerInfo::mock_genesis(None), AggregateSignature::empty(), ), - callback: Box::new(move |_, _| {}), } } diff --git a/consensus/src/round_manager.rs b/consensus/src/round_manager.rs index b1244dda21aab..5a6ee5f9a1e74 100644 --- a/consensus/src/round_manager.rs +++ b/consensus/src/round_manager.rs @@ -40,6 +40,8 @@ use aptos_consensus_types::{ block::Block, block_data::BlockType, common::{Author, Round}, + opt_block_data::OptBlockData, + opt_proposal_msg::OptProposalMsg, order_vote::OrderVote, order_vote_msg::OrderVoteMsg, pipelined_block::PipelinedBlock, @@ -73,10 +75,12 @@ use aptos_types::{ PeerId, }; use fail::fail_point; -use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt}; +use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, 
SinkExt, StreamExt}; use lru::LruCache; use serde::Serialize; -use std::{mem::Discriminant, pin::Pin, sync::Arc, time::Duration}; +use std::{ + collections::BTreeMap, mem::Discriminant, ops::Add, pin::Pin, sync::Arc, time::Duration, +}; use tokio::{ sync::oneshot as TokioOneshot, time::{sleep, Instant}, @@ -92,6 +96,7 @@ pub enum UnverifiedEvent { BatchMsg(Box), SignedBatchInfo(Box), ProofOfStoreMsg(Box), + OptProposalMsg(Box), } pub const BACK_PRESSURE_POLLING_INTERVAL_MS: u64 = 10; @@ -119,6 +124,15 @@ impl UnverifiedEvent { } VerifiedEvent::ProposalMsg(p) }, + UnverifiedEvent::OptProposalMsg(p) => { + if !self_message { + p.verify(peer_id, validator, proof_cache, quorum_store_enabled)?; + counters::VERIFY_MSG + .with_label_values(&["opt_proposal"]) + .observe(start_time.elapsed().as_secs_f64()); + } + VerifiedEvent::OptProposalMsg(p) + }, UnverifiedEvent::VoteMsg(v) => { if !self_message { v.verify(peer_id, validator)?; @@ -186,6 +200,7 @@ impl UnverifiedEvent { pub fn epoch(&self) -> anyhow::Result { match self { UnverifiedEvent::ProposalMsg(p) => Ok(p.epoch()), + UnverifiedEvent::OptProposalMsg(p) => Ok(p.epoch()), UnverifiedEvent::VoteMsg(v) => Ok(v.epoch()), UnverifiedEvent::OrderVoteMsg(v) => Ok(v.epoch()), UnverifiedEvent::SyncInfo(s) => Ok(s.epoch()), @@ -201,6 +216,7 @@ impl From for UnverifiedEvent { fn from(value: ConsensusMsg) -> Self { match value { ConsensusMsg::ProposalMsg(m) => UnverifiedEvent::ProposalMsg(m), + ConsensusMsg::OptProposalMsg(m) => UnverifiedEvent::OptProposalMsg(m), ConsensusMsg::VoteMsg(m) => UnverifiedEvent::VoteMsg(m), ConsensusMsg::OrderVoteMsg(m) => UnverifiedEvent::OrderVoteMsg(m), ConsensusMsg::SyncInfo(m) => UnverifiedEvent::SyncInfo(m), @@ -229,11 +245,12 @@ pub enum VerifiedEvent { LocalTimeout(Round), // Shutdown the NetworkListener Shutdown(TokioOneshot::Sender<()>), + OptProposalMsg(Box), } #[cfg(test)] -#[path = "round_manager_test.rs"] -mod round_manager_test; +#[path = "round_manager_tests/mod.rs"] +mod round_manager_tests; #[cfg(feature = "fuzzing")] #[path = "round_manager_fuzzing.rs"] @@ -270,6 +287,8 @@ pub struct RoundManager { Pin, Block, Instant)> + Send>>, >, proposal_status_tracker: Arc, + pending_opt_proposals: BTreeMap, + opt_proposal_loopback_tx: aptos_channels::UnboundedSender, } impl RoundManager { @@ -290,6 +309,7 @@ impl RoundManager { jwk_consensus_config: OnChainJWKConsensusConfig, fast_rand_config: Option, proposal_status_tracker: Arc, + opt_proposal_loopback_tx: aptos_channels::UnboundedSender, ) -> Self { // when decoupled execution is false, // the counter is still static. @@ -321,6 +341,8 @@ impl RoundManager { blocks_with_broadcasted_fast_shares: LruCache::new(5), futures: FuturesUnordered::new(), proposal_status_tracker, + pending_opt_proposals: BTreeMap::new(), + opt_proposal_loopback_tx, } } @@ -403,7 +425,23 @@ impl RoundManager { self.proposal_status_tracker .push(new_round_event.reason.clone()); - if is_current_proposer { + // Process pending opt proposal for the new round. + // The existence of a pending optimistic proposal and being the current proposer are mutually + // exclusive. Note that the opt proposal is checked for a valid proposer before it is inserted + // into the pending queue.
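As context for the pending-queue handling below: the buffer is keyed by round in a BTreeMap, so taking the entry for the new round and pruning stale entries are both single ordered-map operations. A minimal standalone sketch of that pattern (toy types and values, not the RoundManager code):

use std::collections::BTreeMap;

type Round = u64;

fn main() {
    // Toy model of a pending opt-proposal buffer: round -> buffered payload.
    let mut pending: BTreeMap<Round, &str> = BTreeMap::new();
    pending.insert(7, "opt-proposal-r7");
    pending.insert(8, "opt-proposal-r8");
    pending.insert(9, "opt-proposal-r9");

    // Entering round 8: take the proposal buffered for exactly this round.
    let new_round: Round = 8;
    if let Some(payload) = pending.remove(&new_round) {
        println!("processing {payload}");
    }

    // Prune stale entries: split_off(&k) returns the entries with keys >= k,
    // so assigning the result back retains only strictly later rounds.
    pending = pending.split_off(&(new_round + 1));
    assert_eq!(pending.keys().copied().collect::<Vec<_>>(), vec![9]);
}

The same split_off idiom appears later in this diff, where the round manager drops every buffered opt proposal at or below the round it just took off the loopback channel.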
+ if let Some(opt_proposal) = self.pending_opt_proposals.remove(&new_round) { + self.opt_proposal_loopback_tx + .send(opt_proposal) + .await + .expect("Sending to a self loopback unbounded channel cannot fail"); + } + + // If this node is the leader for the new round, try to propose a regular block if it has not already opt proposed + if is_current_proposer + && self + .proposal_generator + .can_propose_in_round(new_round_event.round) + { let epoch_state = self.epoch_state.clone(); let network = self.network.clone(); let sync_info = self.block_store.sync_info(); @@ -445,7 +483,6 @@ impl RoundManager { epoch_state.clone(), new_round_event, sync_info, - network.clone(), proposal_generator, safety_rules, proposer_election, @@ -467,6 +504,31 @@ impl RoundManager { Ok(()) } + async fn generate_and_send_opt_proposal( + epoch_state: Arc, + round: Round, + parent: BlockInfo, + grandparent_qc: QuorumCert, + network: Arc, + sync_info: SyncInfo, + proposal_generator: Arc, + proposer_election: Arc, + ) -> anyhow::Result<()> { + let proposal_msg = Self::generate_opt_proposal( + epoch_state.clone(), + round, + parent, + grandparent_qc, + sync_info, + proposal_generator, + proposer_election, + ) + .await?; + network.broadcast_opt_proposal(proposal_msg).await; + counters::PROPOSALS_COUNT.inc(); + Ok(()) + } + fn log_collected_vote_stats(epoch_state: Arc, new_round_event: &NewRoundEvent) { let prev_round_votes_for_li = new_round_event .prev_round_votes @@ -552,7 +614,6 @@ impl RoundManager { self.epoch_state.clone(), new_round_event, self.block_store.sync_info(), - self.network.clone(), self.proposal_generator.clone(), self.safety_rules.clone(), self.proposer_election.clone(), @@ -564,20 +625,12 @@ impl RoundManager { epoch_state: Arc, new_round_event: NewRoundEvent, sync_info: SyncInfo, - network: Arc, proposal_generator: Arc, safety_rules: Arc>, proposer_election: Arc, ) -> anyhow::Result { - // Proposal generator will ensure that at most one proposal is generated per round - let callback_sync_info = sync_info.clone(); - let callback = async move { - network.broadcast_sync_info(callback_sync_info).await; - } - .boxed(); - let proposal = proposal_generator - .generate_proposal(new_round_event.round, proposer_election, callback) + .generate_proposal(new_round_event.round, proposer_election) .await?; let signature = safety_rules.lock().sign_proposal(&proposal)?; let signed_proposal = @@ -594,6 +647,35 @@ impl RoundManager { Ok(ProposalMsg::new(signed_proposal, sync_info)) } + async fn generate_opt_proposal( + epoch_state: Arc, + round: Round, + parent: BlockInfo, + grandparent_qc: QuorumCert, + sync_info: SyncInfo, + proposal_generator: Arc, + proposer_election: Arc, + ) -> anyhow::Result { + // Proposal generator will ensure that at most one proposal is generated per round + + let proposal = proposal_generator + .generate_opt_proposal( + epoch_state.epoch, + round, + parent, + grandparent_qc, + proposer_election, + ) + .await?; + observe_block(proposal.timestamp_usecs(), BlockStage::OPT_PROPOSED); + info!(Self::new_log_with_round_epoch( + LogEvent::OptPropose, + round, + epoch_state.epoch + ),); + Ok(OptProposalMsg::new(proposal, sync_info)) + } + /// Process the proposal message: /// 1. ensure after processing sync info, we're at the same round as the proposal /// 2.
execute and decide whether to vote for the proposal @@ -650,6 +732,104 @@ impl RoundManager { self.process_verified_proposal(proposal).await } + /// Process the optimistic proposal message: + /// If we have already entered the opt proposal's round, process the opt proposal directly. + /// Otherwise, buffer the opt proposal and process it later, once the parent QC is available. + pub async fn process_opt_proposal_msg( + &mut self, + proposal_msg: OptProposalMsg, + ) -> anyhow::Result<()> { + ensure!(self.local_config.enable_optimistic_proposal_rx, + "Opt proposal is disabled, but received opt proposal msg of epoch {} round {} from peer {}", + proposal_msg.block_data().epoch(), proposal_msg.round(), proposal_msg.proposer() + ); + + fail_point!("consensus::process_opt_proposal_msg", |_| { + Err(anyhow::anyhow!( + "Injected error in process_opt_proposal_msg" + )) + }); + + observe_block( + proposal_msg.block_data().timestamp_usecs(), + BlockStage::ROUND_MANAGER_RECEIVED, + ); + observe_block( + proposal_msg.block_data().timestamp_usecs(), + BlockStage::ROUND_MANAGER_RECEIVED_OPT_PROPOSAL, + ); + info!( + self.new_log(LogEvent::ReceiveOptProposal), + block_author = proposal_msg.proposer(), + block_epoch = proposal_msg.block_data().epoch(), + block_round = proposal_msg.round(), + block_parent_hash = proposal_msg.block_data().parent_id(), + ); + + self.sync_up(proposal_msg.sync_info(), proposal_msg.proposer()) + .await?; + + if self.round_state.current_round() == proposal_msg.round() { + self.opt_proposal_loopback_tx + .send(proposal_msg.take_block_data()) + .await + .expect("Sending to a self loopback unbounded channel cannot fail"); + } else { + // Pre-check that the proposal is from a valid proposer before queuing it. + // This check is done after syncing up to the sync info, to ensure the proposer + // election provider is up to date. + ensure!( + self.proposer_election + .is_valid_proposer(proposal_msg.proposer(), proposal_msg.round()), + "[OptProposal] Not a valid proposer for round {}: {}", + proposal_msg.round(), + proposal_msg.proposer() + ); + self.pending_opt_proposals + .insert(proposal_msg.round(), proposal_msg.take_block_data()); + } + + Ok(()) + } + + /// Process the optimistic proposal: + /// 1. Ensure the highest quorum cert certifies the parent block of the opt block + /// 2. Create a regular proposal by adding QC and failed_authors to the opt block + /// 3.
Process the proposal using existing logic + async fn process_opt_proposal(&mut self, opt_block_data: OptBlockData) -> anyhow::Result<()> { + ensure!( + self.block_store + .get_block_for_round(opt_block_data.round()) + .is_none(), + "Proposal has already been processed for round: {}", + opt_block_data.round() + ); + let hqc = self.block_store.highest_quorum_cert().as_ref().clone(); + ensure!( + hqc.certified_block().round() + 1 == opt_block_data.round(), + "Opt proposal round {} is not the next round after the highest qc round {}", + opt_block_data.round(), + hqc.certified_block().round() + ); + ensure!( + hqc.certified_block().id() == opt_block_data.parent_id(), + "Opt proposal parent id {} is not the same as the highest qc certified block id {}", + opt_block_data.parent_id(), + hqc.certified_block().id() + ); + let proposal = Block::new_from_opt(opt_block_data, hqc); + observe_block(proposal.timestamp_usecs(), BlockStage::PROCESS_OPT_PROPOSAL); + info!( + self.new_log(LogEvent::ProcessOptProposal), + block_author = proposal.author(), + block_epoch = proposal.epoch(), + block_round = proposal.round(), + block_hash = proposal.id(), + block_parent_hash = proposal.quorum_cert().certified_block().id(), + ); + self.process_proposal(proposal).await + } + /// Sync to the sync info sending from peer if it has newer certificates. async fn sync_up(&mut self, sync_info: &SyncInfo, author: Author) -> anyhow::Result<()> { let local_sync_info = self.block_store.sync_info(); @@ -896,7 +1076,7 @@ impl RoundManager { ) { counters::UNEXPECTED_PROPOSAL_EXT_COUNT.inc(); - bail!("ProposalExt unexpected while the feature is disabled."); + bail!("ProposalExt unexpected while the vtxn feature is disabled."); } if let Some(vtxns) = proposal.validator_txns() { @@ -975,20 +1155,22 @@ impl RoundManager { proposal, ); - // Validate that failed_authors list is correctly specified in the block. - let expected_failed_authors = self.proposal_generator.compute_failed_authors( - proposal.round(), - proposal.quorum_cert().certified_block().round(), - false, - self.proposer_election.clone(), - ); - ensure!( - proposal.block_data().failed_authors().is_some_and(|failed_authors| *failed_authors == expected_failed_authors), - "[RoundManager] Proposal for block {} has invalid failed_authors list {:?}, expected {:?}", - proposal.round(), - proposal.block_data().failed_authors(), - expected_failed_authors, - ); + if !proposal.is_opt_block() { + // Validate that failed_authors list is correctly specified in the block.
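Before the failed_authors validation that follows, a short aside on the chaining checks in process_opt_proposal above: an optimistic block arrives without its own QC, so it is only accepted if it extends the locally held highest QC exactly. A self-contained sketch of that invariant (hypothetical toy types, not the consensus crate's API):

// Toy model of the two ensure! checks in process_opt_proposal.
struct BlockRef {
    round: u64,
    id: u64,
}

struct OptBlockToy {
    round: u64,
    parent_id: u64,
}

fn extends_highest_qc(hqc_certified: &BlockRef, opt: &OptBlockToy) -> Result<(), String> {
    // The opt block must sit exactly one round above the highest QC's block...
    if hqc_certified.round + 1 != opt.round {
        return Err(format!(
            "opt round {} does not directly follow hqc round {}",
            opt.round, hqc_certified.round
        ));
    }
    // ...and must name that certified block as its parent.
    if hqc_certified.id != opt.parent_id {
        return Err("opt parent is not the hqc-certified block".to_string());
    }
    Ok(())
}

fn main() {
    let hqc = BlockRef { round: 10, id: 42 };
    assert!(extends_highest_qc(&hqc, &OptBlockToy { round: 11, parent_id: 42 }).is_ok());
    assert!(extends_highest_qc(&hqc, &OptBlockToy { round: 12, parent_id: 42 }).is_err());
}

Once both checks pass, the opt block can safely borrow the highest QC as its own, which is what Block::new_from_opt does above before handing off to the regular proposal path.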
+ let expected_failed_authors = self.proposal_generator.compute_failed_authors( + proposal.round(), + proposal.quorum_cert().certified_block().round(), + false, + self.proposer_election.clone(), + ); + ensure!( + proposal.block_data().failed_authors().is_some_and(|failed_authors| *failed_authors == expected_failed_authors), + "[RoundManager] Proposal for block {} has invalid failed_authors list {:?}, expected {:?}", + proposal.round(), + proposal.block_data().failed_authors(), + expected_failed_authors, + ); + } let block_time_since_epoch = Duration::from_micros(proposal.timestamp_usecs()); @@ -1001,6 +1183,9 @@ impl RoundManager { ); observe_block(proposal.timestamp_usecs(), BlockStage::SYNCED); + if proposal.is_opt_block() { + observe_block(proposal.timestamp_usecs(), BlockStage::SYNCED_OPT_BLOCK); + } // Since processing proposal is delayed due to backpressure or payload availability, we add // the block to the block store so that we don't need to fetch it from remote once we @@ -1008,7 +1193,7 @@ impl RoundManager { // guaranteed to add the block to the block store if we don't get out of the backpressure // before the timeout, so this is needed to ensure that the proposed block is added to // the block store irrespective. Also, it is possible that delayed processing of proposal - // tries to add the same block again, which is okay as `execute_and_insert_block` call + // tries to add the same block again, which is okay as `insert_block` call // is idempotent. self.block_store .insert_block(proposal.clone()) @@ -1138,6 +1323,7 @@ impl RoundManager { pub async fn process_verified_proposal(&mut self, proposal: Block) -> anyhow::Result<()> { let proposal_round = proposal.round(); + let parent_qc = proposal.quorum_cert().clone(); let sync_info = self.block_store.sync_info(); if proposal_round <= sync_info.highest_round() { @@ -1173,6 +1359,73 @@ impl RoundManager { ); self.network.send_vote(vote_msg, vec![recipient]).await; } + + if let Err(e) = self.start_next_opt_round(vote, parent_qc) { + debug!("Cannot start next opt round: {}", e); + }; + Ok(()) + } + + fn start_next_opt_round( + &self, + parent_vote: Vote, + grandparent_qc: QuorumCert, + ) -> anyhow::Result<()> { + // Optimistic Proposal: + // When receiving round r block, send optimistic proposal for round r+1 if: + // 0. opt proposal is enabled + // 1. it is the leader of the next round r+1 + // 2. voted for round r block + // 3. the round r block contains QC of round r-1 + // 4. does not propose in round r+1 + if !self.local_config.enable_optimistic_proposal_tx { + return Ok(()); + }; + + let parent = parent_vote.vote_data().proposed().clone(); + let opt_proposal_round = parent.round() + 1; + if self + .proposer_election + .is_valid_proposer(self.proposal_generator.author(), opt_proposal_round) + { + let expected_grandparent_round = parent + .round() + .checked_sub(1) + .ok_or_else(|| anyhow::anyhow!("Invalid parent round {}", parent.round()))?; + ensure!( + grandparent_qc.certified_block().round() == expected_grandparent_round, + "Cannot start Optimistic Round. 
Grandparent QC is not for round minus one: {} < {}", + grandparent_qc.certified_block().round(), + parent.round() + ); + + let epoch_state = self.epoch_state.clone(); + let network = self.network.clone(); + let sync_info = self.block_store.sync_info(); + let proposal_generator = self.proposal_generator.clone(); + let proposer_election = self.proposer_election.clone(); + tokio::spawn(async move { + if let Err(e) = monitor!( + "generate_and_send_opt_proposal", + Self::generate_and_send_opt_proposal( + epoch_state, + opt_proposal_round, + parent, + grandparent_qc, + network, + sync_info, + proposal_generator, + proposer_election, + ) + .await + ) { + warn!( + "[OptProposal] Error generating and sending opt proposal: {}", + e + ); + } + }); + } Ok(()) } @@ -1213,6 +1466,13 @@ impl RoundManager { observe_block(block_arc.block().timestamp_usecs(), BlockStage::VOTED); } + if block_arc.block().is_opt_block() { + observe_block( + block_arc.block().timestamp_usecs(), + BlockStage::VOTED_OPT_BLOCK, + ); + } + self.storage .save_vote(&vote) .context("[RoundManager] Fail to persist last vote")?; @@ -1340,6 +1600,12 @@ impl RoundManager { BlockStage::ORDER_VOTED, ); } + if proposed_block.block().is_opt_block() { + observe_block( + proposed_block.block().timestamp_usecs(), + BlockStage::ORDER_VOTED_OPT_BLOCK, + ); + } let order_vote_msg = OrderVoteMsg::new(order_vote, qc.as_ref().clone()); info!( self.new_log(LogEvent::BroadcastOrderVote), @@ -1728,6 +1994,7 @@ impl RoundManager { (Author, VerifiedEvent), >, mut buffered_proposal_rx: aptos_channel::Receiver, + mut opt_proposal_loopback_rx: aptos_channels::UnboundedReceiver, close_rx: oneshot::Receiver>, ) { info!(epoch = self.epoch_state.epoch, "RoundManager started"); @@ -1741,6 +2008,18 @@ impl RoundManager { } break; } + opt_proposal = opt_proposal_loopback_rx.select_next_some() => { + self.pending_opt_proposals = self.pending_opt_proposals.split_off(&opt_proposal.round().add(1)); + let result = monitor!("process_opt_proposal_loopback", self.process_opt_proposal(opt_proposal).await); + let round_state = self.round_state(); + match result { + Ok(_) => trace!(RoundStateLogSchema::new(round_state)), + Err(e) => { + counters::ERROR_COUNT.inc(); + warn!(kind = error_kind(&e), RoundStateLogSchema::new(round_state), "Error: {:#}", e); + } + } + } proposal = buffered_proposal_rx.select_next_some() => { let mut proposals = vec![proposal]; while let Some(Some(proposal)) = buffered_proposal_rx.next().now_or_never() { @@ -1750,6 +2029,7 @@ impl RoundManager { match event { VerifiedEvent::ProposalMsg(p) => p.proposal().round(), VerifiedEvent::VerifiedProposalMsg(p) => p.round(), + VerifiedEvent::OptProposalMsg(p) => p.round(), unexpected_event => unreachable!("Unexpected event {:?}", unexpected_event), } }; @@ -1773,6 +2053,12 @@ impl RoundManager { self.process_delayed_proposal_msg(*proposal_msg).await ) } + VerifiedEvent::OptProposalMsg(proposal_msg) => { + monitor!( + "process_opt_proposal", + self.process_opt_proposal_msg(*proposal_msg).await + ) + } unexpected_event => unreachable!("Unexpected event: {:?}", unexpected_event), }; let round_state = self.round_state(); diff --git a/consensus/src/round_manager_fuzzing.rs b/consensus/src/round_manager_fuzzing.rs index 28eeca5611878..43fd863715068 100644 --- a/consensus/src/round_manager_fuzzing.rs +++ b/consensus/src/round_manager_fuzzing.rs @@ -6,6 +6,7 @@ use crate::{ block_storage::{pending_blocks::PendingBlocks, BlockStore}, + counters, liveness::{ proposal_generator::{ ChainHealthBackoffConfig, 
PipelineBackpressureConfig, ProposalGenerator, @@ -132,7 +133,7 @@ fn create_node_for_fuzzing() -> RoundManager { // TODO: remove let proof = make_initial_epoch_change_proof(&signer); - let mut safety_rules = SafetyRules::new(test_utils::test_storage(&signer)); + let mut safety_rules = SafetyRules::new(test_utils::test_storage(&signer), false); safety_rules.initialize(&proof).unwrap(); // TODO: mock channels @@ -196,6 +197,9 @@ fn create_node_for_fuzzing() -> RoundManager { let (round_manager_tx, _) = aptos_channel::new(QueueStyle::LIFO, 1, None); + let (opt_proposal_loopback_tx, _) = + aptos_channels::new_unbounded(&counters::OP_COUNTERS.gauge("opt_proposal_loopback_queue")); + // event processor RoundManager::new( epoch_state, @@ -216,6 +220,7 @@ fn create_node_for_fuzzing() -> RoundManager { OnChainJWKConsensusConfig::default_enabled(), None, Arc::new(MockPastProposalStatusTracker {}), + opt_proposal_loopback_tx, ) } diff --git a/consensus/src/round_manager_test.rs b/consensus/src/round_manager_tests/consensus_test.rs similarity index 59% rename from consensus/src/round_manager_test.rs rename to consensus/src/round_manager_tests/consensus_test.rs index adeaaf98da18a..fe0ab0012c742 100644 --- a/consensus/src/round_manager_test.rs +++ b/consensus/src/round_manager_tests/consensus_test.rs @@ -1,38 +1,21 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. +// Copyright (c) Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +// Parts of the project are originally copyright © Meta Platforms, Inc. use crate::{ - block_storage::{pending_blocks::PendingBlocks, BlockReader, BlockStore}, + block_storage::BlockReader, counters, - liveness::{ - proposal_generator::{ - ChainHealthBackoffConfig, PipelineBackpressureConfig, ProposalGenerator, - }, - proposer_election::ProposerElection, - rotating_proposer_election::RotatingProposer, - round_state::{ExponentialTimeInterval, RoundState}, - }, metrics_safety_rules::MetricsSafetyRules, - network::{IncomingBlockRetrievalRequest, NetworkSender}, - network_interface::{CommitMessage, ConsensusMsg, ConsensusNetworkClient, DIRECT_SEND, RPC}, + network::IncomingBlockRetrievalRequest, + network_interface::ConsensusMsg, network_tests::{NetworkPlayground, TwinId}, - payload_manager::DirectMempoolPayloadManager, - persistent_liveness_storage::RecoveryData, - pipeline::buffer_manager::OrderedBlocks, - round_manager::RoundManager, - test_utils::{ - consensus_runtime, create_vec_signed_transactions, - mock_execution_client::MockExecutionClient, timed_block_on, MockOptQSPayloadProvider, - MockPastProposalStatusTracker, MockPayloadManager, MockStorage, TreeInserter, + round_manager::round_manager_tests::{ + config_with_round_timeout_msg_disabled, start_replying_to_block_retreival, NodeSetup, + ProposalMsgType, }, - util::time_service::{ClockTimeService, TimeService}, -}; -use aptos_channels::{self, aptos_channel, message_queues::QueueStyle}; -use aptos_config::{ - config::ConsensusConfig, - network_id::{NetworkId, PeerNetworkId}, + test_utils::{consensus_runtime, timed_block_on, TreeInserter}, }; +use aptos_config::config::ConsensusConfig; use aptos_consensus_types::{ block::{ block_test_utils::{certificate_for_genesis, gen_test_certificate}, @@ -40,628 +23,69 @@ use aptos_consensus_types::{ }, block_retrieval::{BlockRetrievalRequest, BlockRetrievalRequestV1, BlockRetrievalStatus}, common::{Author, Payload, Round}, - order_vote_msg::OrderVoteMsg, - pipeline::commit_decision::CommitDecision, + 
opt_proposal_msg::OptProposalMsg, proposal_msg::ProposalMsg, - round_timeout::RoundTimeoutMsg, sync_info::SyncInfo, timeout_2chain::{TwoChainTimeout, TwoChainTimeoutWithPartialSignatures}, - utils::PayloadTxnsSize, - vote_msg::VoteMsg, }; use aptos_crypto::HashValue; use aptos_infallible::Mutex; use aptos_logger::prelude::info; -use aptos_network::{ - application::interface::NetworkClient, - peer_manager::{ConnectionRequestSender, PeerManagerRequestSender}, - protocols::{ - network, - network::{Event, NetworkEvents, NewNetworkEvents, NewNetworkSender}, - wire::handshake::v1::ProtocolIdSet, - }, - transport::ConnectionMetadata, - ProtocolId, -}; +use aptos_network::{protocols::network::Event, ProtocolId}; use aptos_safety_rules::{PersistentSafetyStorage, SafetyRulesManager}; use aptos_secure_storage::Storage; -use aptos_types::{ - dkg::{real_dkg::RealDKG, DKGSessionMetadata, DKGTrait, DKGTranscript}, - epoch_state::EpochState, - jwks::QuorumCertifiedUpdate, - ledger_info::LedgerInfo, - on_chain_config::{ - ConsensusAlgorithmConfig, ConsensusConfigV1, OnChainConsensusConfig, - OnChainJWKConsensusConfig, OnChainRandomnessConfig, RandomnessConfigMoveStruct, - ValidatorTxnConfig, DEFAULT_WINDOW_SIZE, - }, - transaction::SignedTransaction, - validator_signer::ValidatorSigner, - validator_txn::ValidatorTransaction, - validator_verifier::{ - generate_validator_verifier, random_validator_verifier, - random_validator_verifier_with_voting_power, ValidatorConsensusInfoMoveStruct, - ValidatorVerifier, - }, - waypoint::Waypoint, -}; -use futures::{ - channel::{mpsc, oneshot}, - executor::block_on, - stream::select, - FutureExt, Stream, StreamExt, -}; -use maplit::hashmap; -use rand::{rngs::ThreadRng, thread_rng}; -use std::{ - collections::VecDeque, - iter::FromIterator, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, -}; -use tokio::{ - runtime::{Handle, Runtime}, - task::JoinHandle, - time::timeout, -}; - -/// Auxiliary struct that is setting up node environment for the test. 
-pub struct NodeSetup { - block_store: Arc, - round_manager: RoundManager, - storage: Arc, - signer: ValidatorSigner, - proposers: Vec, - safety_rules_manager: SafetyRulesManager, - pending_network_events: Vec>, - all_network_events: Box> + Send + Unpin>, - ordered_blocks_events: mpsc::UnboundedReceiver, - mock_execution_client: Arc, - _state_sync_receiver: mpsc::UnboundedReceiver>, - id: usize, - onchain_consensus_config: OnChainConsensusConfig, - local_consensus_config: ConsensusConfig, - onchain_randomness_config: OnChainRandomnessConfig, - onchain_jwk_consensus_config: OnChainJWKConsensusConfig, - vote_queue: VecDeque, - order_vote_queue: VecDeque, - proposal_queue: VecDeque, - round_timeout_queue: VecDeque, - commit_decision_queue: VecDeque, -} - -impl NodeSetup { - fn create_round_state(time_service: Arc) -> RoundState { - let base_timeout = Duration::new(60, 0); - let time_interval = Box::new(ExponentialTimeInterval::fixed(base_timeout)); - let (round_timeout_sender, _) = aptos_channels::new_test(1_024); - RoundState::new(time_interval, time_service, round_timeout_sender) - } - - fn create_proposer_election(proposers: Vec) -> Arc { - Arc::new(RotatingProposer::new(proposers, 1)) - } - - fn create_nodes( - playground: &mut NetworkPlayground, - executor: Handle, - num_nodes: usize, - proposer_indices: Option>, - onchain_consensus_config: Option, - local_consensus_config: Option, - onchain_randomness_config: Option, - onchain_jwk_consensus_config: Option, - ) -> Vec { - Self::create_nodes_with_validator_set( - playground, - executor, - num_nodes, - proposer_indices, - onchain_consensus_config, - local_consensus_config, - onchain_randomness_config, - onchain_jwk_consensus_config, - None, - ) - } - - fn create_nodes_with_validator_set( - playground: &mut NetworkPlayground, - executor: Handle, - num_nodes: usize, - proposer_indices: Option>, - onchain_consensus_config: Option, - local_consensus_config: Option, - onchain_randomness_config: Option, - onchain_jwk_consensus_config: Option, - validator_set: Option<(Vec, ValidatorVerifier)>, - ) -> Vec { - let mut onchain_consensus_config = onchain_consensus_config.unwrap_or_default(); - // With order votes feature, the validators additionally send order votes. - // next_proposal and next_vote functions could potentially break because of it. 
- if let OnChainConsensusConfig::V4 { - alg: - ConsensusAlgorithmConfig::JolteonV2 { - main: _, - quorum_store_enabled: _, - order_vote_enabled, - }, - vtxn: _, - window_size: _, - } = &mut onchain_consensus_config - { - *order_vote_enabled = false; - } - let onchain_randomness_config = - onchain_randomness_config.unwrap_or_else(OnChainRandomnessConfig::default_if_missing); - let onchain_jwk_consensus_config = onchain_jwk_consensus_config - .unwrap_or_else(OnChainJWKConsensusConfig::default_if_missing); - let local_consensus_config = local_consensus_config.unwrap_or_default(); - let (signers, validators) = - validator_set.unwrap_or_else(|| random_validator_verifier(num_nodes, None, false)); - let proposers = proposer_indices - .unwrap_or_else(|| vec![0]) - .iter() - .map(|i| signers[*i].author()) - .collect::>(); - let validator_set = (&validators).into(); - let waypoint = - Waypoint::new_epoch_boundary(&LedgerInfo::mock_genesis(Some(validator_set))).unwrap(); - - let mut nodes = vec![]; - // pre-initialize the mapping to avoid race conditions (peer try to broadcast to someone not added yet) - let peers_and_metadata = playground.peer_protocols(); - for signer in signers.iter().take(num_nodes) { - let peer_id = signer.author(); - let mut conn_meta = ConnectionMetadata::mock(peer_id); - conn_meta.application_protocols = ProtocolIdSet::from_iter([ - ProtocolId::ConsensusDirectSendJson, - ProtocolId::ConsensusDirectSendBcs, - ProtocolId::ConsensusRpcBcs, - ]); - let peer_network_id = PeerNetworkId::new(NetworkId::Validator, peer_id); - peers_and_metadata - .insert_connection_metadata(peer_network_id, conn_meta) - .unwrap(); - } - for (id, signer) in signers.iter().take(num_nodes).enumerate() { - let (initial_data, storage) = MockStorage::start_for_testing((&validators).into()); - - let safety_storage = PersistentSafetyStorage::initialize( - Storage::from(aptos_secure_storage::InMemoryStorage::new()), - signer.author(), - signer.private_key().clone(), - waypoint, - true, - ); - let safety_rules_manager = SafetyRulesManager::new_local(safety_storage); - - nodes.push(Self::new( - playground, - executor.clone(), - signer.to_owned(), - proposers.clone(), - storage, - initial_data, - safety_rules_manager, - id, - onchain_consensus_config.clone(), - local_consensus_config.clone(), - onchain_randomness_config.clone(), - onchain_jwk_consensus_config.clone(), - )); - } - nodes - } - - fn new( - playground: &mut NetworkPlayground, - executor: Handle, - signer: ValidatorSigner, - proposers: Vec, - storage: Arc, - initial_data: RecoveryData, - safety_rules_manager: SafetyRulesManager, - id: usize, - onchain_consensus_config: OnChainConsensusConfig, - local_consensus_config: ConsensusConfig, - onchain_randomness_config: OnChainRandomnessConfig, - onchain_jwk_consensus_config: OnChainJWKConsensusConfig, - ) -> Self { - let _entered_runtime = executor.enter(); - let epoch_state = Arc::new(EpochState::new(1, storage.get_validator_set().into())); - let validators = epoch_state.verifier.clone(); - let (network_reqs_tx, network_reqs_rx) = aptos_channel::new(QueueStyle::FIFO, 8, None); - let (connection_reqs_tx, _) = aptos_channel::new(QueueStyle::FIFO, 8, None); - let (consensus_tx, consensus_rx) = aptos_channel::new(QueueStyle::FIFO, 8, None); - let (_conn_mgr_reqs_tx, conn_mgr_reqs_rx) = aptos_channels::new_test(8); - let network_sender = network::NetworkSender::new( - PeerManagerRequestSender::new(network_reqs_tx), - ConnectionRequestSender::new(connection_reqs_tx), - ); - let network_client = 
NetworkClient::new( - DIRECT_SEND.into(), - RPC.into(), - hashmap! {NetworkId::Validator => network_sender}, - playground.peer_protocols(), - ); - let consensus_network_client = ConsensusNetworkClient::new(network_client); - let network_events = NetworkEvents::new(consensus_rx, None, true); - let author = signer.author(); - - let twin_id = TwinId { id, author }; - - playground.add_node(twin_id, consensus_tx, network_reqs_rx, conn_mgr_reqs_rx); - - let (self_sender, self_receiver) = aptos_channels::new_unbounded_test(); - let network = Arc::new(NetworkSender::new( - author, - consensus_network_client, - self_sender, - validators, - )); - - let all_network_events = Box::new(select(network_events, self_receiver)); - - let last_vote_sent = initial_data.last_vote(); - let (ordered_blocks_tx, ordered_blocks_events) = mpsc::unbounded::(); - let (state_sync_client, _state_sync_receiver) = mpsc::unbounded(); - let mock_execution_client = Arc::new(MockExecutionClient::new( - state_sync_client.clone(), - ordered_blocks_tx.clone(), - Arc::clone(&storage), - )); - let time_service = Arc::new(ClockTimeService::new(executor)); - - let window_size = onchain_consensus_config.window_size(); - let block_store = Arc::new(BlockStore::new( - storage.clone(), - initial_data, - mock_execution_client.clone(), - 10, // max pruned blocks in mem - time_service.clone(), - 10, - Arc::from(DirectMempoolPayloadManager::new()), - false, - window_size, - Arc::new(Mutex::new(PendingBlocks::new())), - None, - )); - - let proposer_election = Self::create_proposer_election(proposers.clone()); - let proposal_generator = ProposalGenerator::new( - author, - block_store.clone(), - Arc::new(MockPayloadManager::new(None)), - time_service.clone(), - Duration::ZERO, - PayloadTxnsSize::new(20, 1000), - 10, - PayloadTxnsSize::new(5, 500), - 10, - 1, - Some(30_000), - PipelineBackpressureConfig::new_no_backoff(), - ChainHealthBackoffConfig::new_no_backoff(), - false, - onchain_consensus_config.effective_validator_txn_config(), - true, - Arc::new(MockOptQSPayloadProvider {}), - ); - - let round_state = Self::create_round_state(time_service); - let mut safety_rules = - MetricsSafetyRules::new(safety_rules_manager.client(), storage.clone()); - safety_rules.perform_initialize().unwrap(); - - let (round_manager_tx, _) = aptos_channel::new(QueueStyle::LIFO, 1, None); - - let local_config = local_consensus_config.clone(); - - let mut round_manager = RoundManager::new( - epoch_state, - Arc::clone(&block_store), - round_state, - proposer_election, - proposal_generator, - Arc::new(Mutex::new(safety_rules)), - network, - storage.clone(), - onchain_consensus_config.clone(), - round_manager_tx, - local_config, - onchain_randomness_config.clone(), - onchain_jwk_consensus_config.clone(), - None, - Arc::new(MockPastProposalStatusTracker {}), - ); - block_on(round_manager.init(last_vote_sent)); - Self { - block_store, - round_manager, - storage, - signer, - proposers, - safety_rules_manager, - pending_network_events: Vec::new(), - all_network_events, - ordered_blocks_events, - mock_execution_client, - _state_sync_receiver, - id, - onchain_consensus_config, - local_consensus_config, - onchain_randomness_config, - onchain_jwk_consensus_config, - vote_queue: VecDeque::new(), - order_vote_queue: VecDeque::new(), - proposal_queue: VecDeque::new(), - round_timeout_queue: VecDeque::new(), - commit_decision_queue: VecDeque::new(), - } - } - - pub fn restart(self, playground: &mut NetworkPlayground, executor: Handle) -> Self { - let recover_data = self - .storage 
- .try_start( - self.onchain_consensus_config.order_vote_enabled(), - self.onchain_consensus_config.window_size(), - ) - .unwrap_or_else(|e| panic!("fail to restart due to: {}", e)); - Self::new( - playground, - executor, - self.signer, - self.proposers, - self.storage, - recover_data, - self.safety_rules_manager, - self.id, - self.onchain_consensus_config.clone(), - self.local_consensus_config.clone(), - self.onchain_randomness_config.clone(), - self.onchain_jwk_consensus_config.clone(), - ) - } - - pub fn identity_desc(&self) -> String { - format!("{} [{}]", self.id, self.signer.author()) - } - - fn poll_next_network_event(&mut self) -> Option> { - if !self.pending_network_events.is_empty() { - Some(self.pending_network_events.remove(0)) - } else { - self.all_network_events - .next() - .now_or_never() - .map(|v| v.unwrap()) - } - } - - pub async fn next_network_event(&mut self) -> Event { - if !self.pending_network_events.is_empty() { - self.pending_network_events.remove(0) - } else { - self.all_network_events.next().await.unwrap() - } - } - - pub async fn next_network_message(&mut self) { - let consensus_msg = match self.next_network_event().await { - Event::Message(_, msg) => msg, - Event::RpcRequest(_, msg, _, _) if matches!(msg, ConsensusMsg::CommitMessage(_)) => msg, - Event::RpcRequest(_, msg, _, _) => { - panic!( - "Unexpected event, got RpcRequest, expected Message: {:?} on node {}", - msg, - self.identity_desc() - ) - }, - }; - - match consensus_msg { - ConsensusMsg::ProposalMsg(proposal) => { - self.proposal_queue.push_back(*proposal); - }, - ConsensusMsg::VoteMsg(vote) => { - self.vote_queue.push_back(*vote); - }, - ConsensusMsg::OrderVoteMsg(order_vote) => { - self.order_vote_queue.push_back(*order_vote); - }, - ConsensusMsg::RoundTimeoutMsg(round_timeout) => { - self.round_timeout_queue.push_back(*round_timeout); - }, - ConsensusMsg::CommitDecisionMsg(commit_decision) => { - self.commit_decision_queue.push_back(*commit_decision); - }, - ConsensusMsg::CommitMessage(d) if matches!(*d, CommitMessage::Decision(_)) => { - match *d { - CommitMessage::Decision(commit_decision) => { - self.commit_decision_queue.push_back(commit_decision); - }, - _ => unreachable!(), - } - }, - msg => panic!( - "Unexpected Consensus Message: {:?} on node {}", - msg, - self.identity_desc() - ), - } - } - - pub fn no_next_msg(&mut self) { - match self.poll_next_network_event() { - Some(Event::RpcRequest(_, msg, _, _)) | Some(Event::Message(_, msg)) => panic!( - "Unexpected Consensus Message: {:?} on node {}", - msg, - self.identity_desc() - ), - None => {}, - } - } - - pub async fn next_proposal(&mut self) -> ProposalMsg { - while self.proposal_queue.is_empty() { - self.next_network_message().await; - } - self.proposal_queue.pop_front().unwrap() - } - - pub async fn next_vote(&mut self) -> VoteMsg { - while self.vote_queue.is_empty() { - self.next_network_message().await; - } - self.vote_queue.pop_front().unwrap() - } - - #[allow(unused)] - pub async fn next_order_vote(&mut self) -> OrderVoteMsg { - while self.order_vote_queue.is_empty() { - self.next_network_message().await; - } - self.order_vote_queue.pop_front().unwrap() - } - - pub async fn next_timeout(&mut self) -> RoundTimeoutMsg { - while self.round_timeout_queue.is_empty() { - self.next_network_message().await; - } - self.round_timeout_queue.pop_front().unwrap() - } - - pub async fn next_commit_decision(&mut self) -> CommitDecision { - while self.commit_decision_queue.is_empty() { - self.next_network_message().await; - } - 
self.commit_decision_queue.pop_front().unwrap() - } - - pub async fn poll_block_retrieval(&mut self) -> Option { - match self.poll_next_network_event() { - Some(Event::RpcRequest(_, msg, protocol, response_sender)) => match msg { - ConsensusMsg::DeprecatedBlockRetrievalRequest(v) => { - Some(IncomingBlockRetrievalRequest { - req: BlockRetrievalRequest::V1(*v), - protocol, - response_sender, - }) - }, - ConsensusMsg::BlockRetrievalRequest(v) => Some(IncomingBlockRetrievalRequest { - req: *v, - protocol, - response_sender, - }), - msg => panic!( - "Unexpected Consensus Message: {:?} on node {}", - msg, - self.identity_desc() - ), - }, - Some(Event::Message(_, msg)) => panic!( - "Unexpected Consensus Message: {:?} on node {}", - msg, - self.identity_desc() - ), - None => None, - } - } +use aptos_types::validator_verifier::generate_validator_verifier; +use futures::{channel::oneshot, StreamExt}; +use std::{sync::Arc, time::Duration}; +use tokio::{runtime::Runtime, time::timeout}; - pub fn no_next_ordered(&mut self) { - if self.ordered_blocks_events.next().now_or_never().is_some() { - panic!("Unexpected Ordered Blocks Event"); - } - } +pub(super) fn process_and_vote_opt_proposal( + runtime: &Runtime, + node: &mut NodeSetup, + opt_proposal_msg: OptProposalMsg, + expected_round: Round, + expected_qc_ordered_round: Round, + expected_qc_committed_round: Round, +) { + info!("Processing opt proposal on {}", node.identity_desc()); - pub async fn commit_next_ordered(&mut self, expected_rounds: &[Round]) { - info!( - "Starting commit_next_ordered to wait for {:?} on node {:?}", - expected_rounds, - self.identity_desc() - ); - let ordered_blocks = self.ordered_blocks_events.next().await.unwrap(); - let rounds = ordered_blocks - .ordered_blocks - .iter() - .map(|b| b.round()) - .collect::>(); - assert_eq!(&rounds, expected_rounds); - self.mock_execution_client - .commit_to_storage(ordered_blocks) - .await - .unwrap(); - } -} + assert_eq!(opt_proposal_msg.round(), expected_round); + assert_eq!( + opt_proposal_msg.sync_info().highest_ordered_round(), + expected_qc_ordered_round.saturating_sub(1) + ); + assert_eq!( + opt_proposal_msg.sync_info().highest_commit_round(), + expected_qc_committed_round + ); -fn config_with_round_timeout_msg_disabled() -> ConsensusConfig { - // Disable RoundTimeoutMsg to unless expliclity enabled. 
- ConsensusConfig { - enable_round_timeout_msg: false, - ..Default::default() - } -} + timed_block_on( + runtime, + node.round_manager + .process_opt_proposal_msg(opt_proposal_msg), + ) + .unwrap(); + info!("Finish process opt proposal on {}", node.identity_desc()); -fn start_replying_to_block_retreival(nodes: Vec) -> ReplyingRPCHandle { - let done = Arc::new(AtomicBool::new(false)); - let mut handles = Vec::new(); - for mut node in nodes.into_iter() { - let done_clone = done.clone(); - handles.push(tokio::spawn(async move { - while !done_clone.load(Ordering::Relaxed) { - info!("Asking for RPC request on {:?}", node.identity_desc()); - let maybe_request = node.poll_block_retrieval().await; - if let Some(request) = maybe_request { - info!( - "RPC request received: {:?} on {:?}", - request, - node.identity_desc() - ); - let wrapped_request = IncomingBlockRetrievalRequest { - req: request.req, - protocol: request.protocol, - response_sender: request.response_sender, - }; - node.block_store - .process_block_retrieval(wrapped_request) - .await - .unwrap(); - } else { - tokio::time::sleep(Duration::from_millis(50)).await; - } - } - node - })); - } - ReplyingRPCHandle { handles, done } -} + info!( + "Processing proposal (from opt proposal) on {}", + node.identity_desc() + ); -struct ReplyingRPCHandle { - handles: Vec>, - done: Arc, -} + let opt_block_data = timed_block_on(runtime, node.processed_opt_proposal_rx.next()).unwrap(); + timed_block_on( + runtime, + node.round_manager.process_opt_proposal(opt_block_data), + ) + .unwrap(); -impl ReplyingRPCHandle { - async fn join(self) -> Vec { - self.done.store(true, Ordering::Relaxed); - let mut result = Vec::new(); - for handle in self.handles.into_iter() { - result.push(handle.await.unwrap()); - } - info!( - "joined nodes in order: {:?}", - result.iter().map(|v| v.id).collect::>() - ); - result - } + info!( + "Finish process proposal (from opt proposal) on {}", + node.identity_desc() + ); } -fn process_and_vote_on_proposal( +pub(super) fn process_and_vote_on_proposal( runtime: &Runtime, nodes: &mut [NodeSetup], next_proposer: usize, @@ -683,29 +107,41 @@ fn process_and_vote_on_proposal( info!("Waiting on next_proposal on node {}", node.identity_desc()); if down_nodes.contains(&node.id) { // Drop the proposal on down nodes - timed_block_on(runtime, node.next_proposal()); + timed_block_on(runtime, node.next_opt_or_normal_proposal()); info!("Dropping proposal on down node {}", node.identity_desc()); } else { // Proccess proposal on other nodes - let proposal_msg = timed_block_on(runtime, node.next_proposal()); - info!("Processing proposal on {}", node.identity_desc()); - - assert_eq!(proposal_msg.proposal().round(), expected_round); - assert_eq!( - proposal_msg.sync_info().highest_ordered_round(), - expected_qc_ordered_round - ); - assert_eq!( - proposal_msg.sync_info().highest_commit_round(), - expected_qc_committed_round - ); + let proposal_msg_type = timed_block_on(runtime, node.next_opt_or_normal_proposal()); + match proposal_msg_type { + ProposalMsgType::Normal(proposal_msg) => { + info!("Processing proposal on {}", node.identity_desc()); + + assert_eq!(proposal_msg.proposal().round(), expected_round); + assert_eq!( + proposal_msg.sync_info().highest_ordered_round(), + expected_qc_ordered_round + ); + assert_eq!( + proposal_msg.sync_info().highest_commit_round(), + expected_qc_committed_round + ); - timed_block_on( - runtime, - node.round_manager.process_proposal_msg(proposal_msg), - ) - .unwrap(); - info!("Finish process proposal on {}", 
node.identity_desc()); + timed_block_on( + runtime, + node.round_manager.process_proposal_msg(proposal_msg), + ) + .unwrap(); + info!("Finish process proposal on {}", node.identity_desc()); + }, + ProposalMsgType::Optimistic(opt_proposal_msg) => process_and_vote_opt_proposal( + runtime, + node, + opt_proposal_msg, + expected_round, + expected_qc_ordered_round, + expected_qc_committed_round, + ), + } num_votes += 1; } } @@ -780,16 +216,30 @@ fn new_round_on_quorum_cert() { .process_proposal_msg(proposal_msg) .await .unwrap(); + let vote_msg = node.next_vote().await; // Adding vote to form a QC node.round_manager.process_vote_msg(vote_msg).await.unwrap(); // round 2 should start - let proposal_msg = node.next_proposal().await; - let proposal = proposal_msg.proposal(); - assert_eq!(proposal.round(), 2); - assert_eq!(proposal.parent_id(), b1_id); - assert_eq!(proposal.quorum_cert().certified_block().id(), b1_id); + let proposal_msg_type = node.next_opt_or_normal_proposal().await; + match proposal_msg_type { + ProposalMsgType::Normal(proposal_msg) => { + let proposal = proposal_msg.proposal(); + assert_eq!(proposal.round(), 2); + assert_eq!(proposal.parent_id(), b1_id); + assert_eq!(proposal.quorum_cert().certified_block().id(), b1_id); + }, + ProposalMsgType::Optimistic(opt_proposal_msg) => { + let proposal = opt_proposal_msg.block_data(); + assert_eq!(proposal.round(), 2); + assert_eq!(proposal.parent_id(), b1_id); + assert_eq!( + proposal.grandparent_qc().certified_block().id(), + genesis.id() + ); + }, + } }); } @@ -864,9 +314,7 @@ fn delay_proposal_processing_in_sync_only() { node.next_proposal().await; // Set sync only to true so that new proposal processing is delayed. - node.round_manager - .block_store - .set_back_pressure_for_test(true); + node.block_store.set_back_pressure_for_test(true); let proposal = Block::new_proposal( Payload::empty(false, true), 1, @@ -888,9 +336,7 @@ fn delay_proposal_processing_in_sync_only() { .unwrap_err(); // Clear the sync only mode and process verified proposal and ensure it is processed now - node.round_manager - .block_store - .set_back_pressure_for_test(false); + node.block_store.set_back_pressure_for_test(false); node.round_manager .process_verified_proposal(proposal) @@ -1808,14 +1254,30 @@ fn safety_rules_crash() { timed_block_on(&runtime, async { for _ in 0..2 { - let proposal_msg = node.next_proposal().await; + let proposal_msg_type = node.next_opt_or_normal_proposal().await; reset_safety_rules(&mut node); - // construct_and_sign_vote - node.round_manager - .process_proposal_msg(proposal_msg) - .await - .unwrap(); + + match proposal_msg_type { + ProposalMsgType::Normal(proposal_msg) => { + // construct_and_sign_vote + node.round_manager + .process_proposal_msg(proposal_msg) + .await + .unwrap(); + }, + ProposalMsgType::Optimistic(opt_proposal_msg) => { + node.round_manager + .process_opt_proposal_msg(opt_proposal_msg) + .await + .unwrap(); + let opt_block_data = node.processed_opt_proposal_rx.next().await.unwrap(); + node.round_manager + .process_opt_proposal(opt_block_data) + .await + .unwrap(); + }, + } let vote_msg = node.next_vote().await; @@ -1834,7 +1296,7 @@ fn safety_rules_crash() { } // verify the last sign proposal happened - node.next_proposal().await; + node.next_opt_or_normal_proposal().await; }); } @@ -2068,7 +1530,7 @@ fn block_retrieval_test() { // Drain the queue on other nodes for node in nodes.iter_mut() { - let _ = node.next_proposal().await; + let _ = node.next_opt_or_normal_proposal().await; } info!( @@ -2076,12 +1538,23 @@ 
fn block_retrieval_test() { behind_node.identity_desc() ); let handle = start_replying_to_block_retreival(nodes); - let proposal_msg = behind_node.next_proposal().await; - behind_node - .round_manager - .process_proposal_msg(proposal_msg) - .await - .unwrap(); + + let proposal_msg_type = behind_node.next_opt_or_normal_proposal().await; + info!("got proposal msg: {:?}", proposal_msg_type); + match proposal_msg_type { + ProposalMsgType::Normal(proposal_msg) => behind_node + .round_manager + .process_proposal_msg(proposal_msg) + .await + .unwrap(), + ProposalMsgType::Optimistic(opt_proposal_msg) => { + behind_node + .round_manager + .process_opt_proposal_msg(opt_proposal_msg) + .await + .unwrap(); + }, + } handle.join().await; }); @@ -2138,7 +1611,7 @@ fn block_retrieval_timeout_test() { // Drain the queue on other nodes for node in nodes.iter_mut() { - let _ = node.next_proposal().await; + let _ = node.next_opt_or_normal_proposal().await; } info!( @@ -2146,12 +1619,23 @@ fn block_retrieval_timeout_test() { behind_node.identity_desc() ); - let proposal_msg = behind_node.next_proposal().await; - behind_node - .round_manager - .process_proposal_msg(proposal_msg) - .await - .unwrap_err(); + let proposal_msg_type = behind_node.next_opt_or_normal_proposal().await; + match proposal_msg_type { + ProposalMsgType::Normal(proposal_msg) => { + behind_node + .round_manager + .process_proposal_msg(proposal_msg) + .await + .unwrap_err(); + }, + ProposalMsgType::Optimistic(opt_proposal_msg) => { + behind_node + .round_manager + .process_opt_proposal_msg(opt_proposal_msg) + .await + .unwrap_err(); + }, + } }); } @@ -2410,366 +1894,3 @@ pub fn forking_retrieval_test() { 3, ); } - -#[test] -/// If ProposalExt feature is disabled, ProposalExt should be rejected -/// No votes are sent, but the block is still added to the block tree. -fn no_vote_on_proposal_ext_when_feature_disabled() { - let runtime = consensus_runtime(); - let mut playground = NetworkPlayground::new(runtime.handle().clone()); - // In order to observe the votes we're going to check proposal processing on the non-proposer - // node (which will send the votes to the proposer). 
- let mut nodes = NodeSetup::create_nodes( - &mut playground, - runtime.handle().clone(), - 1, - None, - None, - None, - None, - None, - ); - let node = &mut nodes[0]; - let genesis_qc = certificate_for_genesis(); - - let invalid_block = Block::new_proposal_ext( - vec![ValidatorTransaction::dummy(vec![0xFF]); 5], - Payload::empty(false, true), - 1, - 1, - genesis_qc.clone(), - &node.signer, - Vec::new(), - ) - .unwrap(); - - let valid_block = Block::new_proposal( - Payload::empty(false, true), - 1, - 1, - genesis_qc, - &node.signer, - Vec::new(), - ) - .unwrap(); - - timed_block_on(&runtime, async { - // clear the message queue - node.next_proposal().await; - - assert!(node - .round_manager - .process_proposal(invalid_block) - .await - .is_err()); - - assert!(node - .round_manager - .process_proposal(valid_block) - .await - .is_ok()); - }); -} - -#[test] -fn no_vote_on_proposal_with_unexpected_vtxns() { - let vtxns = vec![ValidatorTransaction::ObservedJWKUpdate( - QuorumCertifiedUpdate::dummy(), - )]; - - assert_process_proposal_result( - None, - None, - Some(OnChainJWKConsensusConfig::default_disabled()), - vtxns.clone(), - false, - ); - - assert_process_proposal_result( - None, - None, - Some(OnChainJWKConsensusConfig::default_enabled()), - vtxns, - true, - ); -} - -#[test] -fn no_vote_on_proposal_with_uncertified_dkg_result() { - test_dkg_result_handling( - &[25_000_000; 4], - 1, - RealDKG::sample_secret_and_generate_transcript, - false, - ); -} - -#[test] -fn no_vote_on_proposal_with_inconsistent_secret_dkg_result() { - test_dkg_result_handling( - &[10_000_000, 70_000_000, 10_000_000, 10_000_000], - 1, - RealDKG::generate_transcript_for_inconsistent_secrets, - false, - ); -} - -#[test] -fn no_vote_on_proposal_with_dup_dealers_in_dkg_transcript() { - test_dkg_result_handling( - &[10_000_000, 40_000_000, 10_000_000, 40_000_000], - 1, - RealDKG::deal_twice_and_aggregate, - false, - ); -} - -#[test] -fn vote_on_proposal_with_valid_dkg_result() { - test_dkg_result_handling( - &[10_000_000, 70_000_000, 10_000_000, 10_000_000], - 1, - RealDKG::sample_secret_and_generate_transcript, - true, - ); -} - -fn test_dkg_result_handling( - voting_powers: &[u64], - dealer_idx: usize, - trx_gen_func: F, - should_accept: bool, -) where - F: Fn( - &mut ThreadRng, - &::PublicParams, - u64, - &::DealerPrivateKey, - ) -> ::Transcript, -{ - let mut rng = thread_rng(); - let epoch = 123; - let num_validators = voting_powers.len(); - let (signers, verifier) = - random_validator_verifier_with_voting_power(num_validators, None, false, voting_powers); - let validator_set: Vec = verifier - .validator_infos - .clone() - .into_iter() - .map(ValidatorConsensusInfoMoveStruct::from) - .collect(); - - let dkg_session_metadata = DKGSessionMetadata { - dealer_epoch: epoch, - randomness_config: RandomnessConfigMoveStruct::from( - OnChainRandomnessConfig::default_enabled(), - ), - dealer_validator_set: validator_set.clone(), - target_validator_set: validator_set, - }; - let public_params = RealDKG::new_public_params(&dkg_session_metadata); - let trx = trx_gen_func( - &mut rng, - &public_params, - dealer_idx as u64, - signers[dealer_idx].private_key(), - ); - let trx_bytes = bcs::to_bytes(&trx).unwrap(); - let vtxns = vec![ValidatorTransaction::DKGResult(DKGTranscript::new( - epoch, - verifier.get_ordered_account_addresses()[dealer_idx], - trx_bytes, - ))]; - - assert_process_proposal_result( - Some((signers, verifier)), - Some(OnChainRandomnessConfig::default_enabled()), - Some(OnChainJWKConsensusConfig::default_enabled()), 
- vtxns.clone(), - should_accept, - ); -} - -/// Setup a node with default configs and an optional `Features` override. -/// Create a block, fill it with the given vtxns, and process it with the `RoundManager` from the setup. -/// Assert the processing result. -fn assert_process_proposal_result( - validator_set: Option<(Vec, ValidatorVerifier)>, - randomness_config: Option, - jwk_consensus_config: Option, - vtxns: Vec, - expected_result: bool, -) { - let runtime = consensus_runtime(); - let mut playground = NetworkPlayground::new(runtime.handle().clone()); - let mut nodes = NodeSetup::create_nodes_with_validator_set( - &mut playground, - runtime.handle().clone(), - 1, - None, - Some(OnChainConsensusConfig::default_for_genesis()), - None, - randomness_config, - jwk_consensus_config, - validator_set, - ); - - let node = &mut nodes[0]; - let genesis_qc = certificate_for_genesis(); - let block = Block::new_proposal_ext( - vtxns, - Payload::empty(false, true), - 1, - 1, - genesis_qc.clone(), - &node.signer, - Vec::new(), - ) - .unwrap(); - - timed_block_on(&runtime, async { - // clear the message queue - node.next_proposal().await; - - assert_eq!( - expected_result, - node.round_manager - .process_proposal(block.clone()) - .await - .is_ok() - ); - }); -} - -#[ignore] -#[test] -/// If receiving txn num/block size limit is exceeded, ProposalExt should be rejected. -/// TODO: re-implement dummy vtxn and re-enable. -fn no_vote_on_proposal_ext_when_receiving_limit_exceeded() { - let runtime = consensus_runtime(); - let mut playground = NetworkPlayground::new(runtime.handle().clone()); - - let alg_config = ConsensusAlgorithmConfig::JolteonV2 { - main: ConsensusConfigV1::default(), - quorum_store_enabled: true, - order_vote_enabled: false, - }; - let vtxn_config = ValidatorTxnConfig::V1 { - per_block_limit_txn_count: 5, - per_block_limit_total_bytes: 400, - }; - - let local_config = ConsensusConfig { - max_receiving_block_txns: 10, - max_receiving_block_bytes: 800, - ..Default::default() - }; - - let randomness_config = OnChainRandomnessConfig::default_enabled(); - let mut nodes = NodeSetup::create_nodes( - &mut playground, - runtime.handle().clone(), - 1, - None, - Some(OnChainConsensusConfig::V4 { - alg: alg_config, - vtxn: vtxn_config, - window_size: DEFAULT_WINDOW_SIZE, - }), - Some(local_config), - Some(randomness_config), - None, - ); - let node = &mut nodes[0]; - let genesis_qc = certificate_for_genesis(); - - let block_too_many_txns = Block::new_proposal_ext( - vec![], - Payload::DirectMempool(create_vec_signed_transactions(11)), - 1, - 1, - genesis_qc.clone(), - &node.signer, - Vec::new(), - ) - .unwrap(); - - let block_too_many_vtxns = Block::new_proposal_ext( - vec![ValidatorTransaction::dummy(vec![0xFF; 20]); 6], - Payload::DirectMempool(create_vec_signed_transactions(4)), - 1, - 1, - genesis_qc.clone(), - &node.signer, - Vec::new(), - ) - .unwrap(); - - let block_too_large = Block::new_proposal_ext( - vec![ValidatorTransaction::dummy(vec![0xFF; 200]); 1], // total_bytes >= 200 * 1 = 200 - Payload::DirectMempool(create_vec_signed_transactions(9)), // = total_bytes >= 69 * 9 = 621 - 1, - 1, - genesis_qc.clone(), - &node.signer, - Vec::new(), - ) - .unwrap(); - - let block_vtxns_too_large = Block::new_proposal_ext( - vec![ValidatorTransaction::dummy(vec![0xFF; 200]); 5], // total_bytes >= 200 * 5 = 1000 - Payload::empty(false, true), - 1, - 1, - genesis_qc.clone(), - &node.signer, - Vec::new(), - ) - .unwrap(); - - let valid_block = Block::new_proposal_ext( - 
vec![ValidatorTransaction::dummy(vec![0xFF; 20]); 5], // total_bytes >= 60 * 5 = 300 - Payload::DirectMempool(create_vec_signed_transactions(5)), // total_bytes >= 69 * 5 = 345 - 1, - 1, - genesis_qc.clone(), - &node.signer, - Vec::new(), - ) - .unwrap(); - - timed_block_on(&runtime, async { - // clear the message queue - node.next_proposal().await; - - assert!(node - .round_manager - .process_proposal(block_too_many_txns) - .await - .is_err()); - - assert!(node - .round_manager - .process_proposal(block_too_many_vtxns) - .await - .is_err()); - - assert!(node - .round_manager - .process_proposal(block_too_large) - .await - .is_err()); - - assert!(node - .round_manager - .process_proposal(block_vtxns_too_large) - .await - .is_err()); - - assert!(node - .round_manager - .process_proposal(valid_block) - .await - .is_ok()); - }); -} diff --git a/consensus/src/round_manager_tests/mod.rs b/consensus/src/round_manager_tests/mod.rs new file mode 100644 index 0000000000000..947806a296eaa --- /dev/null +++ b/consensus/src/round_manager_tests/mod.rs @@ -0,0 +1,688 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + block_storage::{pending_blocks::PendingBlocks, BlockStore}, + counters, + liveness::{ + proposal_generator::{ + ChainHealthBackoffConfig, PipelineBackpressureConfig, ProposalGenerator, + }, + proposer_election::ProposerElection, + rotating_proposer_election::RotatingProposer, + round_state::{ExponentialTimeInterval, RoundState}, + }, + metrics_safety_rules::MetricsSafetyRules, + network::{IncomingBlockRetrievalRequest, NetworkSender}, + network_interface::{CommitMessage, ConsensusMsg, ConsensusNetworkClient, DIRECT_SEND, RPC}, + network_tests::{NetworkPlayground, TwinId}, + payload_manager::DirectMempoolPayloadManager, + persistent_liveness_storage::RecoveryData, + pipeline::buffer_manager::OrderedBlocks, + round_manager::RoundManager, + test_utils::{ + mock_execution_client::MockExecutionClient, MockOptQSPayloadProvider, + MockPastProposalStatusTracker, MockPayloadManager, MockStorage, + }, + util::time_service::{ClockTimeService, TimeService}, +}; +use aptos_channels::{self, aptos_channel, message_queues::QueueStyle}; +use aptos_config::{ + config::ConsensusConfig, + network_id::{NetworkId, PeerNetworkId}, +}; +use aptos_consensus_types::{ + block_retrieval::BlockRetrievalRequest, + common::{Author, Round}, + opt_block_data::OptBlockData, + opt_proposal_msg::OptProposalMsg, + order_vote_msg::OrderVoteMsg, + pipeline::commit_decision::CommitDecision, + proposal_msg::ProposalMsg, + round_timeout::RoundTimeoutMsg, + utils::PayloadTxnsSize, + vote_msg::VoteMsg, + wrapped_ledger_info::WrappedLedgerInfo, +}; +use aptos_crypto::HashValue; +use aptos_infallible::Mutex; +use aptos_logger::prelude::info; +use aptos_network::{ + application::interface::NetworkClient, + peer_manager::{ConnectionRequestSender, PeerManagerRequestSender}, + protocols::{ + network, + network::{Event, NetworkEvents, NewNetworkEvents, NewNetworkSender}, + wire::handshake::v1::ProtocolIdSet, + }, + transport::ConnectionMetadata, + ProtocolId, +}; +use aptos_safety_rules::{PersistentSafetyStorage, SafetyRulesManager}; +use aptos_secure_storage::Storage; +use aptos_types::{ + epoch_state::EpochState, + ledger_info::LedgerInfo, + on_chain_config::{ + ConsensusAlgorithmConfig, OnChainConsensusConfig, OnChainJWKConsensusConfig, + OnChainRandomnessConfig, + }, + transaction::SignedTransaction, + 
    validator_signer::ValidatorSigner,
+    validator_verifier::{random_validator_verifier, ValidatorVerifier},
+    waypoint::Waypoint,
+};
+use futures::{channel::mpsc, executor::block_on, stream::select, FutureExt, Stream, StreamExt};
+use maplit::hashmap;
+use std::{
+    collections::VecDeque,
+    iter::FromIterator,
+    sync::{
+        atomic::{AtomicBool, Ordering},
+        Arc,
+    },
+    time::Duration,
+};
+use tokio::{runtime::Handle, task::JoinHandle};
+
+mod consensus_test;
+mod opt_proposal_test;
+mod vtxn_on_proposal_test;
+
+fn config_with_round_timeout_msg_disabled() -> ConsensusConfig {
+    // Disable RoundTimeoutMsg unless explicitly enabled.
+    ConsensusConfig {
+        enable_round_timeout_msg: false,
+        ..Default::default()
+    }
+}
+
+fn start_replying_to_block_retreival(nodes: Vec<NodeSetup>) -> ReplyingRPCHandle {
+    let done = Arc::new(AtomicBool::new(false));
+    let mut handles = Vec::new();
+    for mut node in nodes.into_iter() {
+        let done_clone = done.clone();
+        handles.push(tokio::spawn(async move {
+            while !done_clone.load(Ordering::Relaxed) {
+                info!("Asking for RPC request on {:?}", node.identity_desc());
+                let maybe_request = node.poll_block_retrieval().await;
+                if let Some(request) = maybe_request {
+                    info!(
+                        "RPC request received: {:?} on {:?}",
+                        request,
+                        node.identity_desc()
+                    );
+                    let wrapped_request = IncomingBlockRetrievalRequest {
+                        req: request.req,
+                        protocol: request.protocol,
+                        response_sender: request.response_sender,
+                    };
+                    node.block_store
+                        .process_block_retrieval(wrapped_request)
+                        .await
+                        .unwrap();
+                } else {
+                    tokio::time::sleep(Duration::from_millis(50)).await;
+                }
+            }
+            node
+        }));
+    }
+    ReplyingRPCHandle { handles, done }
+}
+
+struct ReplyingRPCHandle {
+    handles: Vec<JoinHandle<NodeSetup>>,
+    done: Arc<AtomicBool>,
+}
+
+impl ReplyingRPCHandle {
+    async fn join(self) -> Vec<NodeSetup> {
+        self.done.store(true, Ordering::Relaxed);
+        let mut result = Vec::new();
+        for handle in self.handles.into_iter() {
+            result.push(handle.await.unwrap());
+        }
+        info!(
+            "joined nodes in order: {:?}",
+            result.iter().map(|v| v.id).collect::<Vec<_>>()
+        );
+        result
+    }
+}
+
+#[derive(Debug)]
+pub enum ProposalMsgType {
+    Normal(ProposalMsg),
+    Optimistic(OptProposalMsg),
+}
+
+/// Auxiliary struct that sets up the node environment for the test.
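+/// It owns a `RoundManager` wired into a `NetworkPlayground`, demultiplexes
+/// incoming consensus messages into per-type queues (proposals, opt proposals,
+/// votes, order votes, timeouts, commit decisions), and exposes `next_*`
+/// helpers so tests can drive the protocol one message at a time.
+///
+/// Typical driver loop in these tests (illustrative sketch only, mirroring the
+/// pattern used by `new_round_on_quorum_cert` below):
+/// ```ignore
+/// let proposal_msg = node.next_proposal().await;
+/// node.round_manager.process_proposal_msg(proposal_msg).await.unwrap();
+/// let vote_msg = node.next_vote().await;
+/// node.round_manager.process_vote_msg(vote_msg).await.unwrap();
+/// ```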
+pub struct NodeSetup {
+    pub block_store: Arc<BlockStore>,
+    round_manager: RoundManager,
+    storage: Arc<MockStorage>,
+    signer: ValidatorSigner,
+    proposers: Vec<Author>,
+    safety_rules_manager: SafetyRulesManager,
+    pending_network_events: Vec<Event<ConsensusMsg>>,
+    all_network_events: Box<dyn Stream<Item = Event<ConsensusMsg>> + Send + Unpin>,
+    ordered_blocks_events: mpsc::UnboundedReceiver<OrderedBlocks>,
+    mock_execution_client: Arc<MockExecutionClient>,
+    _state_sync_receiver: mpsc::UnboundedReceiver<Vec<SignedTransaction>>,
+    id: usize,
+    onchain_consensus_config: OnChainConsensusConfig,
+    local_consensus_config: ConsensusConfig,
+    onchain_randomness_config: OnChainRandomnessConfig,
+    onchain_jwk_consensus_config: OnChainJWKConsensusConfig,
+    vote_queue: VecDeque<VoteMsg>,
+    order_vote_queue: VecDeque<OrderVoteMsg>,
+    proposal_queue: VecDeque<ProposalMsg>,
+    opt_proposal_queue: VecDeque<OptProposalMsg>,
+    round_timeout_queue: VecDeque<RoundTimeoutMsg>,
+    commit_decision_queue: VecDeque<CommitDecision>,
+    processed_opt_proposal_rx: aptos_channels::UnboundedReceiver<OptBlockData>,
+}
+
+impl NodeSetup {
+    fn create_round_state(time_service: Arc<dyn TimeService>) -> RoundState {
+        let base_timeout = Duration::new(60, 0);
+        let time_interval = Box::new(ExponentialTimeInterval::fixed(base_timeout));
+        let (round_timeout_sender, _) = aptos_channels::new_test(1_024);
+        RoundState::new(time_interval, time_service, round_timeout_sender)
+    }
+
+    fn create_proposer_election(proposers: Vec<Author>) -> Arc<dyn ProposerElection + Send + Sync> {
+        Arc::new(RotatingProposer::new(proposers, 1))
+    }
+
+    fn create_nodes(
+        playground: &mut NetworkPlayground,
+        executor: Handle,
+        num_nodes: usize,
+        proposer_indices: Option<Vec<usize>>,
+        onchain_consensus_config: Option<OnChainConsensusConfig>,
+        local_consensus_config: Option<ConsensusConfig>,
+        onchain_randomness_config: Option<OnChainRandomnessConfig>,
+        onchain_jwk_consensus_config: Option<OnChainJWKConsensusConfig>,
+    ) -> Vec<Self> {
+        Self::create_nodes_with_validator_set(
+            playground,
+            executor,
+            num_nodes,
+            proposer_indices,
+            onchain_consensus_config,
+            local_consensus_config,
+            onchain_randomness_config,
+            onchain_jwk_consensus_config,
+            None,
+        )
+    }
+
+    fn create_nodes_with_validator_set(
+        playground: &mut NetworkPlayground,
+        executor: Handle,
+        num_nodes: usize,
+        proposer_indices: Option<Vec<usize>>,
+        onchain_consensus_config: Option<OnChainConsensusConfig>,
+        local_consensus_config: Option<ConsensusConfig>,
+        onchain_randomness_config: Option<OnChainRandomnessConfig>,
+        onchain_jwk_consensus_config: Option<OnChainJWKConsensusConfig>,
+        validator_set: Option<(Vec<ValidatorSigner>, ValidatorVerifier)>,
+    ) -> Vec<Self> {
+        let mut onchain_consensus_config = onchain_consensus_config.unwrap_or_default();
+        // With the order votes feature, the validators additionally send order votes;
+        // the next_proposal and next_vote functions could potentially break because of it.
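+        // For that reason, the setup below explicitly forces order_vote_enabled
+        // to false before the nodes are constructed.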
+ if let OnChainConsensusConfig::V4 { + alg: + ConsensusAlgorithmConfig::JolteonV2 { + main: _, + quorum_store_enabled: _, + order_vote_enabled, + }, + vtxn: _, + window_size: _, + } = &mut onchain_consensus_config + { + *order_vote_enabled = false; + } + let onchain_randomness_config = + onchain_randomness_config.unwrap_or_else(OnChainRandomnessConfig::default_if_missing); + let onchain_jwk_consensus_config = onchain_jwk_consensus_config + .unwrap_or_else(OnChainJWKConsensusConfig::default_if_missing); + let local_consensus_config = local_consensus_config.unwrap_or_default(); + let (signers, validators) = + validator_set.unwrap_or_else(|| random_validator_verifier(num_nodes, None, false)); + let proposers = proposer_indices + .unwrap_or_else(|| vec![0]) + .iter() + .map(|i| signers[*i].author()) + .collect::>(); + let validator_set = (&validators).into(); + let waypoint = + Waypoint::new_epoch_boundary(&LedgerInfo::mock_genesis(Some(validator_set))).unwrap(); + + let mut nodes = vec![]; + // pre-initialize the mapping to avoid race conditions (peer try to broadcast to someone not added yet) + let peers_and_metadata = playground.peer_protocols(); + for signer in signers.iter().take(num_nodes) { + let peer_id = signer.author(); + let mut conn_meta = ConnectionMetadata::mock(peer_id); + conn_meta.application_protocols = ProtocolIdSet::from_iter([ + ProtocolId::ConsensusDirectSendJson, + ProtocolId::ConsensusDirectSendBcs, + ProtocolId::ConsensusRpcBcs, + ]); + let peer_network_id = PeerNetworkId::new(NetworkId::Validator, peer_id); + peers_and_metadata + .insert_connection_metadata(peer_network_id, conn_meta) + .unwrap(); + } + for (id, signer) in signers.iter().take(num_nodes).enumerate() { + let (initial_data, storage) = MockStorage::start_for_testing((&validators).into()); + + let safety_storage = PersistentSafetyStorage::initialize( + Storage::from(aptos_secure_storage::InMemoryStorage::new()), + signer.author(), + signer.private_key().clone(), + waypoint, + true, + ); + let safety_rules_manager = SafetyRulesManager::new_local(safety_storage); + + nodes.push(Self::new( + playground, + executor.clone(), + signer.to_owned(), + proposers.clone(), + storage, + initial_data, + safety_rules_manager, + id, + onchain_consensus_config.clone(), + local_consensus_config.clone(), + onchain_randomness_config.clone(), + onchain_jwk_consensus_config.clone(), + )); + } + nodes + } + + fn new( + playground: &mut NetworkPlayground, + executor: Handle, + signer: ValidatorSigner, + proposers: Vec, + storage: Arc, + initial_data: RecoveryData, + safety_rules_manager: SafetyRulesManager, + id: usize, + onchain_consensus_config: OnChainConsensusConfig, + local_consensus_config: ConsensusConfig, + onchain_randomness_config: OnChainRandomnessConfig, + onchain_jwk_consensus_config: OnChainJWKConsensusConfig, + ) -> Self { + let _entered_runtime = executor.enter(); + let epoch_state = Arc::new(EpochState::new(1, storage.get_validator_set().into())); + let validators = epoch_state.verifier.clone(); + let (network_reqs_tx, network_reqs_rx) = aptos_channel::new(QueueStyle::FIFO, 8, None); + let (connection_reqs_tx, _) = aptos_channel::new(QueueStyle::FIFO, 8, None); + let (consensus_tx, consensus_rx) = aptos_channel::new(QueueStyle::FIFO, 8, None); + let (_conn_mgr_reqs_tx, conn_mgr_reqs_rx) = aptos_channels::new_test(8); + let network_sender = network::NetworkSender::new( + PeerManagerRequestSender::new(network_reqs_tx), + ConnectionRequestSender::new(connection_reqs_tx), + ); + let network_client = 
NetworkClient::new( + DIRECT_SEND.into(), + RPC.into(), + hashmap! {NetworkId::Validator => network_sender}, + playground.peer_protocols(), + ); + let consensus_network_client = ConsensusNetworkClient::new(network_client); + let network_events = NetworkEvents::new(consensus_rx, None, true); + let author = signer.author(); + + let twin_id = TwinId { id, author }; + + playground.add_node(twin_id, consensus_tx, network_reqs_rx, conn_mgr_reqs_rx); + + let (self_sender, self_receiver) = aptos_channels::new_unbounded_test(); + let network = Arc::new(NetworkSender::new( + author, + consensus_network_client, + self_sender, + validators, + )); + + let all_network_events = Box::new(select(network_events, self_receiver)); + + let last_vote_sent = initial_data.last_vote(); + let (ordered_blocks_tx, ordered_blocks_events) = mpsc::unbounded::(); + let (state_sync_client, _state_sync_receiver) = mpsc::unbounded(); + let mock_execution_client = Arc::new(MockExecutionClient::new( + state_sync_client.clone(), + ordered_blocks_tx.clone(), + Arc::clone(&storage), + )); + let time_service = Arc::new(ClockTimeService::new(executor)); + + let window_size = onchain_consensus_config.window_size(); + let block_store = Arc::new(BlockStore::new( + storage.clone(), + initial_data, + mock_execution_client.clone(), + 10, // max pruned blocks in mem + time_service.clone(), + 10, + Arc::from(DirectMempoolPayloadManager::new()), + false, + window_size, + Arc::new(Mutex::new(PendingBlocks::new())), + None, + )); + let block_store_clone = Arc::clone(&block_store); + let callback = Box::new( + move |block_id: HashValue, block_round: Round, commit_proof: WrappedLedgerInfo| { + block_store_clone.commit_callback(block_id, block_round, commit_proof, None) + }, + ); + mock_execution_client.set_callback(callback); + + let proposer_election = Self::create_proposer_election(proposers.clone()); + let proposal_generator = ProposalGenerator::new( + author, + block_store.clone(), + Arc::new(MockPayloadManager::new(None)), + time_service.clone(), + Duration::ZERO, + PayloadTxnsSize::new(20, 1000), + 10, + PayloadTxnsSize::new(5, 500), + 10, + 1, + Some(30_000), + PipelineBackpressureConfig::new_no_backoff(), + ChainHealthBackoffConfig::new_no_backoff(), + false, + onchain_consensus_config.effective_validator_txn_config(), + true, + Arc::new(MockOptQSPayloadProvider {}), + ); + + let round_state = Self::create_round_state(time_service); + let mut safety_rules = + MetricsSafetyRules::new(safety_rules_manager.client(), storage.clone()); + safety_rules.perform_initialize().unwrap(); + + let (round_manager_tx, _) = aptos_channel::new(QueueStyle::LIFO, 1, None); + + let (opt_proposal_loopback_tx, opt_proposal_loopback_rx) = aptos_channels::new_unbounded( + &counters::OP_COUNTERS.gauge("opt_proposal_loopback_queue"), + ); + + let local_config = local_consensus_config.clone(); + + let mut round_manager = RoundManager::new( + epoch_state, + Arc::clone(&block_store), + round_state, + proposer_election, + proposal_generator, + Arc::new(Mutex::new(safety_rules)), + network, + storage.clone(), + onchain_consensus_config.clone(), + round_manager_tx, + local_config, + onchain_randomness_config.clone(), + onchain_jwk_consensus_config.clone(), + None, + Arc::new(MockPastProposalStatusTracker {}), + opt_proposal_loopback_tx, + ); + block_on(round_manager.init(last_vote_sent)); + Self { + block_store, + round_manager, + storage, + signer, + proposers, + safety_rules_manager, + pending_network_events: Vec::new(), + all_network_events, + 
ordered_blocks_events, + mock_execution_client, + _state_sync_receiver, + id, + onchain_consensus_config, + local_consensus_config, + onchain_randomness_config, + onchain_jwk_consensus_config, + vote_queue: VecDeque::new(), + order_vote_queue: VecDeque::new(), + proposal_queue: VecDeque::new(), + opt_proposal_queue: VecDeque::new(), + round_timeout_queue: VecDeque::new(), + commit_decision_queue: VecDeque::new(), + processed_opt_proposal_rx: opt_proposal_loopback_rx, + } + } + + pub fn restart(self, playground: &mut NetworkPlayground, executor: Handle) -> Self { + let recover_data = self + .storage + .try_start( + self.onchain_consensus_config.order_vote_enabled(), + self.onchain_consensus_config.window_size(), + ) + .unwrap_or_else(|e| panic!("fail to restart due to: {}", e)); + Self::new( + playground, + executor, + self.signer, + self.proposers, + self.storage, + recover_data, + self.safety_rules_manager, + self.id, + self.onchain_consensus_config.clone(), + self.local_consensus_config.clone(), + self.onchain_randomness_config.clone(), + self.onchain_jwk_consensus_config.clone(), + ) + } + + pub fn identity_desc(&self) -> String { + format!("{} [{}]", self.id, self.signer.author()) + } + + fn poll_next_network_event(&mut self) -> Option> { + if !self.pending_network_events.is_empty() { + Some(self.pending_network_events.remove(0)) + } else { + self.all_network_events + .next() + .now_or_never() + .map(|v| v.unwrap()) + } + } + + pub async fn next_network_event(&mut self) -> Event { + if !self.pending_network_events.is_empty() { + self.pending_network_events.remove(0) + } else { + self.all_network_events.next().await.unwrap() + } + } + + pub async fn next_network_message(&mut self) { + let consensus_msg = match self.next_network_event().await { + Event::Message(_, msg) => msg, + Event::RpcRequest(_, msg, _, _) if matches!(msg, ConsensusMsg::CommitMessage(_)) => msg, + Event::RpcRequest(_, msg, _, _) => { + panic!( + "Unexpected event, got RpcRequest, expected Message: {:?} on node {}", + msg, + self.identity_desc() + ) + }, + }; + + match consensus_msg { + ConsensusMsg::ProposalMsg(proposal) => { + self.proposal_queue.push_back(*proposal); + }, + ConsensusMsg::OptProposalMsg(opt_proposal) => { + self.opt_proposal_queue.push_back(*opt_proposal); + }, + ConsensusMsg::VoteMsg(vote) => { + self.vote_queue.push_back(*vote); + }, + ConsensusMsg::OrderVoteMsg(order_vote) => { + self.order_vote_queue.push_back(*order_vote); + }, + ConsensusMsg::RoundTimeoutMsg(round_timeout) => { + self.round_timeout_queue.push_back(*round_timeout); + }, + ConsensusMsg::CommitDecisionMsg(commit_decision) => { + self.commit_decision_queue.push_back(*commit_decision); + }, + ConsensusMsg::CommitMessage(d) if matches!(*d, CommitMessage::Decision(_)) => { + match *d { + CommitMessage::Decision(commit_decision) => { + self.commit_decision_queue.push_back(commit_decision); + }, + _ => unreachable!(), + } + }, + msg => panic!( + "Unexpected Consensus Message: {:?} on node {}", + msg, + self.identity_desc() + ), + } + } + + pub fn no_next_msg(&mut self) { + match self.poll_next_network_event() { + Some(Event::RpcRequest(_, msg, _, _)) | Some(Event::Message(_, msg)) => panic!( + "Unexpected Consensus Message: {:?} on node {}", + msg, + self.identity_desc() + ), + None => {}, + } + } + + pub async fn next_proposal(&mut self) -> ProposalMsg { + while self.proposal_queue.is_empty() { + self.next_network_message().await; + } + self.proposal_queue.pop_front().unwrap() + } + + pub async fn next_opt_proposal(&mut self) -> 
OptProposalMsg { + while self.opt_proposal_queue.is_empty() { + self.next_network_message().await; + } + self.opt_proposal_queue.pop_front().unwrap() + } + + pub async fn next_opt_or_normal_proposal(&mut self) -> ProposalMsgType { + while self.opt_proposal_queue.is_empty() && self.proposal_queue.is_empty() { + self.next_network_message().await; + } + + if !self.opt_proposal_queue.is_empty() { + return ProposalMsgType::Optimistic(self.opt_proposal_queue.pop_front().unwrap()); + } + + ProposalMsgType::Normal(self.proposal_queue.pop_front().unwrap()) + } + + pub async fn next_vote(&mut self) -> VoteMsg { + while self.vote_queue.is_empty() { + self.next_network_message().await; + } + self.vote_queue.pop_front().unwrap() + } + + #[allow(unused)] + pub async fn next_order_vote(&mut self) -> OrderVoteMsg { + while self.order_vote_queue.is_empty() { + self.next_network_message().await; + } + self.order_vote_queue.pop_front().unwrap() + } + + pub async fn next_timeout(&mut self) -> RoundTimeoutMsg { + while self.round_timeout_queue.is_empty() { + self.next_network_message().await; + } + self.round_timeout_queue.pop_front().unwrap() + } + + pub async fn next_commit_decision(&mut self) -> CommitDecision { + while self.commit_decision_queue.is_empty() { + self.next_network_message().await; + } + self.commit_decision_queue.pop_front().unwrap() + } + + pub async fn poll_block_retrieval(&mut self) -> Option { + match self.poll_next_network_event() { + Some(Event::RpcRequest(_, msg, protocol, response_sender)) => match msg { + ConsensusMsg::DeprecatedBlockRetrievalRequest(v) => { + Some(IncomingBlockRetrievalRequest { + req: BlockRetrievalRequest::V1(*v), + protocol, + response_sender, + }) + }, + ConsensusMsg::BlockRetrievalRequest(v) => Some(IncomingBlockRetrievalRequest { + req: *v, + protocol, + response_sender, + }), + msg => panic!( + "Unexpected Consensus Message: {:?} on node {}", + msg, + self.identity_desc() + ), + }, + Some(Event::Message(_, msg)) => panic!( + "Unexpected Consensus Message: {:?} on node {}", + msg, + self.identity_desc() + ), + None => None, + } + } + + pub fn no_next_ordered(&mut self) { + if self.ordered_blocks_events.next().now_or_never().is_some() { + panic!("Unexpected Ordered Blocks Event"); + } + } + + pub async fn commit_next_ordered(&mut self, expected_rounds: &[Round]) { + info!( + "Starting commit_next_ordered to wait for {:?} on node {:?}", + expected_rounds, + self.identity_desc() + ); + let ordered_blocks = self.ordered_blocks_events.next().await.unwrap(); + let rounds = ordered_blocks + .ordered_blocks + .iter() + .map(|b| b.round()) + .collect::>(); + assert_eq!(&rounds, expected_rounds); + self.mock_execution_client + .commit_to_storage(ordered_blocks) + .await + .unwrap(); + } +} diff --git a/consensus/src/round_manager_tests/opt_proposal_test.rs b/consensus/src/round_manager_tests/opt_proposal_test.rs new file mode 100644 index 0000000000000..172672614529c --- /dev/null +++ b/consensus/src/round_manager_tests/opt_proposal_test.rs @@ -0,0 +1,309 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + block_storage::BlockReader, + liveness::round_state::{NewRoundEvent, NewRoundReason}, + network_tests::NetworkPlayground, + round_manager::{ + round_manager_tests::{ + consensus_test::{process_and_vote_on_proposal, process_and_vote_opt_proposal}, + NodeSetup, + }, + RoundManager, + }, + test_utils::{consensus_runtime, timed_block_on}, +}; +use aptos_config::config::ConsensusConfig; +use aptos_consensus_types::{ + 
    common::Payload, opt_block_data::OptBlockData, opt_proposal_msg::OptProposalMsg,
+};
+use futures::StreamExt;
+
+fn config_with_opt_proposal_enabled() -> ConsensusConfig {
+    ConsensusConfig {
+        enable_optimistic_proposal_rx: true,
+        enable_optimistic_proposal_tx: true,
+        ..Default::default()
+    }
+}
+
+/// Asserts that an optimistic proposal is proposed and voted on for rounds 2 to n
+/// in the absence of failures.
+#[test]
+fn test_opt_proposal_proposed_no_failures() {
+    let runtime = consensus_runtime();
+    let mut playground = NetworkPlayground::new(runtime.handle().clone());
+    let mut nodes = NodeSetup::create_nodes(
+        &mut playground,
+        runtime.handle().clone(),
+        1,
+        None,
+        None,
+        Some(config_with_opt_proposal_enabled()),
+        None,
+        None,
+    );
+    let genesis = nodes[0].block_store.ordered_root();
+
+    // Process and vote on a normal proposal for round 1
+    process_and_vote_on_proposal(&runtime, &mut nodes, 0, &[], true, Some(0), true, 1, 0, 0);
+
+    let node = &mut nodes[0];
+    let mut expected_grandparent_qc = genesis.id();
+    for round in 2..10 {
+        let opt_proposal_msg = timed_block_on(&runtime, async { node.next_opt_proposal().await });
+        assert_eq!(opt_proposal_msg.round(), round);
+        assert_eq!(
+            opt_proposal_msg
+                .block_data()
+                .grandparent_qc()
+                .certified_block()
+                .id(),
+            expected_grandparent_qc
+        );
+        expected_grandparent_qc = opt_proposal_msg.block_data().parent_id();
+        // process and vote on the optimistic proposal only
+        process_and_vote_opt_proposal(
+            &runtime,
+            node,
+            opt_proposal_msg,
+            round,
+            round.saturating_sub(2),
+            0,
+        );
+        // process vote to gather QC and enter the next round
+        timed_block_on(&runtime, async {
+            let vote_msg = node.next_vote().await;
+            // Adding vote to form a QC
+            node.round_manager.process_vote_msg(vote_msg).await.unwrap();
+        });
+    }
+}
+
+/// Asserts that two consecutive opt-proposal rounds time out and that
+/// the round after the timeout rounds is always a normal round.
+#[test]
+fn test_normal_proposal_after_opt_proposal_timeout() {
+    let runtime = consensus_runtime();
+    let mut playground = NetworkPlayground::new(runtime.handle().clone());
+    let mut nodes = NodeSetup::create_nodes(
+        &mut playground,
+        runtime.handle().clone(),
+        1,
+        None,
+        None,
+        Some(config_with_opt_proposal_enabled()),
+        None,
+        None,
+    );
+    let genesis = nodes[0].block_store.ordered_root();
+
+    // Process and vote on a normal proposal for round 1
+    process_and_vote_on_proposal(&runtime, &mut nodes, 0, &[], true, Some(0), true, 1, 0, 0);
+
+    let node = &mut nodes[0];
+    let expected_grandparent_qc = genesis.id();
+
+    let round = 2;
+    let opt_proposal_msg = timed_block_on(&runtime, async { node.next_opt_proposal().await });
+    assert_eq!(opt_proposal_msg.round(), round);
+    assert_eq!(
+        opt_proposal_msg
+            .block_data()
+            .grandparent_qc()
+            .certified_block()
+            .id(),
+        expected_grandparent_qc
+    );
+    // process and vote on the optimistic proposal only
+    process_and_vote_opt_proposal(
+        &runtime,
+        node,
+        opt_proposal_msg,
+        round,
+        round.saturating_sub(2),
+        0,
+    );
+
+    timed_block_on(&runtime, async {
+        // process round 2 timeout.
+        let round = 2;
+        node.round_manager
+            .process_local_timeout(round)
+            .await
+            .unwrap_err();
+        let timeout_msg = node.next_timeout().await;
+        node.round_manager
+            .process_round_timeout_msg(timeout_msg)
+            .await
+            .unwrap();
+
+        // process round 3 timeout.
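+        // Per the invariant in this test's doc comment, the round that follows
+        // the two timed-out opt-proposal rounds must open with a normal
+        // proposal, which is consumed via next_proposal below.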
+        let round = 3;
+        node.round_manager
+            .process_local_timeout(round)
+            .await
+            .unwrap_err();
+        let timeout_msg = node.next_timeout().await;
+        node.round_manager
+            .process_round_timeout_msg(timeout_msg)
+            .await
+            .unwrap();
+
+        node.next_proposal().await;
+    });
+}
+
+/// Asserts that either an optimistic proposal or a normal proposal can be
+/// created in a given round, but not both.
+#[test]
+fn test_one_proposal_per_round_honest_proposer() {
+    let runtime = consensus_runtime();
+    let mut playground = NetworkPlayground::new(runtime.handle().clone());
+    let mut nodes = NodeSetup::create_nodes(
+        &mut playground,
+        runtime.handle().clone(),
+        1,
+        None,
+        None,
+        Some(config_with_opt_proposal_enabled()),
+        None,
+        None,
+    );
+    let genesis = nodes[0].block_store.ordered_root();
+    let node = &mut nodes[0];
+
+    timed_block_on(&runtime, async {
+        let round_manager = &node.round_manager;
+        let epoch_state = round_manager.epoch_state.clone();
+        let network = round_manager.network.clone();
+        let sync_info = round_manager.block_store.sync_info();
+        let proposal_generator = round_manager.proposal_generator.clone();
+        let proposer_election = round_manager.proposer_election.clone();
+        let safety_rules = round_manager.safety_rules.clone();
+
+        // Ensure that an opt proposal cannot be created for round 1
+        RoundManager::generate_and_send_opt_proposal(
+            epoch_state.clone(),
+            1,
+            genesis.block_info(),
+            genesis.quorum_cert().clone(),
+            network.clone(),
+            sync_info.clone(),
+            proposal_generator.clone(),
+            proposer_election.clone(),
+        )
+        .await
+        .unwrap_err();
+
+        // Ensure an opt proposal can be created for round 2.
+        RoundManager::generate_and_send_opt_proposal(
+            epoch_state.clone(),
+            2,
+            genesis.block_info(),
+            genesis.quorum_cert().clone(),
+            network.clone(),
+            sync_info.clone(),
+            proposal_generator.clone(),
+            proposer_election.clone(),
+        )
+        .await
+        .unwrap();
+
+        // Ensure a normal proposal cannot be created after an opt proposal in the same round
+        let new_round_event = NewRoundEvent {
+            round: 2,
+            reason: NewRoundReason::QCReady,
+            timeout: Default::default(),
+            prev_round_votes: vec![],
+            prev_round_timeout_votes: None,
+        };
+        RoundManager::generate_and_send_proposal(
+            epoch_state.clone(),
+            new_round_event,
+            network.clone(),
+            sync_info.clone(),
+            proposal_generator.clone(),
+            safety_rules.clone(),
+            proposer_election.clone(),
+        )
+        .await
+        .unwrap_err();
+
+        // Ensure a normal proposal can be created in a round with no prior opt proposal
+        let new_round_event = NewRoundEvent {
+            round: 3,
+            reason: NewRoundReason::QCReady,
+            timeout: Default::default(),
+            prev_round_votes: vec![],
+            prev_round_timeout_votes: None,
+        };
+        RoundManager::generate_and_send_proposal(
+            epoch_state,
+            new_round_event,
+            network,
+            sync_info,
+            proposal_generator,
+            safety_rules,
+            proposer_election,
+        )
+        .await
+        .unwrap();
+    });
+}
+
+/// Don't process an optimistic proposal if a normal proposal for the same
+/// round has already been processed.
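+/// The round manager is expected to reject the second (optimistic) proposal
+/// with the error "Proposal has already been processed for round: 1".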
+#[test] +fn test_process_either_optimistic_or_normal_proposal() { + let runtime = consensus_runtime(); + let mut playground = NetworkPlayground::new(runtime.handle().clone()); + let mut nodes = NodeSetup::create_nodes( + &mut playground, + runtime.handle().clone(), + 1, + None, + None, + Some(config_with_opt_proposal_enabled()), + None, + None, + ); + let genesis = nodes[0].block_store.ordered_root(); + let node = &mut nodes[0]; + + timed_block_on(&runtime, async { + let proposal_msg = node.next_proposal().await; + node.round_manager + .process_proposal_msg(proposal_msg.clone()) + .await + .unwrap(); + + let opt_block_data = OptBlockData::new( + Vec::new(), + Payload::empty(false, false), + proposal_msg.proposer(), + proposal_msg.proposal().epoch(), + proposal_msg.proposal().round(), + proposal_msg.proposal().timestamp_usecs(), + proposal_msg.proposal().quorum_cert().parent_block().clone(), + genesis.quorum_cert().clone(), + ); + let opt_proposal_msg = + OptProposalMsg::new(opt_block_data, proposal_msg.sync_info().clone()); + node.round_manager + .process_opt_proposal_msg(opt_proposal_msg) + .await + .unwrap(); + let opt_block_data = node.processed_opt_proposal_rx.next().await.unwrap(); + + let error = node + .round_manager + .process_opt_proposal(opt_block_data) + .await + .unwrap_err(); + assert_eq!( + error.to_string(), + "Proposal has already been processed for round: 1" + ); + }) +} diff --git a/consensus/src/round_manager_tests/vtxn_on_proposal_test.rs b/consensus/src/round_manager_tests/vtxn_on_proposal_test.rs new file mode 100644 index 0000000000000..2ef07b054120f --- /dev/null +++ b/consensus/src/round_manager_tests/vtxn_on_proposal_test.rs @@ -0,0 +1,393 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 +// Parts of the project are originally copyright © Meta Platforms, Inc. + +use crate::{ + network_tests::NetworkPlayground, + round_manager::round_manager_tests::NodeSetup, + test_utils::{consensus_runtime, create_vec_signed_transactions, timed_block_on}, +}; +use aptos_config::config::ConsensusConfig; +use aptos_consensus_types::{ + block::{block_test_utils::certificate_for_genesis, Block}, + common::Payload, +}; +use aptos_types::{ + dkg::{real_dkg::RealDKG, DKGSessionMetadata, DKGTrait, DKGTranscript}, + jwks::QuorumCertifiedUpdate, + on_chain_config::{ + ConsensusAlgorithmConfig, ConsensusConfigV1, OnChainConsensusConfig, + OnChainJWKConsensusConfig, OnChainRandomnessConfig, RandomnessConfigMoveStruct, + ValidatorTxnConfig, DEFAULT_WINDOW_SIZE, + }, + validator_signer::ValidatorSigner, + validator_txn::ValidatorTransaction, + validator_verifier::{ + random_validator_verifier_with_voting_power, ValidatorConsensusInfoMoveStruct, + ValidatorVerifier, + }, +}; +use rand::{rngs::ThreadRng, thread_rng}; + +#[test] +/// If ProposalExt feature is disabled, ProposalExt should be rejected +/// No votes are sent, but the block is still added to the block tree. +fn no_vote_on_proposal_ext_when_feature_disabled() { + let runtime = consensus_runtime(); + let mut playground = NetworkPlayground::new(runtime.handle().clone()); + // In order to observe the votes we're going to check proposal processing on the non-proposer + // node (which will send the votes to the proposer). 
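+    // The test feeds one ProposalExt block carrying vtxns and one plain
+    // proposal to the round manager; process_proposal should error on the
+    // former and succeed on the latter.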
+ let mut nodes = NodeSetup::create_nodes( + &mut playground, + runtime.handle().clone(), + 1, + None, + None, + None, + None, + None, + ); + let node = &mut nodes[0]; + let genesis_qc = certificate_for_genesis(); + + let invalid_block = Block::new_proposal_ext( + vec![ValidatorTransaction::dummy(vec![0xFF]); 5], + Payload::empty(false, true), + 1, + 1, + genesis_qc.clone(), + &node.signer, + Vec::new(), + ) + .unwrap(); + + let valid_block = Block::new_proposal( + Payload::empty(false, true), + 1, + 1, + genesis_qc, + &node.signer, + Vec::new(), + ) + .unwrap(); + + timed_block_on(&runtime, async { + // clear the message queue + node.next_proposal().await; + + assert!(node + .round_manager + .process_proposal(invalid_block) + .await + .is_err()); + + assert!(node + .round_manager + .process_proposal(valid_block) + .await + .is_ok()); + }); +} + +#[test] +fn no_vote_on_proposal_with_unexpected_vtxns() { + let vtxns = vec![ValidatorTransaction::ObservedJWKUpdate( + QuorumCertifiedUpdate::dummy(), + )]; + + assert_process_proposal_result( + None, + None, + Some(OnChainJWKConsensusConfig::default_disabled()), + vtxns.clone(), + false, + ); + + assert_process_proposal_result( + None, + None, + Some(OnChainJWKConsensusConfig::default_enabled()), + vtxns, + true, + ); +} + +#[test] +fn no_vote_on_proposal_with_uncertified_dkg_result() { + test_dkg_result_handling( + &[25_000_000; 4], + 1, + RealDKG::sample_secret_and_generate_transcript, + false, + ); +} + +#[test] +fn no_vote_on_proposal_with_inconsistent_secret_dkg_result() { + test_dkg_result_handling( + &[10_000_000, 70_000_000, 10_000_000, 10_000_000], + 1, + RealDKG::generate_transcript_for_inconsistent_secrets, + false, + ); +} + +#[test] +fn no_vote_on_proposal_with_dup_dealers_in_dkg_transcript() { + test_dkg_result_handling( + &[10_000_000, 40_000_000, 10_000_000, 40_000_000], + 1, + RealDKG::deal_twice_and_aggregate, + false, + ); +} + +#[test] +fn vote_on_proposal_with_valid_dkg_result() { + test_dkg_result_handling( + &[10_000_000, 70_000_000, 10_000_000, 10_000_000], + 1, + RealDKG::sample_secret_and_generate_transcript, + true, + ); +} + +fn test_dkg_result_handling( + voting_powers: &[u64], + dealer_idx: usize, + trx_gen_func: F, + should_accept: bool, +) where + F: Fn( + &mut ThreadRng, + &::PublicParams, + u64, + &::DealerPrivateKey, + ) -> ::Transcript, +{ + let mut rng = thread_rng(); + let epoch = 123; + let num_validators = voting_powers.len(); + let (signers, verifier) = + random_validator_verifier_with_voting_power(num_validators, None, false, voting_powers); + let validator_set: Vec = verifier + .validator_infos + .clone() + .into_iter() + .map(ValidatorConsensusInfoMoveStruct::from) + .collect(); + + let dkg_session_metadata = DKGSessionMetadata { + dealer_epoch: epoch, + randomness_config: RandomnessConfigMoveStruct::from( + OnChainRandomnessConfig::default_enabled(), + ), + dealer_validator_set: validator_set.clone(), + target_validator_set: validator_set, + }; + let public_params = RealDKG::new_public_params(&dkg_session_metadata); + let trx = trx_gen_func( + &mut rng, + &public_params, + dealer_idx as u64, + signers[dealer_idx].private_key(), + ); + let trx_bytes = bcs::to_bytes(&trx).unwrap(); + let vtxns = vec![ValidatorTransaction::DKGResult(DKGTranscript::new( + epoch, + verifier.get_ordered_account_addresses()[dealer_idx], + trx_bytes, + ))]; + + assert_process_proposal_result( + Some((signers, verifier)), + Some(OnChainRandomnessConfig::default_enabled()), + Some(OnChainJWKConsensusConfig::default_enabled()), 
+ vtxns.clone(), + should_accept, + ); +} + +/// Setup a node with default configs and an optional `Features` override. +/// Create a block, fill it with the given vtxns, and process it with the `RoundManager` from the setup. +/// Assert the processing result. +fn assert_process_proposal_result( + validator_set: Option<(Vec, ValidatorVerifier)>, + randomness_config: Option, + jwk_consensus_config: Option, + vtxns: Vec, + expected_result: bool, +) { + let runtime = consensus_runtime(); + let mut playground = NetworkPlayground::new(runtime.handle().clone()); + let mut nodes = NodeSetup::create_nodes_with_validator_set( + &mut playground, + runtime.handle().clone(), + 1, + None, + Some(OnChainConsensusConfig::default_for_genesis()), + None, + randomness_config, + jwk_consensus_config, + validator_set, + ); + + let node = &mut nodes[0]; + let genesis_qc = certificate_for_genesis(); + let block = Block::new_proposal_ext( + vtxns, + Payload::empty(false, true), + 1, + 1, + genesis_qc.clone(), + &node.signer, + Vec::new(), + ) + .unwrap(); + + timed_block_on(&runtime, async { + // clear the message queue + node.next_proposal().await; + + assert_eq!( + expected_result, + node.round_manager + .process_proposal(block.clone()) + .await + .is_ok() + ); + }); +} + +#[ignore] +#[test] +/// If receiving txn num/block size limit is exceeded, ProposalExt should be rejected. +/// TODO: re-implement dummy vtxn and re-enable. +fn no_vote_on_proposal_ext_when_receiving_limit_exceeded() { + let runtime = consensus_runtime(); + let mut playground = NetworkPlayground::new(runtime.handle().clone()); + + let alg_config = ConsensusAlgorithmConfig::JolteonV2 { + main: ConsensusConfigV1::default(), + quorum_store_enabled: true, + order_vote_enabled: false, + }; + let vtxn_config = ValidatorTxnConfig::V1 { + per_block_limit_txn_count: 5, + per_block_limit_total_bytes: 400, + }; + + let local_config = ConsensusConfig { + max_receiving_block_txns: 10, + max_receiving_block_bytes: 800, + ..Default::default() + }; + + let randomness_config = OnChainRandomnessConfig::default_enabled(); + let mut nodes = NodeSetup::create_nodes( + &mut playground, + runtime.handle().clone(), + 1, + None, + Some(OnChainConsensusConfig::V4 { + alg: alg_config, + vtxn: vtxn_config, + window_size: DEFAULT_WINDOW_SIZE, + }), + Some(local_config), + Some(randomness_config), + None, + ); + let node = &mut nodes[0]; + let genesis_qc = certificate_for_genesis(); + + let block_too_many_txns = Block::new_proposal_ext( + vec![], + Payload::DirectMempool(create_vec_signed_transactions(11)), + 1, + 1, + genesis_qc.clone(), + &node.signer, + Vec::new(), + ) + .unwrap(); + + let block_too_many_vtxns = Block::new_proposal_ext( + vec![ValidatorTransaction::dummy(vec![0xFF; 20]); 6], + Payload::DirectMempool(create_vec_signed_transactions(4)), + 1, + 1, + genesis_qc.clone(), + &node.signer, + Vec::new(), + ) + .unwrap(); + + let block_too_large = Block::new_proposal_ext( + vec![ValidatorTransaction::dummy(vec![0xFF; 200]); 1], // total_bytes >= 200 * 1 = 200 + Payload::DirectMempool(create_vec_signed_transactions(9)), // = total_bytes >= 69 * 9 = 621 + 1, + 1, + genesis_qc.clone(), + &node.signer, + Vec::new(), + ) + .unwrap(); + + let block_vtxns_too_large = Block::new_proposal_ext( + vec![ValidatorTransaction::dummy(vec![0xFF; 200]); 5], // total_bytes >= 200 * 5 = 1000 + Payload::empty(false, true), + 1, + 1, + genesis_qc.clone(), + &node.signer, + Vec::new(), + ) + .unwrap(); + + let valid_block = Block::new_proposal_ext( + 
+ vec![ValidatorTransaction::dummy(vec![0xFF; 20]); 5], // total_bytes >= 60 * 5 = 300
+ Payload::DirectMempool(create_vec_signed_transactions(5)), // total_bytes >= 69 * 5 = 345
+ 1,
+ 1,
+ genesis_qc.clone(),
+ &node.signer,
+ Vec::new(),
+ )
+ .unwrap();
+
+ timed_block_on(&runtime, async {
+ // clear the message queue
+ node.next_proposal().await;
+
+ assert!(node
+ .round_manager
+ .process_proposal(block_too_many_txns)
+ .await
+ .is_err());
+
+ assert!(node
+ .round_manager
+ .process_proposal(block_too_many_vtxns)
+ .await
+ .is_err());
+
+ assert!(node
+ .round_manager
+ .process_proposal(block_too_large)
+ .await
+ .is_err());
+
+ assert!(node
+ .round_manager
+ .process_proposal(block_vtxns_too_large)
+ .await
+ .is_err());
+
+ assert!(node
+ .round_manager
+ .process_proposal(valid_block)
+ .await
+ .is_ok());
+ });
+}
diff --git a/consensus/src/state_computer.rs b/consensus/src/state_computer.rs
index 945a210a2b63f..e74202cf0041b 100644
--- a/consensus/src/state_computer.rs
+++ b/consensus/src/state_computer.rs
@@ -3,51 +3,27 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use crate::{
- block_preparer::BlockPreparer,
- block_storage::tracing::{observe_block, BlockStage},
- counters,
- error::StateSyncError,
- execution_pipeline::{ExecutionPipeline, PreCommitHook},
- monitor,
- payload_manager::TPayloadManager,
- pipeline::{pipeline_builder::PipelineBuilder, pipeline_phase::CountedRequest},
- state_replication::{StateComputer, StateComputerCommitCallBackType},
- transaction_deduper::TransactionDeduper,
- transaction_filter::TransactionFilter,
- transaction_shuffler::TransactionShuffler,
- txn_notifier::TxnNotifier,
+ block_preparer::BlockPreparer, error::StateSyncError, monitor,
+ payload_manager::TPayloadManager, pipeline::pipeline_builder::PipelineBuilder,
+ state_replication::StateComputer, transaction_deduper::TransactionDeduper,
+ transaction_shuffler::TransactionShuffler, txn_notifier::TxnNotifier,
 };
 use anyhow::Result;
+use aptos_config::config::BlockTransactionFilterConfig;
 use aptos_consensus_notifications::ConsensusNotificationSender;
-use aptos_consensus_types::{
- block::Block, common::Round, pipeline_execution_result::PipelineExecutionResult,
- pipelined_block::PipelinedBlock, quorum_cert::QuorumCert,
-};
-use aptos_crypto::HashValue;
-use aptos_executor_types::{
- state_compute_result::StateComputeResult, BlockExecutorTrait, ExecutorResult,
-};
+use aptos_consensus_types::common::Round;
+use aptos_executor_types::BlockExecutorTrait;
 use aptos_infallible::RwLock;
 use aptos_logger::prelude::*;
-use aptos_metrics_core::IntGauge;
 use aptos_types::{
 account_address::AccountAddress, block_executor::config::BlockExecutorConfigFromOnchain,
- epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, randomness::Randomness,
+ epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures,
 validator_signer::ValidatorSigner,
 };
 use fail::fail_point;
-use futures::{future::BoxFuture, SinkExt, StreamExt};
-use std::{
- boxed::Box,
- sync::Arc,
- time::{Duration, Instant},
-};
+use std::{boxed::Box, sync::Arc, time::Duration};
 use tokio::sync::Mutex as AsyncMutex;
 
-pub type StateComputeResultFut = BoxFuture<'static, ExecutorResult<PipelineExecutionResult>>;
-
-type NotificationType = BoxFuture<'static, ()>;
-
 #[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
 struct LogicalTime {
 epoch: u64,
@@ -77,11 +53,8 @@ pub struct ExecutionProxy {
 executor: Arc<dyn BlockExecutorTrait>,
 txn_notifier: Arc<dyn TxnNotifier>,
 state_sync_notifier: Arc<dyn ConsensusNotificationSender>,
- pre_commit_notifier: aptos_channels::Sender<NotificationType>,
- commit_notifier: aptos_channels::Sender<NotificationType>,
 write_mutex: AsyncMutex<LogicalTime>,
- transaction_filter: Arc<TransactionFilter>,
- execution_pipeline: ExecutionPipeline,
+ txn_filter_config: Arc<BlockTransactionFilterConfig>,
 state: RwLock<Option<MutableState>>,
 enable_pre_commit: bool,
 }
@@ -91,102 +64,20 @@ impl ExecutionProxy {
 executor: Arc<dyn BlockExecutorTrait>,
 txn_notifier: Arc<dyn TxnNotifier>,
 state_sync_notifier: Arc<dyn ConsensusNotificationSender>,
- handle: &tokio::runtime::Handle,
- txn_filter: TransactionFilter,
+ txn_filter_config: BlockTransactionFilterConfig,
 enable_pre_commit: bool,
 ) -> Self {
- let pre_commit_notifier = Self::spawn_future_runner(
- handle,
- "pre-commit",
- &counters::PENDING_STATE_SYNC_NOTIFICATION,
- );
- let commit_notifier =
- Self::spawn_future_runner(handle, "commit", &counters::PENDING_COMMIT_NOTIFICATION);
-
- let execution_pipeline =
- ExecutionPipeline::spawn(executor.clone(), handle, enable_pre_commit);
 Self {
 executor,
 txn_notifier,
 state_sync_notifier,
- pre_commit_notifier,
- commit_notifier,
 write_mutex: AsyncMutex::new(LogicalTime::new(0, 0)),
- transaction_filter: Arc::new(txn_filter),
- execution_pipeline,
+ txn_filter_config: Arc::new(txn_filter_config),
 state: RwLock::new(None),
 enable_pre_commit,
 }
 }
-
- fn spawn_future_runner(
- handle: &tokio::runtime::Handle,
- name: &'static str,
- pending_notifications_gauge: &IntGauge,
- ) -> aptos_channels::Sender<NotificationType> {
- let (tx, mut rx) = aptos_channels::new::<NotificationType>(10, pending_notifications_gauge);
- let _join_handle = handle.spawn(async move {
- while let Some(fut) = rx.next().await {
- fut.await
- }
- info!(name = name, "Future runner stopped.")
- });
- tx
- }
-
- fn pre_commit_hook(&self) -> PreCommitHook {
- let mut pre_commit_notifier = self.pre_commit_notifier.clone();
- let state_sync_notifier = self.state_sync_notifier.clone();
- Box::new(move |state_compute_result: &StateComputeResult| {
- let state_compute_result = state_compute_result.clone();
- Box::pin(async move {
- pre_commit_notifier
- .send(Box::pin(async move {
- let _timer = counters::OP_COUNTERS.timer("pre_commit_notify");
-
- let txns = state_compute_result.transactions_to_commit().to_vec();
- let subscribable_events =
- state_compute_result.subscribable_events().to_vec();
- if let Err(e) = monitor!(
- "notify_state_sync",
- state_sync_notifier
- .notify_new_commit(txns, subscribable_events)
- .await
- ) {
- error!(error = ?e, "Failed to notify state synchronizer");
- }
- }))
- .await
- .expect("Failed to send pre-commit notification");
- })
- })
- }
-
- fn commit_hook(
- &self,
- blocks: &[Arc<PipelinedBlock>],
- callback: StateComputerCommitCallBackType,
- finality_proof: LedgerInfoWithSignatures,
- ) -> NotificationType {
- let payload_manager = self
- .state
- .read()
- .as_ref()
- .expect("must be set within an epoch")
- .payload_manager
- .clone();
- let blocks = blocks.to_vec();
- Box::pin(async move {
- for block in blocks.iter() {
- let payload = block.payload().cloned();
- let payload_vec = payload.into_iter().collect();
- let timestamp = block.timestamp_usecs();
- payload_manager.notify_commit(timestamp, payload_vec);
- }
- callback(&blocks, finality_proof);
- })
- }
-
 pub fn pipeline_builder(&self, commit_signer: Arc<ValidatorSigner>) -> PipelineBuilder {
 let MutableState {
 validators,
@@ -205,7 +96,7 @@ impl ExecutionProxy {
 let block_preparer = Arc::new(BlockPreparer::new(
 payload_manager.clone(),
- self.transaction_filter.clone(),
+ self.txn_filter_config.clone(),
 transaction_deduper.clone(),
 transaction_shuffler.clone(),
 ));
@@ -227,158 +118,6 @@ impl ExecutionProxy {
 
 #[async_trait::async_trait]
 impl StateComputer for ExecutionProxy {
- async fn schedule_compute(
- &self,
- // The block to be executed.
- block: &Block, - // The parent block id. - parent_block_id: HashValue, - randomness: Option, - block_qc: Option>, - lifetime_guard: CountedRequest<()>, - ) -> StateComputeResultFut { - let block_id = block.id(); - debug!( - block = %block, - parent_id = parent_block_id, - "Executing block", - ); - let MutableState { - validators, - payload_manager, - transaction_shuffler, - block_executor_onchain_config, - transaction_deduper, - is_randomness_enabled, - .. - } = self - .state - .read() - .as_ref() - .cloned() - .expect("must be set within an epoch"); - - let txn_notifier = self.txn_notifier.clone(); - let transaction_generator = BlockPreparer::new( - payload_manager, - self.transaction_filter.clone(), - transaction_deduper.clone(), - transaction_shuffler.clone(), - ); - - let block_executor_onchain_config = block_executor_onchain_config.clone(); - - let timestamp = block.timestamp_usecs(); - let metadata = if is_randomness_enabled { - block.new_metadata_with_randomness(&validators, randomness) - } else { - block.new_block_metadata(&validators).into() - }; - - let pipeline_entry_time = Instant::now(); - let fut = self - .execution_pipeline - .queue( - block.clone(), - metadata.clone(), - parent_block_id, - block_qc, - transaction_generator, - block_executor_onchain_config, - self.pre_commit_hook(), - lifetime_guard, - transaction_shuffler, - ) - .await; - observe_block(timestamp, BlockStage::EXECUTION_PIPELINE_INSERTED); - counters::PIPELINE_ENTRY_TO_INSERTED_TIME.observe_duration(pipeline_entry_time.elapsed()); - let pipeline_inserted_timestamp = Instant::now(); - - Box::pin(async move { - let pipeline_execution_result = fut.await?; - debug!( - block_id = block_id, - "Got state compute result, post processing." - ); - let user_txns = &pipeline_execution_result.input_txns; - let result = &pipeline_execution_result.result; - - observe_block(timestamp, BlockStage::EXECUTED); - counters::PIPELINE_INSERTION_TO_EXECUTED_TIME - .observe_duration(pipeline_inserted_timestamp.elapsed()); - - let compute_status = result.compute_status_for_input_txns(); - // the length of compute_status is user_txns.len() + num_vtxns + 1 due to having blockmetadata - if user_txns.len() >= compute_status.len() { - // reconfiguration suffix blocks don't have any transactions - // otherwise, this is an error - if !compute_status.is_empty() { - error!( - "Expected compute_status length and actual compute_status length mismatch! user_txns len: {}, compute_status len: {}, has_reconfiguration: {}", - user_txns.len(), - compute_status.len(), - result.has_reconfiguration(), - ); - } - } else { - let user_txn_status = &compute_status[compute_status.len() - user_txns.len()..]; - - // notify mempool about failed transaction - if let Err(e) = txn_notifier - .notify_failed_txn(user_txns, user_txn_status) - .await - { - error!( - error = ?e, "Failed to notify mempool of rejected txns", - ); - } - } - - Ok(pipeline_execution_result) - }) - } - - /// Send a successful commit. A future is fulfilled when the state is finalized. - async fn commit( - &self, - blocks: Vec>, - finality_proof: LedgerInfoWithSignatures, - callback: StateComputerCommitCallBackType, - ) -> ExecutorResult<()> { - let mut latest_logical_time = self.write_mutex.lock().await; - let logical_time = LogicalTime::new( - finality_proof.ledger_info().epoch(), - finality_proof.ledger_info().round(), - ); - - // wait until all blocks are committed - for block in &blocks { - block.take_pre_commit_fut().await? 
- } - - let executor = self.executor.clone(); - let proof = finality_proof.clone(); - monitor!( - "commit_block", - tokio::task::spawn_blocking(move || { - executor - .commit_ledger(proof) - .expect("Failed to commit blocks"); - }) - .await - ) - .expect("spawn_blocking failed"); - - self.commit_notifier - .clone() - .send(self.commit_hook(&blocks, callback, finality_proof)) - .await - .expect("Failed to send commit notification"); - - *latest_logical_time = logical_time; - Ok(()) - } - /// Best effort state synchronization for the specified duration async fn sync_for_duration( &self, @@ -514,171 +253,3 @@ impl StateComputer for ExecutionProxy { self.state.write().take(); } } - -#[tokio::test] -async fn test_commit_sync_race() { - use crate::{ - error::MempoolError, payload_manager::DirectMempoolPayloadManager, - transaction_deduper::create_transaction_deduper, - transaction_shuffler::create_transaction_shuffler, - }; - use aptos_config::config::transaction_filter_type::Filter; - use aptos_consensus_notifications::Error; - use aptos_infallible::Mutex; - use aptos_types::{ - aggregate_signature::AggregateSignature, - block_executor::partitioner::ExecutableBlock, - block_info::BlockInfo, - contract_event::ContractEvent, - ledger_info::LedgerInfo, - on_chain_config::{TransactionDeduperType, TransactionShufflerType}, - transaction::{SignedTransaction, Transaction, TransactionStatus}, - }; - - struct RecordedCommit { - time: Mutex, - } - - impl BlockExecutorTrait for RecordedCommit { - fn committed_block_id(&self) -> HashValue { - HashValue::zero() - } - - fn reset(&self) -> Result<()> { - Ok(()) - } - - fn execute_block( - &self, - _block: ExecutableBlock, - _parent_block_id: HashValue, - _onchain_config: BlockExecutorConfigFromOnchain, - ) -> ExecutorResult { - Ok(StateComputeResult::new_dummy()) - } - - fn execute_and_update_state( - &self, - _block: ExecutableBlock, - _parent_block_id: HashValue, - _onchain_config: BlockExecutorConfigFromOnchain, - ) -> ExecutorResult<()> { - todo!() - } - - fn ledger_update( - &self, - _block_id: HashValue, - _parent_block_id: HashValue, - ) -> ExecutorResult { - todo!() - } - - fn pre_commit_block(&self, _block_id: HashValue) -> ExecutorResult<()> { - todo!() - } - - fn commit_ledger( - &self, - ledger_info_with_sigs: LedgerInfoWithSignatures, - ) -> ExecutorResult<()> { - *self.time.lock() = LogicalTime::new( - ledger_info_with_sigs.ledger_info().epoch(), - ledger_info_with_sigs.ledger_info().round(), - ); - Ok(()) - } - - fn finish(&self) {} - } - - #[async_trait::async_trait] - impl TxnNotifier for RecordedCommit { - async fn notify_failed_txn( - &self, - _txns: &[SignedTransaction], - _compute_results: &[TransactionStatus], - ) -> Result<(), MempoolError> { - Ok(()) - } - } - - #[async_trait::async_trait] - impl ConsensusNotificationSender for RecordedCommit { - async fn notify_new_commit( - &self, - _transactions: Vec, - _subscribable_events: Vec, - ) -> std::result::Result<(), Error> { - Ok(()) - } - - async fn sync_for_duration( - &self, - _duration: std::time::Duration, - ) -> std::result::Result { - Err(Error::UnexpectedErrorEncountered( - "sync_for_duration() is not supported by the RecordedCommit!".into(), - )) - } - - async fn sync_to_target( - &self, - target: LedgerInfoWithSignatures, - ) -> std::result::Result<(), Error> { - let logical_time = - LogicalTime::new(target.ledger_info().epoch(), target.ledger_info().round()); - if logical_time <= *self.time.lock() { - return Err(Error::NotificationError( - "Decreasing logical time".to_string(), 
- )); - } - *self.time.lock() = logical_time; - Ok(()) - } - } - - let callback = Box::new(move |_a: &[Arc], _b: LedgerInfoWithSignatures| {}); - let recorded_commit = Arc::new(RecordedCommit { - time: Mutex::new(LogicalTime::new(0, 0)), - }); - let generate_li = |epoch, round| { - LedgerInfoWithSignatures::new( - LedgerInfo::new( - BlockInfo::random_with_epoch(epoch, round), - HashValue::zero(), - ), - AggregateSignature::empty(), - ) - }; - let executor = ExecutionProxy::new( - recorded_commit.clone(), - recorded_commit.clone(), - recorded_commit.clone(), - &tokio::runtime::Handle::current(), - TransactionFilter::new(Filter::empty()), - true, - ); - - executor.new_epoch( - &EpochState::empty(), - Arc::new(DirectMempoolPayloadManager {}), - create_transaction_shuffler(TransactionShufflerType::NoShuffling), - BlockExecutorConfigFromOnchain::new_no_block_limit(), - create_transaction_deduper(TransactionDeduperType::NoDedup), - false, - false, - ); - executor - .commit(vec![], generate_li(1, 1), callback.clone()) - .await - .unwrap(); - executor - .commit(vec![], generate_li(1, 10), callback) - .await - .unwrap(); - assert!(executor.sync_to_target(generate_li(1, 8)).await.is_ok()); - assert_eq!(*recorded_commit.time.lock(), LogicalTime::new(1, 10)); - assert!(executor.sync_to_target(generate_li(2, 8)).await.is_ok()); - assert_eq!(*recorded_commit.time.lock(), LogicalTime::new(2, 8)); -} diff --git a/consensus/src/state_computer_tests.rs b/consensus/src/state_computer_tests.rs deleted file mode 100644 index 611bbdba785b9..0000000000000 --- a/consensus/src/state_computer_tests.rs +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - error::MempoolError, pipeline::pipeline_phase::CountedRequest, state_computer::ExecutionProxy, - state_replication::StateComputer, transaction_deduper::NoOpDeduper, - transaction_filter::TransactionFilter, transaction_shuffler::NoOpShuffler, - txn_notifier::TxnNotifier, -}; -use aptos_config::config::transaction_filter_type::Filter; -use aptos_consensus_notifications::{ConsensusNotificationSender, Error}; -use aptos_consensus_types::{block::Block, block_data::BlockData}; -use aptos_crypto::HashValue; -use aptos_executor_types::{ - state_compute_result::StateComputeResult, BlockExecutorTrait, ExecutorResult, -}; -use aptos_infallible::Mutex; -use aptos_types::{ - block_executor::{config::BlockExecutorConfigFromOnchain, partitioner::ExecutableBlock}, - contract_event::ContractEvent, - epoch_state::EpochState, - ledger_info::LedgerInfoWithSignatures, - transaction::{SignedTransaction, Transaction, TransactionStatus}, - validator_txn::ValidatorTransaction, -}; -use std::{ - sync::{atomic::AtomicU64, Arc}, - time::Duration, -}; -use tokio::{runtime::Handle, sync::Mutex as AsyncMutex}; - -struct DummyStateSyncNotifier { - invocations: Mutex, Vec)>>, - tx: tokio::sync::mpsc::Sender<()>, - rx: AsyncMutex>, -} - -impl DummyStateSyncNotifier { - fn new() -> Self { - let (tx, rx) = tokio::sync::mpsc::channel(10); - Self { - invocations: Mutex::new(vec![]), - tx, - rx: AsyncMutex::new(rx), - } - } - - async fn wait_for_notification(&self) { - self.rx.lock().await.recv().await; - } -} - -#[async_trait::async_trait] -impl ConsensusNotificationSender for DummyStateSyncNotifier { - async fn notify_new_commit( - &self, - transactions: Vec, - subscribable_events: Vec, - ) -> Result<(), Error> { - self.invocations - .lock() - .push((transactions, subscribable_events)); - self.tx.send(()).await.unwrap(); - Ok(()) - } 
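The notifier above records each invocation and then pings a channel, so a test can await the asynchronous commit notification instead of polling or sleeping. A minimal, self-contained sketch of this notify-and-wait pattern using plain tokio primitives; RecordingNotifier and its string events are illustrative stand-ins, not the aptos types:

use std::sync::Arc;
use tokio::sync::{mpsc, Mutex};

struct RecordingNotifier {
    // Everything observed so far, for later assertions.
    invocations: Mutex<Vec<String>>,
    // Ping channel: one send per recorded invocation.
    tx: mpsc::Sender<()>,
    rx: Mutex<mpsc::Receiver<()>>,
}

impl RecordingNotifier {
    fn new() -> Self {
        let (tx, rx) = mpsc::channel(10);
        Self {
            invocations: Mutex::new(Vec::new()),
            tx,
            rx: Mutex::new(rx),
        }
    }

    // Record the event, then signal any waiter.
    async fn notify(&self, event: String) {
        self.invocations.lock().await.push(event);
        self.tx.send(()).await.unwrap();
    }

    // Block until at least one notification has been recorded.
    async fn wait_for_notification(&self) {
        self.rx.lock().await.recv().await;
    }
}

#[tokio::main]
async fn main() {
    let notifier = Arc::new(RecordingNotifier::new());
    let background = notifier.clone();
    tokio::spawn(async move { background.notify("commit".to_string()).await });

    // Deterministic wait: returns as soon as the spawned task has notified.
    notifier.wait_for_notification().await;
    assert_eq!(*notifier.invocations.lock().await, vec!["commit".to_string()]);
}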
- - async fn sync_for_duration( - &self, - _duration: Duration, - ) -> Result { - Err(Error::UnexpectedErrorEncountered( - "sync_for_duration() is not supported by the DummyStateSyncNotifier!".into(), - )) - } - - async fn sync_to_target(&self, _target: LedgerInfoWithSignatures) -> Result<(), Error> { - unreachable!() - } -} - -struct DummyTxnNotifier {} - -#[async_trait::async_trait] -impl TxnNotifier for DummyTxnNotifier { - async fn notify_failed_txn( - &self, - _txns: &[SignedTransaction], - _statuses: &[TransactionStatus], - ) -> anyhow::Result<(), MempoolError> { - Ok(()) - } -} - -struct DummyBlockExecutor { - blocks_received: Mutex>, -} - -impl DummyBlockExecutor { - fn new() -> Self { - Self { - blocks_received: Mutex::new(vec![]), - } - } -} - -impl BlockExecutorTrait for DummyBlockExecutor { - fn committed_block_id(&self) -> HashValue { - HashValue::zero() - } - - fn reset(&self) -> anyhow::Result<()> { - Ok(()) - } - - fn execute_and_update_state( - &self, - block: ExecutableBlock, - _parent_block_id: HashValue, - _onchain_config: BlockExecutorConfigFromOnchain, - ) -> ExecutorResult<()> { - self.blocks_received.lock().push(block); - Ok(()) - } - - fn ledger_update( - &self, - _block_id: HashValue, - _parent_block_id: HashValue, - ) -> ExecutorResult { - let txns = self - .blocks_received - .lock() - .last() - .unwrap() - .transactions - .clone() - .into_txns() - .into_iter() - .map(|t| t.into_inner()) - .collect(); - - Ok(StateComputeResult::new_dummy_with_input_txns(txns)) - } - - fn pre_commit_block(&self, _block_id: HashValue) -> ExecutorResult<()> { - Ok(()) - } - - fn commit_ledger( - &self, - _ledger_info_with_sigs: LedgerInfoWithSignatures, - ) -> ExecutorResult<()> { - Ok(()) - } - - fn finish(&self) {} -} - -#[tokio::test] -#[cfg(test)] -async fn should_see_and_notify_validator_txns() { - use crate::payload_manager::DirectMempoolPayloadManager; - - let executor = Arc::new(DummyBlockExecutor::new()); - - let state_sync_notifier = Arc::new(DummyStateSyncNotifier::new()); - let execution_policy = ExecutionProxy::new( - executor.clone(), - Arc::new(DummyTxnNotifier {}), - state_sync_notifier.clone(), - &Handle::current(), - TransactionFilter::new(Filter::empty()), - true, - ); - - let validator_txn_0 = ValidatorTransaction::dummy(vec![0xFF; 99]); - let validator_txn_1 = ValidatorTransaction::dummy(vec![0xFF; 999]); - - let block = Block::new_for_testing( - HashValue::zero(), - BlockData::dummy_with_validator_txns(vec![ - validator_txn_0.clone(), - validator_txn_1.clone(), - ]), - None, - ); - - let epoch_state = EpochState::empty(); - - execution_policy.new_epoch( - &epoch_state, - Arc::new(DirectMempoolPayloadManager::new()), - Arc::new(NoOpShuffler {}), - BlockExecutorConfigFromOnchain::new_no_block_limit(), - Arc::new(NoOpDeduper {}), - false, - false, - ); - - // Ensure the dummy executor has received the txns. - let _ = execution_policy - .schedule_compute(&block, HashValue::zero(), None, None, dummy_guard()) - .await - .await - .unwrap(); - - // Get the txns from the view of the dummy executor. 
- let txns = executor.blocks_received.lock()[0] - .transactions - .clone() - .into_txns(); - - let supposed_validator_txn_0 = txns[1].expect_valid().try_as_validator_txn().unwrap(); - let supposed_validator_txn_1 = txns[2].expect_valid().try_as_validator_txn().unwrap(); - assert_eq!(&validator_txn_0, supposed_validator_txn_0); - assert_eq!(&validator_txn_1, supposed_validator_txn_1); - - // Get all txns that state sync was notified with (when pre-commit finishes) - state_sync_notifier.wait_for_notification().await; - let (txns, _) = state_sync_notifier.invocations.lock()[0].clone(); - - let supposed_validator_txn_0 = txns[1].try_as_validator_txn().unwrap(); - let supposed_validator_txn_1 = txns[2].try_as_validator_txn().unwrap(); - assert_eq!(&validator_txn_0, supposed_validator_txn_0); - assert_eq!(&validator_txn_1, supposed_validator_txn_1); -} - -fn dummy_guard() -> CountedRequest<()> { - CountedRequest::new((), Arc::new(AtomicU64::new(0))) -} diff --git a/consensus/src/state_replication.rs b/consensus/src/state_replication.rs index 444691be5b8c3..9ad9c679f4d4c 100644 --- a/consensus/src/state_replication.rs +++ b/consensus/src/state_replication.rs @@ -4,18 +4,13 @@ use crate::{ error::StateSyncError, payload_manager::TPayloadManager, - pipeline::pipeline_phase::CountedRequest, state_computer::StateComputeResultFut, transaction_deduper::TransactionDeduper, transaction_shuffler::TransactionShuffler, }; use anyhow::Result; -use aptos_consensus_types::{ - block::Block, pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, -}; -use aptos_crypto::HashValue; -use aptos_executor_types::ExecutorResult; +use aptos_consensus_types::pipelined_block::PipelinedBlock; use aptos_types::{ block_executor::config::BlockExecutorConfigFromOnchain, epoch_state::EpochState, - ledger_info::LedgerInfoWithSignatures, randomness::Randomness, + ledger_info::LedgerInfoWithSignatures, }; use std::{sync::Arc, time::Duration}; @@ -27,27 +22,6 @@ pub type StateComputerCommitCallBackType = /// StateComputer is using proposed block ids for identifying the transactions. #[async_trait::async_trait] pub trait StateComputer: Send + Sync { - async fn schedule_compute( - &self, - // The block that will be computed. - _block: &Block, - // The parent block root hash. - _parent_block_id: HashValue, - _randomness: Option, - _block_qc: Option>, - _lifetime_guard: CountedRequest<()>, - ) -> StateComputeResultFut { - unimplemented!(); - } - - /// Send a successful commit. A future is fulfilled when the state is finalized. - async fn commit( - &self, - blocks: Vec>, - finality_proof: LedgerInfoWithSignatures, - callback: StateComputerCommitCallBackType, - ) -> ExecutorResult<()>; - /// Best effort state synchronization for the specified duration. /// This function returns the latest synced ledger info after state syncing. 
/// Note: it is possible that state sync may run longer than the specified diff --git a/consensus/src/test_utils/mock_execution_client.rs b/consensus/src/test_utils/mock_execution_client.rs index 84fcfc32cca67..dad3a798f566a 100644 --- a/consensus/src/test_utils/mock_execution_client.rs +++ b/consensus/src/test_utils/mock_execution_client.rs @@ -11,7 +11,6 @@ use crate::{ pipeline_builder::PipelineBuilder, signing_phase::CommitSignerProvider, }, rand::rand_gen::types::RandConfig, - state_replication::StateComputerCommitCallBackType, test_utils::mock_storage::MockStorage, }; use anyhow::{anyhow, format_err, Result}; @@ -19,6 +18,7 @@ use aptos_channels::aptos_channel; use aptos_consensus_types::{ common::{Payload, Round}, pipelined_block::PipelinedBlock, + vote_data::VoteData, wrapped_ledger_info::WrappedLedgerInfo, }; use aptos_crypto::{bls12381::PrivateKey, HashValue}; @@ -43,6 +43,8 @@ pub struct MockExecutionClient { consensus_db: Arc, block_cache: Mutex>, payload_manager: Arc, + block_store_callback: + Mutex>>, } impl MockExecutionClient { @@ -57,18 +59,34 @@ impl MockExecutionClient { consensus_db, block_cache: Mutex::new(HashMap::new()), payload_manager: Arc::from(DirectMempoolPayloadManager::new()), + block_store_callback: Mutex::new(None), } } + pub fn set_callback( + &self, + callback: Box, + ) { + *self.block_store_callback.lock() = Some(callback); + } + pub async fn commit_to_storage(&self, blocks: OrderedBlocks) -> ExecutorResult<()> { let OrderedBlocks { ordered_blocks, ordered_proof, - callback, } = blocks; self.consensus_db .commit_to_storage(ordered_proof.ledger_info().clone()); + if let Some(callback) = self.block_store_callback.lock().as_ref() { + for block in &ordered_blocks { + callback( + block.id(), + block.round(), + WrappedLedgerInfo::new(VoteData::dummy(), ordered_proof.clone()), + ); + } + } // mock sending commit notif to state sync let mut txns = vec![]; for block in &ordered_blocks { @@ -85,8 +103,6 @@ impl MockExecutionClient { // they may fail during shutdown let _ = self.state_sync_client.unbounded_send(txns); - callback(&ordered_blocks, ordered_proof); - Ok(()) } } @@ -106,7 +122,6 @@ impl TExecutionClient for MockExecutionClient { _fast_rand_config: Option, _rand_msg_rx: aptos_channel::Receiver, _highest_committed_round: Round, - _new_pipeline_enabled: bool, ) { } @@ -118,7 +133,6 @@ impl TExecutionClient for MockExecutionClient { &self, blocks: Vec>, finality_proof: WrappedLedgerInfo, - callback: StateComputerCommitCallBackType, ) -> ExecutorResult<()> { assert!(!blocks.is_empty()); info!( @@ -142,7 +156,6 @@ impl TExecutionClient for MockExecutionClient { .send(OrderedBlocks { ordered_blocks: blocks, ordered_proof: finality_proof.ledger_info().clone(), - callback, }) .await .is_err() diff --git a/consensus/src/test_utils/mock_payload_manager.rs b/consensus/src/test_utils/mock_payload_manager.rs index 43da1a164e29c..0dd3a80d44ad1 100644 --- a/consensus/src/test_utils/mock_payload_manager.rs +++ b/consensus/src/test_utils/mock_payload_manager.rs @@ -16,7 +16,7 @@ use aptos_types::{ vm_status::StatusCode, }; use aptos_validator_transaction_pool as vtxn_pool; -use futures::{channel::mpsc, future::BoxFuture}; +use futures::channel::mpsc; use rand::Rng; #[allow(dead_code)] @@ -56,7 +56,6 @@ impl PayloadClient for MockPayloadManager { &self, _params: PayloadPullParameters, _validator_txn_filter: vtxn_pool::TransactionFilter, - _wait_callback: BoxFuture<'static, ()>, ) -> Result<(Vec, Payload), QuorumStoreError> { // generate 1k txn is too slow with coverage 
instrumentation Ok(( diff --git a/consensus/src/test_utils/mock_state_computer.rs b/consensus/src/test_utils/mock_state_computer.rs index b578318a99981..d056464b8979a 100644 --- a/consensus/src/test_utils/mock_state_computer.rs +++ b/consensus/src/test_utils/mock_state_computer.rs @@ -3,100 +3,17 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - error::StateSyncError, - payload_manager::TPayloadManager, - pipeline::{buffer_manager::OrderedBlocks, pipeline_phase::CountedRequest}, - state_computer::StateComputeResultFut, - state_replication::{StateComputer, StateComputerCommitCallBackType}, - transaction_deduper::TransactionDeduper, - transaction_shuffler::TransactionShuffler, + error::StateSyncError, payload_manager::TPayloadManager, state_replication::StateComputer, + transaction_deduper::TransactionDeduper, transaction_shuffler::TransactionShuffler, }; use anyhow::{anyhow, Result}; -use aptos_consensus_types::{ - block::Block, pipeline_execution_result::PipelineExecutionResult, - pipelined_block::PipelinedBlock, quorum_cert::QuorumCert, -}; use aptos_crypto::HashValue; -use aptos_executor_types::{ - state_compute_result::StateComputeResult, ExecutorError, ExecutorResult, -}; -use aptos_logger::debug; use aptos_types::{ block_executor::config::BlockExecutorConfigFromOnchain, epoch_state::EpochState, - ledger_info::LedgerInfoWithSignatures, randomness::Randomness, + ledger_info::LedgerInfoWithSignatures, }; -use futures::SinkExt; -use futures_channel::mpsc::UnboundedSender; use std::{sync::Arc, time::Duration}; -pub struct EmptyStateComputer { - executor_channel: UnboundedSender, -} - -impl EmptyStateComputer { - pub fn new(executor_channel: UnboundedSender) -> Self { - Self { executor_channel } - } -} - -#[async_trait::async_trait] -impl StateComputer for EmptyStateComputer { - async fn commit( - &self, - blocks: Vec>, - commit: LedgerInfoWithSignatures, - call_back: StateComputerCommitCallBackType, - ) -> ExecutorResult<()> { - assert!(!blocks.is_empty()); - - if self - .executor_channel - .clone() - .send(OrderedBlocks { - ordered_blocks: blocks, - ordered_proof: commit, - callback: call_back, - }) - .await - .is_err() - { - debug!("Failed to send to buffer manager, maybe epoch ends"); - } - - Ok(()) - } - - async fn sync_for_duration( - &self, - _duration: Duration, - ) -> Result { - Err(StateSyncError::from(anyhow!( - "sync_for_duration() is not supported by the EmptyStateComputer!" - ))) - } - - async fn sync_to_target( - &self, - _target: LedgerInfoWithSignatures, - ) -> Result<(), StateSyncError> { - Ok(()) - } - - fn new_epoch( - &self, - _: &EpochState, - _: Arc, - _: Arc, - _: BlockExecutorConfigFromOnchain, - _: Arc, - _: bool, - _: bool, - ) { - } - - fn end_epoch(&self) {} -} - /// Random Compute Result State Computer /// When compute(), if parent id is random_compute_result_root_hash, it returns Err(Error::BlockNotFound(parent_block_id)) /// Otherwise, it returns a dummy StateComputeResult with root hash as random_compute_result_root_hash. 
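The doc comment above captures a handy testing trick: a sentinel value doubles as an error trapdoor, so the mock exercises the failure path deterministically whenever the caller passes the pre-agreed id. A self-contained sketch of the idea, with plain integers standing in for HashValue and ExecutorError (all names here are illustrative, not the aptos API):

#[derive(Debug, PartialEq)]
enum MockExecutorError {
    BlockNotFound(u64),
}

struct RandomResultComputer {
    // Doubles as both the dummy root hash and the error trapdoor.
    trapdoor_root_hash: u64,
}

impl RandomResultComputer {
    // Err iff the parent id equals the sentinel; otherwise a dummy result
    // carrying the sentinel as its root hash.
    fn compute(&self, parent_block_id: u64) -> Result<u64, MockExecutorError> {
        if parent_block_id == self.trapdoor_root_hash {
            Err(MockExecutorError::BlockNotFound(parent_block_id))
        } else {
            Ok(self.trapdoor_root_hash)
        }
    }
}

fn main() {
    let computer = RandomResultComputer { trapdoor_root_hash: 42 };
    assert_eq!(computer.compute(7), Ok(42)); // normal path: dummy result
    assert_eq!(
        computer.compute(42), // sentinel parent id: forced error path
        Err(MockExecutorError::BlockNotFound(42))
    );
}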
@@ -118,42 +35,6 @@ impl RandomComputeResultStateComputer { #[async_trait::async_trait] impl StateComputer for RandomComputeResultStateComputer { - async fn schedule_compute( - &self, - _block: &Block, - parent_block_id: HashValue, - _randomness: Option, - _block_qc: Option>, - _lifetime_guard: CountedRequest<()>, - ) -> StateComputeResultFut { - // trapdoor for Execution Error - let res = if parent_block_id == self.random_compute_result_root_hash { - Err(ExecutorError::BlockNotFound(parent_block_id)) - } else { - Ok(StateComputeResult::new_dummy_with_root_hash( - self.random_compute_result_root_hash, - )) - }; - let pipeline_execution_res = res.map(|res| { - PipelineExecutionResult::new( - vec![], - res, - Duration::from_secs(0), - Box::pin(async { Ok(()) }), - ) - }); - Box::pin(async move { pipeline_execution_res }) - } - - async fn commit( - &self, - _blocks: Vec>, - _commit: LedgerInfoWithSignatures, - _call_back: StateComputerCommitCallBackType, - ) -> ExecutorResult<()> { - Ok(()) - } - async fn sync_for_duration( &self, _duration: Duration, diff --git a/consensus/src/test_utils/mod.rs b/consensus/src/test_utils/mod.rs index c451f2c447f90..13d025d796691 100644 --- a/consensus/src/test_utils/mod.rs +++ b/consensus/src/test_utils/mod.rs @@ -49,8 +49,6 @@ use aptos_types::{ }; pub use mock_payload_manager::MockPayloadManager; #[cfg(test)] -pub use mock_state_computer::EmptyStateComputer; -#[cfg(test)] pub use mock_state_computer::RandomComputeResultStateComputer; pub use mock_storage::{EmptyStorage, MockStorage}; use move_core_types::account_address::AccountAddress; diff --git a/consensus/src/transaction_filter/mod.rs b/consensus/src/transaction_filter/mod.rs deleted file mode 100644 index 6b9573cbe8120..0000000000000 --- a/consensus/src/transaction_filter/mod.rs +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use aptos_config::config::transaction_filter_type::Filter; -use aptos_crypto::HashValue; -use aptos_types::transaction::SignedTransaction; - -pub struct TransactionFilter { - filter: Filter, -} - -impl TransactionFilter { - pub(crate) fn new(filter: Filter) -> Self { - Self { filter } - } - - pub fn filter( - &self, - block_id: HashValue, - block_epoch: u64, - block_timestamp: u64, - txns: Vec, - ) -> Vec { - // Special case for no filter to avoid unnecessary iteration through all transactions in the default case - if self.filter.is_empty() { - return txns; - } - - txns.into_iter() - .filter(|txn| { - self.filter - .allows(block_id, block_epoch, block_timestamp, txn) - }) - .collect() - } -} - -#[cfg(test)] -mod test { - use crate::transaction_filter::TransactionFilter; - use aptos_config::config::transaction_filter_type::{Filter, Matcher}; - use aptos_crypto::{ed25519::Ed25519PrivateKey, HashValue, PrivateKey, SigningKey, Uniform}; - use aptos_types::{ - chain_id::ChainId, - move_utils::MemberId, - transaction::{ - EntryFunction, RawTransaction, SignedTransaction, TransactionExecutableRef, - TransactionPayload, - }, - }; - use move_core_types::account_address::AccountAddress; - - fn create_signed_transaction(function: MemberId) -> SignedTransaction { - let private_key = Ed25519PrivateKey::generate_for_testing(); - let public_key = private_key.public_key(); - let sender = AccountAddress::random(); - let sequence_number = 0; - let MemberId { - module_id, - member_id: function_id, - } = function; - - // TODO[Orderless]: Test with payload v2 format as well. 
- let payload = TransactionPayload::EntryFunction(EntryFunction::new( - module_id, - function_id, - vec![], - vec![], - )); - let raw_transaction = - RawTransaction::new(sender, sequence_number, payload, 0, 0, 0, ChainId::new(10)); - - SignedTransaction::new( - raw_transaction.clone(), - public_key.clone(), - private_key.sign(&raw_transaction).unwrap(), - ) - } - - fn get_transactions() -> Vec { - vec![ - create_signed_transaction(str::parse("0x0::test0::add").unwrap()), - create_signed_transaction(str::parse("0x1::test1::check").unwrap()), - create_signed_transaction(str::parse("0x2::test2::new").unwrap()), - create_signed_transaction(str::parse("0x3::test3::sub").unwrap()), - create_signed_transaction(str::parse("0x4::test4::mul").unwrap()), - create_signed_transaction(str::parse("0x5::test5::div").unwrap()), - create_signed_transaction(str::parse("0x6::test6::mod").unwrap()), - ] - } - - fn get_block_id_and_transactions() -> (HashValue, Vec) { - let txns = get_transactions(); - let block_id = HashValue::random(); - (block_id, txns) - } - - fn get_module_address(txn: &SignedTransaction) -> AccountAddress { - match txn.payload().executable_ref() { - Ok(TransactionExecutableRef::EntryFunction(entry_func)) => { - *entry_func.module().address() - }, - _ => panic!("Unexpected transaction payload"), - } - } - - fn get_module_name(txn: &SignedTransaction) -> String { - match txn.payload().executable_ref() { - Ok(TransactionExecutableRef::EntryFunction(entry_func)) => { - entry_func.module().name().to_string() - }, - _ => panic!("Unexpected transaction payload"), - } - } - - fn get_function_name(txn: &SignedTransaction) -> String { - match txn.payload().executable_ref() { - Ok(TransactionExecutableRef::EntryFunction(entry_func)) => { - entry_func.function().to_string() - }, - _ => panic!("Unexpected transaction payload"), - } - } - - #[test] - fn test_empty_filter() { - // Create an empty filter - let filter = TransactionFilter::new(Filter::empty()); - - // Verify that it returns all transactions - let (block_id, txns) = get_block_id_and_transactions(); - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns); - } - - #[test] - fn test_all_filter() { - // Create a filter that allows all transactions - let filter = TransactionFilter::new(Filter::empty().add_all_filter(true)); - - // Verify that it returns all transactions - let (block_id, txns) = get_block_id_and_transactions(); - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns); - - // Create a filter that denies all transactions - let filter = TransactionFilter::new(Filter::empty().add_all_filter(false)); - - // Verify that it returns no transactions - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, vec![]); - } - - #[test] - fn test_block_id_filter() { - // Create a filter that only allows transactions with a specific block ID - let (block_id, txns) = get_block_id_and_transactions(); - let filter = TransactionFilter::new( - Filter::empty() - .add_block_id_filter(true, block_id) - .add_all_filter(false), - ); - - // Verify that it returns all transactions with the specified block ID - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns); - - // Verify that it returns no transactions with a different block ID - let different_block_id = HashValue::random(); - let filtered_txns = filter.filter(different_block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, 
vec![]); - - // Create a filter that denies transactions with a specific block ID - let filter = TransactionFilter::new( - Filter::empty() - .add_block_id_filter(false, block_id) - .add_all_filter(true), - ); - - // Verify that it returns all transactions except those with the specified block ID - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, vec![]); - - // Verify that it returns all transactions with a different block ID - let different_block_id = HashValue::random(); - let filtered_txns = filter.filter(different_block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns); - } - - #[test] - fn test_block_timestamp_greater_than_filter() { - // Create a filter that only allows transactions with a block timestamp greater than a specific value - let filter = TransactionFilter::new( - Filter::empty() - .add_block_timestamp_greater_than_filter(true, 1000) - .add_all_filter(false), - ); - - // Verify that it returns no transactions with a block timestamp less than or equal to 1000 - let (block_id, txns) = get_block_id_and_transactions(); - for block_timestamp in [0, 999, 1000] { - let filtered_txns = filter.filter(block_id, 0, block_timestamp, txns.clone()); - assert_eq!(filtered_txns, vec![]); - } - - // Verify that it returns all transactions with a block timestamp greater than 1000 - let filtered_txns = filter.filter(block_id, 0, 1001, txns.clone()); - assert_eq!(filtered_txns, txns); - - // Create a filter that denies transactions with a block timestamp greater than a specific value - let filter = TransactionFilter::new( - Filter::empty() - .add_block_timestamp_greater_than_filter(false, 1000) - .add_all_filter(true), - ); - - // Verify that it returns all transactions with a block timestamp less than or equal to 1000 - for block_timestamp in [0, 999, 1000] { - let filtered_txns = filter.filter(block_id, 0, block_timestamp, txns.clone()); - assert_eq!(filtered_txns, txns); - } - - // Verify that it returns no transactions with a block timestamp greater than 1000 - let filtered_txns = filter.filter(block_id, 0, 1001, txns.clone()); - assert_eq!(filtered_txns, vec![]); - } - - #[test] - fn test_block_timestamp_less_than_filter() { - // Create a filter that only allows transactions with a block timestamp less than a specific value - let filter = TransactionFilter::new( - Filter::empty() - .add_block_timestamp_less_than_filter(true, 1000) - .add_all_filter(false), - ); - - // Verify that it returns all transactions with a block timestamp less than 1000 - let (block_id, txns) = get_block_id_and_transactions(); - let filtered_txns = filter.filter(block_id, 0, 999, txns.clone()); - assert_eq!(filtered_txns, txns); - - // Verify that it returns no transactions with a block timestamp greater than or equal to 1000 - for block_timestamp in [1000, 1001] { - let filtered_txns = filter.filter(block_id, 0, block_timestamp, txns.clone()); - assert_eq!(filtered_txns, vec![]); - } - - // Create a filter that denies transactions with a block timestamp less than a specific value - let filter = TransactionFilter::new( - Filter::empty() - .add_block_timestamp_less_than_filter(false, 1000) - .add_all_filter(true), - ); - - // Verify that it returns no transactions with a block timestamp less than 1000 - let filtered_txns = filter.filter(block_id, 0, 999, txns.clone()); - assert_eq!(filtered_txns, vec![]); - - // Verify that it returns all transactions with a block timestamp greater than or equal to 1000 - for block_timestamp in [1000, 1001] { - let filtered_txns 
= filter.filter(block_id, 0, block_timestamp, txns.clone()); - assert_eq!(filtered_txns, txns); - } - } - - #[test] - fn test_transaction_id_filter() { - // Create a filter that only allows transactions with a specific transaction ID (txn 0) - let (block_id, txns) = get_block_id_and_transactions(); - let filter = TransactionFilter::new( - Filter::empty() - .add_transaction_id_filter(true, txns[0].committed_hash()) - .add_all_filter(false), - ); - - // Verify that it returns the transaction with the specified ID - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, vec![txns[0].clone()]); - - // Create a filter that denies transactions with a specific transaction ID (txn 0) - let filter = TransactionFilter::new( - Filter::empty() - .add_transaction_id_filter(false, txns[0].committed_hash()) - .add_all_filter(true), - ); - - // Verify that it returns all transactions except the one with the specified ID - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[1..].to_vec()); - } - - #[test] - fn test_sender_filter() { - // Create a filter that only allows transactions from a specific sender (txn 0 and txn 1) - let (block_id, txns) = get_block_id_and_transactions(); - let filter = TransactionFilter::new( - Filter::empty() - .add_sender_filter(true, txns[0].sender()) - .add_sender_filter(true, txns[1].sender()) - .add_all_filter(false), - ); - - // Verify that it returns transactions from the specified senders - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[0..2].to_vec()); - - // Create a filter that denies transactions from a specific sender (txn 0, txn 1 and txn 2) - let filter = TransactionFilter::new( - Filter::empty() - .add_sender_filter(false, txns[0].sender()) - .add_sender_filter(false, txns[1].sender()) - .add_sender_filter(false, txns[2].sender()), - ); - - // Verify that it returns transactions from other senders - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[3..].to_vec()); - } - - #[test] - fn test_module_address_filter() { - // Create a filter that only allows transactions from a specific module address (txn 0 and txn 1) - let (block_id, txns) = get_block_id_and_transactions(); - let filter = TransactionFilter::new( - Filter::empty() - .add_module_address_filter(true, get_module_address(&txns[0])) - .add_module_address_filter(true, get_module_address(&txns[1])) - .add_all_filter(false), - ); - - // Verify that it returns transactions from the specified module addresses - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[0..2].to_vec()); - - // Create a filter that denies transactions from a specific module address (txn 0) - let filter = TransactionFilter::new( - Filter::empty().add_module_address_filter(false, get_module_address(&txns[0])), - ); - - // Verify that it returns transactions from other module addresses - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[1..].to_vec()); - } - - #[test] - fn test_entry_function_filter() { - // Create a filter that only allows transactions with specific entry functions (txn 0 and txn 1) - let filter = TransactionFilter::new( - Filter::empty() - .add_entry_function_filter( - true, - get_module_address(&get_transactions()[0]), - get_module_name(&get_transactions()[0]), - get_function_name(&get_transactions()[0]), - ) - .add_entry_function_filter( - true, - 
get_module_address(&get_transactions()[1]), - get_module_name(&get_transactions()[1]), - get_function_name(&get_transactions()[1]), - ) - .add_all_filter(false), - ); - - // Verify that it returns transactions with the specified entry functions - let (block_id, txns) = get_block_id_and_transactions(); - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[0..2].to_vec()); - - // Create a filter that denies transactions with specific entry functions (txn 0) - let filter = TransactionFilter::new(Filter::empty().add_entry_function_filter( - false, - get_module_address(&get_transactions()[0]), - get_module_name(&get_transactions()[0]), - get_function_name(&get_transactions()[0]), - )); - - // Verify that it returns transactions with other entry functions - let filtered_txns = filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[1..].to_vec()); - } - - #[test] - fn test_block_epoch_greater_than_filter() { - // Create a filter that only allows transactions with a block epoch greater than a specific value - let filter = TransactionFilter::new( - Filter::empty() - .add_block_epoch_greater_than_filter(true, 1000) - .add_all_filter(false), - ); - - // Verify that it returns no transactions with a block epoch less than or equal to 1000 - let (block_id, txns) = get_block_id_and_transactions(); - for block_epoch in [0, 999, 1000] { - let filtered_txns = filter.filter(block_id, block_epoch, 0, txns.clone()); - assert_eq!(filtered_txns, vec![]); - } - - // Verify that it returns all transactions with a block epoch greater than 1000 - let filtered_txns = filter.filter(block_id, 1001, 0, txns.clone()); - assert_eq!(filtered_txns, txns); - - // Create a filter that denies transactions with a block epoch greater than a specific value - let filter = TransactionFilter::new( - Filter::empty() - .add_block_epoch_greater_than_filter(false, 1000) - .add_all_filter(true), - ); - - // Verify that it returns all transactions with a block epoch less than or equal to 1000 - for block_epoch in [0, 999, 1000] { - let filtered_txns = filter.filter(block_id, block_epoch, 0, txns.clone()); - assert_eq!(filtered_txns, txns); - } - - // Verify that it returns no transactions with a block epoch greater than 1000 - let filtered_txns = filter.filter(block_id, 1001, 0, txns.clone()); - assert_eq!(filtered_txns, vec![]); - } - - #[test] - fn test_block_epoch_less_than_filter() { - // Create a filter that only allows transactions with a block epoch less than a specific value - let filter = TransactionFilter::new( - Filter::empty() - .add_block_epoch_less_than_filter(true, 1000) - .add_all_filter(false), - ); - - // Verify that it returns all transactions with a block epoch less than 1000 - let (block_id, txns) = get_block_id_and_transactions(); - let filtered_txns = filter.filter(block_id, 999, 0, txns.clone()); - assert_eq!(filtered_txns, txns); - - // Verify that it returns no transactions with a block epoch greater than or equal to 1000 - for block_epoch in [1000, 1001] { - let filtered_txns = filter.filter(block_id, block_epoch, 0, txns.clone()); - assert_eq!(filtered_txns, vec![]); - } - - // Create a filter that denies transactions with a block epoch less than a specific value - let filter = TransactionFilter::new( - Filter::empty() - .add_block_epoch_less_than_filter(false, 1000) - .add_all_filter(true), - ); - - // Verify that it returns no transactions with a block epoch less than 1000 - let filtered_txns = filter.filter(block_id, 999, 0, txns.clone()); 
- assert_eq!(filtered_txns, vec![]); - - // Verify that it returns all transactions with a block epoch greater than or equal to 1000 - for block_epoch in [1000, 1001] { - let filtered_txns = filter.filter(block_id, block_epoch, 0, txns.clone()); - assert_eq!(filtered_txns, txns); - } - } - - #[test] - fn test_matches_all_of_filter() { - // Create a filter that only matches transactions with epoch greater than 1000 and a specific sender (only txn 0) - let (block_id, txns) = get_block_id_and_transactions(); - let matchers = vec![ - Matcher::BlockEpochGreaterThan(1000), - Matcher::Sender(txns[0].sender()), - ]; - let filter = TransactionFilter::new( - Filter::empty() - .add_matches_all_of_filter(true, matchers) - .add_all_filter(false), - ); - - // Verify that it returns no transactions with block epoch less than or equal to 1000 - for block_epoch in [0, 999, 1000] { - let filtered_txns = filter.filter(block_id, block_epoch, 0, txns.clone()); - assert_eq!(filtered_txns, vec![]); - } - - // Verify that it returns transactions with block epoch greater than 1000 and the specified sender - let filtered_txns = filter.filter(block_id, 1001, 0, txns.clone()); - assert_eq!(filtered_txns, txns[0..1].to_vec()); - - // Create a filter that denies transactions with timestamp greater than 1000 and a specific sender (only txn 0) - let matchers = vec![ - Matcher::BlockTimeStampGreaterThan(1000), - Matcher::Sender(txns[0].sender()), - ]; - let filter = TransactionFilter::new( - Filter::empty() - .add_matches_all_of_filter(false, matchers) - .add_all_filter(true), - ); - - // Verify that it returns all transactions with block timestamp less than or equal to 1000 - for block_timestamp in [0, 999, 1000] { - let filtered_txns = filter.filter(block_id, 0, block_timestamp, txns.clone()); - assert_eq!(filtered_txns, txns); - } - - // Verify that it returns no transactions with block timestamp greater than 1000 and the specified sender - let filtered_txns = filter.filter(block_id, 0, 1001, txns.clone()); - assert_eq!(filtered_txns, txns[1..].to_vec()); - } - - #[test] - fn test_composite_allow_list_filter() { - // Create a filter that only allows transactions based on multiple criteria - let (block_id, txns) = get_block_id_and_transactions(); - let filter_string = format!( - r#" - rules: - - Allow: - Sender: "{}" - - Allow: - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000001" - - Allow: - EntryFunction: - - "0000000000000000000000000000000000000000000000000000000000000002" - - test2 - - new - - Allow: - EntryFunction: - - "0000000000000000000000000000000000000000000000000000000000000003" - - test3 - - sub - - Deny: All - "#, - txns[0].sender().to_standard_string() - ); - let filter = serde_yaml::from_str::(&filter_string).unwrap(); - let allow_list_filter = TransactionFilter::new(filter); - - // Verify that only the first four transactions are allowed - let filtered_txns = allow_list_filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[0..4].to_vec()); - } - - #[test] - fn test_composite_block_list_filter() { - // Create a filter that denies transactions based on multiple criteria - let (block_id, txns) = get_block_id_and_transactions(); - let filter_string = format!( - r#" - rules: - - Deny: - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000000" - - Deny: - Sender: "{}" - - Deny: - EntryFunction: - - "0000000000000000000000000000000000000000000000000000000000000002" - - test2 - - new - - Deny: - ModuleAddress: 
"0000000000000000000000000000000000000000000000000000000000000003" - - Allow: All - "#, - txns[1].sender().to_standard_string() - ); - let filter = serde_yaml::from_str::(&filter_string).unwrap(); - let block_list_filter = TransactionFilter::new(filter); - - // Verify that the first four transactions are denied - let filtered_txns = block_list_filter.filter(block_id, 0, 0, txns.clone()); - assert_eq!(filtered_txns, txns[4..].to_vec()); - } - - #[test] - fn test_composite_matches_all_of_filter() { - // Create a filter that denies transactions based on the matches all of rule - let (block_id, txns) = get_block_id_and_transactions(); - let filter_string = format!( - r#" - rules: - - Deny: - MatchesAllOf: - - Sender: "{}" - - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000000" - - BlockEpochGreaterThan: 10 - - Deny: - MatchesAllOf: - - Sender: "{}" - - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000001" - - BlockEpochGreaterThan: 10 - - BlockTimeStampGreaterThan: 1000 - - Deny: - MatchesAllOf: - - Sender: "{}" - - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000002" - - BlockEpochGreaterThan: 10 - - BlockTimeStampGreaterThan: 1000 - - BlockId: "{}" - - Allow: All - "#, - txns[0].sender().to_standard_string(), - txns[1].sender().to_standard_string(), - txns[2].sender().to_standard_string(), - block_id.to_hex() - ); - let filter = serde_yaml::from_str::(&filter_string).unwrap(); - let block_list_filter = TransactionFilter::new(filter); - - // Filter transactions with a block epoch of 11, timestamp of 1001, and the expected block ID - let filtered_txns = block_list_filter.filter(block_id, 11, 1001, txns.clone()); - - // Verify that only the first three transactions are denied - assert_eq!(filtered_txns, txns[3..].to_vec()); - - // Filter transactions with a block epoch of 11, timestamp of 1001, and a random block ID - let random_block_id = HashValue::random(); - let filtered_txns = block_list_filter.filter(random_block_id, 11, 1001, txns.clone()); - - // Verify that only the first two transactions are denied - assert_eq!(filtered_txns, txns[2..].to_vec()); - - // Filter transactions with a block epoch of 11, timestamp of 999, and the expected block ID - let filtered_txns = block_list_filter.filter(block_id, 11, 999, txns.clone()); - - // Verify that only the first transaction is denied - assert_eq!(filtered_txns, txns[1..].to_vec()); - } -} diff --git a/consensus/src/twins/basic_twins_test.rs b/consensus/src/twins/basic_twins_test.rs index 9893efebd5cae..524f2bbf3e789 100644 --- a/consensus/src/twins/basic_twins_test.rs +++ b/consensus/src/twins/basic_twins_test.rs @@ -25,6 +25,7 @@ use std::collections::HashMap; /// /// Run the test: /// cargo xtest -p consensus basic_start_test -- --nocapture +#[ignore] fn basic_start_test() { let runtime = consensus_runtime(); let mut playground = NetworkPlayground::new(runtime.handle().clone()); diff --git a/consensus/src/twins/twins_node.rs b/consensus/src/twins/twins_node.rs index a469bc6094a89..a1b511cb8c915 100644 --- a/consensus/src/twins/twins_node.rs +++ b/consensus/src/twins/twins_node.rs @@ -279,7 +279,6 @@ impl SMRNode { config.base.waypoint = WaypointConfig::FromConfig(waypoint); // Disable timeout in twins test to avoid flakiness config.consensus.round_initial_timeout_ms = 2_000_000; - config.consensus.enable_pipeline = false; let author = author_from_config(&config); diff --git a/crates/aptos-rosetta/src/common.rs b/crates/aptos-rosetta/src/common.rs index 
1ffbd0e1433cd..ea5e8f7646e4e 100644 --- a/crates/aptos-rosetta/src/common.rs +++ b/crates/aptos-rosetta/src/common.rs @@ -156,7 +156,7 @@ pub fn native_coin() -> Currency { symbol: APT_SYMBOL.to_string(), decimals: APT_DECIMALS, metadata: Some(CurrencyMetadata { - move_type: Some(native_coin_tag().to_string()), + move_type: Some(native_coin_tag().to_canonical_string()), fa_address: None, }), } @@ -213,7 +213,7 @@ pub fn find_coin_currency(currencies: &HashSet, type_tag: &TypeTag) -> fa_address: _, }) = currency.metadata { - move_type == &type_tag.to_string() + move_type == &type_tag.to_canonical_string() } else { false } @@ -394,7 +394,7 @@ pub fn parse_coin_currency( .as_ref() .and_then(|inner| inner.move_type.as_ref()) { - struct_tag.to_string() == *move_type + struct_tag.to_canonical_string() == *move_type } else { false } @@ -403,7 +403,7 @@ pub fn parse_coin_currency( } else { Err(ApiError::TransactionParseError(Some(format!( "Invalid coin for transfer {}", - struct_tag + struct_tag.to_canonical_string() )))) } } diff --git a/crates/aptos-rosetta/src/error.rs b/crates/aptos-rosetta/src/error.rs index d1e9fffab7d50..b91b7c47a7bbf 100644 --- a/crates/aptos-rosetta/src/error.rs +++ b/crates/aptos-rosetta/src/error.rs @@ -52,6 +52,7 @@ pub enum ApiError { SequenceNumberTooOld(Option), VmError(Option), MempoolIsFull(Option), + RejectedByFilter(Option), } impl std::fmt::Display for ApiError { @@ -142,6 +143,7 @@ impl ApiError { MempoolIsFull(_) => 32, CoinTypeFailedToBeFetched(_) => 33, StateValueNotFound(_) => 34, + RejectedByFilter(_) => 35, } } @@ -201,6 +203,7 @@ impl ApiError { ApiError::VmError(_) => "Transaction submission failed due to VM error", ApiError::MempoolIsFull(_) => "Mempool is full all accounts", ApiError::GasEstimationFailed(_) => "Gas estimation failed", + ApiError::RejectedByFilter(_) => "Transaction was rejected by the transaction filter", } } @@ -300,6 +303,9 @@ impl From for ApiError { ApiError::SequenceNumberTooOld(Some(err.error.message)) }, AptosErrorCode::VmError => ApiError::VmError(Some(err.error.message)), + AptosErrorCode::RejectedByFilter => { + ApiError::RejectedByFilter(Some(err.error.message)) + }, AptosErrorCode::HealthCheckFailed => { ApiError::InternalError(Some(err.error.message)) }, diff --git a/crates/aptos-rosetta/src/test/mod.rs b/crates/aptos-rosetta/src/test/mod.rs index ecaa1fc54cb6c..67f8a8fb16b21 100644 --- a/crates/aptos-rosetta/src/test/mod.rs +++ b/crates/aptos-rosetta/src/test/mod.rs @@ -84,6 +84,7 @@ fn test_transaction( None, 178, // gas used, chosen arbitrarily ExecutionStatus::Success, // TODO: Add other statuses + None, )), events, accumulator_root_hash: Default::default(), diff --git a/crates/aptos-rosetta/src/types/objects.rs b/crates/aptos-rosetta/src/types/objects.rs index 6bb2a412fdbb1..fac15b9b1b421 100644 --- a/crates/aptos-rosetta/src/types/objects.rs +++ b/crates/aptos-rosetta/src/types/objects.rs @@ -1385,7 +1385,7 @@ async fn parse_operations_from_write_set( if let Some(currency) = maybe_currency { parse_coinstore_changes( currency.clone(), - type_tag.to_string(), + type_tag.to_canonical_string(), version, address, data, @@ -1398,7 +1398,8 @@ async fn parse_operations_from_write_set( } else { warn!( "Failed to parse coinstore {} at version {}", - struct_tag, version + struct_tag.to_canonical_string(), + version ); Ok(vec![]) } diff --git a/crates/aptos-transaction-filters/Cargo.toml b/crates/aptos-transaction-filters/Cargo.toml new file mode 100644 index 0000000000000..3169ee318d247 --- /dev/null +++ 
b/crates/aptos-transaction-filters/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "aptos-transaction-filters" +description = "The transaction filters used by various Aptos components." +version = "0.1.0" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +aptos-crypto = { workspace = true } +aptos-types = { workspace = true } +move-core-types = { workspace = true } +rand = { workspace = true } +serde = { workspace = true } +serde_yaml = { workspace = true } + +[dev-dependencies] +aptos-crypto = { workspace = true, features = ["fuzzing", "testing"] } + +[features] +fuzzing = [] diff --git a/crates/aptos-transaction-filters/src/batch_transaction_filter.rs b/crates/aptos-transaction-filters/src/batch_transaction_filter.rs new file mode 100644 index 0000000000000..098b8f328f14a --- /dev/null +++ b/crates/aptos-transaction-filters/src/batch_transaction_filter.rs @@ -0,0 +1,299 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::transaction_filter::TransactionMatcher; +use aptos_crypto::HashValue; +use aptos_types::{quorum_store::BatchId, transaction::SignedTransaction, PeerId}; +#[cfg(any(test, feature = "fuzzing"))] +use move_core_types::account_address::AccountAddress; +use serde::{Deserialize, Serialize}; + +/// A batch transaction filter that applies a set of rules to determine +/// if a transaction in a batch should be allowed or denied. +/// +/// Rules are applied in the order they are defined, and the first +/// matching rule determines the outcome for the transaction. +/// If no rules match, the transaction is allowed by default. 
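Since the ordering of rules is load-bearing (first match wins, default allow), a minimal sketch of how an earlier deny rule shadows a later allow-all rule may help. This is illustrative only and not part of the diff; `make_signed_transaction` is a hypothetical stand-in for any way of obtaining a `SignedTransaction` (e.g. the crate's test utilities).

```rust
// Illustrative sketch only (not part of this diff). `make_signed_transaction`
// is a hypothetical provider of a SignedTransaction.
use aptos_crypto::HashValue;
use aptos_transaction_filters::{
    batch_transaction_filter::{
        BatchMatcher, BatchTransactionFilter, BatchTransactionMatcher, BatchTransactionRule,
    },
    transaction_filter::TransactionMatcher,
};
use aptos_types::{quorum_store::BatchId, transaction::SignedTransaction, PeerId};

fn first_match_wins(make_signed_transaction: impl Fn() -> SignedTransaction) {
    let txn = make_signed_transaction();
    let filter = BatchTransactionFilter::new(vec![
        // Rule 1: deny anything from this transaction's sender.
        BatchTransactionRule::Deny(vec![BatchTransactionMatcher::Transaction(
            TransactionMatcher::Sender(txn.sender()),
        )]),
        // Rule 2: allow every batch. Never reached for the sender above,
        // because the first matching rule decides the outcome.
        BatchTransactionRule::Allow(vec![BatchTransactionMatcher::Batch(BatchMatcher::All)]),
    ]);
    assert!(!filter.allows_transaction(
        BatchId::new_for_test(1),
        PeerId::random(),
        &HashValue::random(),
        &txn,
    ));
}
```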
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct BatchTransactionFilter { + batch_transaction_rules: Vec<BatchTransactionRule>, +} + +impl BatchTransactionFilter { + pub fn new(batch_transaction_rules: Vec<BatchTransactionRule>) -> Self { + Self { + batch_transaction_rules, + } + } + + /// Returns true iff the filter allows the transaction in the batch + pub fn allows_transaction( + &self, + batch_id: BatchId, + batch_author: PeerId, + batch_digest: &HashValue, + signed_transaction: &SignedTransaction, + ) -> bool { + // If the filter is empty, allow the transaction by default + if self.is_empty() { + return true; + } + + // Check if any rule matches the batch transaction + for batch_transaction_rule in &self.batch_transaction_rules { + if batch_transaction_rule.matches( + batch_id, + batch_author, + batch_digest, + signed_transaction, + ) { + return match batch_transaction_rule { + BatchTransactionRule::Allow(_) => true, + BatchTransactionRule::Deny(_) => false, + }; + } + } + + true // No rules match (allow the batch transaction by default) + } + + /// Returns an empty batch transaction filter with no rules + pub fn empty() -> Self { + Self { + batch_transaction_rules: Vec::new(), + } + } + + /// Filters the transactions in the given batch and returns only those that are allowed + pub fn filter_batch_transactions( + &self, + batch_id: BatchId, + batch_author: PeerId, + batch_digest: HashValue, + transactions: Vec<SignedTransaction>, + ) -> Vec<SignedTransaction> { + transactions + .into_iter() + .filter(|txn| self.allows_transaction(batch_id, batch_author, &batch_digest, txn)) + .collect() + } + + /// Returns true iff the filter is empty (i.e., has no rules) + pub fn is_empty(&self) -> bool { + self.batch_transaction_rules.is_empty() + } +} + +// These are useful test-only methods for creating and testing filters +#[cfg(any(test, feature = "fuzzing"))] +impl BatchTransactionFilter { + /// Adds a filter that matches all batch transactions + pub fn add_all_filter(self, allow: bool) -> Self { + let batch_matcher = BatchTransactionMatcher::Batch(BatchMatcher::All); + self.add_multiple_matchers_filter(allow, vec![batch_matcher]) + } + + /// Adds a filter rule that matches a specific batch ID + pub fn add_batch_id_filter(self, allow: bool, batch_id: BatchId) -> Self { + let batch_matcher = BatchTransactionMatcher::Batch(BatchMatcher::BatchId(batch_id)); + self.add_multiple_matchers_filter(allow, vec![batch_matcher]) + } + + /// Adds a filter rule that matches a specific batch author + pub fn add_batch_author_filter(self, allow: bool, batch_author: PeerId) -> Self { + let batch_matcher = BatchTransactionMatcher::Batch(BatchMatcher::BatchAuthor(batch_author)); + self.add_multiple_matchers_filter(allow, vec![batch_matcher]) + } + + /// Adds a filter rule that matches a specific batch digest + pub fn add_batch_digest_filter(self, allow: bool, batch_digest: HashValue) -> Self { + let batch_matcher = BatchTransactionMatcher::Batch(BatchMatcher::BatchDigest(batch_digest)); + self.add_multiple_matchers_filter(allow, vec![batch_matcher]) + } + + /// Adds a filter rule that matches a specific transaction sender + pub fn add_sender_filter(self, allow: bool, sender: AccountAddress) -> Self { + let transaction_matcher = TransactionMatcher::Sender(sender); + self.add_multiple_matchers_filter(allow, vec![BatchTransactionMatcher::Transaction( + transaction_matcher, + )]) + } + + /// Adds a filter rule containing multiple matchers + pub fn add_multiple_matchers_filter( + mut self, + allow: bool, + batch_transaction_matchers: Vec<BatchTransactionMatcher>, + ) -> Self { + let
transaction_rule = if allow { + BatchTransactionRule::Allow(batch_transaction_matchers) + } else { + BatchTransactionRule::Deny(batch_transaction_matchers) + }; + self.batch_transaction_rules.push(transaction_rule); + + self + } +} + +/// A batch transaction rule that defines whether to allow or deny +/// transactions in a batch based on a set of matchers. All matchers +/// must match for the rule to apply. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum BatchTransactionRule { + Allow(Vec<BatchTransactionMatcher>), + Deny(Vec<BatchTransactionMatcher>), +} + +impl BatchTransactionRule { + /// Returns true iff the rule matches the given batch transaction. This + /// requires that all matchers in the rule match the batch transaction. + pub fn matches( + &self, + batch_id: BatchId, + batch_author: PeerId, + batch_digest: &HashValue, + signed_transaction: &SignedTransaction, + ) -> bool { + let batch_transaction_matchers = match self { + BatchTransactionRule::Allow(matchers) => matchers, + BatchTransactionRule::Deny(matchers) => matchers, + }; + batch_transaction_matchers.iter().all(|matcher| { + matcher.matches(batch_id, batch_author, batch_digest, signed_transaction) + }) + } +} + +/// A matcher that defines the criteria for matching batches or transactions +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum BatchTransactionMatcher { + Batch(BatchMatcher), + Transaction(TransactionMatcher), +} + +impl BatchTransactionMatcher { + /// Returns true iff the matcher matches the given batch transaction + pub fn matches( + &self, + batch_id: BatchId, + batch_author: PeerId, + batch_digest: &HashValue, + signed_transaction: &SignedTransaction, + ) -> bool { + match self { + BatchTransactionMatcher::Batch(batch_matcher) => { + batch_matcher.matches(batch_id, batch_author, batch_digest) + }, + BatchTransactionMatcher::Transaction(transaction_matcher) => { + transaction_matcher.matches(signed_transaction) + }, + } + } +} + +/// A matcher that defines the criteria for matching batches +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum BatchMatcher { + All, // Matches any batch + BatchId(BatchId), // Matches batches with the specified ID + BatchAuthor(PeerId), // Matches batches authored by the specified peer + BatchDigest(HashValue), // Matches batches with the specified digest +} + +impl BatchMatcher { + /// Returns true iff the matcher matches the given batch information + fn matches(&self, batch_id: BatchId, batch_author: PeerId, batch_digest: &HashValue) -> bool { + match self { + BatchMatcher::All => true, + BatchMatcher::BatchId(target_batch_id) => matches_batch_id(batch_id, target_batch_id), + BatchMatcher::BatchAuthor(target_author) => { + matches_batch_author(batch_author, target_author) + }, + BatchMatcher::BatchDigest(target_digest) => { + matches_batch_digest(batch_digest, target_digest) + }, + } + } +} + +/// Returns true iff the batch ID matches the target batch ID +fn matches_batch_id(batch_id: BatchId, target_batch_id: &BatchId) -> bool { + batch_id == *target_batch_id +} + +/// Returns true iff the batch author matches the target author +fn matches_batch_author(batch_author: PeerId, target_author: &PeerId) -> bool { + batch_author == *target_author +} + +/// Returns true iff the batch digest matches the target digest +fn matches_batch_digest(batch_digest: &HashValue, target_digest: &HashValue) -> bool { + batch_digest == target_digest +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_matches_batch_id() { + // Create a batch ID + let batch_id =
BatchId::new_for_test(1000); + + // Verify that the batch ID matches itself + verify_matches_batch_id(batch_id, &batch_id, true); + + // Verify that a different batch ID does not match + let different_batch_id = BatchId::new_for_test(122); + verify_matches_batch_id(batch_id, &different_batch_id, false); + } + + #[test] + fn test_matches_batch_author() { + // Create a batch author + let batch_author = PeerId::random(); + + // Verify that the batch author matches itself + verify_matches_batch_author(batch_author, &batch_author, true); + + // Verify that a different batch author does not match + let different_batch_author = PeerId::random(); + verify_matches_batch_author(batch_author, &different_batch_author, false); + } + + #[test] + fn test_matches_batch_digest() { + // Create a batch digest + let batch_digest = HashValue::random(); + + // Verify that the batch digest matches itself + verify_matches_batch_digest(&batch_digest, &batch_digest, true); + + // Verify that a different batch digest does not match + let different_batch_digest = HashValue::random(); + verify_matches_batch_digest(&batch_digest, &different_batch_digest, false); + } + + /// Verifies that the batch ID matches the target batch ID + fn verify_matches_batch_id(batch_id: BatchId, target_batch_id: &BatchId, matches: bool) { + let result = matches_batch_id(batch_id, target_batch_id); + assert_eq!(matches, result); + } + + /// Verifies that the batch author matches the target author + fn verify_matches_batch_author(batch_author: PeerId, target_author: &PeerId, matches: bool) { + let result = matches_batch_author(batch_author, target_author); + assert_eq!(matches, result); + } + + /// Verifies that the batch digest matches the target digest + fn verify_matches_batch_digest( + batch_digest: &HashValue, + target_digest: &HashValue, + matches: bool, + ) { + let result = matches_batch_digest(batch_digest, target_digest); + assert_eq!(matches, result); + } +} diff --git a/crates/aptos-transaction-filters/src/block_transaction_filter.rs b/crates/aptos-transaction-filters/src/block_transaction_filter.rs new file mode 100644 index 0000000000000..3e86a2db679c7 --- /dev/null +++ b/crates/aptos-transaction-filters/src/block_transaction_filter.rs @@ -0,0 +1,439 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::transaction_filter::TransactionMatcher; +use aptos_crypto::HashValue; +use aptos_types::transaction::SignedTransaction; +use move_core_types::account_address::AccountAddress; +use serde::{Deserialize, Serialize}; + +/// A block transaction filter that applies a set of rules to determine +/// if a transaction in a block should be allowed or denied. +/// +/// Rules are applied in the order they are defined, and the first +/// matching rule determines the outcome for the transaction. +/// If no rules match, the transaction is allowed by default. 
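The block filter is intended to be loaded from configuration; a minimal sketch of deserializing one from YAML, mirroring the `block_transaction_rules` schema exercised by the config tests later in this diff, might look as follows (the specific rules shown are arbitrary):

```rust
// Illustrative sketch only (not part of this diff). The YAML schema mirrors
// the block_transaction_rules format used by the config tests in this diff.
use aptos_transaction_filters::block_transaction_filter::BlockTransactionFilter;

fn load_filter_from_yaml() -> BlockTransactionFilter {
    let yaml = r#"
block_transaction_rules:
  - Deny:
      - Transaction:
          ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000001"
  - Allow:
      - Block:
          All
"#;
    serde_yaml::from_str::<BlockTransactionFilter>(yaml).unwrap()
}
```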
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct BlockTransactionFilter { + block_transaction_rules: Vec<BlockTransactionRule>, +} + +impl BlockTransactionFilter { + pub fn new(block_transaction_rules: Vec<BlockTransactionRule>) -> Self { + Self { + block_transaction_rules, + } + } + + /// Returns true iff the filter allows the transaction in the block + pub fn allows_transaction( + &self, + block_id: HashValue, + block_author: Option<AccountAddress>, + block_epoch: u64, + block_timestamp: u64, + signed_transaction: &SignedTransaction, + ) -> bool { + // If the filter is empty, allow the transaction by default + if self.is_empty() { + return true; + } + + // Check if any rule matches the block transaction + for block_transaction_rule in &self.block_transaction_rules { + if block_transaction_rule.matches( + block_id, + block_author, + block_epoch, + block_timestamp, + signed_transaction, + ) { + return match block_transaction_rule { + BlockTransactionRule::Allow(_) => true, + BlockTransactionRule::Deny(_) => false, + }; + } + } + + true // No rules match (allow the block transaction by default) + } + + /// Returns an empty block transaction filter with no rules + pub fn empty() -> Self { + Self { + block_transaction_rules: Vec::new(), + } + } + + /// Filters the transactions in the given block and returns only those that are allowed + pub fn filter_block_transactions( + &self, + block_id: HashValue, + block_author: Option<AccountAddress>, + block_epoch: u64, + block_timestamp_usecs: u64, + transactions: Vec<SignedTransaction>, + ) -> Vec<SignedTransaction> { + transactions + .into_iter() + .filter(|txn| { + self.allows_transaction( + block_id, + block_author, + block_epoch, + block_timestamp_usecs, + txn, + ) + }) + .collect() + } + + /// Returns true iff the filter is empty (i.e., has no rules) + pub fn is_empty(&self) -> bool { + self.block_transaction_rules.is_empty() + } +} + +// These are useful test-only methods for creating and testing filters +#[cfg(any(test, feature = "fuzzing"))] +impl BlockTransactionFilter { + /// Adds a filter that matches all block transactions + pub fn add_all_filter(self, allow: bool) -> Self { + let block_matcher = BlockTransactionMatcher::Block(BlockMatcher::All); + self.add_multiple_matchers_filter(allow, vec![block_matcher]) + } + + /// Adds a block author matcher to the filter + pub fn add_block_author_filter(self, allow: bool, block_author: AccountAddress) -> Self { + let block_matcher = BlockTransactionMatcher::Block(BlockMatcher::Author(block_author)); + self.add_multiple_matchers_filter(allow, vec![block_matcher]) + } + + /// Adds a block ID matcher to the filter + pub fn add_block_id_filter(self, allow: bool, block_id: HashValue) -> Self { + let block_matcher = BlockTransactionMatcher::Block(BlockMatcher::BlockId(block_id)); + self.add_multiple_matchers_filter(allow, vec![block_matcher]) + } + + /// Adds a block epoch greater than matcher to the filter + pub fn add_block_epoch_greater_than_filter(self, allow: bool, epoch: u64) -> Self { + let block_matcher = + BlockTransactionMatcher::Block(BlockMatcher::BlockEpochGreaterThan(epoch)); + self.add_multiple_matchers_filter(allow, vec![block_matcher]) + } + + /// Adds a block epoch less than matcher to the filter + pub fn add_block_epoch_less_than_filter(self, allow: bool, epoch: u64) -> Self { + let block_matcher = BlockTransactionMatcher::Block(BlockMatcher::BlockEpochLessThan(epoch)); + self.add_multiple_matchers_filter(allow, vec![block_matcher]) + } + + /// Adds a block timestamp greater than matcher to the filter + pub fn add_block_timestamp_greater_than_filter(self, allow: bool,
timestamp: u64) -> Self { + let block_matcher = + BlockTransactionMatcher::Block(BlockMatcher::BlockTimeStampGreaterThan(timestamp)); + self.add_multiple_matchers_filter(allow, vec![block_matcher]) + } + + /// Adds a block timestamp less than matcher to the filter + pub fn add_block_timestamp_less_than_filter(self, allow: bool, timestamp: u64) -> Self { + let block_matcher = + BlockTransactionMatcher::Block(BlockMatcher::BlockTimeStampLessThan(timestamp)); + self.add_multiple_matchers_filter(allow, vec![block_matcher]) + } + + /// Adds a filter rule containing multiple matchers + pub fn add_multiple_matchers_filter( + mut self, + allow: bool, + block_transaction_matchers: Vec<BlockTransactionMatcher>, + ) -> Self { + let transaction_rule = if allow { + BlockTransactionRule::Allow(block_transaction_matchers) + } else { + BlockTransactionRule::Deny(block_transaction_matchers) + }; + self.block_transaction_rules.push(transaction_rule); + + self + } +} + +/// A block transaction rule that defines whether to allow or deny +/// transactions in a block based on a set of matchers. All matchers +/// must match for the rule to apply. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum BlockTransactionRule { + Allow(Vec<BlockTransactionMatcher>), + Deny(Vec<BlockTransactionMatcher>), +} + +impl BlockTransactionRule { + /// Returns true iff the rule matches the given block transaction. This + /// requires that all matchers in the rule match the block transaction. + pub fn matches( + &self, + block_id: HashValue, + block_author: Option<AccountAddress>, + block_epoch: u64, + block_timestamp: u64, + signed_transaction: &SignedTransaction, + ) -> bool { + let block_transaction_matchers = match self { + BlockTransactionRule::Allow(matchers) => matchers, + BlockTransactionRule::Deny(matchers) => matchers, + }; + block_transaction_matchers.iter().all(|matcher| { + matcher.matches( + block_id, + block_author, + block_epoch, + block_timestamp, + signed_transaction, + ) + }) + } +} + +/// A matcher that defines the criteria for matching blocks or transactions +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum BlockTransactionMatcher { + Block(BlockMatcher), + Transaction(TransactionMatcher), +} + +impl BlockTransactionMatcher { + /// Returns true iff the matcher matches the given block transaction + pub fn matches( + &self, + block_id: HashValue, + block_author: Option<AccountAddress>, + block_epoch: u64, + block_timestamp: u64, + signed_transaction: &SignedTransaction, + ) -> bool { + match self { + BlockTransactionMatcher::Block(block_matcher) => { + block_matcher.matches(block_id, block_author, block_epoch, block_timestamp) + }, + BlockTransactionMatcher::Transaction(transaction_matcher) => { + transaction_matcher.matches(signed_transaction) + }, + } + } +} + +/// A matcher that defines the criteria for matching blocks +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum BlockMatcher { + All, // Matches any block + Author(AccountAddress), // Matches blocks proposed by the specified author + BlockId(HashValue), // Matches blocks with the specified ID + BlockEpochGreaterThan(u64), // Matches blocks with epochs greater than the specified value + BlockEpochLessThan(u64), // Matches blocks with epochs less than the specified value + BlockTimeStampGreaterThan(u64), // Matches blocks with timestamps greater than the specified value + BlockTimeStampLessThan(u64), // Matches blocks with timestamps less than the specified value +} + +impl BlockMatcher { + /// Returns true iff the matcher matches the given block information + fn matches( + &self, + block_id:
HashValue, + block_author: Option<AccountAddress>, + block_epoch: u64, + block_timestamp: u64, + ) -> bool { + match self { + BlockMatcher::All => true, + BlockMatcher::Author(target_author) => { + matches_block_author(block_author, target_author) + }, + BlockMatcher::BlockId(target_block_id) => matches_block_id(block_id, target_block_id), + BlockMatcher::BlockEpochGreaterThan(target_epoch) => { + matches_epoch_greater_than(block_epoch, target_epoch) + }, + BlockMatcher::BlockEpochLessThan(target_epoch) => { + matches_epoch_less_than(block_epoch, target_epoch) + }, + BlockMatcher::BlockTimeStampGreaterThan(target_timestamp) => { + matches_timestamp_greater_than(block_timestamp, target_timestamp) + }, + BlockMatcher::BlockTimeStampLessThan(target_timestamp) => { + matches_timestamp_less_than(block_timestamp, target_timestamp) + }, + } + } +} + +/// Returns true iff the block author matches the target author. +/// Note: if the block author is None, it does not match the target author. +fn matches_block_author( + block_author: Option<AccountAddress>, + target_author: &AccountAddress, +) -> bool { + match block_author { + Some(block_author) => block_author == *target_author, + None => false, + } +} + +/// Returns true iff the block ID matches the target block ID +fn matches_block_id(block_id: HashValue, target_block_id: &HashValue) -> bool { + block_id == *target_block_id +} + +/// Returns true iff the block epoch is greater than the target epoch +fn matches_epoch_greater_than(block_epoch: u64, target_epoch: &u64) -> bool { + block_epoch > *target_epoch +} + +/// Returns true iff the block epoch is less than the target epoch +fn matches_epoch_less_than(block_epoch: u64, target_epoch: &u64) -> bool { + block_epoch < *target_epoch +} + +/// Returns true iff the block timestamp is greater than the target timestamp +fn matches_timestamp_greater_than(block_timestamp: u64, target_timestamp: &u64) -> bool { + block_timestamp > *target_timestamp +} + +/// Returns true iff the block timestamp is less than the target timestamp +fn matches_timestamp_less_than(block_timestamp: u64, target_timestamp: &u64) -> bool { + block_timestamp < *target_timestamp +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_matches_block_author() { + // Create a block author + let block_author = AccountAddress::random(); + + // Verify that a missing block author does not match + assert!(!matches_block_author(None, &block_author)); + + // Verify that the block author matches itself + assert!(matches_block_author(Some(block_author), &block_author)); + + // Verify that a different block author does not match + let different_block_author = AccountAddress::random(); + assert!(!matches_block_author( + Some(block_author), + &different_block_author + )); + } + + #[test] + fn test_matches_block_id() { + // Create a block ID + let block_id = HashValue::random(); + + // Verify that the block ID matches itself + verify_matches_block_id(block_id, &block_id, true); + + // Verify that a different block ID does not match + let different_block_id = HashValue::random(); + verify_matches_block_id(block_id, &different_block_id, false); + } + + #[test] + fn test_matches_epoch_greater_than() { + // Create an epoch + let epoch = 10; + + // Verify that a greater epoch matches + verify_matches_epoch_greater_than(epoch + 1, &epoch, true); + + // Verify that an equal epoch does not match + verify_matches_epoch_greater_than(epoch, &epoch, false); + + // Verify that a lesser epoch does not match + verify_matches_epoch_greater_than(epoch - 1, &epoch, false); + } + +
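Note that all four comparison matchers are strict, so a block sitting exactly at the threshold never matches. A small sketch of the boundary behavior (again illustrative and not part of the diff; it uses the builder helpers gated behind `#[cfg(any(test, feature = "fuzzing"))]`, and `make_signed_transaction` is a hypothetical `SignedTransaction` provider):

```rust
// Illustrative sketch only (not part of this diff).
use aptos_crypto::HashValue;
use aptos_transaction_filters::block_transaction_filter::BlockTransactionFilter;
use aptos_types::transaction::SignedTransaction;

fn strict_thresholds(make_signed_transaction: impl Fn() -> SignedTransaction) {
    // Allow only transactions in blocks with epoch strictly greater than 10.
    let filter = BlockTransactionFilter::empty()
        .add_block_epoch_greater_than_filter(true, 10)
        .add_all_filter(false);

    let (txn, block_id) = (make_signed_transaction(), HashValue::random());
    // Epoch 10 is the boundary: the greater-than matcher does not fire,
    // so the deny-all rule decides and the transaction is rejected.
    assert!(!filter.allows_transaction(block_id, None, 10, 0, &txn));
    // Epoch 11 is strictly greater, so the allow rule fires first.
    assert!(filter.allows_transaction(block_id, None, 11, 0, &txn));
}
```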
#[test] + fn test_matches_epoch_less_than() { + // Create an epoch + let epoch = 10; + + // Verify that a lesser epoch matches + verify_matches_epoch_less_than(epoch - 1, &epoch, true); + + // Verify that an equal epoch does not match + verify_matches_epoch_less_than(epoch, &epoch, false); + + // Verify that a greater epoch does not match + verify_matches_epoch_less_than(epoch + 1, &epoch, false); + } + + #[test] + fn test_matches_timestamp_greater_than() { + // Create a timestamp + let timestamp = 100; + + // Verify that a greater timestamp matches + verify_matches_timestamp_greater_than(timestamp + 1, &timestamp, true); + + // Verify that an equal timestamp does not match + verify_matches_timestamp_greater_than(timestamp, &timestamp, false); + + // Verify that a lesser timestamp does not match + verify_matches_timestamp_greater_than(timestamp - 1, &timestamp, false); + } + + #[test] + fn test_matches_timestamp_less_than() { + // Create a timestamp + let timestamp = 100; + + // Verify that a lesser timestamp matches + verify_matches_timestamp_less_than(timestamp - 1, &timestamp, true); + + // Verify that an equal timestamp does not match + verify_matches_timestamp_less_than(timestamp, &timestamp, false); + + // Verify that a greater timestamp does not match + verify_matches_timestamp_less_than(timestamp + 1, &timestamp, false); + } + + /// Verifies that the block ID matches the target block ID + fn verify_matches_block_id(block_id: HashValue, target_block_id: &HashValue, matches: bool) { + let result = matches_block_id(block_id, target_block_id); + assert_eq!(matches, result); + } + + /// Verifies that the block epoch is greater than the target epoch + fn verify_matches_epoch_greater_than(block_epoch: u64, target_epoch: &u64, matches: bool) { + let result = matches_epoch_greater_than(block_epoch, target_epoch); + assert_eq!(matches, result); + } + + /// Verifies that the block epoch is less than the target epoch + fn verify_matches_epoch_less_than(block_epoch: u64, target_epoch: &u64, matches: bool) { + let result = matches_epoch_less_than(block_epoch, target_epoch); + assert_eq!(matches, result); + } + + /// Verifies that the block timestamp is greater than the target timestamp + fn verify_matches_timestamp_greater_than( + block_timestamp: u64, + target_timestamp: &u64, + matches: bool, + ) { + let result = matches_timestamp_greater_than(block_timestamp, target_timestamp); + assert_eq!(matches, result); + } + + /// Verifies that the block timestamp is less than the target timestamp + fn verify_matches_timestamp_less_than( + block_timestamp: u64, + target_timestamp: &u64, + matches: bool, + ) { + let result = matches_timestamp_less_than(block_timestamp, target_timestamp); + assert_eq!(matches, result); + } +} diff --git a/crates/aptos-transaction-filters/src/lib.rs b/crates/aptos-transaction-filters/src/lib.rs new file mode 100644 index 0000000000000..3e1fc2f9154c1 --- /dev/null +++ b/crates/aptos-transaction-filters/src/lib.rs @@ -0,0 +1,11 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![forbid(unsafe_code)] + +pub mod batch_transaction_filter; +pub mod block_transaction_filter; +pub mod transaction_filter; + +#[cfg(test)] +mod tests; diff --git a/crates/aptos-transaction-filters/src/tests/batch_transaction_filter.rs b/crates/aptos-transaction-filters/src/tests/batch_transaction_filter.rs new file mode 100644 index 0000000000000..b62bc054a1458 --- /dev/null +++ b/crates/aptos-transaction-filters/src/tests/batch_transaction_filter.rs @@ -0,0 +1,335 @@ +// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + batch_transaction_filter::{BatchMatcher, BatchTransactionFilter, BatchTransactionMatcher}, + tests::utils, + transaction_filter::TransactionMatcher, +}; +use aptos_crypto::HashValue; +use aptos_types::{quorum_store::BatchId, transaction::SignedTransaction, PeerId}; + +#[test] +fn test_all_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that allows all transactions + let filter = BatchTransactionFilter::empty().add_all_filter(true); + + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Verify that all transactions are allowed + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + verify_all_transactions_allowed( + filter, + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + + // Create a filter that denies all transactions + let filter = BatchTransactionFilter::empty().add_all_filter(false); + + // Verify that all transactions are denied + verify_all_transactions_rejected( + filter, + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + } +} + +#[test] +fn test_batch_id_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Create a filter that only allows transactions with a specific batch ID + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = BatchTransactionFilter::empty() + .add_batch_id_filter(true, batch_id) + .add_all_filter(false); + + // Verify that the filter allows transactions within the specified batch ID + verify_all_transactions_allowed( + filter.clone(), + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + + // Verify that the filter denies transactions with a different batch ID + let different_batch_id = BatchId::new_for_test(1000); + verify_all_transactions_rejected( + filter.clone(), + different_batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + + // Create a filter that denies transactions with a specific batch ID + let filter = BatchTransactionFilter::empty().add_batch_id_filter(false, batch_id); + + // Verify that the filter denies transactions within the specified batch ID + verify_all_transactions_rejected( + filter.clone(), + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + + // Verify that the filter allows transactions with a different batch ID + let different_batch_id = BatchId::new_for_test(200); + verify_all_transactions_allowed( + filter.clone(), + different_batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + } +} + +#[test] +fn test_batch_author_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Create a filter that only allows transactions with a specific batch author + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = BatchTransactionFilter::empty() + .add_batch_author_filter(true, batch_author) + .add_all_filter(false); + + // Verify that the filter allows transactions with the specified batch author + verify_all_transactions_allowed( + filter.clone(), + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + + // Verify 
that the filter denies transactions with a different batch author + let different_batch_author = PeerId::random(); + verify_all_transactions_rejected( + filter.clone(), + batch_id, + different_batch_author, + batch_digest, + transactions.clone(), + ); + + // Create a filter that denies transactions with a specific batch author + let filter = BatchTransactionFilter::empty().add_batch_author_filter(false, batch_author); + + // Verify that the filter denies transactions with the specified batch author + verify_all_transactions_rejected( + filter.clone(), + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + + // Verify that the filter allows transactions with a different batch author + verify_all_transactions_allowed( + filter.clone(), + batch_id, + different_batch_author, + batch_digest, + transactions.clone(), + ); + } +} + +#[test] +fn test_batch_digest_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Create a filter that only allows transactions with a specific batch digest + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = BatchTransactionFilter::empty() + .add_batch_digest_filter(true, batch_digest) + .add_all_filter(false); + + // Verify that the filter allows transactions with the specified batch digest + verify_all_transactions_allowed( + filter.clone(), + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + + // Verify that the filter denies transactions with a different batch digest + let different_batch_digest = HashValue::random(); + verify_all_transactions_rejected( + filter.clone(), + batch_id, + batch_author, + different_batch_digest, + transactions.clone(), + ); + + // Create a filter that denies transactions with a specific batch digest + let filter = BatchTransactionFilter::empty().add_batch_digest_filter(false, batch_digest); + + // Verify that the filter denies transactions with the specified batch digest + verify_all_transactions_rejected( + filter.clone(), + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + + // Verify that the filter allows transactions with a different batch digest + verify_all_transactions_allowed( + filter.clone(), + batch_id, + batch_author, + different_batch_digest, + transactions.clone(), + ); + } +} + +#[test] +fn test_empty_filter() { + for use_new_txn_payload_format in [false, true] { + // Create an empty filter + let filter = BatchTransactionFilter::empty(); + + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Verify that all transactions are allowed + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + verify_all_transactions_allowed( + filter.clone(), + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + } +} + +#[test] +fn test_multiple_matchers_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Create a filter that only allows batch transactions with a specific author and sender (txn 0) + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let batch_transaction_matchers = vec![ + BatchTransactionMatcher::Batch(BatchMatcher::BatchAuthor(batch_author)), + 
BatchTransactionMatcher::Transaction(TransactionMatcher::Sender( + transactions[0].sender(), + )), + ]; + let filter = BatchTransactionFilter::empty() + .add_multiple_matchers_filter(true, batch_transaction_matchers) + .add_all_filter(false); + + // Verify that the filter returns no transactions with a different batch author + verify_all_transactions_rejected( + filter.clone(), + batch_id, + PeerId::random(), // Use a different author + batch_digest, + transactions.clone(), + ); + + // Verify that the filter returns transactions with the specified batch author and sender + let filtered_transactions = filter.filter_batch_transactions( + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[0..1].to_vec()); + + // Create a filter that denies batch transactions with a specific author and sender (txn 0 and 1) + let batch_transaction_matchers_0 = vec![ + BatchTransactionMatcher::Batch(BatchMatcher::BatchAuthor(batch_author)), + BatchTransactionMatcher::Transaction(TransactionMatcher::Sender( + transactions[0].sender(), + )), + ]; + let batch_transaction_matchers_1 = vec![ + BatchTransactionMatcher::Batch(BatchMatcher::BatchAuthor(batch_author)), + BatchTransactionMatcher::Transaction(TransactionMatcher::Sender( + transactions[1].sender(), + )), + ]; + let filter = BatchTransactionFilter::empty() + .add_multiple_matchers_filter(false, batch_transaction_matchers_0) + .add_multiple_matchers_filter(false, batch_transaction_matchers_1) + .add_all_filter(true); + + // Verify that the filter returns all transactions with a different batch author + verify_all_transactions_allowed( + filter.clone(), + batch_id, + PeerId::random(), // Use a different author + batch_digest, + transactions.clone(), + ); + + // Verify that the filter rejects transactions with the specified batch author and senders + let filtered_transactions = filter.filter_batch_transactions( + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[2..].to_vec()); + } +} + +/// Verifies that all transactions are allowed by the given filter +fn verify_all_transactions_allowed( + filter: BatchTransactionFilter, + batch_id: BatchId, + batch_author: PeerId, + batch_digest: HashValue, + transactions: Vec<SignedTransaction>, +) { + let filtered_transactions = filter.filter_batch_transactions( + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions); +} + +/// Verifies that all transactions are rejected by the given filter +fn verify_all_transactions_rejected( + filter: BatchTransactionFilter, + batch_id: BatchId, + batch_author: PeerId, + batch_digest: HashValue, + transactions: Vec<SignedTransaction>, +) { + let filtered_transactions = filter.filter_batch_transactions( + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + assert!(filtered_transactions.is_empty()); +} diff --git a/crates/aptos-transaction-filters/src/tests/batch_transaction_filter_config.rs b/crates/aptos-transaction-filters/src/tests/batch_transaction_filter_config.rs new file mode 100644 index 0000000000000..56881d5697b8f --- /dev/null +++ b/crates/aptos-transaction-filters/src/tests/batch_transaction_filter_config.rs @@ -0,0 +1,196 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{batch_transaction_filter::BatchTransactionFilter, tests::utils}; +use aptos_types::{quorum_store::BatchId, PeerId}; + +#[test] +fn test_batch_transaction_filter_config_allow() {
for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions based on multiple criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let batch_transaction_filter_string = format!( + r#" + batch_transaction_rules: + - Allow: + - Transaction: + Sender: "{}" + - Allow: + - Transaction: + ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000001" + - Allow: + - Transaction: + PublicKey: + Ed25519: + - "{}" + - Allow: + - Transaction: + EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000003" + - entry + - sub + - Allow: + - Transaction: + AccountAddress: "{}" + - Deny: + - Batch: + All + "#, + transactions[0].sender().to_standard_string(), + utils::get_ed25519_public_key(&transactions[2]), + utils::get_module_address(&transactions[4]).to_standard_string(), + ); + let batch_transaction_filter = + serde_yaml::from_str::<BatchTransactionFilter>(&batch_transaction_filter_string) + .unwrap(); + + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Verify that only the first five transactions are allowed + let filtered_transactions = batch_transaction_filter.filter_batch_transactions( + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[0..5].to_vec()); + } +} + +#[test] +fn test_batch_transaction_filter_config_deny() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that denies transactions based on multiple criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let batch_transaction_filter_string = format!( + r#" + batch_transaction_rules: + - Deny: + - Transaction: + ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000000" + - Deny: + - Transaction: + Sender: "{}" + - Deny: + - Transaction: + EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000002" + - entry + - new + - Deny: + - Transaction: + ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000003" + - Deny: + - Transaction: + AccountAddress: "{}" + - Allow: + - Transaction: + All + "#, + transactions[1].sender().to_standard_string(), + utils::get_module_address(&transactions[4]).to_standard_string(), + ); + let batch_transaction_filter = + serde_yaml::from_str::<BatchTransactionFilter>(&batch_transaction_filter_string) + .unwrap(); + + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Verify that the first five transactions are denied + let filtered_transactions = batch_transaction_filter.filter_batch_transactions( + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[5..].to_vec()); + } +} + +#[test] +fn test_batch_transaction_filter_config_multiple_matchers() { + for use_new_txn_payload_format in [false, true] { + // Create a batch ID, author, and digest + let (batch_id, batch_author, batch_digest) = utils::get_random_batch_info(); + + // Create a filter that denies transactions based on multiple criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let batch_transaction_filter_string = format!( + r#" + batch_transaction_rules: + - Deny: + - Transaction: + Sender: "{}" + - Transaction: + ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000000" + -
Batch: + BatchId: + id: {} + nonce: 0 + - Deny: + - Transaction: + Sender: "{}" + - Transaction: + EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000001" + - entry + - check + - Batch: + BatchId: + id: {} + nonce: 0 + - Deny: + - Transaction: + Sender: "{}" + - Batch: + BatchId: + id: 999 + nonce: 0 + - Deny: + - Batch: + BatchAuthor: {} + - Allow: + - Transaction: + All + "#, + transactions[0].sender().to_standard_string(), + batch_id.id, + transactions[1].sender().to_standard_string(), + batch_id.id, + transactions[2].sender().to_standard_string(), + batch_author.to_standard_string(), + ); + let batch_transaction_filter = + serde_yaml::from_str::<BatchTransactionFilter>(&batch_transaction_filter_string) + .unwrap(); + + // Verify that the first two transactions are denied when the batch has a different author + let filtered_transactions = batch_transaction_filter.filter_batch_transactions( + batch_id, + PeerId::random(), // Use a different author + batch_digest, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[2..].to_vec()); + + // Verify that all transactions are denied when the batch has the rejected author + let filtered_transactions = batch_transaction_filter.filter_batch_transactions( + batch_id, + batch_author, + batch_digest, + transactions.clone(), + ); + assert!(filtered_transactions.is_empty()); + + // Verify that all transactions are allowed in a different batch (different author and ID) + let filtered_transactions = batch_transaction_filter.filter_batch_transactions( + BatchId::new_for_test(0), // Use a different batch ID + PeerId::random(), // Use a different author + batch_digest, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions); + } +} diff --git a/crates/aptos-transaction-filters/src/tests/block_transaction_filter.rs b/crates/aptos-transaction-filters/src/tests/block_transaction_filter.rs new file mode 100644 index 0000000000000..64c896de25b1b --- /dev/null +++ b/crates/aptos-transaction-filters/src/tests/block_transaction_filter.rs @@ -0,0 +1,581 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + block_transaction_filter::{BlockMatcher, BlockTransactionFilter, BlockTransactionMatcher}, + tests::utils, + transaction_filter::TransactionMatcher, +}; +use aptos_crypto::HashValue; +use aptos_types::transaction::SignedTransaction; +use move_core_types::account_address::AccountAddress; + +#[test] +fn test_all_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that allows all transactions + let filter = BlockTransactionFilter::empty().add_all_filter(true); + + // Create a block ID, author, epoch, and timestamp + let (block_id, block_author, block_epoch, block_timestamp) = utils::get_random_block_info(); + + // Verify that all transactions are allowed + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + verify_all_transactions_allowed( + filter, + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Create a filter that denies all transactions + let filter = BlockTransactionFilter::empty().add_all_filter(false); + + // Verify that all transactions are denied + verify_all_transactions_rejected( + filter, + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } +} + +#[test] +fn test_block_author_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a block ID, author, epoch, and timestamp
+ let (block_id, block_author, block_epoch, block_timestamp) = utils::get_random_block_info(); + + // Create a filter that only allows transactions with a specific block author + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = BlockTransactionFilter::empty() + .add_block_author_filter(true, block_author) + .add_all_filter(false); + + // Verify that the filter allows transactions with the specified block author + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Verify that the filter denies transactions with a different block author + let different_block_author = AccountAddress::random(); + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(different_block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Verify that the filter denies transactions with a missing block author + verify_all_transactions_rejected( + filter.clone(), + block_id, + None, + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Create a filter that denies transactions with a specific block author + let filter = BlockTransactionFilter::empty().add_block_author_filter(false, block_author); + + // Verify that the filter denies transactions with the specified block author + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Verify that the filter allows transactions with a different block author + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(different_block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Verify that the filter allows transactions with a missing block author + verify_all_transactions_allowed( + filter.clone(), + block_id, + None, + block_epoch, + block_timestamp, + transactions.clone(), + ); + } +} + +#[test] +fn test_block_id_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a block ID, author, epoch, and timestamp + let (block_id, block_author, block_epoch, block_timestamp) = utils::get_random_block_info(); + + // Create a filter that only allows transactions with a specific block ID + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = BlockTransactionFilter::empty() + .add_block_id_filter(true, block_id) + .add_all_filter(false); + + // Verify that the filter allows transactions within the specified block ID + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Verify that the filter denies transactions with a different block ID + let different_block_id = HashValue::random(); + verify_all_transactions_rejected( + filter.clone(), + different_block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Create a filter that denies transactions with a specific block ID + let filter = BlockTransactionFilter::empty().add_block_id_filter(false, block_id); + + // Verify that the filter denies transactions within the specified block ID + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + + // Verify that the filter allows transactions with a different block ID + let different_block_id = HashValue::random(); + 
verify_all_transactions_allowed( + filter.clone(), + different_block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } +} + +#[test] +fn test_block_epoch_greater_than_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions with a block epoch greater than a specific value + let filter = BlockTransactionFilter::empty() + .add_block_epoch_greater_than_filter(true, 1000) + .add_all_filter(false); + + // Create a block ID, author, and timestamp + let (block_id, block_author, _, block_timestamp) = utils::get_random_block_info(); + + // Verify that the filter only allows transactions with a block epoch greater than 1000 + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + for block_epoch in [0, 999, 1000] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + for block_epoch in [1001, 1002] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + + // Create a filter that denies transactions with a block epoch greater than a specific value + let filter = BlockTransactionFilter::empty() + .add_block_epoch_greater_than_filter(false, 1000) + .add_all_filter(true); + + // Verify that the filter only allows transactions with a block epoch less than or equal to 1000 + for block_epoch in [0, 999, 1000] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + for block_epoch in [1001, 2000] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + } +} + +#[test] +fn test_block_epoch_less_than_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions with a block epoch less than a specific value + let filter = BlockTransactionFilter::empty() + .add_block_epoch_less_than_filter(true, 1000) + .add_all_filter(false); + + // Create a block ID, author, and timestamp + let (block_id, block_author, _, block_timestamp) = utils::get_random_block_info(); + + // Verify that the filter only allows transactions with a block epoch less than 1000 + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + for block_epoch in [0, 999] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + for block_epoch in [1000, 1001] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + + // Create a filter that denies transactions with a block epoch less than a specific value + let filter = BlockTransactionFilter::empty() + .add_block_epoch_less_than_filter(false, 1000) + .add_all_filter(true); + + // Verify that the filter only allows transactions with a block epoch greater than or equal to 1000 + for block_epoch in [0, 999] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + for block_epoch in [1000, 1001] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + 
block_epoch, + block_timestamp, + transactions.clone(), + ); + } + } +} + +#[test] +fn test_block_timestamp_greater_than_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions with a block timestamp greater than a specific value + let filter = BlockTransactionFilter::empty() + .add_block_timestamp_greater_than_filter(true, 1000) + .add_all_filter(false); + + // Create a block ID, author, and epoch + let (block_id, block_author, block_epoch, _) = utils::get_random_block_info(); + + // Verify that the filter only allows transactions with a block timestamp greater than 1000 + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + for block_timestamp in [0, 999, 1000] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + for block_timestamp in [1001, 2000] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + + // Create a filter that denies transactions with a block timestamp greater than a specific value + let filter = BlockTransactionFilter::empty() + .add_block_timestamp_greater_than_filter(false, 1000) + .add_all_filter(true); + + // Verify that the filter only allows transactions with a block timestamp less than or equal to 1000 + for block_timestamp in [0, 999, 1000] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + for block_timestamp in [1001, 2000] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + } +} + +#[test] +fn test_block_timestamp_less_than_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions with a block timestamp less than a specific value + let filter = BlockTransactionFilter::empty() + .add_block_timestamp_less_than_filter(true, 1000) + .add_all_filter(false); + + // Create a block ID, author, and epoch + let (block_id, block_author, block_epoch, _) = utils::get_random_block_info(); + + // Verify that the filter only allows transactions with a block timestamp less than 1000 + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + for block_timestamp in [0, 999] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + for block_timestamp in [1000, 1001, 2000] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + + // Create a filter that denies transactions with a block timestamp less than a specific value + let filter = BlockTransactionFilter::empty() + .add_block_timestamp_less_than_filter(false, 1000) + .add_all_filter(true); + + // Verify that the filter only allows transactions with a block timestamp greater than or equal to 1000 + for block_timestamp in [0, 999] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + for block_timestamp in [1000, 1001, 2000] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + 
block_epoch, + block_timestamp, + transactions.clone(), + ); + } + } +} + +#[test] +fn test_empty_filter() { + for use_new_txn_payload_format in [false, true] { + // Create an empty filter + let filter = BlockTransactionFilter::empty(); + + // Create a block ID, author, epoch, and timestamp + let (block_id, block_author, block_epoch, block_timestamp) = utils::get_random_block_info(); + + // Verify that all transactions are allowed + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } +} + +#[test] +fn test_multiple_matchers_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a block ID, author, epoch, and timestamp + let (block_id, block_author, block_epoch, block_timestamp) = utils::get_random_block_info(); + + // Create a filter that only allows block transactions with epoch > 1000 and a specific sender (txn 0) + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let block_transaction_matchers = vec![ + BlockTransactionMatcher::Block(BlockMatcher::BlockEpochGreaterThan(1000)), + BlockTransactionMatcher::Transaction(TransactionMatcher::Sender( + transactions[0].sender(), + )), + ]; + let filter = BlockTransactionFilter::empty() + .add_multiple_matchers_filter(true, block_transaction_matchers) + .add_all_filter(false); + + // Verify that the filter returns no transactions with block epoch less than or equal to 1000 + for block_epoch in [0, 999, 1000] { + verify_all_transactions_rejected( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + + // Verify that the filter returns transactions with block epoch greater than 1000 and the specified sender + for block_epoch in [1001, 2002] { + let filtered_transactions = filter.filter_block_transactions( + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[0..1].to_vec()); + } + + // Create a filter that denies block transactions with timestamp < 1000 and a specific sender (txn 0 and 1) + let block_transaction_matchers_0 = vec![ + BlockTransactionMatcher::Block(BlockMatcher::BlockTimeStampLessThan(1000)), + BlockTransactionMatcher::Transaction(TransactionMatcher::Sender( + transactions[0].sender(), + )), + ]; + let block_transaction_matchers_1 = vec![ + BlockTransactionMatcher::Block(BlockMatcher::BlockTimeStampLessThan(1000)), + BlockTransactionMatcher::Transaction(TransactionMatcher::Sender( + transactions[1].sender(), + )), + ]; + let filter = BlockTransactionFilter::empty() + .add_multiple_matchers_filter(false, block_transaction_matchers_0) + .add_multiple_matchers_filter(false, block_transaction_matchers_1) + .add_all_filter(true); + + // Verify that it returns all transactions with block timestamp greater than or equal to 1000 + for block_timestamp in [1000, 1001, 2000] { + verify_all_transactions_allowed( + filter.clone(), + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + } + + // Verify that it returns no transactions with block timestamp less than 1000 and the specified senders + for block_timestamp in [0, 999] { + let filtered_transactions = filter.filter_block_transactions( + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + 
assert_eq!(filtered_transactions, transactions[2..].to_vec()); + } + } +} + +/// Verifies that all transactions are allowed by the given filter +fn verify_all_transactions_allowed( + filter: BlockTransactionFilter, + block_id: HashValue, + block_author: Option<AccountAddress>, + block_epoch: u64, + block_timestamp: u64, + transactions: Vec<SignedTransaction>, +) { + let filtered_transactions = filter.filter_block_transactions( + block_id, + block_author, + block_epoch, + block_timestamp, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions); +} + +/// Verifies that all transactions are rejected by the given filter +fn verify_all_transactions_rejected( + filter: BlockTransactionFilter, + block_id: HashValue, + block_author: Option<AccountAddress>, + block_epoch: u64, + block_timestamp: u64, + transactions: Vec<SignedTransaction>, +) { + let filtered_transactions = filter.filter_block_transactions( + block_id, + block_author, + block_epoch, + block_timestamp, + transactions, + ); + assert!(filtered_transactions.is_empty()); +} diff --git a/crates/aptos-transaction-filters/src/tests/block_transaction_filter_config.rs b/crates/aptos-transaction-filters/src/tests/block_transaction_filter_config.rs new file mode 100644 index 0000000000000..985b7c40c6493 --- /dev/null +++ b/crates/aptos-transaction-filters/src/tests/block_transaction_filter_config.rs @@ -0,0 +1,213 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{block_transaction_filter::BlockTransactionFilter, tests::utils}; +use aptos_crypto::HashValue; +use move_core_types::account_address::AccountAddress; + +#[test] +fn test_block_transaction_filter_config_allow() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions based on multiple criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let block_transaction_filter_string = format!( + r#" + block_transaction_rules: + - Allow: + - Transaction: + Sender: "{}" + - Allow: + - Transaction: + ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000001" + - Allow: + - Transaction: + PublicKey: + Ed25519: + - "{}" + - Allow: + - Transaction: + EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000003" + - entry + - sub + - Allow: + - Transaction: + AccountAddress: "{}" + - Deny: + - Block: + All + "#, + transactions[0].sender().to_standard_string(), + utils::get_ed25519_public_key(&transactions[2]), + utils::get_module_address(&transactions[4]).to_standard_string(), + ); + let block_transaction_filter = + serde_yaml::from_str::<BlockTransactionFilter>(&block_transaction_filter_string) + .unwrap(); + + // Create a block ID, author, epoch, and timestamp + let (block_id, block_author, block_epoch, block_timestamp) = utils::get_random_block_info(); + + // Verify that only the first five transactions are allowed + let filtered_transactions = block_transaction_filter.filter_block_transactions( + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[0..5].to_vec()); + } +} + +#[test] +fn test_block_transaction_filter_config_deny() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that denies transactions based on multiple criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let block_transaction_filter_string = format!( + r#" + block_transaction_rules: + - Deny: + - Transaction: + ModuleAddress: 
"0000000000000000000000000000000000000000000000000000000000000000" + - Deny: + - Transaction: + Sender: "{}" + - Deny: + - Transaction: + EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000002" + - entry + - new + - Deny: + - Transaction: + ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000003" + - Deny: + - Transaction: + AccountAddress: "{}" + - Allow: + - Transaction: + All + "#, + transactions[1].sender().to_standard_string(), + utils::get_module_address(&transactions[4]).to_standard_string(), + ); + let block_transaction_filter = + serde_yaml::from_str::(&block_transaction_filter_string) + .unwrap(); + + // Create a block ID, author, epoch, and timestamp + let (block_id, block_author, block_epoch, block_timestamp) = utils::get_random_block_info(); + + // Verify that the first five transactions are denied + let filtered_transactions = block_transaction_filter.filter_block_transactions( + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[5..].to_vec()); + } +} + +#[test] +fn test_block_transaction_filter_config_multiple_matchers() { + for use_new_txn_payload_format in [false, true] { + // Create a block ID, author, epoch, and timestamp + let (block_id, block_author, block_epoch, block_timestamp) = utils::get_random_block_info(); + + // Create a malicious block author (where blocks are not allowed) + let malicious_block_author = AccountAddress::random(); + + // Create a filter that denies transactions based on multiple criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let block_transaction_filter_string = format!( + r#" + block_transaction_rules: + - Deny: + - Transaction: + Sender: "{}" + - Transaction: + ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000000" + - Block: + BlockId: "{}" + - Deny: + - Transaction: + Sender: "{}" + - Transaction: + EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000001" + - entry + - check + - Block: + BlockId: "{}" + - Deny: + - Transaction: + Sender: "{}" + - Block: + BlockId: "0000000000000000000000000000000000000000000000000000000000000000" + - Deny: + - Block: + Author: "{}" + - Deny: + - Block: + BlockEpochLessThan: {} + - Allow: + - Transaction: + All + "#, + transactions[0].sender().to_standard_string(), + block_id.to_hex(), + transactions[1].sender().to_standard_string(), + block_id.to_hex(), + transactions[2].sender().to_standard_string(), + malicious_block_author, + block_epoch, + ); + let block_transaction_filter = + serde_yaml::from_str::(&block_transaction_filter_string) + .unwrap(); + + // Verify that the first two transactions are denied in the current block + let filtered_transactions = block_transaction_filter.filter_block_transactions( + block_id, + Some(block_author), + block_epoch, + block_timestamp, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions[2..].to_vec()); + + // Verify that all transactions are denied in the previous block + let filtered_transactions = block_transaction_filter.filter_block_transactions( + HashValue::random(), + Some(block_author), + block_epoch - 1, + block_timestamp - 1, + transactions.clone(), + ); + assert!(filtered_transactions.is_empty()); + + // Verify that all transactions are allowed in a completely different block + let filtered_transactions = block_transaction_filter.filter_block_transactions( + HashValue::random(), 
+ Some(block_author), + block_epoch + 1, + block_timestamp + 1, + transactions.clone(), + ); + assert_eq!(filtered_transactions, transactions); + + // Verify that all transactions are denied in a block with a malicious author + let filtered_transactions = block_transaction_filter.filter_block_transactions( + HashValue::random(), + Some(malicious_block_author), + block_epoch + 1, + block_timestamp + 1, + transactions.clone(), + ); + assert!(filtered_transactions.is_empty()); + } +} diff --git a/crates/aptos-transaction-filters/src/tests/mod.rs b/crates/aptos-transaction-filters/src/tests/mod.rs new file mode 100644 index 0000000000000..5abb829c6b7dd --- /dev/null +++ b/crates/aptos-transaction-filters/src/tests/mod.rs @@ -0,0 +1,10 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +mod batch_transaction_filter; +mod batch_transaction_filter_config; +mod block_transaction_filter; +mod block_transaction_filter_config; +mod transaction_filter; +mod transaction_filter_config; +mod utils; diff --git a/crates/aptos-transaction-filters/src/tests/transaction_filter.rs b/crates/aptos-transaction-filters/src/tests/transaction_filter.rs new file mode 100644 index 0000000000000..dadd4a9aa3a60 --- /dev/null +++ b/crates/aptos-transaction-filters/src/tests/transaction_filter.rs @@ -0,0 +1,348 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + tests::utils, + transaction_filter::{TransactionFilter, TransactionMatcher}, +}; + +#[test] +fn test_account_address_filter_simple() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions from specific account addresses. + // These are: (i) txn 0 sender; (ii) txn 1 sender; and (iii) txn 2 entry function address. + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = TransactionFilter::empty() + .add_account_address_filter(true, transactions[0].sender()) + .add_account_address_filter(true, transactions[1].sender()) + .add_account_address_filter(true, utils::get_module_address(&transactions[2])) + .add_all_filter(false); + + // Verify that the filter returns transactions from the specified account addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..3].to_vec()); + + // Create a filter that denies transactions from the specified account addresses (as above) + let filter = TransactionFilter::empty() + .add_account_address_filter(false, transactions[0].sender()) + .add_account_address_filter(false, transactions[1].sender()) + .add_account_address_filter(false, utils::get_module_address(&transactions[2])) + .add_all_filter(true); + + // Verify that the filter returns transactions from other account addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[3..].to_vec()); + } +} + +#[test] +fn test_account_address_filter_multisig() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions from specific account addresses. + // These are: (i) txn 0 multisig address; (ii) txn 1 sender; and (iii) txn 2 multisig address. 
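+ // For reference, the same allow/deny pair expressed in the YAML config format (as exercised by the *_filter_config tests) would look roughly like the sketch below, where the quoted value would be the multisig address extracted from txn 0: + // transaction_rules: + // - Allow: + // - AccountAddress: "<multisig address of txn 0>" + // - Deny: + // - All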
+ let transactions = utils::create_multisig_transactions(use_new_txn_payload_format); + let filter = TransactionFilter::empty() + .add_account_address_filter(true, utils::get_multisig_address(&transactions[0])) + .add_account_address_filter(true, transactions[1].sender()) + .add_account_address_filter(true, utils::get_multisig_address(&transactions[2])) + .add_all_filter(false); + + // Verify that the filter returns transactions from the specified account addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..3].to_vec()); + + // Create a filter that denies transactions from the specified account addresses (as above) + let filter = TransactionFilter::empty() + .add_account_address_filter(false, utils::get_multisig_address(&transactions[0])) + .add_account_address_filter(false, transactions[1].sender()) + .add_account_address_filter(false, utils::get_multisig_address(&transactions[2])) + .add_all_filter(true); + + // Verify that the filter returns transactions from other account addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[3..].to_vec()); + } +} + +#[test] +fn test_account_address_filter_script_argument() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions from specific account addresses. + // These are: (i) txn 0 script arg address; (ii) txn 1 sender; and (iii) txn 2 script arg address. + let transactions = utils::create_script_transactions(use_new_txn_payload_format); + let filter = TransactionFilter::empty() + .add_account_address_filter(true, utils::get_script_argument_address(&transactions[0])) + .add_account_address_filter(true, transactions[1].sender()) + .add_account_address_filter(true, utils::get_script_argument_address(&transactions[2])) + .add_all_filter(false); + + // Verify that the filter returns transactions from the specified account addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..3].to_vec()); + + // Create a filter that denies transactions from the specified account addresses (as above) + let filter = TransactionFilter::empty() + .add_account_address_filter(false, utils::get_script_argument_address(&transactions[0])) + .add_account_address_filter(false, transactions[1].sender()) + .add_account_address_filter(false, utils::get_script_argument_address(&transactions[2])) + .add_all_filter(true); + + // Verify that the filter returns transactions from other account addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[3..].to_vec()); + } +} + +#[test] +fn test_account_address_filter_transaction_authenticator() { + // Create a filter that only allows transactions from specific account addresses. + // These are: (i) txn 0 account authenticator; (ii) txn 1 account authenticator; and (iii) txn 2 sender. 
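+ // Note: fee payer transactions are signed with a TransactionAuthenticator::FeePayer, so the + // AccountAddress matcher resolves the fee payer address through the transaction authenticator + // (see matches_transaction_authenticator_address in transaction_filter.rs) rather than + // through the transaction payload.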
+ let transactions = utils::create_fee_payer_transactions(); + let filter = TransactionFilter::empty() + .add_account_address_filter(true, utils::get_fee_payer_address(&transactions[0])) + .add_account_address_filter(true, utils::get_fee_payer_address(&transactions[1])) + .add_account_address_filter(true, transactions[2].sender()) + .add_all_filter(false); + + // Verify that the filter returns transactions from the specified account addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..3].to_vec()); + + // Create a filter that denies transactions from the specified account addresses (as above) + let filter = TransactionFilter::empty() + .add_account_address_filter(false, utils::get_fee_payer_address(&transactions[0])) + .add_account_address_filter(false, utils::get_fee_payer_address(&transactions[1])) + .add_account_address_filter(false, transactions[2].sender()) + .add_all_filter(true); + + // Verify that the filter returns transactions from other account addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[3..].to_vec()); +} + +#[test] +fn test_all_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that allows all transactions + let filter = TransactionFilter::empty().add_all_filter(true); + + // Verify that all transactions are allowed + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions); + + // Create a filter that denies all transactions + let filter = TransactionFilter::empty().add_all_filter(false); + + // Verify that all transactions are denied + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert!(filtered_transactions.is_empty()); + } +} + +#[test] +fn test_empty_filter() { + for use_new_txn_payload_format in [false, true] { + // Create an empty filter + let filter = TransactionFilter::empty(); + + // Verify that all transactions are allowed + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions); + } +} + +#[test] +fn test_entry_function_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions with specific entry functions (txn 0 and txn 1) + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = TransactionFilter::empty() + .add_entry_function_filter( + true, + utils::get_module_address(&transactions[0]), + utils::get_module_name(&transactions[0]), + utils::get_function_name(&transactions[0]), + ) + .add_entry_function_filter( + true, + utils::get_module_address(&transactions[1]), + utils::get_module_name(&transactions[1]), + utils::get_function_name(&transactions[1]), + ) + .add_all_filter(false); + + // Verify that the filter returns only transactions with the specified entry functions + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..2].to_vec()); + + // Create a filter that denies transactions with specific entry functions (txn 0) + let filter = TransactionFilter::empty() + .add_entry_function_filter( + false, + 
utils::get_module_address(&transactions[0]), + utils::get_module_name(&transactions[0]), + utils::get_function_name(&transactions[0]), + ) + .add_all_filter(true); + + // Verify that the filter returns all transactions except those with the specified entry functions + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[1..].to_vec()); + } +} + +#[test] +fn test_module_address_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions from specific module addresses (txn 0 and txn 1) + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = TransactionFilter::empty() + .add_module_address_filter(true, utils::get_module_address(&transactions[0])) + .add_module_address_filter(true, utils::get_module_address(&transactions[1])) + .add_all_filter(false); + + // Verify that the filter returns only transactions from the specified module addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..2].to_vec()); + + // Create a filter that denies transactions from specific module addresses (txn 0 and txn 1) + let filter = TransactionFilter::empty() + .add_module_address_filter(false, utils::get_module_address(&transactions[0])) + .add_module_address_filter(false, utils::get_module_address(&transactions[1])) + .add_all_filter(true); + + // Verify that the filter returns all transactions except those from the specified module addresses + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[2..].to_vec()); + } +} + +#[test] +fn test_multiple_matchers_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions with specific criteria (only txn 1 should match) + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let transaction_matchers = vec![ + TransactionMatcher::Sender(transactions[1].sender()), + TransactionMatcher::ModuleAddress(utils::get_module_address(&transactions[1])), + TransactionMatcher::EntryFunction( + utils::get_module_address(&transactions[1]), + utils::get_module_name(&transactions[1]), + utils::get_function_name(&transactions[1]), + ), + ]; + let filter = TransactionFilter::empty() + .add_multiple_matchers_filter(true, transaction_matchers.clone()) + .add_all_filter(false); + + // Verify that the filter returns only transactions that match all specified matchers + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, vec![transactions[1].clone()]); + + // Create a filter that only allows transactions with specific criteria (none should match) + let transaction_matchers = vec![ + TransactionMatcher::Sender(transactions[0].sender()), + TransactionMatcher::ModuleAddress(utils::get_module_address(&transactions[1])), + TransactionMatcher::ModuleAddress(utils::get_module_address(&transactions[2])), + ]; + let filter = TransactionFilter::empty() + .add_multiple_matchers_filter(true, transaction_matchers) + .add_all_filter(false); + + // Verify that the filter returns no transactions (none should match) + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert!(filtered_transactions.is_empty()); + + // Create a filter that denies transactions with a specific sender and module 
address (txn 0) + let transaction_matchers = vec![ + TransactionMatcher::Sender(transactions[0].sender()), + TransactionMatcher::ModuleAddress(utils::get_module_address(&transactions[0])), + ]; + let filter = TransactionFilter::empty() + .add_multiple_matchers_filter(false, transaction_matchers) + .add_all_filter(true); + + // Verify that it returns all transactions except those with the specified sender and module address + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[1..].to_vec()); + } +} + +#[test] +fn test_public_key_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions from specific public keys. + // These are: (i) txn 0 authenticator public key; and (ii) txn 1 authenticator public key. + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = TransactionFilter::empty() + .add_public_key_filter(true, utils::get_auth_public_key(&transactions[0])) + .add_public_key_filter(true, utils::get_auth_public_key(&transactions[1])) + .add_all_filter(false); + + // Verify that the filter returns transactions with the specified public keys + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..2].to_vec()); + + // Create a filter that denies transactions from the specified public keys (as above) + let filter = TransactionFilter::empty() + .add_public_key_filter(false, utils::get_auth_public_key(&transactions[0])) + .add_public_key_filter(false, utils::get_auth_public_key(&transactions[1])) + .add_all_filter(true); + + // Verify that it returns transactions from other public keys + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[2..].to_vec()); + } +} + +#[test] +fn test_sender_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions from specific senders (txn 0 and txn 1) + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = TransactionFilter::empty() + .add_sender_filter(true, transactions[0].sender()) + .add_sender_filter(true, transactions[1].sender()) + .add_all_filter(false); + + // Verify that the filter returns only transactions from the specified senders + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..2].to_vec()); + + // Create a filter that denies transactions from specific senders (txn 0 and txn 1) + let filter = TransactionFilter::empty() + .add_sender_filter(false, transactions[0].sender()) + .add_sender_filter(false, transactions[1].sender()) + .add_all_filter(true); + + // Verify that the filter returns all transactions except those from the specified senders + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[2..].to_vec()); + } +} + +#[test] +fn test_transaction_id_filter() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions with a specific transaction ID (txn 0) + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let filter = TransactionFilter::empty() + .add_transaction_id_filter(true, transactions[0].committed_hash()) + .add_all_filter(false); + + // Verify that the filter 
returns only the transaction with the specified ID + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, vec![transactions[0].clone()]); + + // Create a filter that denies transactions with a specific transaction ID (txn 0) + let filter = TransactionFilter::empty() + .add_transaction_id_filter(false, transactions[0].committed_hash()) + .add_all_filter(true); + + // Verify that the filter returns all transactions except the one with the specified ID + let filtered_transactions = filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[1..].to_vec()); + } +} diff --git a/crates/aptos-transaction-filters/src/tests/transaction_filter_config.rs b/crates/aptos-transaction-filters/src/tests/transaction_filter_config.rs new file mode 100644 index 0000000000000..4a6442dd09230 --- /dev/null +++ b/crates/aptos-transaction-filters/src/tests/transaction_filter_config.rs @@ -0,0 +1,120 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{tests::utils, transaction_filter::TransactionFilter}; + +#[test] +fn test_transaction_filter_config_allow() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that only allows transactions based on multiple criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let transaction_filter_string = format!( + r#" + transaction_rules: + - Allow: + - Sender: "{}" + - Allow: + - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000001" + - Allow: + - PublicKey: + Ed25519: + - "{}" + - Allow: + - EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000003" + - entry + - sub + - Allow: + - AccountAddress: "{}" + - Deny: + - All + "#, + transactions[0].sender().to_standard_string(), + utils::get_ed25519_public_key(&transactions[2]), + utils::get_module_address(&transactions[4]).to_standard_string(), + ); + let transaction_filter = + serde_yaml::from_str::<TransactionFilter>(&transaction_filter_string).unwrap(); + + // Verify that only the first five transactions are allowed + let filtered_transactions = transaction_filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[0..5].to_vec()); + } +} + +#[test] +fn test_transaction_filter_config_deny() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that denies transactions based on multiple criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let transaction_filter_string = format!( + r#" + transaction_rules: + - Deny: + - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000000" + - Deny: + - Sender: "{}" + - Deny: + - EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000002" + - entry + - new + - Deny: + - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000003" + - Deny: + - AccountAddress: "{}" + - Allow: + - All + "#, + transactions[1].sender().to_standard_string(), + utils::get_module_address(&transactions[4]).to_standard_string(), + ); + let transaction_filter = + serde_yaml::from_str::<TransactionFilter>(&transaction_filter_string).unwrap(); + + // Verify that the first five transactions are denied + let filtered_transactions = transaction_filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[5..].to_vec()); + } +} + +#[test] +fn 
test_transaction_filter_config_multiple_matchers() { + for use_new_txn_payload_format in [false, true] { + // Create a filter that denies transactions based on multiple matching criteria + let transactions = utils::create_entry_function_transactions(use_new_txn_payload_format); + let transaction_filter_string = format!( + r#" + transaction_rules: + - Deny: + - Sender: "{}" + - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000000" + - Deny: + - Sender: "{}" + - ModuleAddress: "0000000000000000000000000000000000000000000000000000000000000001" + - Deny: + - Sender: "{}" + - EntryFunction: + - "0000000000000000000000000000000000000000000000000000000000000002" + - entry + - new + - Deny: + - Sender: "{}" + - AccountAddress: "{}" + - Allow: + - All + "#, + transactions[0].sender().to_standard_string(), + transactions[1].sender().to_standard_string(), + transactions[2].sender().to_standard_string(), + transactions[3].sender().to_standard_string(), + utils::get_module_address(&transactions[3]).to_standard_string(), + ); + let transaction_filter = + serde_yaml::from_str::<TransactionFilter>(&transaction_filter_string).unwrap(); + + // Verify that the first four transactions are denied + let filtered_transactions = transaction_filter.filter_transactions(transactions.clone()); + assert_eq!(filtered_transactions, transactions[4..].to_vec()); + } +} diff --git a/crates/aptos-transaction-filters/src/tests/utils.rs b/crates/aptos-transaction-filters/src/tests/utils.rs new file mode 100644 index 0000000000000..679f6a916e85f --- /dev/null +++ b/crates/aptos-transaction-filters/src/tests/utils.rs @@ -0,0 +1,344 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::tests::utils; +use aptos_crypto::{ + ed25519::{Ed25519PrivateKey, Ed25519PublicKey, Ed25519Signature}, + HashValue, PrivateKey, SigningKey, Uniform, +}; +use aptos_types::{ + chain_id::ChainId, + move_utils::MemberId, + quorum_store::BatchId, + transaction::{ + authenticator::{AccountAuthenticator, AnyPublicKey, TransactionAuthenticator}, + EntryFunction, Multisig, MultisigTransactionPayload, RawTransaction, Script, + SignedTransaction, TransactionExecutable, TransactionExecutableRef, TransactionExtraConfig, + TransactionPayload, TransactionPayloadInner, + }, + PeerId, +}; +use move_core_types::{account_address::AccountAddress, transaction_argument::TransactionArgument}; +use rand::{rngs::OsRng, thread_rng, Rng}; + +/// Creates and returns an account authenticator with the given public key +pub fn create_account_authenticator(public_key: Ed25519PublicKey) -> AccountAuthenticator { + AccountAuthenticator::Ed25519 { + public_key, + signature: Ed25519Signature::dummy_signature(), + } +} + +/// Creates and returns an entry function with the given member ID +pub fn create_entry_function(function: MemberId) -> EntryFunction { + let MemberId { + module_id, + member_id: function_id, + } = function; + EntryFunction::new(module_id, function_id, vec![], vec![]) +} + +/// Creates and returns a signed transaction with an entry function payload +pub fn create_entry_function_transaction( + function: MemberId, + use_new_txn_payload_format: bool, +) -> SignedTransaction { + let entry_function = create_entry_function(function); + let transaction_payload = if use_new_txn_payload_format { + // Use the new payload format + let executable = TransactionExecutable::EntryFunction(entry_function); + let extra_config = TransactionExtraConfig::V1 { + multisig_address: None, + replay_protection_nonce: None, + }; + 
TransactionPayload::Payload(TransactionPayloadInner::V1 { + executable, + extra_config, + }) + } else { + // Use the old payload format + TransactionPayload::EntryFunction(entry_function) + }; + + create_signed_transaction(transaction_payload, false) +} + +/// Creates and returns a list of signed entry function transactions +pub fn create_entry_function_transactions( + use_new_txn_payload_format: bool, +) -> Vec<SignedTransaction> { + let mut entry_function_txns = vec![]; + + for (i, function_name) in [ + "add", "check", "new", "sub", "mul", "div", "mod", "pow", "exp", "sqrt", + ] + .iter() + .enumerate() + { + let transaction = create_entry_function_transaction( + str::parse(&format!("0x{}::entry::{}", i, function_name)).unwrap(), + use_new_txn_payload_format, + ); + entry_function_txns.push(transaction); + } + + entry_function_txns +} + +/// Creates and returns a signed fee payer transaction +pub fn create_fee_payer_transaction() -> SignedTransaction { + let entry_function = create_entry_function(str::parse("0x0::fee_payer::pay").unwrap()); + let transaction_payload = TransactionPayload::EntryFunction(entry_function); + + create_signed_transaction(transaction_payload, true) +} + +/// Creates and returns a list of signed fee payer transactions +pub fn create_fee_payer_transactions() -> Vec<SignedTransaction> { + let mut fee_payer_transactions = vec![]; + + for _ in 0..10 { + let transaction = create_fee_payer_transaction(); + fee_payer_transactions.push(transaction) + } + + fee_payer_transactions +} + +/// Creates and returns a multisig transaction with the given multisig address and function +pub fn create_multisig_transaction( + multisig_address: AccountAddress, + function: MemberId, + use_new_txn_payload_format: bool, +) -> SignedTransaction { + let transaction_payload = if use_new_txn_payload_format { + // Use the new payload format + let executable = TransactionExecutable::EntryFunction(create_entry_function(function)); + let extra_config = TransactionExtraConfig::V1 { + multisig_address: Some(multisig_address), + replay_protection_nonce: None, + }; + TransactionPayload::Payload(TransactionPayloadInner::V1 { + executable, + extra_config, + }) + } else { + // Use the old payload format + TransactionPayload::Multisig(Multisig { + multisig_address, + transaction_payload: Some(MultisigTransactionPayload::EntryFunction( + create_entry_function(function), + )), + }) + }; + + create_signed_transaction(transaction_payload, false) +} + +/// Creates and returns a list of signed multisig transactions +pub fn create_multisig_transactions(use_new_txn_payload_format: bool) -> Vec<SignedTransaction> { + let mut multisig_transactions = vec![]; + + for i in 0..10 { + let transaction = create_multisig_transaction( + AccountAddress::random(), + str::parse(&format!("0x{}::multisig::sign", i)).unwrap(), + use_new_txn_payload_format, + ); + multisig_transactions.push(transaction); + } + + multisig_transactions +} + +/// Creates and returns a signed transaction with the given payload and fee payer +pub fn create_signed_transaction( + transaction_payload: TransactionPayload, + fee_payer: bool, +) -> SignedTransaction { + let sender = AccountAddress::random(); + let sequence_number = 0; + let raw_transaction = RawTransaction::new( + sender, + sequence_number, + transaction_payload, + 0, + 0, + 0, + ChainId::new(10), + ); + + let private_key = Ed25519PrivateKey::generate(&mut thread_rng()); + let public_key = private_key.public_key(); + + if fee_payer { + SignedTransaction::new_fee_payer( + raw_transaction.clone(), + 
create_account_authenticator(public_key.clone()), + vec![], + vec![], + AccountAddress::random(), + create_account_authenticator(public_key.clone()), + ) + } else { + SignedTransaction::new( + raw_transaction.clone(), + public_key.clone(), + private_key.sign(&raw_transaction).unwrap(), + ) + } +} + +/// Creates and returns a script transaction with the given payload +pub fn create_script_transaction(use_new_txn_payload_format: bool) -> SignedTransaction { + let script_arguments = vec![ + TransactionArgument::U64(0), + TransactionArgument::U128(0), + TransactionArgument::Address(AccountAddress::random()), + TransactionArgument::Bool(true), + ]; + let script = Script::new(vec![], vec![], script_arguments); + + let transaction_payload = if use_new_txn_payload_format { + // Use the new payload format + let executable = TransactionExecutable::Script(script); + let extra_config = TransactionExtraConfig::V1 { + multisig_address: None, + replay_protection_nonce: None, + }; + TransactionPayload::Payload(TransactionPayloadInner::V1 { + executable, + extra_config, + }) + } else { + // Use the old payload format + TransactionPayload::Script(script) + }; + + create_signed_transaction(transaction_payload, false) +} + +/// Creates and returns a list of signed script transactions +pub fn create_script_transactions(use_new_txn_payload_format: bool) -> Vec<SignedTransaction> { + let mut script_transactions = vec![]; + + for _ in 0..10 { + let transaction = create_script_transaction(use_new_txn_payload_format); + script_transactions.push(transaction); + } + + script_transactions +} + +/// Returns the first address argument of the given script +pub fn get_address_argument(script: &Script) -> AccountAddress { + for arg in script.args() { + if let TransactionArgument::Address(address) = arg { + return *address; + } + } + panic!("No address argument found in script transaction"); +} + +/// Returns the public key of the authenticator of the given transaction +pub fn get_auth_public_key(signed_transaction: &SignedTransaction) -> AnyPublicKey { + match signed_transaction.authenticator() { + TransactionAuthenticator::Ed25519 { public_key, .. } => AnyPublicKey::ed25519(public_key), + authenticator => panic!("Unexpected transaction authenticator: {:?}", authenticator), + } +} + +/// Returns the Ed25519 public key of the authenticator of the given transaction +pub fn get_ed25519_public_key(signed_transaction: &SignedTransaction) -> Ed25519PublicKey { + match signed_transaction.authenticator() { + TransactionAuthenticator::Ed25519 { public_key, .. } => public_key.clone(), + authenticator => panic!("Unexpected transaction authenticator: {:?}", authenticator), + } +} + +/// Returns the fee payer address of the given transaction +pub fn get_fee_payer_address(signed_transaction: &SignedTransaction) -> AccountAddress { + match signed_transaction.authenticator() { + TransactionAuthenticator::FeePayer { + fee_payer_address, .. 
+ } => fee_payer_address, + authenticator => panic!("Unexpected transaction authenticator: {:?}", authenticator), + } +} + +/// Returns the function name of the given transaction +pub fn get_function_name(txn: &SignedTransaction) -> String { + match txn.payload().executable_ref() { + Ok(TransactionExecutableRef::EntryFunction(entry_func)) => { + entry_func.function().to_string() + }, + payload => panic!("Unexpected transaction payload: {:?}", payload), + } +} + +/// Returns the module address of the given transaction +pub fn get_module_address(txn: &SignedTransaction) -> AccountAddress { + match txn.payload().executable_ref() { + Ok(TransactionExecutableRef::EntryFunction(entry_func)) => *entry_func.module().address(), + payload => panic!("Unexpected transaction payload: {:?}", payload), + } +} + +/// Returns the module name of the given transaction +pub fn get_module_name(txn: &SignedTransaction) -> String { + match txn.payload().executable_ref() { + Ok(TransactionExecutableRef::EntryFunction(entry_func)) => { + entry_func.module().name().to_string() + }, + payload => panic!("Unexpected transaction payload: {:?}", payload), + } +} + +/// Returns the multisig address of the given transaction +pub fn get_multisig_address(txn: &SignedTransaction) -> AccountAddress { + match txn.payload() { + TransactionPayload::Multisig(multisig) => multisig.multisig_address, + TransactionPayload::Payload(TransactionPayloadInner::V1 { + extra_config: + TransactionExtraConfig::V1 { + multisig_address, .. + }, + .. + }) => multisig_address.expect("Expected multisig address!"), + payload => panic!("Unexpected transaction payload: {:?}", payload), + } +} + +/// Creates and returns a random batch ID, author, and digest. +pub fn get_random_batch_info() -> (BatchId, PeerId, HashValue) { + let batch_id = BatchId::new_for_test(get_random_u64()); + let batch_author = PeerId::random(); + let batch_digest = HashValue::random(); + (batch_id, batch_author, batch_digest) +} + +/// Creates and returns a random block ID, author, epoch, and timestamp. +pub fn get_random_block_info() -> (HashValue, AccountAddress, u64, u64) { + let block_id = HashValue::random(); + let block_author = AccountAddress::random(); + let block_epoch = utils::get_random_u64(); + let block_timestamp = utils::get_random_u64(); + (block_id, block_author, block_epoch, block_timestamp) +} + +/// Generates and returns a random number (u64) +pub fn get_random_u64() -> u64 { + OsRng.gen() +} + +/// Returns the script argument address of the given transaction +pub fn get_script_argument_address(txn: &SignedTransaction) -> AccountAddress { + match txn.payload() { + TransactionPayload::Script(script) => get_address_argument(script), + TransactionPayload::Payload(TransactionPayloadInner::V1 { + executable: TransactionExecutable::Script(script), + .. 
+ }) => get_address_argument(script), + payload => panic!("Unexpected transaction payload: {:?}", payload), + } +} diff --git a/crates/aptos-transaction-filters/src/transaction_filter.rs b/crates/aptos-transaction-filters/src/transaction_filter.rs new file mode 100644 index 0000000000000..5dce17f9f7865 --- /dev/null +++ b/crates/aptos-transaction-filters/src/transaction_filter.rs @@ -0,0 +1,943 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_crypto::{ed25519::Ed25519PublicKey, HashValue}; +use aptos_types::transaction::{ + authenticator::{AccountAuthenticator, AnyPublicKey, TransactionAuthenticator}, + EntryFunction, MultisigTransactionPayload, Script, SignedTransaction, TransactionExecutableRef, + TransactionExtraConfig, TransactionPayload, TransactionPayloadInner, +}; +use move_core_types::{account_address::AccountAddress, transaction_argument::TransactionArgument}; +use serde::{Deserialize, Serialize}; + +/// A transaction filter that applies a set of rules to determine +/// if a transaction should be allowed or denied. +/// +/// Rules are applied in the order they are defined, and the first +/// matching rule determines the outcome for the transaction. +/// If no rules match, the transaction is allowed by default. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct TransactionFilter { + transaction_rules: Vec<TransactionRule>, +} + +impl TransactionFilter { + pub fn new(transaction_rules: Vec<TransactionRule>) -> Self { + Self { transaction_rules } + } + + /// Returns true iff the filter allows the transaction + pub fn allows_transaction(&self, signed_transaction: &SignedTransaction) -> bool { + // If the filter is empty, allow the transaction by default + if self.is_empty() { + return true; + } + + // Check if any rule matches the transaction + for transaction_rule in &self.transaction_rules { + if transaction_rule.matches(signed_transaction) { + return match transaction_rule { + TransactionRule::Allow(_) => true, + TransactionRule::Deny(_) => false, + }; + } + } + + true // No rules match (allow the transaction by default) + } + + /// Returns an empty transaction filter with no rules + pub fn empty() -> Self { + Self { + transaction_rules: Vec::new(), + } + } + + /// Filters the given transactions and returns only those that are allowed + pub fn filter_transactions( + &self, + transactions: Vec<SignedTransaction>, + ) -> Vec<SignedTransaction> { + transactions + .into_iter() + .filter(|txn| self.allows_transaction(txn)) + .collect() + } + + /// Returns true iff the filter is empty (i.e., has no rules) + pub fn is_empty(&self) -> bool { + self.transaction_rules.is_empty() + } +} + +// These are useful test-only methods for creating and testing filters +#[cfg(any(test, feature = "fuzzing"))] +impl TransactionFilter { + /// Adds an account address matcher to the filter + pub fn add_account_address_filter(self, allow: bool, account_address: AccountAddress) -> Self { + let transaction_matcher = TransactionMatcher::AccountAddress(account_address); + self.add_multiple_matchers_filter(allow, vec![transaction_matcher]) + } + + /// Adds an all matcher to the filter (matching all transactions) + pub fn add_all_filter(self, allow: bool) -> Self { + let transaction_matcher = TransactionMatcher::All; + self.add_multiple_matchers_filter(allow, vec![transaction_matcher]) + } + + /// Adds an entry function matcher to the filter + pub fn add_entry_function_filter( + self, + allow: bool, + address: AccountAddress, + module_name: String, + function: String, + ) -> Self { + let transaction_matcher = 
TransactionMatcher::EntryFunction(address, module_name, function); + self.add_multiple_matchers_filter(allow, vec![transaction_matcher]) + } + + /// Adds a module address matcher to the filter + pub fn add_module_address_filter(self, allow: bool, address: AccountAddress) -> Self { + let transaction_matcher = TransactionMatcher::ModuleAddress(address); + self.add_multiple_matchers_filter(allow, vec![transaction_matcher]) + } + + /// Adds a filter rule containing multiple matchers + pub fn add_multiple_matchers_filter( + mut self, + allow: bool, + transaction_matchers: Vec<TransactionMatcher>, + ) -> Self { + let transaction_rule = if allow { + TransactionRule::Allow(transaction_matchers) + } else { + TransactionRule::Deny(transaction_matchers) + }; + self.transaction_rules.push(transaction_rule); + + self + } + + /// Adds a public key matcher to the filter + pub fn add_public_key_filter(self, allow: bool, public_key: AnyPublicKey) -> Self { + let transaction_matcher = TransactionMatcher::PublicKey(public_key); + self.add_multiple_matchers_filter(allow, vec![transaction_matcher]) + } + + /// Adds a sender address matcher to the filter + pub fn add_sender_filter(self, allow: bool, sender: AccountAddress) -> Self { + let transaction_matcher = TransactionMatcher::Sender(sender); + self.add_multiple_matchers_filter(allow, vec![transaction_matcher]) + } + + /// Adds a transaction ID matcher to the filter + pub fn add_transaction_id_filter(self, allow: bool, txn_id: HashValue) -> Self { + let transaction_matcher = TransactionMatcher::TransactionId(txn_id); + self.add_multiple_matchers_filter(allow, vec![transaction_matcher]) + } +} + +/// A transaction rule that defines whether to allow or deny transactions +/// based on a set of matchers. All matchers must match for the rule to apply. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum TransactionRule { + Allow(Vec<TransactionMatcher>), + Deny(Vec<TransactionMatcher>), +} + +impl TransactionRule { + /// Returns true iff the rule matches the given transaction. This + /// requires that all matchers in the rule match the transaction. 
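+ /// For example (with `addr` standing in for some account address), a rule like + /// `TransactionRule::Deny(vec![Sender(addr), ModuleAddress(addr)])` only applies + /// to transactions that are sent by `addr` AND call a module published at `addr`; + /// a transaction matching just one of the two matchers is not affected by the rule.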
+ fn matches(&self, signed_transaction: &SignedTransaction) -> bool { + let transaction_matchers = match self { + TransactionRule::Allow(matchers) => matchers, + TransactionRule::Deny(matchers) => matchers, + }; + transaction_matchers + .iter() + .all(|matcher| matcher.matches(signed_transaction)) + } +} + +/// A matcher that defines the criteria for matching transactions +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum TransactionMatcher { + All, // Matches any transaction + TransactionId(HashValue), // Matches a specific transaction by its ID + Sender(AccountAddress), // Matches any transaction sent by a specific account address + ModuleAddress(AccountAddress), // Matches any transaction that calls a module at a specific address + EntryFunction(AccountAddress, String, String), // Matches any transaction that calls a specific entry function in a module + AccountAddress(AccountAddress), // Matches any transaction that involves a specific account address + PublicKey(AnyPublicKey), // Matches any transaction that involves a specific public key +} + +impl TransactionMatcher { + /// Returns true iff the matcher matches the given transaction + pub(crate) fn matches(&self, signed_transaction: &SignedTransaction) -> bool { + match self { + TransactionMatcher::All => true, + TransactionMatcher::TransactionId(id) => signed_transaction.committed_hash() == *id, + TransactionMatcher::Sender(sender) => { + matches_sender_address(signed_transaction, sender) + }, + TransactionMatcher::ModuleAddress(address) => { + matches_entry_function_module_address(signed_transaction, address) + }, + TransactionMatcher::EntryFunction(address, module_name, function) => { + matches_entry_function(signed_transaction, address, module_name, function) + }, + TransactionMatcher::AccountAddress(address) => { + matches_sender_address(signed_transaction, address) + || matches_entry_function_module_address(signed_transaction, address) + || matches_multisig_address(signed_transaction, address) + || matches_script_argument_address(signed_transaction, address) + || matches_transaction_authenticator_address(signed_transaction, address) + }, + TransactionMatcher::PublicKey(public_key) => { + matches_transaction_authenticator_public_key(signed_transaction, public_key) + }, + } + } +} + +/// Returns true iff the Ed25519 public key matches the given AnyPublicKey +fn compare_ed25519_public_key( + ed25519_public_key: &Ed25519PublicKey, + any_public_key: &AnyPublicKey, +) -> bool { + if let AnyPublicKey::Ed25519 { public_key } = any_public_key { + ed25519_public_key == public_key + } else { + false + } +} + +/// Returns true iff the entry function's module address, name, and function name +/// match the given account address, module name, and function name. 
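+/// For example, a transaction invoking `0x1::coin::transfer` (an illustrative entry +/// function) matches only when the address is `0x1`, the module name is "coin", and +/// the function name is "transfer"; a mismatch in any single component yields false.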
+fn compare_entry_function( + entry_function: &EntryFunction, + address: &AccountAddress, + module_name: &String, + function_name: &String, +) -> bool { + entry_function.module().address() == address + && entry_function.module().name().to_string() == *module_name + && entry_function.function().to_string() == *function_name +} + +/// Returns true iff the entry function's module address matches the given account address +fn compare_entry_function_module_address( + entry_function: &EntryFunction, + address: &AccountAddress, +) -> bool { + entry_function.module().address() == address +} + +/// Returns true iff the script's arguments contain the given account address +fn compare_script_argument_address(script: &Script, address: &AccountAddress) -> bool { + script.args().iter().any(|transaction_argument| { + if let TransactionArgument::Address(argument_address) = transaction_argument { + argument_address == address + } else { + false + } + }) +} + +/// Returns true iff the account authenticator contains the given account address +fn matches_account_authenticator_address( + account_authenticator: &AccountAuthenticator, + address: &AccountAddress, +) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match account_authenticator { + AccountAuthenticator::Ed25519 { .. } + | AccountAuthenticator::MultiEd25519 { .. } + | AccountAuthenticator::NoAccountAuthenticator => false, + AccountAuthenticator::SingleKey { authenticator } => { + matches_any_public_key_address(authenticator.public_key(), address) + }, + AccountAuthenticator::MultiKey { authenticator } => authenticator + .public_keys() + .public_keys() + .iter() + .any(|any_public_key| matches_any_public_key_address(any_public_key, address)), + AccountAuthenticator::Abstraction { function_info, .. } => { + function_info.module_address == *address + }, + } +} + +/// Returns true iff the account authenticator contains the given public key +fn matches_account_authenticator_public_key( + account_authenticator: &AccountAuthenticator, + any_public_key: &AnyPublicKey, +) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match account_authenticator { + AccountAuthenticator::NoAccountAuthenticator | AccountAuthenticator::Abstraction { .. } => { + false + }, + AccountAuthenticator::Ed25519 { public_key, .. } => { + compare_ed25519_public_key(public_key, any_public_key) + }, + AccountAuthenticator::MultiEd25519 { public_key, .. } => { + public_key.public_keys().iter().any(|ed25519_public_key| { + compare_ed25519_public_key(ed25519_public_key, any_public_key) + }) + }, + AccountAuthenticator::SingleKey { authenticator } => { + authenticator.public_key() == any_public_key + }, + AccountAuthenticator::MultiKey { authenticator } => authenticator + .public_keys() + .public_keys() + .iter() + .any(|key| key == any_public_key), + } +} + +/// Returns true iff the public key contains the given account address +fn matches_any_public_key_address(any_public_key: &AnyPublicKey, address: &AccountAddress) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match any_public_key { + AnyPublicKey::Ed25519 { .. } + | AnyPublicKey::Secp256k1Ecdsa { .. } + | AnyPublicKey::Secp256r1Ecdsa { .. } + | AnyPublicKey::Keyless { .. 
} => false, + AnyPublicKey::FederatedKeyless { public_key } => { + // Check if the public key's JWK address matches the given address + public_key.jwk_addr == *address + }, + } +} + +/// Returns true iff the transaction's entry function matches the given account address, module name, and function name +fn matches_entry_function( + signed_transaction: &SignedTransaction, + address: &AccountAddress, + module_name: &String, + function: &String, +) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match signed_transaction.payload() { + TransactionPayload::Script(_) | TransactionPayload::ModuleBundle(_) => false, + TransactionPayload::Multisig(multisig) => multisig + .transaction_payload + .as_ref() + .map(|payload| match payload { + MultisigTransactionPayload::EntryFunction(entry_function) => { + compare_entry_function(entry_function, address, module_name, function) + }, + }) + .unwrap_or(false), + TransactionPayload::EntryFunction(entry_function) => { + compare_entry_function(entry_function, address, module_name, function) + }, + TransactionPayload::Payload(TransactionPayloadInner::V1 { executable, .. }) => { + match executable.as_ref() { + TransactionExecutableRef::Script(_) | TransactionExecutableRef::Empty => false, + TransactionExecutableRef::EntryFunction(entry_function) => { + compare_entry_function(entry_function, address, module_name, function) + }, + } + }, + } +} + +/// Returns true iff the transaction's module address matches the given account address +fn matches_entry_function_module_address( + signed_transaction: &SignedTransaction, + module_address: &AccountAddress, +) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match signed_transaction.payload() { + TransactionPayload::Script(_) | TransactionPayload::ModuleBundle(_) => false, + TransactionPayload::Multisig(multisig) => multisig + .transaction_payload + .as_ref() + .map(|payload| match payload { + MultisigTransactionPayload::EntryFunction(entry_function) => { + compare_entry_function_module_address(entry_function, module_address) + }, + }) + .unwrap_or(false), + TransactionPayload::EntryFunction(entry_function) => { + compare_entry_function_module_address(entry_function, module_address) + }, + TransactionPayload::Payload(TransactionPayloadInner::V1 { executable, .. }) => { + match executable.as_ref() { + TransactionExecutableRef::Script(_) | TransactionExecutableRef::Empty => false, + TransactionExecutableRef::EntryFunction(entry_function) => { + compare_entry_function_module_address(entry_function, module_address) + }, + } + }, + } +} + +/// Returns true iff the transaction's multisig address matches the given account address +fn matches_multisig_address( + signed_transaction: &SignedTransaction, + address: &AccountAddress, +) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match signed_transaction.payload() { + TransactionPayload::EntryFunction(_) + | TransactionPayload::Script(_) + | TransactionPayload::ModuleBundle(_) => false, + TransactionPayload::Multisig(multisig) => multisig.multisig_address == *address, + TransactionPayload::Payload(TransactionPayloadInner::V1 { extra_config, .. }) => { + match extra_config { + TransactionExtraConfig::V1 { + multisig_address, .. 
+ } => multisig_address + .map(|multisig_address| multisig_address == *address) + .unwrap_or(false), + } + }, + } +} + +/// Returns true iff a script argument matches the given account address +fn matches_script_argument_address( + signed_transaction: &SignedTransaction, + address: &AccountAddress, +) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match signed_transaction.payload() { + TransactionPayload::EntryFunction(_) + | TransactionPayload::Multisig(_) + | TransactionPayload::ModuleBundle(_) => false, + TransactionPayload::Script(script) => compare_script_argument_address(script, address), + TransactionPayload::Payload(TransactionPayloadInner::V1 { executable, .. }) => { + match executable.as_ref() { + TransactionExecutableRef::EntryFunction(_) | TransactionExecutableRef::Empty => { + false + }, + TransactionExecutableRef::Script(script) => { + compare_script_argument_address(script, address) + }, + } + }, + } +} + +/// Returns true iff the transaction's sender matches the given account address +fn matches_sender_address(signed_transaction: &SignedTransaction, sender: &AccountAddress) -> bool { + signed_transaction.sender() == *sender +} + +/// Returns true iff the transaction's authenticator contains the given account address +fn matches_transaction_authenticator_address( + signed_transaction: &SignedTransaction, + address: &AccountAddress, +) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match signed_transaction.authenticator_ref() { + TransactionAuthenticator::Ed25519 { .. } + | TransactionAuthenticator::MultiEd25519 { .. } => false, + TransactionAuthenticator::MultiAgent { + sender, + secondary_signer_addresses, + secondary_signers, + } => { + matches_account_authenticator_address(sender, address) + || secondary_signer_addresses.contains(address) + || secondary_signers + .iter() + .any(|signer| matches_account_authenticator_address(signer, address)) + }, + TransactionAuthenticator::FeePayer { + sender, + secondary_signer_addresses, + secondary_signers, + fee_payer_address, + fee_payer_signer, + } => { + matches_account_authenticator_address(sender, address) + || secondary_signer_addresses.contains(address) + || secondary_signers + .iter() + .any(|signer| matches_account_authenticator_address(signer, address)) + || fee_payer_address == address + || matches_account_authenticator_address(fee_payer_signer, address) + }, + TransactionAuthenticator::SingleSender { sender } => { + matches_account_authenticator_address(sender, address) + }, + } +} + +/// Returns true iff the transaction's authenticator contains the given public key +fn matches_transaction_authenticator_public_key( + signed_transaction: &SignedTransaction, + any_public_key: &AnyPublicKey, +) -> bool { + // Match all variants explicitly to ensure future enum changes are caught during compilation + match signed_transaction.authenticator_ref() { + TransactionAuthenticator::Ed25519 { public_key, .. } => { + compare_ed25519_public_key(public_key, any_public_key) + }, + TransactionAuthenticator::MultiEd25519 { public_key, .. } => { + public_key.public_keys().iter().any(|ed25519_public_key| { + compare_ed25519_public_key(ed25519_public_key, any_public_key) + }) + }, + TransactionAuthenticator::MultiAgent { + sender, + secondary_signers, + .. 
+ } => { + matches_account_authenticator_public_key(sender, any_public_key) + || secondary_signers + .iter() + .any(|signer| matches_account_authenticator_public_key(signer, any_public_key)) + }, + TransactionAuthenticator::FeePayer { + sender, + secondary_signers, + fee_payer_signer, + .. + } => { + matches_account_authenticator_public_key(sender, any_public_key) + || secondary_signers + .iter() + .any(|signer| matches_account_authenticator_public_key(signer, any_public_key)) + || matches_account_authenticator_public_key(fee_payer_signer, any_public_key) + }, + TransactionAuthenticator::SingleSender { sender } => { + matches_account_authenticator_public_key(sender, any_public_key) + }, + } +} + +#[cfg(test)] +mod test { + use super::*; + use aptos_crypto::{ + ed25519::Ed25519PrivateKey, + multi_ed25519::{MultiEd25519PublicKey, MultiEd25519Signature}, + secp256k1_ecdsa, secp256r1_ecdsa, PrivateKey, SigningKey, Uniform, + }; + use aptos_types::{ + chain_id::ChainId, + function_info::FunctionInfo, + keyless::test_utils::get_sample_groth16_sig_and_pk, + transaction::{ + authenticator::{AnySignature, SingleKeyAuthenticator}, + RawTransaction, + }, + }; + use rand::thread_rng; + + #[test] + fn test_matches_account_authenticator_address() { + // Create an empty account authenticator + let account_authenticator = AccountAuthenticator::NoAccountAuthenticator; + + // Verify that the authenticator doesn't match the target address + let target_address = AccountAddress::random(); + verify_matches_account_auth_address(&account_authenticator, &target_address, false); + + // Create an Ed25519 account authenticator + let raw_transaction = create_raw_transaction(); + let private_key = Ed25519PrivateKey::generate_for_testing(); + let public_key = private_key.public_key(); + let signature = private_key.sign(&raw_transaction).unwrap(); + let account_authenticator = AccountAuthenticator::Ed25519 { + public_key: public_key.clone(), + signature: signature.clone(), + }; + + // Verify that the authenticator doesn't match the target address + verify_matches_account_auth_address(&account_authenticator, &target_address, false); + + // Create a MultiEd25519 account authenticator + let multi_public_key = MultiEd25519PublicKey::new(vec![public_key], 1).unwrap(); + let multi_signature = MultiEd25519Signature::from(signature); + let account_authenticator = AccountAuthenticator::MultiEd25519 { + public_key: multi_public_key, + signature: multi_signature, + }; + + // Verify that the authenticator doesn't match the target address + verify_matches_account_auth_address(&account_authenticator, &target_address, false); + + // Create an Abstraction account authenticator (with the target address as the module address) + let function_info = FunctionInfo::new(target_address, "".into(), "".into()); + let account_authenticator = + AccountAuthenticator::abstraction(function_info, vec![], vec![]); + + // Verify that the authenticator matches the target address + verify_matches_account_auth_address(&account_authenticator, &target_address, true); + } + + #[test] + fn test_matches_any_public_key_address() { + // Create an Ed25519 public key + let private_key = Ed25519PrivateKey::generate_for_testing(); + let public_key = AnyPublicKey::ed25519(private_key.public_key()); + + // Verify that the public key doesn't match the target address + let target_address = AccountAddress::random(); + verify_matches_public_key_address(&public_key, &target_address, false); + + // Create a Secp256k1Ecdsa public key + let private_key = 
secp256k1_ecdsa::PrivateKey::generate_for_testing(); + let public_key = AnyPublicKey::secp256k1_ecdsa(private_key.public_key()); + + // Verify that the public key doesn't match the target address + verify_matches_public_key_address(&public_key, &target_address, false); + + // Create a Secp256r1Ecdsa public key + let private_key = secp256r1_ecdsa::PrivateKey::generate_for_testing(); + let public_key = AnyPublicKey::secp256r1_ecdsa(private_key.public_key()); + + // Verify that the public key doesn't match the target address + verify_matches_public_key_address(&public_key, &target_address, false); + + // Create a Keyless public key + let (_, keyless_public_key) = get_sample_groth16_sig_and_pk(); + let public_key = AnyPublicKey::keyless(keyless_public_key.clone()); + + // Verify that the public key doesn't match the target address + verify_matches_public_key_address(&public_key, &target_address, false); + + // Create a FederatedKeyless public key with the target address as the JWK address + let federated_keyless_public_key = aptos_types::keyless::FederatedKeylessPublicKey { + jwk_addr: target_address, + pk: keyless_public_key, + }; + let public_key = AnyPublicKey::federated_keyless(federated_keyless_public_key); + + // Verify that the public key matches the target address + verify_matches_public_key_address(&public_key, &target_address, true); + } + + #[test] + fn test_matches_transaction_authenticator_address() { + // Create an Ed25519 transaction authenticator + let raw_transaction = create_raw_transaction(); + let private_key = Ed25519PrivateKey::generate_for_testing(); + let signature = private_key.sign(&raw_transaction).unwrap(); + let signed_transaction = SignedTransaction::new( + raw_transaction.clone(), + private_key.public_key(), + signature.clone(), + ); + + // Verify that the authenticator doesn't match the target address + let target_address = AccountAddress::random(); + verify_matches_transaction_auth_address(&signed_transaction, &target_address, false); + + // Create a MultiEd25519 transaction authenticator + let multi_public_key = + MultiEd25519PublicKey::new(vec![private_key.public_key()], 1).unwrap(); + let multi_signature = MultiEd25519Signature::from(signature); + let signed_transaction = SignedTransaction::new_multisig( + raw_transaction.clone(), + multi_public_key, + multi_signature, + ); + + // Verify that the authenticator doesn't match the target address + verify_matches_transaction_auth_address(&signed_transaction, &target_address, false); + + // Create a multi-agent transaction authenticator with the target secondary signer + let signed_transaction = SignedTransaction::new_multi_agent( + raw_transaction.clone(), + AccountAuthenticator::NoAccountAuthenticator, + vec![ + AccountAddress::random(), + target_address, + AccountAddress::random(), + ], + vec![AccountAuthenticator::NoAccountAuthenticator], + ); + + // Verify that the authenticator matches the target address + verify_matches_transaction_auth_address(&signed_transaction, &target_address, true); + + // Create a fee payer transaction authenticator + let fee_payer_address = AccountAddress::random(); + let secondary_signer_address = AccountAddress::random(); + let signed_transaction = SignedTransaction::new_fee_payer( + raw_transaction.clone(), + AccountAuthenticator::NoAccountAuthenticator, + vec![secondary_signer_address], + vec![AccountAuthenticator::NoAccountAuthenticator], + fee_payer_address, + AccountAuthenticator::NoAccountAuthenticator, + ); + + // Verify that the authenticator matches the fee payer and 
secondary signer addresses + for address in [&fee_payer_address, &secondary_signer_address] { + verify_matches_transaction_auth_address(&signed_transaction, address, true); + } + + // Verify that the authenticator doesn't match the target address + verify_matches_transaction_auth_address(&signed_transaction, &target_address, false); + } + + #[test] + fn test_matches_account_authenticator_public_key() { + // Create an empty account authenticator + let account_authenticator = AccountAuthenticator::NoAccountAuthenticator; + + // Verify that the authenticator doesn't match the public key + let private_key_1 = get_random_private_key(); + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(private_key_1.public_key()), + false, + ); + + // Create an abstraction account authenticator + let function_info = FunctionInfo::new( + AccountAddress::random(), + "test_module".to_string(), + "test_function".to_string(), + ); + let account_authenticator = + AccountAuthenticator::abstraction(function_info, vec![], vec![]); + + // Verify that the authenticator doesn't match the public key + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(private_key_1.public_key()), + false, + ); + + // Create an Ed25519 account authenticator (using the private key) + let account_authenticator = AccountAuthenticator::Ed25519 { + public_key: private_key_1.public_key(), + signature: private_key_1.sign(&create_raw_transaction()).unwrap(), + }; + + // Verify that the authenticator matches the expected public key + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(private_key_1.public_key()), + true, + ); + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(get_random_private_key().public_key()), + false, + ); + + // Create a MultiEd25519 account authenticator + let private_key_2 = get_random_private_key(); + let multi_public_key = MultiEd25519PublicKey::new( + vec![private_key_1.public_key(), private_key_2.public_key()], + 1, + ) + .unwrap(); + let multi_signature = + MultiEd25519Signature::from(private_key_1.sign(&create_raw_transaction()).unwrap()); + let account_authenticator = AccountAuthenticator::MultiEd25519 { + public_key: multi_public_key, + signature: multi_signature, + }; + + // Verify that the authenticator matches the expected public key + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(private_key_1.public_key()), + true, + ); + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(private_key_2.public_key()), + true, + ); + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(get_random_private_key().public_key()), + false, + ); + + // Create a SingleKey account authenticator + let account_authenticator = AccountAuthenticator::SingleKey { + authenticator: SingleKeyAuthenticator::new( + AnyPublicKey::Ed25519 { + public_key: private_key_1.public_key(), + }, + AnySignature::Ed25519 { + signature: private_key_1.sign(&create_raw_transaction()).unwrap(), + }, + ), + }; + + // Verify that the authenticator matches the expected public key + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(private_key_1.public_key()), + true, + ); + verify_matches_account_auth_public_key( + &account_authenticator, + &AnyPublicKey::ed25519(private_key_2.public_key()), + false, + ); + } + + #[test] + fn 
test_matches_transaction_authenticator_public_key() { + // Create an Ed25519 transaction authenticator + let raw_transaction = create_raw_transaction(); + let private_key_1 = Ed25519PrivateKey::generate_for_testing(); + let signature = private_key_1.sign(&raw_transaction).unwrap(); + let signed_transaction = SignedTransaction::new( + raw_transaction.clone(), + private_key_1.public_key(), + signature.clone(), + ); + + // Verify that the authenticator matches the expected public key + let private_key_2 = get_random_private_key(); + verify_matches_transaction_auth_public_key( + &signed_transaction, + &AnyPublicKey::ed25519(private_key_1.public_key()), + true, + ); + verify_matches_transaction_auth_public_key( + &signed_transaction, + &AnyPublicKey::ed25519(private_key_2.public_key()), + false, + ); + + // Create a MultiEd25519 transaction authenticator + let multi_public_key = MultiEd25519PublicKey::new( + vec![private_key_1.public_key(), private_key_2.public_key()], + 1, + ) + .unwrap(); + let multi_signature = MultiEd25519Signature::from(signature.clone()); + let signed_transaction = SignedTransaction::new_multisig( + raw_transaction.clone(), + multi_public_key, + multi_signature, + ); + + // Verify that the authenticator matches the expected public key + verify_matches_transaction_auth_public_key( + &signed_transaction, + &AnyPublicKey::ed25519(private_key_1.public_key()), + true, + ); + verify_matches_transaction_auth_public_key( + &signed_transaction, + &AnyPublicKey::ed25519(private_key_2.public_key()), + true, + ); + verify_matches_transaction_auth_public_key( + &signed_transaction, + &AnyPublicKey::ed25519(get_random_private_key().public_key()), + false, + ); + + // Create a multi-agent transaction authenticator + let signed_transaction = SignedTransaction::new_multi_agent( + raw_transaction.clone(), + AccountAuthenticator::Ed25519 { + public_key: private_key_1.public_key(), + signature: signature.clone(), + }, + vec![], + vec![AccountAuthenticator::Ed25519 { + public_key: private_key_2.public_key(), + signature: signature.clone(), + }], + ); + + // Verify that the authenticator matches the expected public key + verify_matches_transaction_auth_public_key( + &signed_transaction, + &AnyPublicKey::ed25519(private_key_1.public_key()), + true, + ); + verify_matches_transaction_auth_public_key( + &signed_transaction, + &AnyPublicKey::ed25519(private_key_2.public_key()), + true, + ); + verify_matches_transaction_auth_public_key( + &signed_transaction, + &AnyPublicKey::ed25519(get_random_private_key().public_key()), + false, + ); + } + + /// Creates and returns a raw transaction + fn create_raw_transaction() -> RawTransaction { + RawTransaction::new( + AccountAddress::random(), + 0, + TransactionPayload::Script(Script::new(vec![], vec![], vec![])), + 0, + 0, + 0, + ChainId::new(10), + ) + } + + /// Generates and returns a random Ed25519 private key + fn get_random_private_key() -> Ed25519PrivateKey { + Ed25519PrivateKey::generate(&mut thread_rng()) + } + + /// Verifies that the given account authenticator contains the expected address + fn verify_matches_account_auth_address( + account_authenticator: &AccountAuthenticator, + address: &AccountAddress, + matches: bool, + ) { + let result = matches_account_authenticator_address(account_authenticator, address); + assert_eq!(matches, result); + } + + /// Verifies that the given account authenticator contains the expected public key + fn verify_matches_account_auth_public_key( + account_authenticator: &AccountAuthenticator, + any_public_key: 
&AnyPublicKey, + matches: bool, + ) { + let result = + matches_account_authenticator_public_key(account_authenticator, any_public_key); + assert_eq!(matches, result); + } + + /// Verifies that the given public key contains the target address + fn verify_matches_public_key_address( + any_public_key: &AnyPublicKey, + address: &AccountAddress, + matches: bool, + ) { + let result = matches_any_public_key_address(any_public_key, address); + assert_eq!(matches, result); + } + + /// Verifies that the given transaction authenticator contains the expected address + fn verify_matches_transaction_auth_address( + signed_transaction: &SignedTransaction, + address: &AccountAddress, + matches: bool, + ) { + let result = matches_transaction_authenticator_address(signed_transaction, address); + assert_eq!(matches, result); + } + + /// Verifies that the given transaction authenticator contains the expected public key + fn verify_matches_transaction_auth_public_key( + signed_transaction: &SignedTransaction, + any_public_key: &AnyPublicKey, + matches: bool, + ) { + let result = + matches_transaction_authenticator_public_key(signed_transaction, any_public_key); + assert_eq!(matches, result); + } +} diff --git a/crates/aptos/CHANGELOG.md b/crates/aptos/CHANGELOG.md index a9a64f7e1e14b..d51e55bcc2c7f 100644 --- a/crates/aptos/CHANGELOG.md +++ b/crates/aptos/CHANGELOG.md @@ -3,7 +3,13 @@ All notable changes to the Aptos CLI will be captured in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). # Unreleased + +## [7.6.0] +- Sets up confidential assets for localnet under the experimental address 0x7 + +## [7.5.0] - Fix auto-update CLI command to work with more OSes, including macOS and Linux on ARM +- Update localnet indexer processors ## [7.4.0] - UTF-8 characters are now allowed in Move source code comments (and thus error codes).
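Note on the payload and authenticator matcher helpers added above (matches_entry_function, matches_multisig_address, and friends): each match deliberately lists every variant and repeats the comment "Match all variants explicitly to ensure future enum changes are caught during compilation". The payoff is that adding a new TransactionPayload or TransactionAuthenticator variant becomes a compile error at every filter site instead of silently falling through a wildcard arm. A minimal, self-contained sketch of the pattern; the Payload enum and is_entry_function below are hypothetical stand-ins, not the aptos-types definitions:

// Hypothetical stand-in for an enum that gains variants over time.
enum Payload {
    Script(Vec<u8>),
    EntryFunction(String),
    // Adding a new variant here (say, Multisig) makes is_entry_function
    // below fail to compile until the new variant is handled explicitly.
}

fn is_entry_function(payload: &Payload) -> bool {
    match payload {
        Payload::Script(_) => false,
        Payload::EntryFunction(_) => true,
        // Deliberately no catch-all `_ => false` arm: exhaustiveness
        // checking is the safety net against new variants.
    }
}

fn main() {
    assert!(is_entry_function(&Payload::EntryFunction("0x1::coin::transfer".into())));
    assert!(!is_entry_function(&Payload::Script(vec![])));
}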
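Note on the to_string() to to_canonical_string() migration in the hunks below (account/balance.rs, account/list.rs, and the indexer coin models and processors): Display-style rendering of a Move type tag can shorten the address part, while the canonical form pins it to a single stable spelling, so writers and readers such as CoinInfoQuery::get_by_coin_type agree on one lookup key. The exact canonical rendering is owned by move-core-types; the sketch below only mimics the normalization idea with hypothetical helpers (display_style, canonical_style) that pad addresses to full width:

// Hypothetical illustration of why one canonical type-tag string matters.
fn display_style(addr: &str, module: &str, name: &str) -> String {
    // Display-like rendering keeps whatever address width the caller used.
    format!("{addr}::{module}::{name}") // e.g. "0x1::aptos_coin::AptosCoin"
}

fn canonical_style(addr: &str, module: &str, name: &str) -> String {
    // Canonical-like rendering pads the address to 64 hex chars so that
    // "0x1" and "0x01" collapse to the same key.
    let hex = addr.trim_start_matches("0x");
    format!("0x{:0>64}::{}::{}", hex, module, name)
}

fn main() {
    // The two renderings differ, which is exactly why mixing them breaks
    // string-keyed lookups.
    assert_ne!(
        display_style("0x1", "aptos_coin", "AptosCoin"),
        canonical_style("0x1", "aptos_coin", "AptosCoin")
    );
    // Two spellings of the same address normalize to one canonical key.
    assert_eq!(
        canonical_style("0x1", "aptos_coin", "AptosCoin"),
        canonical_style("0x01", "aptos_coin", "AptosCoin")
    );
}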
diff --git a/crates/aptos/Cargo.toml b/crates/aptos/Cargo.toml index 53028b874e660..c8ecac3581dbe 100644 --- a/crates/aptos/Cargo.toml +++ b/crates/aptos/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "aptos" description = "Aptos tool for management of nodes and interacting with the blockchain" -version = "7.4.0" +version = "7.6.0" # Workspace inherited keys authors = { workspace = true } diff --git a/crates/aptos/src/account/balance.rs b/crates/aptos/src/account/balance.rs index 748f8e4c26cf7..65eaf15c1aef7 100644 --- a/crates/aptos/src/account/balance.rs +++ b/crates/aptos/src/account/balance.rs @@ -107,7 +107,7 @@ impl Balance { Ok(vec![AccountBalance { asset_type: "coin".to_string(), - coin_type: Some(coin_type.to_string()), + coin_type: Some(coin_type.to_canonical_string()), balance, }]) } diff --git a/crates/aptos/src/account/list.rs b/crates/aptos/src/account/list.rs index 3e76c7b7bc36e..45647e75d6645 100644 --- a/crates/aptos/src/account/list.rs +++ b/crates/aptos/src/account/list.rs @@ -107,7 +107,7 @@ impl CliCommand> for ListAccount { .into_iter() .map(|resource| { let mut map = serde_json::Map::new(); - map.insert(resource.resource_type.to_string(), resource.data); + map.insert(resource.resource_type.to_canonical_string(), resource.data); serde_json::Value::Object(map) }) .collect::>(), diff --git a/crates/indexer/src/models/coin_models/coin_activities.rs b/crates/indexer/src/models/coin_models/coin_activities.rs index cb51cb6d3d08c..d756412c480d0 100644 --- a/crates/indexer/src/models/coin_models/coin_activities.rs +++ b/crates/indexer/src/models/coin_models/coin_activities.rs @@ -267,7 +267,7 @@ impl CoinActivity { event_creation_number: BURN_GAS_EVENT_CREATION_NUM, event_sequence_number: user_transaction_request.sequence_number.0 as i64, owner_address: standardize_address(&user_transaction_request.sender.to_string()), - coin_type: AptosCoinType::type_tag().to_string(), + coin_type: AptosCoinType::type_tag().to_canonical_string(), amount: aptos_coin_burned, activity_type: GAS_FEE_EVENT.to_string(), is_gas_fee: true, diff --git a/crates/indexer/src/processors/coin_processor.rs b/crates/indexer/src/processors/coin_processor.rs index 45bb1e3420883..04b0196499c8a 100644 --- a/crates/indexer/src/processors/coin_processor.rs +++ b/crates/indexer/src/processors/coin_processor.rs @@ -279,9 +279,11 @@ impl TransactionProcessor for CoinTransactionProcessor { let mut conn = self.get_conn(); // get aptos_coin info for supply tracking // TODO: This only needs to be fetched once. 
Need to persist somehow - let maybe_aptos_coin_info = - &CoinInfoQuery::get_by_coin_type(AptosCoinType::type_tag().to_string(), &mut conn) - .unwrap(); + let maybe_aptos_coin_info = &CoinInfoQuery::get_by_coin_type( + AptosCoinType::type_tag().to_canonical_string(), + &mut conn, + ) + .unwrap(); let mut all_coin_activities = vec![]; let mut all_coin_balances = vec![]; diff --git a/crates/transaction-workloads-lib/src/move_workloads.rs b/crates/transaction-workloads-lib/src/move_workloads.rs index 93003618d6fa7..0db9a63f63b60 100644 --- a/crates/transaction-workloads-lib/src/move_workloads.rs +++ b/crates/transaction-workloads-lib/src/move_workloads.rs @@ -844,15 +844,15 @@ impl EntryPointTrait for EntryPoints { let rng: &mut StdRng = rng.expect("Must provide RNG"); let price_range = 1000000; - let is_buy = rng.gen_bool(*buy_frequency); - let size = rng.gen_range(1, 1 + if is_buy { max_buy_size } else { max_sell_size }); - let price = if is_buy { + let is_bid = rng.gen_bool(*buy_frequency); + let size = rng.gen_range(1, 1 + if is_bid { max_buy_size } else { max_sell_size }); + let price = if is_bid { 0 } else { (price_range as f64 * (1.0 - *overlap_ratio)) as u64 } + rng.gen_range(0, price_range); - // (account_order_id: u64, bid_price: u64, volume: u64, is_buy: bool) + // (account_order_id: u64, bid_price: u64, volume: u64, is_bid: bool) get_payload(module_id, ident_str!("place_order").to_owned(), vec![ bcs::to_bytes(&AccountAddress::random()).unwrap(), bcs::to_bytes( @@ -863,7 +863,7 @@ impl EntryPointTrait for EntryPoints { .unwrap(), bcs::to_bytes(&price).unwrap(), // bid_price bcs::to_bytes(&size).unwrap(), // volume - bcs::to_bytes(&is_buy).unwrap(), // is_buy + bcs::to_bytes(&is_bid).unwrap(), // is_bid ]) }, } diff --git a/crates/transaction-workloads-lib/src/raw_module_data.rs b/crates/transaction-workloads-lib/src/raw_module_data.rs index 353427c8e41cf..1a1b10205ddc1 100644 --- a/crates/transaction-workloads-lib/src/raw_module_data.rs +++ b/crates/transaction-workloads-lib/src/raw_module_data.rs @@ -1078,11 +1078,11 @@ pub static MODULES_FRAMEWORK_USECASES: Lazy>> = Lazy::new(|| { vec![ pub static PACKAGE_EXPERIMENTAL_USECASES_METADATA: Lazy> = Lazy::new(|| { vec![ 20, 69, 120, 112, 101, 114, 105, 109, 101, 110, 116, 97, 108, 85, 115, 101, 99, 97, - 115, 101, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 64, 66, 70, 49, 56, 69, - 53, 51, 51, 49, 56, 55, 67, 49, 54, 51, 56, 51, 51, 50, 48, 57, 48, 49, - 70, 54, 51, 55, 56, 67, 56, 49, 57, 50, 70, 55, 53, 51, 50, 70, 69, 54, - 50, 51, 50, 69, 70, 50, 49, 65, 70, 70, 51, 56, 69, 57, 51, 52, 65, 57, - 50, 66, 67, 68, 56, 232, 1, 31, 139, 8, 0, 0, 0, 0, 0, 2, 255, 165, + 115, 101, 115, 1, 0, 0, 0, 0, 0, 0, 0, 0, 64, 50, 57, 50, 67, 70, + 49, 67, 66, 67, 53, 55, 52, 50, 53, 54, 50, 49, 48, 70, 67, 57, 66, 54, + 49, 56, 67, 68, 66, 69, 50, 48, 57, 57, 68, 49, 52, 70, 51, 69, 51, 51, + 53, 67, 52, 53, 52, 69, 48, 68, 65, 68, 53, 56, 49, 67, 68, 66, 48, 56, + 67, 50, 52, 51, 55, 232, 1, 31, 139, 8, 0, 0, 0, 0, 0, 2, 255, 165, 209, 187, 110, 195, 48, 12, 5, 208, 93, 95, 65, 184, 115, 156, 254, 64, 135, 244, 181, 118, 105, 39, 35, 40, 24, 249, 198, 81, 45, 83, 130, 168, 166, 6, 138, 254, 123, 45, 228, 1, 103, 77, 32, 13, 20, 113, 121, 32, 128, 77, 100, 219, 115, 135, @@ -1119,82 +1119,83 @@ pub static PACKAGE_EXPERIMENTAL_USECASES_METADATA: Lazy> = Lazy::new(|| #[rustfmt::skip] pub static MODULE_EXPERIMENTAL_USECASES_ORDER_BOOK_EXAMPLE: Lazy> = Lazy::new(|| { vec![ - 161, 28, 235, 11, 8, 0, 0, 10, 12, 1, 0, 14, 2, 14, 58, 3, 72, 124, - 4, 196, 1, 12, 5, 
208, 1, 122, 7, 202, 2, 239, 3, 8, 185, 6, 96, 6, - 153, 7, 34, 16, 187, 7, 123, 10, 182, 8, 20, 12, 202, 8, 196, 1, 13, 142, - 10, 4, 0, 0, 1, 6, 1, 8, 2, 11, 2, 17, 1, 19, 2, 20, 0, 1, - 7, 0, 0, 3, 8, 0, 1, 5, 4, 0, 0, 7, 8, 0, 2, 9, 4, 1, - 7, 0, 4, 16, 7, 1, 0, 0, 5, 18, 7, 1, 7, 0, 5, 23, 7, 0, - 5, 25, 7, 0, 2, 27, 3, 1, 7, 0, 5, 29, 3, 1, 7, 0, 5, 32, - 7, 0, 0, 10, 0, 1, 0, 1, 0, 3, 12, 0, 2, 0, 1, 1, 1, 1, - 13, 1, 3, 0, 1, 1, 1, 2, 14, 1, 5, 1, 7, 1, 1, 1, 0, 15, - 6, 1, 0, 1, 1, 1, 2, 15, 7, 8, 1, 7, 1, 1, 1, 6, 21, 6, - 6, 0, 1, 1, 1, 0, 22, 9, 1, 0, 1, 1, 1, 4, 24, 1, 11, 1, - 0, 1, 1, 1, 2, 26, 13, 14, 1, 7, 1, 1, 1, 2, 28, 15, 16, 1, - 7, 1, 1, 1, 0, 30, 9, 1, 0, 1, 1, 1, 5, 31, 17, 18, 0, 1, - 1, 1, 5, 33, 1, 10, 0, 1, 1, 1, 1, 34, 19, 1, 0, 1, 1, 1, - 3, 4, 5, 4, 8, 10, 8, 12, 9, 4, 10, 4, 1, 6, 12, 0, 1, 5, - 1, 8, 2, 1, 8, 0, 1, 11, 4, 1, 9, 0, 1, 3, 3, 7, 11, 4, - 1, 9, 0, 5, 3, 1, 11, 5, 1, 11, 6, 1, 9, 0, 5, 5, 3, 3, - 3, 1, 1, 8, 7, 1, 11, 5, 1, 9, 0, 1, 8, 8, 9, 5, 3, 11, - 5, 1, 8, 7, 3, 3, 3, 1, 11, 5, 1, 8, 8, 9, 0, 1, 11, 9, - 1, 9, 0, 2, 7, 11, 4, 1, 9, 0, 11, 9, 1, 9, 0, 1, 10, 11, - 10, 1, 9, 0, 2, 5, 3, 1, 8, 11, 6, 7, 8, 2, 8, 11, 3, 8, - 7, 3, 1, 2, 8, 11, 8, 7, 18, 111, 114, 100, 101, 114, 95, 98, 111, 111, - 107, 95, 101, 120, 97, 109, 112, 108, 101, 5, 69, 109, 112, 116, 121, 11, 100, 117, - 109, 109, 121, 95, 102, 105, 101, 108, 100, 10, 65, 99, 116, 105, 118, 101, 79, 110, - 108, 121, 11, 97, 99, 116, 105, 118, 101, 95, 111, 110, 108, 121, 15, 65, 99, 116, - 105, 118, 101, 79, 114, 100, 101, 114, 66, 111, 111, 107, 17, 97, 99, 116, 105, 118, - 101, 95, 111, 114, 100, 101, 114, 95, 98, 111, 111, 107, 3, 68, 101, 120, 10, 111, - 114, 100, 101, 114, 95, 98, 111, 111, 107, 9, 79, 114, 100, 101, 114, 66, 111, 111, - 107, 11, 105, 110, 105, 116, 95, 109, 111, 100, 117, 108, 101, 6, 115, 105, 103, 110, - 101, 114, 10, 97, 100, 100, 114, 101, 115, 115, 95, 111, 102, 21, 110, 101, 119, 95, - 97, 99, 116, 105, 118, 101, 95, 111, 114, 100, 101, 114, 95, 98, 111, 111, 107, 14, - 110, 101, 119, 95, 111, 114, 100, 101, 114, 95, 98, 111, 111, 107, 12, 99, 97, 110, - 99, 101, 108, 95, 111, 114, 100, 101, 114, 6, 79, 112, 116, 105, 111, 110, 6, 111, - 112, 116, 105, 111, 110, 5, 79, 114, 100, 101, 114, 16, 111, 114, 100, 101, 114, 95, - 98, 111, 111, 107, 95, 116, 121, 112, 101, 115, 5, 101, 114, 114, 111, 114, 16, 105, - 110, 118, 97, 108, 105, 100, 95, 97, 114, 103, 117, 109, 101, 110, 116, 11, 112, 108, - 97, 99, 101, 95, 111, 114, 100, 101, 114, 13, 85, 110, 105, 113, 117, 101, 73, 100, - 120, 84, 121, 112, 101, 4, 110, 111, 110, 101, 16, 84, 114, 105, 103, 103, 101, 114, - 67, 111, 110, 100, 105, 116, 105, 111, 110, 17, 110, 101, 119, 95, 111, 114, 100, 101, - 114, 95, 114, 101, 113, 117, 101, 115, 116, 12, 79, 114, 100, 101, 114, 82, 101, 113, - 117, 101, 115, 116, 27, 112, 108, 97, 99, 101, 95, 111, 114, 100, 101, 114, 95, 97, - 110, 100, 95, 103, 101, 116, 95, 109, 97, 116, 99, 104, 101, 115, 16, 83, 105, 110, - 103, 108, 101, 79, 114, 100, 101, 114, 77, 97, 116, 99, 104, 28, 112, 108, 97, 99, - 101, 95, 97, 99, 116, 105, 118, 101, 95, 112, 111, 115, 116, 95, 111, 110, 108, 121, - 95, 111, 114, 100, 101, 114, 17, 110, 101, 119, 95, 111, 114, 100, 101, 114, 95, 105, - 100, 95, 116, 121, 112, 101, 11, 79, 114, 100, 101, 114, 73, 100, 84, 121, 112, 101, - 34, 103, 101, 110, 101, 114, 97, 116, 101, 95, 117, 110, 105, 113, 117, 101, 95, 105, - 100, 120, 95, 102, 105, 102, 111, 95, 116, 105, 101, 98, 114, 97, 107, 101, 114, 17, - 112, 108, 97, 99, 101, 95, 109, 97, 
107, 101, 114, 95, 111, 114, 100, 101, 114, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 205, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 161, 28, 235, 11, 8, 0, 0, 10, 12, 1, 0, 12, 2, 12, 46, 3, 58, 136, + 1, 4, 194, 1, 20, 5, 214, 1, 183, 1, 7, 141, 3, 146, 3, 8, 159, 6, + 96, 6, 255, 6, 34, 16, 161, 7, 123, 10, 156, 8, 14, 12, 170, 8, 249, 1, + 13, 163, 10, 2, 0, 0, 1, 4, 2, 7, 1, 11, 2, 15, 2, 21, 0, 1, + 7, 0, 0, 3, 8, 0, 1, 5, 4, 1, 7, 0, 3, 13, 7, 0, 3, 14, + 7, 1, 7, 0, 3, 19, 3, 1, 7, 0, 3, 20, 7, 0, 5, 23, 7, 1, + 0, 0, 1, 27, 3, 1, 7, 0, 0, 6, 0, 1, 0, 1, 0, 2, 8, 0, + 2, 0, 1, 1, 1, 1, 9, 1, 4, 1, 7, 1, 1, 1, 0, 10, 5, 1, + 0, 1, 1, 1, 3, 12, 6, 7, 0, 1, 1, 1, 1, 10, 8, 9, 1, 7, + 1, 1, 1, 4, 16, 5, 5, 0, 1, 1, 1, 0, 17, 10, 1, 0, 1, 1, + 1, 0, 18, 11, 12, 0, 1, 1, 1, 5, 22, 1, 14, 1, 0, 1, 1, 1, + 5, 24, 16, 14, 1, 0, 1, 1, 1, 1, 25, 17, 18, 1, 7, 1, 1, 1, + 1, 26, 19, 20, 1, 7, 1, 1, 1, 1, 28, 21, 1, 1, 7, 1, 1, 1, + 1, 29, 22, 23, 1, 7, 1, 1, 1, 3, 30, 24, 5, 1, 7, 1, 1, 1, + 2, 3, 5, 3, 9, 13, 10, 5, 11, 3, 9, 5, 12, 3, 13, 3, 14, 3, + 15, 3, 1, 6, 12, 0, 1, 5, 1, 8, 0, 1, 11, 2, 1, 9, 0, 1, + 3, 1, 4, 1, 8, 3, 3, 7, 11, 2, 1, 9, 0, 5, 8, 3, 1, 11, + 4, 1, 9, 0, 5, 5, 3, 3, 3, 1, 7, 7, 11, 2, 1, 8, 0, 5, + 8, 3, 3, 3, 3, 1, 1, 10, 11, 5, 1, 8, 0, 1, 8, 6, 1, 11, + 7, 1, 9, 0, 1, 11, 5, 1, 8, 0, 1, 9, 0, 4, 6, 11, 2, 1, + 9, 0, 11, 7, 1, 3, 1, 11, 7, 1, 8, 6, 1, 1, 9, 5, 8, 3, + 11, 7, 1, 3, 3, 3, 3, 1, 11, 7, 1, 8, 6, 9, 0, 1, 11, 8, + 1, 9, 0, 2, 7, 11, 2, 1, 9, 0, 11, 8, 1, 9, 0, 4, 7, 11, + 2, 1, 9, 0, 11, 7, 1, 3, 3, 1, 1, 11, 5, 1, 9, 0, 1, 6, + 11, 5, 1, 9, 0, 4, 11, 7, 1, 8, 6, 10, 11, 5, 1, 8, 0, 11, + 5, 1, 8, 0, 3, 18, 111, 114, 100, 101, 114, 95, 98, 111, 111, 107, 95, 101, + 120, 97, 109, 112, 108, 101, 5, 69, 109, 112, 116, 121, 11, 100, 117, 109, 109, 121, + 95, 102, 105, 101, 108, 100, 3, 68, 101, 120, 10, 111, 114, 100, 101, 114, 95, 98, + 111, 111, 107, 9, 79, 114, 100, 101, 114, 66, 111, 111, 107, 11, 105, 110, 105, 116, + 95, 109, 111, 100, 117, 108, 101, 6, 115, 105, 103, 110, 101, 114, 10, 97, 100, 100, + 114, 101, 115, 115, 95, 111, 102, 14, 110, 101, 119, 95, 111, 114, 100, 101, 114, 95, + 98, 111, 111, 107, 12, 99, 97, 110, 99, 101, 108, 95, 111, 114, 100, 101, 114, 16, + 111, 114, 100, 101, 114, 95, 98, 111, 111, 107, 95, 116, 121, 112, 101, 115, 17, 110, + 101, 119, 95, 111, 114, 100, 101, 114, 95, 105, 100, 95, 116, 121, 112, 101, 11, 79, + 114, 100, 101, 114, 73, 100, 84, 121, 112, 101, 5, 79, 114, 100, 101, 114, 5, 101, + 114, 114, 111, 114, 16, 105, 110, 118, 97, 108, 105, 100, 95, 97, 114, 103, 117, 109, + 101, 110, 116, 11, 112, 108, 97, 99, 101, 95, 111, 114, 100, 101, 114, 27, 112, 108, + 97, 99, 101, 95, 111, 114, 100, 101, 114, 95, 97, 110, 100, 95, 103, 101, 116, 95, + 109, 97, 116, 99, 104, 101, 115, 16, 83, 105, 110, 103, 108, 101, 79, 114, 100, 101, + 114, 77, 97, 116, 99, 104, 16, 84, 114, 105, 103, 103, 101, 114, 67, 111, 110, 100, + 105, 116, 105, 111, 110, 6, 111, 112, 116, 105, 111, 110, 4, 110, 111, 110, 101, 6, + 79, 112, 116, 105, 111, 110, 4, 115, 111, 109, 101, 14, 105, 115, 95, 116, 97, 107, + 101, 114, 95, 111, 114, 100, 101, 114, 17, 110, 101, 119, 95, 111, 114, 100, 101, 114, + 95, 114, 101, 113, 117, 101, 115, 116, 12, 79, 114, 100, 101, 114, 82, 101, 113, 117, + 101, 115, 116, 17, 112, 108, 97, 99, 101, 95, 109, 97, 107, 101, 114, 95, 111, 114, + 100, 101, 114, 26, 103, 101, 116, 
95, 115, 105, 110, 103, 108, 101, 95, 109, 97, 116, + 99, 104, 95, 102, 111, 114, 95, 116, 97, 107, 101, 114, 16, 103, 101, 116, 95, 109, + 97, 116, 99, 104, 101, 100, 95, 115, 105, 122, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 5, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 171, 205, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 171, 205, 20, 99, 111, 109, 112, 105, 108, 97, 116, 105, 111, 110, 95, 109, 101, - 116, 97, 100, 97, 116, 97, 18, 1, 3, 50, 46, 48, 12, 50, 46, 51, 45, 117, - 110, 115, 116, 97, 98, 108, 101, 18, 97, 112, 116, 111, 115, 58, 58, 109, 101, 116, - 97, 100, 97, 116, 97, 95, 118, 49, 63, 2, 1, 0, 0, 0, 0, 0, 0, 0, - 15, 69, 78, 79, 84, 95, 65, 85, 84, 72, 79, 82, 73, 90, 69, 68, 0, 2, - 0, 0, 0, 0, 0, 0, 0, 25, 69, 68, 69, 88, 95, 82, 69, 83, 79, 85, - 82, 67, 69, 95, 78, 79, 84, 95, 80, 82, 69, 83, 69, 78, 84, 0, 0, 0, - 0, 2, 1, 2, 1, 1, 2, 1, 4, 8, 2, 3, 2, 1, 8, 11, 4, 1, - 8, 0, 0, 0, 0, 0, 1, 18, 10, 0, 17, 1, 7, 0, 33, 4, 14, 10, - 0, 17, 2, 18, 1, 45, 1, 11, 0, 56, 0, 18, 2, 45, 2, 2, 11, 0, - 1, 6, 1, 0, 0, 0, 0, 0, 0, 0, 39, 4, 1, 4, 1, 2, 1, 14, - 7, 0, 41, 2, 4, 11, 7, 0, 42, 2, 15, 0, 7, 0, 11, 0, 56, 1, - 1, 2, 6, 2, 0, 0, 0, 0, 0, 0, 0, 17, 6, 39, 7, 1, 4, 1, - 2, 1, 23, 7, 0, 41, 2, 4, 20, 7, 0, 42, 2, 15, 0, 11, 0, 11, - 1, 56, 2, 11, 2, 10, 3, 11, 3, 11, 4, 56, 3, 9, 18, 0, 56, 4, - 56, 5, 1, 2, 6, 2, 0, 0, 0, 0, 0, 0, 0, 17, 6, 39, 11, 1, - 4, 1, 1, 20, 22, 7, 0, 41, 1, 4, 19, 7, 0, 42, 1, 11, 0, 11, - 1, 17, 12, 12, 5, 17, 13, 12, 6, 15, 1, 11, 5, 11, 2, 11, 6, 11, - 3, 11, 4, 17, 14, 2, 6, 2, 0, 0, 0, 0, 0, 0, 0, 17, 6, 39, - 2, 0, 1, 0, 0, + 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 5, + 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 205, 20, 99, 111, + 109, 112, 105, 108, 97, 116, 105, 111, 110, 95, 109, 101, 116, 97, 100, 97, 116, 97, + 18, 1, 3, 50, 46, 48, 12, 50, 46, 51, 45, 117, 110, 115, 116, 97, 98, 108, + 101, 18, 97, 112, 116, 111, 115, 58, 58, 109, 101, 116, 97, 100, 97, 116, 97, 95, + 118, 49, 63, 2, 1, 0, 0, 0, 0, 0, 0, 0, 15, 69, 78, 79, 84, 95, + 65, 85, 84, 72, 79, 82, 73, 90, 69, 68, 0, 2, 0, 0, 0, 0, 0, 0, + 0, 25, 69, 68, 69, 88, 95, 82, 69, 83, 79, 85, 82, 67, 69, 95, 78, 79, + 84, 95, 80, 82, 69, 83, 69, 78, 84, 0, 0, 0, 0, 2, 1, 2, 1, 1, + 2, 1, 4, 11, 2, 1, 8, 0, 0, 0, 0, 0, 1, 14, 10, 0, 17, 1, + 7, 0, 33, 4, 10, 11, 0, 56, 0, 18, 1, 45, 1, 2, 11, 0, 1, 6, + 1, 0, 0, 0, 0, 0, 0, 0, 39, 3, 1, 4, 1, 1, 1, 16, 7, 0, + 41, 1, 4, 13, 7, 0, 42, 1, 15, 0, 7, 0, 11, 0, 53, 17, 4, 56, + 1, 1, 2, 6, 2, 0, 0, 0, 0, 0, 0, 0, 17, 6, 39, 7, 1, 4, + 1, 1, 1, 20, 7, 0, 41, 1, 4, 17, 7, 0, 42, 1, 15, 0, 11, 0, + 11, 1, 53, 17, 4, 11, 2, 10, 3, 11, 3, 11, 4, 17, 8, 1, 2, 6, + 2, 0, 0, 0, 0, 0, 0, 0, 17, 6, 39, 8, 1, 0, 0, 25, 53, 56, + 2, 12, 7, 64, 15, 0, 0, 0, 0, 0, 0, 0, 0, 12, 8, 10, 5, 6, + 0, 0, 0, 0, 0, 0, 0, 0, 36, 4, 49, 10, 0, 46, 10, 3, 56, 3, + 10, 6, 10, 7, 56, 4, 3, 31, 11, 0, 11, 1, 11, 2, 56, 5, 11, 3, + 11, 4, 11, 5, 11, 6, 11, 7, 9, 18, 0, 56, 6, 56, 7, 11, 8, 2, + 10, 0, 10, 3, 56, 3, 10, 5, 10, 6, 56, 8, 12, 9, 14, 9, 56, 9, + 12, 10, 13, 8, 11, 9, 68, 15, 11, 5, 11, 10, 23, 12, 5, 5, 4, 11, + 0, 1, 11, 8, 2, 1, 0, 0, ] }); diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/generated_transactions.rs 
b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/generated_transactions.rs index cc60396f20199..e602cc8b78460 100644 --- a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/generated_transactions.rs +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/generated_transactions.rs @@ -3,6 +3,8 @@ #![allow(dead_code)] #![allow(unused_variables)] +pub const IMPORTED_MAINNET_TXNS_2386021136_TRADEPORT_V2_FILL_COLLECTION_OFFER: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/2386021136_tradeport_v2_fill_collection_offer.json")); + pub const IMPORTED_MAINNET_TXNS_121508544_STAKE_DISTRIBUTE: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/121508544_stake_distribute.json" @@ -32,6 +34,12 @@ pub const IMPORTED_MAINNET_TXNS_1831971037_STAKE_DELEGATION_POOL: &[u8] = includ "/src/json_transactions/imported_mainnet_txns/1831971037_stake_delegation_pool.json" )); +pub const IMPORTED_MAINNET_TXNS_2386133936_TRADEPORT_V2_PLACE_OFFER: &[u8] = + include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2386133936_tradeport_v2_place_offer.json" + )); + pub const IMPORTED_MAINNET_TXNS_527013476_USER_TXN_SINGLE_SENDER_SECP256K1_ECDSA: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/527013476_user_txn_single_sender_secp256k1_ecdsa.json")); pub const IMPORTED_MAINNET_TXNS_118489_PROPOSAL_VOTE: &[u8] = include_bytes!(concat!( @@ -47,11 +55,32 @@ pub const IMPORTED_MAINNET_TXNS_1845035942_DEFAULT_CURRENT_TABLE_ITEMS: &[u8] = "/src/json_transactions/imported_mainnet_txns/1845035942_default_current_table_items.json" )); +pub const IMPORTED_MAINNET_TXNS_2382373978_WAPAL_CANCEL_COLLECTION_OFFER: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/2382373978_wapal_cancel_collection_offer.json")); + +pub const IMPORTED_MAINNET_TXNS_2386455218_TRADEPORT_V2_FILL_LISTING: &[u8] = + include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2386455218_tradeport_v2_fill_listing.json" + )); + +pub const IMPORTED_MAINNET_TXNS_2382221134_WAPAL_FILL_LISTING: &[u8] = include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2382221134_wapal_fill_listing.json" +)); + +pub const IMPORTED_MAINNET_TXNS_2386889884_TRADEPORT_V2_CANCEL_COLLECTION_OFFER: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/2386889884_tradeport_v2_cancel_collection_offer.json")); + pub const IMPORTED_MAINNET_TXNS_125600867_STAKE_DELEGATION_POOL: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/125600867_stake_delegation_pool.json" )); +pub const IMPORTED_MAINNET_TXNS_2382219668_WAPAL_FILL_COLLECTION_OFFER: &[u8] = + include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2382219668_wapal_fill_collection_offer.json" + )); + pub const IMPORTED_MAINNET_TXNS_513424821_DEFAULT_BLOCK_METADATA_TRANSACTIONS: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/513424821_default_block_metadata_transactions.json")); pub const IMPORTED_MAINNET_TXNS_103958588_MULTI_AGENTS: &[u8] = include_bytes!(concat!( @@ -83,11 +112,22 @@ pub const 
IMPORTED_MAINNET_TXNS_2212040150_TRANSACTION_WITHOUT_EVENTS: &[u8] = "/src/json_transactions/imported_mainnet_txns/2212040150_transaction_without_events.json" )); +pub const IMPORTED_MAINNET_TXNS_2386809975_TRADEPORT_V2_PLACE_LISTING: &[u8] = + include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2386809975_tradeport_v2_place_listing.json" + )); + pub const IMPORTED_MAINNET_TXNS_550582915_MULTIPLE_TRANSFER_EVENT: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/550582915_multiple_transfer_event.json" )); +pub const IMPORTED_MAINNET_TXNS_2313248448_WAPAL_FILL_OFFER: &[u8] = include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2313248448_wapal_fill_offer.json" +)); + pub const IMPORTED_MAINNET_TXNS_464961735_USER_TXN_SINGLE_KEY_ED25519: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), @@ -110,6 +150,8 @@ pub const IMPORTED_MAINNET_TXNS_2308282694_ASSET_TYPE_V1_NULL: &[u8] = include_b "/src/json_transactions/imported_mainnet_txns/2308282694_asset_type_v1_null.json" )); +pub const IMPORTED_MAINNET_TXNS_2296149225_TRADEPORT_V2_ACCEPT_TOKEN_DELIST: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/2296149225_tradeport_v2_accept_token_delist.json")); + pub const IMPORTED_MAINNET_TXNS_999930475_TOKEN_V2_CONCURRENT_MINT: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), @@ -129,6 +171,8 @@ pub const IMPORTED_MAINNET_TXNS_124094774_DELEGATED_POOL_BALANCE: &[u8] = includ "/src/json_transactions/imported_mainnet_txns/124094774_delegated_pool_balance.json" )); +pub const IMPORTED_MAINNET_TXNS_2277018899_TRADEPORT_V2_ACCEPT_TOKEN_DELIST_SAME_TOKEN_DATA_ID: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/2277018899_tradeport_v2_accept_token_delist_same_token_data_id.json")); + pub const IMPORTED_MAINNET_TXNS_144201980_MULTIPLE_TRANSFER_EVENT: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/144201980_multiple_transfer_event.json" @@ -139,6 +183,11 @@ pub const IMPORTED_MAINNET_TXNS_97963136_TOKEN_V2_CANCEL_OFFER: &[u8] = include_ "/src/json_transactions/imported_mainnet_txns/97963136_token_v2_cancel_offer.json" )); +pub const IMPORTED_MAINNET_TXNS_2382313982_WAPAL_PLACE_OFFER: &[u8] = include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2382313982_wapal_place_offer.json" +)); + pub const IMPORTED_MAINNET_TXNS_152449628_COIN_INFO_WRITE: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/152449628_coin_info_write.json" @@ -150,6 +199,19 @@ pub const IMPORTED_MAINNET_TXNS_602320562_TOKEN_V2_APTOS_TOKEN_MINT: &[u8] = "/src/json_transactions/imported_mainnet_txns/602320562_token_v2_aptos_token_mint.json" )); +pub const IMPORTED_MAINNET_TXNS_2386716658_TRADEPORT_V2_CANCEL_LISTING: &[u8] = + include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2386716658_tradeport_v2_cancel_listing.json" + )); + +pub const IMPORTED_MAINNET_TXNS_2386891051_TRADEPORT_V2_PLACE_COLLECTION_OFFER: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/2386891051_tradeport_v2_place_collection_offer.json")); + +pub const IMPORTED_MAINNET_TXNS_2381742315_WAPAL_CANCEL_LISTING: &[u8] = 
include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2381742315_wapal_cancel_listing.json" +)); + pub const IMPORTED_MAINNET_TXNS_1830706009_STAKER_GOVERNANCE_RECORD: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), @@ -209,6 +271,17 @@ pub const IMPORTED_MAINNET_TXNS_141135867_TOKEN_V1_OFFER: &[u8] = include_bytes! "/src/json_transactions/imported_mainnet_txns/141135867_token_v1_offer.json" )); +pub const IMPORTED_MAINNET_TXNS_2382251863_WAPAL_PLACE_LISTING: &[u8] = include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2382251863_wapal_place_listing.json" +)); + +pub const IMPORTED_MAINNET_TXNS_2386142672_TRADEPORT_V2_CANCEL_OFFER: &[u8] = + include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2386142672_tradeport_v2_cancel_offer.json" + )); + pub const IMPORTED_MAINNET_TXNS_1957950162_FA_MIGRATION_V2_STORE_ONLY: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), @@ -228,6 +301,12 @@ pub const IMPORTED_MAINNET_TXNS_2080538_ANS_LOOKUP_V1: &[u8] = include_bytes!(co "/src/json_transactions/imported_mainnet_txns/2080538_ans_lookup_v1.json" )); +pub const IMPORTED_MAINNET_TXNS_2382373209_WAPAL_PLACE_COLLECTION_OFFER: &[u8] = + include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2382373209_wapal_place_collection_offer.json" + )); + pub const IMPORTED_MAINNET_TXNS_139449359_STAKE_REACTIVATE: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/139449359_stake_reactivate.json" @@ -256,6 +335,12 @@ pub const IMPORTED_MAINNET_TXNS_1080786089_TOKEN_V2_BURN_EVENT_V1: &[u8] = inclu "/src/json_transactions/imported_mainnet_txns/1080786089_token_v2_burn_event_v1.json" )); +pub const IMPORTED_MAINNET_TXNS_2298838662_TRADEPORT_V2_FILL_OFFER: &[u8] = + include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2298838662_tradeport_v2_fill_offer.json" + )); + pub const IMPORTED_MAINNET_TXNS_445585423_TOKEN_MINT_AND_BURN_EVENT: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), @@ -277,11 +362,18 @@ pub const IMPORTED_MAINNET_TXNS_537250181_TOKEN_V2_FIXED_SUPPLY_MINT: &[u8] = pub const IMPORTED_MAINNET_TXNS_2200077673_ACCOUNT_RESTORATION_UNVERIFIED_KEY_ROTATION_TO_MULTI_KEY_TXN: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/2200077673_account_restoration_unverified_key_rotation_to_multi_key_txn.json")); +pub const IMPORTED_MAINNET_TXNS_2296098846_TRADEPORT_V2_ACCEPT_TOKEN_DELIST2: &[u8] = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/2296098846_tradeport_v2_accept_token_delist2.json")); + pub const IMPORTED_MAINNET_TXNS_4827964_STAKE_INITIALIZE: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/4827964_stake_initialize.json" )); +pub const IMPORTED_MAINNET_TXNS_2381810159_WAPAL_CANCEL_OFFER: &[u8] = include_bytes!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/src/json_transactions/imported_mainnet_txns/2381810159_wapal_cancel_offer.json" +)); + pub const IMPORTED_MAINNET_TXNS_155112189_DEFAULT_TABLE_ITEMS: &[u8] = include_bytes!(concat!( env!("CARGO_MANIFEST_DIR"), "/src/json_transactions/imported_mainnet_txns/155112189_default_table_items.json" @@ -297,6 +389,7 @@ pub const IMPORTED_MAINNET_TXNS_685_USER_TXN_ED25519: &[u8] = 
include_bytes!(con "/src/json_transactions/imported_mainnet_txns/685_user_txn_ed25519.json" )); pub const ALL_IMPORTED_MAINNET_TXNS: &[&[u8]] = &[ + IMPORTED_MAINNET_TXNS_2386021136_TRADEPORT_V2_FILL_COLLECTION_OFFER, IMPORTED_MAINNET_TXNS_121508544_STAKE_DISTRIBUTE, IMPORTED_MAINNET_TXNS_590098441_USER_TXN_SINGLE_SENDER_ED25519, IMPORTED_MAINNET_TXNS_145959468_ACCOUNT_TRANSACTION, @@ -304,11 +397,17 @@ pub const ALL_IMPORTED_MAINNET_TXNS: &[&[u8]] = &[ IMPORTED_MAINNET_TXNS_2308283617_ASSET_TYPE_V1_NULL_2, IMPORTED_MAINNET_TXNS_2200077877_ACCOUNT_RESTORATION_ROTATED_TO_SINGLE_SECP256K1, IMPORTED_MAINNET_TXNS_1831971037_STAKE_DELEGATION_POOL, + IMPORTED_MAINNET_TXNS_2386133936_TRADEPORT_V2_PLACE_OFFER, IMPORTED_MAINNET_TXNS_527013476_USER_TXN_SINGLE_SENDER_SECP256K1_ECDSA, IMPORTED_MAINNET_TXNS_118489_PROPOSAL_VOTE, IMPORTED_MAINNET_TXNS_1058723093_TOKEN_V1_MINT_WITHDRAW_DEPOSIT_EVENTS, IMPORTED_MAINNET_TXNS_1845035942_DEFAULT_CURRENT_TABLE_ITEMS, + IMPORTED_MAINNET_TXNS_2382373978_WAPAL_CANCEL_COLLECTION_OFFER, + IMPORTED_MAINNET_TXNS_2386455218_TRADEPORT_V2_FILL_LISTING, + IMPORTED_MAINNET_TXNS_2382221134_WAPAL_FILL_LISTING, + IMPORTED_MAINNET_TXNS_2386889884_TRADEPORT_V2_CANCEL_COLLECTION_OFFER, IMPORTED_MAINNET_TXNS_125600867_STAKE_DELEGATION_POOL, + IMPORTED_MAINNET_TXNS_2382219668_WAPAL_FILL_COLLECTION_OFFER, IMPORTED_MAINNET_TXNS_513424821_DEFAULT_BLOCK_METADATA_TRANSACTIONS, IMPORTED_MAINNET_TXNS_103958588_MULTI_AGENTS, IMPORTED_MAINNET_TXNS_999929475_COIN_AND_FA_TRANSFERS, @@ -318,19 +417,27 @@ pub const ALL_IMPORTED_MAINNET_TXNS: &[&[u8]] = &[ IMPORTED_MAINNET_TXNS_1056780409_ANS_CURRENT_ANS_PRIMARY_NAME_V2, IMPORTED_MAINNET_TXNS_2200077591_ACCOUNT_RESTORATION_SINGLE_ED25519, IMPORTED_MAINNET_TXNS_2212040150_TRANSACTION_WITHOUT_EVENTS, + IMPORTED_MAINNET_TXNS_2386809975_TRADEPORT_V2_PLACE_LISTING, IMPORTED_MAINNET_TXNS_550582915_MULTIPLE_TRANSFER_EVENT, + IMPORTED_MAINNET_TXNS_2313248448_WAPAL_FILL_OFFER, IMPORTED_MAINNET_TXNS_464961735_USER_TXN_SINGLE_KEY_ED25519, IMPORTED_MAINNET_TXNS_438536688_ANS_CURRENT_ANS_LOOKUP_V2, IMPORTED_MAINNET_TXNS_578318306_OBJECTS_WRITE_RESOURCE, IMPORTED_MAINNET_TXNS_2308282694_ASSET_TYPE_V1_NULL, + IMPORTED_MAINNET_TXNS_2296149225_TRADEPORT_V2_ACCEPT_TOKEN_DELIST, IMPORTED_MAINNET_TXNS_999930475_TOKEN_V2_CONCURRENT_MINT, IMPORTED_MAINNET_TXNS_554229017_EVENTS_WITH_NO_EVENT_SIZE_INFO, IMPORTED_MAINNET_TXNS_2448304257_COINSTORE_DELETION_EVENT, IMPORTED_MAINNET_TXNS_124094774_DELEGATED_POOL_BALANCE, + IMPORTED_MAINNET_TXNS_2277018899_TRADEPORT_V2_ACCEPT_TOKEN_DELIST_SAME_TOKEN_DATA_ID, IMPORTED_MAINNET_TXNS_144201980_MULTIPLE_TRANSFER_EVENT, IMPORTED_MAINNET_TXNS_97963136_TOKEN_V2_CANCEL_OFFER, + IMPORTED_MAINNET_TXNS_2382313982_WAPAL_PLACE_OFFER, IMPORTED_MAINNET_TXNS_152449628_COIN_INFO_WRITE, IMPORTED_MAINNET_TXNS_602320562_TOKEN_V2_APTOS_TOKEN_MINT, + IMPORTED_MAINNET_TXNS_2386716658_TRADEPORT_V2_CANCEL_LISTING, + IMPORTED_MAINNET_TXNS_2386891051_TRADEPORT_V2_PLACE_COLLECTION_OFFER, + IMPORTED_MAINNET_TXNS_2381742315_WAPAL_CANCEL_LISTING, IMPORTED_MAINNET_TXNS_1830706009_STAKER_GOVERNANCE_RECORD, IMPORTED_MAINNET_TXNS_423176063_ACCOUNT_TRANSACTION_DELETE, IMPORTED_MAINNET_TXNS_303690531_ANS_LOOKUP_V2, @@ -344,22 +451,28 @@ pub const ALL_IMPORTED_MAINNET_TXNS: &[&[u8]] = &[ IMPORTED_MAINNET_TXNS_551057865_USER_TXN_SINGLE_SENDER_WEBAUTH, IMPORTED_MAINNET_TXNS_84023785_TOKEN_V2_CLAIM_OFFER, IMPORTED_MAINNET_TXNS_141135867_TOKEN_V1_OFFER, + IMPORTED_MAINNET_TXNS_2382251863_WAPAL_PLACE_LISTING, + 
IMPORTED_MAINNET_TXNS_2386142672_TRADEPORT_V2_CANCEL_OFFER, IMPORTED_MAINNET_TXNS_1957950162_FA_MIGRATION_V2_STORE_ONLY, IMPORTED_MAINNET_TXNS_976087151_USER_TXN_SINGLE_SENDER_KEYLESS, IMPORTED_MAINNET_TXNS_1737056775_COIN_TRANSFER_BURN_EVENT, IMPORTED_MAINNET_TXNS_2080538_ANS_LOOKUP_V1, + IMPORTED_MAINNET_TXNS_2382373209_WAPAL_PLACE_COLLECTION_OFFER, IMPORTED_MAINNET_TXNS_139449359_STAKE_REACTIVATE, IMPORTED_MAINNET_TXNS_308783012_FA_TRANSFER, IMPORTED_MAINNET_TXNS_2186504987_COIN_STORE_DELETION_NO_EVENT, IMPORTED_MAINNET_TXNS_1680592683_FA_MIGRATION_COIN_INFO, IMPORTED_MAINNET_TXNS_453498957_TOKEN_V2_MINT_AND_TRANSFER_EVENT_V1, IMPORTED_MAINNET_TXNS_1080786089_TOKEN_V2_BURN_EVENT_V1, + IMPORTED_MAINNET_TXNS_2298838662_TRADEPORT_V2_FILL_OFFER, IMPORTED_MAINNET_TXNS_445585423_TOKEN_MINT_AND_BURN_EVENT, IMPORTED_MAINNET_TXNS_967255533_TOKEN_V2_MUTATION_EVENT, IMPORTED_MAINNET_TXNS_407418623_USER_TXN_SINGLE_KEY_SECP256K1_ECDSA, IMPORTED_MAINNET_TXNS_537250181_TOKEN_V2_FIXED_SUPPLY_MINT, IMPORTED_MAINNET_TXNS_2200077673_ACCOUNT_RESTORATION_UNVERIFIED_KEY_ROTATION_TO_MULTI_KEY_TXN, + IMPORTED_MAINNET_TXNS_2296098846_TRADEPORT_V2_ACCEPT_TOKEN_DELIST2, IMPORTED_MAINNET_TXNS_4827964_STAKE_INITIALIZE, + IMPORTED_MAINNET_TXNS_2381810159_WAPAL_CANCEL_OFFER, IMPORTED_MAINNET_TXNS_155112189_DEFAULT_TABLE_ITEMS, IMPORTED_MAINNET_TXNS_178179220_TOKEN_V1_MUTATE_EVENT, IMPORTED_MAINNET_TXNS_685_USER_TXN_ED25519, diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2277018899_tradeport_v2_accept_token_delist_same_token_data_id.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2277018899_tradeport_v2_accept_token_delist_same_token_data_id.json new file mode 100644 index 0000000000000..214e2e7ffb147 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2277018899_tradeport_v2_accept_token_delist_same_token_data_id.json @@ -0,0 +1,535 @@ +{ + "timestamp": { + "seconds": "1738092852", + "nanos": 915658000 + }, + "version": "2277018899", + "info": { + "hash": "E74a1FC4GkGO65Ig4xmaJbM5hWFhnTR0daMRnjLnHkE=", + "stateChangeHash": "04LpT5qp05FA8E5FAwJlmYfD02vS4X3j57SgtKP758o=", + "eventRootHash": "n83wdnCkv4fRVrfCzDHsy4PglL9A0aVRZPsUbnDZnhI=", + "gasUsed": "20", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "C06HYwg6sJXRiNRkKLBPhss+u5+IOE4+k4yxuSryzOk=", + "changes": [ + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xe89fe77893a9bbbfb89d747005e621d4cc9e30b6a5c5f9b419c7f784a3307a5", + "stateKeyHash": "r8ki1cCzdPpvNc7TBfbbgEg2gzLB8r+QTZ9QF6qGSi8=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60", + "stateKeyHash": "hizPXgf4k6Y4LlVYvl0lQ5OM7doVSYeH0BcWJbk2oKg=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": 
"{\"coin\":{\"value\":\"27433732\"},\"deposit_events\":{\"counter\":\"4\",\"guid\":{\"id\":{\"addr\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"9\",\"guid\":{\"id\":{\"addr\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60", + "stateKeyHash": "yVpnC8QMW+FdlLhFx+CuV2VCs1+9aabr1fvIb2pR98A=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"coin_register_events\":{\"counter\":\"2\",\"guid\":{\"id\":{\"addr\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"11\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"14\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4", + "stateKeyHash": "ia5CJAqvJ4lnS7nWhVMNNxhYGforZgoP0qtHcM/2+OM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"9867565519\"},\"deposit_events\":{\"counter\":\"149203\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"23\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"transfer_events\":{\"counter\":\"10\",\"guid\":{\"id\":{\"addr\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": 
"{\"inner\":{\"data\":[{\"key\":\"elemental\",\"value\":{\"type\":9,\"value\":\"0x0446495245\"}},{\"key\":\"category\",\"value\":{\"type\":9,\"value\":\"0x096170746f6d696e676f\"}},{\"key\":\"grade\",\"value\":{\"type\":9,\"value\":\"0x06436f6d6d6f6e\"}},{\"key\":\"@probability_set\",\"value\":{\"type\":9,\"value\":\"0x0c67616d653a3a6578706f7274\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x4", + "module": "royalty", + "name": "Royalty" + }, + "typeStr": "0x4::royalty::Royalty", + "data": "{\"denominator\":\"100\",\"numerator\":\"5\",\"payee_address\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"},\"description\":\"I am a Sidekick, always standing by my Villain. When the bond between a Villain and me deepens, something special could happen!\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://public.vir.supervlabs.io/virweb/nft/sidekicks/mingo.png\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"291160\"},\"name\":{\"padding\":\"0x0000000000000000\",\"value\":\"Mingo #12541\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6", + "module": "sidekick", + "name": "Sidekick" + }, + "typeStr": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6::sidekick::Sidekick", + "data": "{\"salt\":\"0\",\"updated_at\":\"1738028896\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3", + "module": "orm_object", + "name": "OrmObject" + }, + "typeStr": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3::orm_object::OrmObject", + "data": 
"{\"class\":{\"inner\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"},\"creator\":{\"inner\":\"0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6\"},\"delete_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"extend_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"transfer_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3", + "module": "orm_object", + "name": "OrmToken" + }, + "typeStr": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3::orm_object::OrmToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"property_mutator_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"royalty_mutator_ref\":{\"vec\":[{\"inner\":{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}}]}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x6ecdec1173f2e79aeab37fc2e2bdd43f36d7c169a153a1bf6e05246b21457e90", + "stateKeyHash": "bGTRvQOhc1OgXKdmecFvmpviy9mzSfjdEY3jJYDmKNI=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7", + "stateKeyHash": "e3BUygmgtbbu9RwYj5qaEWzxrbT3Ynu8up05xX4yDTY=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"12354643426\"},\"deposit_events\":{\"counter\":\"26196\",\"guid\":{\"id\":{\"addr\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"8\",\"guid\":{\"id\":{\"addr\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": 
"{\"accept_collection_bid_events\":{\"counter\":\"30309\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"612\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"40543\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3848\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"113420\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11631\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "GziPDIxPViVKmcTdIO/LNrfAEGOLVL1z45HHLE9Rf8g=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "ListingStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::ListingStore", + "data": "{\"buy_events\":{\"counter\":\"60883\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"17\"}}},\"delete_listing_events\":{\"counter\":\"45529\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"16\"}}},\"insert_listing_events\":{\"counter\":\"132734\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"15\"}}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"113796454671434417\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10123", + "blockHeight": "283151355", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 306, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 65, + "totalBytes": 225 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 66, + "totalBytes": 258 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 1276 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 93, + "valueBytes": 144 + }, + { + 
"keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60", + "sequenceNumber": "13", + "maxGasAmount": "40", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1738092942" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "biddings_v2" + }, + "name": "unlist_and_accept_token_bid" + }, + "arguments": [ + "{\"inner\":\"0x6ecdec1173f2e79aeab37fc2e2bdd43f36d7c169a153a1bf6e05246b21457e90\"}", + "{\"inner\":\"0xe89fe77893a9bbbfb89d747005e621d4cc9e30b6a5c5f9b419c7f784a3307a5\"}" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::unlist_and_accept_token_bid" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "Oho059tQpX98PEOyKmD3H8tSrW20r6GzrfPObEwuyxA=", + "signature": "vVDRxEUVdUFd4E3fx9BwO6OTwDDrkjPTwtQvrVk5gUMzW+Xz9RRqThkv0G7dkLTMmRJqUu4UtjjXTAXYytjeDw==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0" + }, + "sequenceNumber": "8", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x6ecdec1173f2e79aeab37fc2e2bdd43f36d7c169a153a1bf6e05246b21457e90\",\"object\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\",\"to\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\"}" + }, + { + "key": { + "creationNumber": "16", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "45528", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "DeleteListingEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::DeleteListingEvent", + "data": "{\"listing\":{\"inner\":\"0x6ecdec1173f2e79aeab37fc2e2bdd43f36d7c169a153a1bf6e05246b21457e90\"},\"price\":\"6000000\",\"seller\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"timestamp\":\"1738092852915658\",\"token\":{\"inner\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7" + }, + "sequenceNumber": "26195", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"150000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "149202", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"75000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60" + }, + "sequenceNumber": "3", + "type": { + "type": "MOVE_TYPES_STRUCT", 
+ "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"2775000\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0" + }, + "sequenceNumber": "9", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"object\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\",\"to\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\"}" + }, + { + "key": { + "creationNumber": "20", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "611", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "AcceptTokenBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::AcceptTokenBidEvent", + "data": "{\"bid\":{\"inner\":\"0xe89fe77893a9bbbfb89d747005e621d4cc9e30b6a5c5f9b419c7f784a3307a5\"},\"bid_buyer\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"bid_seller\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"price\":\"3000000\",\"timestamp\":\"1738092852915658\",\"token\":{\"inner\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"8\",\"io_gas_units\":\"13\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"112160\",\"total_charge_gas_units\":\"20\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2296098846_tradeport_v2_accept_token_delist2.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2296098846_tradeport_v2_accept_token_delist2.json new file mode 100644 index 0000000000000..a14e859b17040 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2296098846_tradeport_v2_accept_token_delist2.json @@ -0,0 +1,535 @@ +{ + "timestamp": { + "seconds": "1738220719", + "nanos": 436414000 + }, + "version": "2296098846", + "info": { + "hash": "XYdEw2WNv2x9ySEALaSioNwidRn55KvGaWaKTzumyrQ=", + "stateChangeHash": "Aq1CW+vgW70zrlP0p8b0gBTP6zUDJauZpdD7hZwPz2o=", + "eventRootHash": "nW9G3nf6QHoHyddChDPvuomAWvnc9xDSdFpe9EQMGwQ=", + "gasUsed": "20", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "VF+NDhTFTYMsj1gOqjTZq/A2XRdlKFYzzif7wZpzfR4=", + "changes": [ + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x601d63ebac2606de806f319e03eebbc1d72e97d3636e565f6136fa3474e9dc06", + "stateKeyHash": "dBuhSGd4DZmV9cnzMysTbPalpiBZW9poQgAkRsyGk1I=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": 
{ + "address": "0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab", + "stateKeyHash": "1Fj8JHwzqA6z6TBA3RpdeSdjaF2YBUW9il+NLLzEIwM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"24268001\"},\"deposit_events\":{\"counter\":\"4\",\"guid\":{\"id\":{\"addr\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"7\",\"guid\":{\"id\":{\"addr\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab", + "stateKeyHash": "sekr1Wz4gFwqIvBIIEI+fklEo7ZEmzIrsGe9cMsSkOY=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"coin_register_events\":{\"counter\":\"2\",\"guid\":{\"id\":{\"addr\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"11\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"13\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4", + "stateKeyHash": "ia5CJAqvJ4lnS7nWhVMNNxhYGforZgoP0qtHcM/2+OM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"10862744439\"},\"deposit_events\":{\"counter\":\"149331\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"23\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0xd8cc665d62192ecea331471956c854a15a6db7a13ba94b6aa5d75be430af0458\",\"transfer_events\":{\"counter\":\"13\",\"guid\":{\"id\":{\"addr\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": 
"HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[{\"key\":\"elemental\",\"value\":{\"type\":9,\"value\":\"0x0446495245\"}},{\"key\":\"category\",\"value\":{\"type\":9,\"value\":\"0x096170746f6d696e676f\"}},{\"key\":\"grade\",\"value\":{\"type\":9,\"value\":\"0x06436f6d6d6f6e\"}},{\"key\":\"@probability_set\",\"value\":{\"type\":9,\"value\":\"0x0c67616d653a3a6578706f7274\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x4", + "module": "royalty", + "name": "Royalty" + }, + "typeStr": "0x4::royalty::Royalty", + "data": "{\"denominator\":\"100\",\"numerator\":\"5\",\"payee_address\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"},\"description\":\"I am a Sidekick, always standing by my Villain. When the bond between a Villain and me deepens, something special could happen!\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://public.vir.supervlabs.io/virweb/nft/sidekicks/mingo.png\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"291160\"},\"name\":{\"padding\":\"0x0000000000000000\",\"value\":\"Mingo #12541\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6", + "module": "sidekick", + "name": "Sidekick" + }, + "typeStr": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6::sidekick::Sidekick", + "data": "{\"salt\":\"0\",\"updated_at\":\"1738028896\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3", + "module": "orm_object", + "name": "OrmObject" + }, + "typeStr": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3::orm_object::OrmObject", + "data": 
"{\"class\":{\"inner\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"},\"creator\":{\"inner\":\"0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6\"},\"delete_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"extend_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"transfer_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0", + "stateKeyHash": "HCdN5ItTehT8qEKwjDoHoenlf/7CyjpOBc/oU7is2ak=", + "type": { + "address": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3", + "module": "orm_object", + "name": "OrmToken" + }, + "typeStr": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3::orm_object::OrmToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"property_mutator_ref\":{\"vec\":[{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}]},\"royalty_mutator_ref\":{\"vec\":[{\"inner\":{\"self\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7", + "stateKeyHash": "e3BUygmgtbbu9RwYj5qaEWzxrbT3Ynu8up05xX4yDTY=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"12367295121\"},\"deposit_events\":{\"counter\":\"26234\",\"guid\":{\"id\":{\"addr\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"8\",\"guid\":{\"id\":{\"addr\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xa4877fd357bbb3c4464765537ecd0936403d202002ae0c70f9808f94ecf5200c", + "stateKeyHash": "bfeY5/I80k/GA01HWAY8joxArU7SucTeaZ9A/gl2Nwk=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": 
"{\"accept_collection_bid_events\":{\"counter\":\"30363\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"624\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"40618\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3852\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"113573\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11661\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "GziPDIxPViVKmcTdIO/LNrfAEGOLVL1z45HHLE9Rf8g=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "ListingStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::ListingStore", + "data": "{\"buy_events\":{\"counter\":\"60918\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"17\"}}},\"delete_listing_events\":{\"counter\":\"45629\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"16\"}}},\"insert_listing_events\":{\"counter\":\"133182\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"15\"}}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"113819635731724186\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10140", + "blockHeight": "283771098", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 306, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 65, + "totalBytes": 225 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 66, + "totalBytes": 258 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 1276 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 93, + "valueBytes": 144 + }, + { + 
"keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab", + "sequenceNumber": "12", + "maxGasAmount": "40", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1738220808" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "biddings_v2" + }, + "name": "unlist_and_accept_token_bid" + }, + "arguments": [ + "{\"inner\":\"0x601d63ebac2606de806f319e03eebbc1d72e97d3636e565f6136fa3474e9dc06\"}", + "{\"inner\":\"0xa4877fd357bbb3c4464765537ecd0936403d202002ae0c70f9808f94ecf5200c\"}" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::unlist_and_accept_token_bid" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "mrCFAMGwEE717ZUin2MaY1c6M89i+nOZVtW6OSEOglg=", + "signature": "YhDHyryOBrQKQd+8kgd1EUQghcNKIWi6yDNL0EE00iEO1En6HGmVLlOs6G+m1dI+QzFza52JfcDWOag7An+MAA==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0" + }, + "sequenceNumber": "11", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x601d63ebac2606de806f319e03eebbc1d72e97d3636e565f6136fa3474e9dc06\",\"object\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\",\"to\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\"}" + }, + { + "key": { + "creationNumber": "16", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "45628", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "DeleteListingEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::DeleteListingEvent", + "data": "{\"listing\":{\"inner\":\"0x601d63ebac2606de806f319e03eebbc1d72e97d3636e565f6136fa3474e9dc06\"},\"price\":\"20000000\",\"seller\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"timestamp\":\"1738220719436414\",\"token\":{\"inner\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7" + }, + "sequenceNumber": "26233", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"100000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "149330", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"50000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab" + }, + "sequenceNumber": "3", + "type": { + "type": 
"MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"1850000\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0" + }, + "sequenceNumber": "12", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"object\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\",\"to\":\"0xd8cc665d62192ecea331471956c854a15a6db7a13ba94b6aa5d75be430af0458\"}" + }, + { + "key": { + "creationNumber": "20", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "623", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "AcceptTokenBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::AcceptTokenBidEvent", + "data": "{\"bid\":{\"inner\":\"0xa4877fd357bbb3c4464765537ecd0936403d202002ae0c70f9808f94ecf5200c\"},\"bid_buyer\":\"0xd8cc665d62192ecea331471956c854a15a6db7a13ba94b6aa5d75be430af0458\",\"bid_seller\":\"0x65d8eb0656a9f3c6479671b37e6243cd322a5a4f042ce9e43ac968c2a84197ab\",\"price\":\"2000000\",\"timestamp\":\"1738220719436414\",\"token\":{\"inner\":\"0x6bc9d89f72ecc22dbca30025a10bc888cb1c90d6d6ee0d90f2739e6043557bf0\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"8\",\"io_gas_units\":\"13\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"112160\",\"total_charge_gas_units\":\"20\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2296149225_tradeport_v2_accept_token_delist.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2296149225_tradeport_v2_accept_token_delist.json new file mode 100644 index 0000000000000..16f78150fdc59 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2296149225_tradeport_v2_accept_token_delist.json @@ -0,0 +1,563 @@ +{ + "timestamp": { + "seconds": "1738221494", + "nanos": 554632000 + }, + "version": "2296149225", + "info": { + "hash": "OJ6ZgbLo54vRkSXu/HW9TSPcEJcv2J3No/QXJ61yAqc=", + "stateChangeHash": "m2TbtUaYHUeNIOVPEbwMyNl4Oaot5Plg0ByCeGWXd2k=", + "eventRootHash": "1/GgkNvnRhzWMm4Zlix9gCwymUWyvkfmjJZGKOwiKZY=", + "gasUsed": "20", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "FuRYWYl0VFKM06JIyTxT7kvYYKsvdt7PmlAcOqCU9FU=", + "changes": [ + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x23eac30752bcdfdc610896e50abc56237234634802b8b7b8b7ba21dfe5390e6", + "stateKeyHash": "ffy8nV+QK9NSrAwf9ZJH0IfI1dwSfi1QKNmSxDW3HEw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": 
"TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x142d4aaf554ebbebb41d6fab20964a6a64b5d9396f6bdacd2533479e589c4963", + "stateKeyHash": "fzQAjY06qXuGkbnQDXZ+5g/JWscXSTpFn85NenANg+s=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e", + "stateKeyHash": "dOhExhE4nIEDqCFvENbEAIdihmjZWeY5MadZ5Jo/wug=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"80374668\"},\"deposit_events\":{\"counter\":\"5\",\"guid\":{\"id\":{\"addr\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"7\",\"guid\":{\"id\":{\"addr\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e", + "stateKeyHash": "sozZnJkSai2ZVg5q+PwY61kth8RVENG9+8at9WjitQo=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\",\"coin_register_events\":{\"counter\":\"2\",\"guid\":{\"id\":{\"addr\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"11\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"13\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4", + "stateKeyHash": "ia5CJAqvJ4lnS7nWhVMNNxhYGforZgoP0qtHcM/2+OM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"10863232439\"},\"deposit_events\":{\"counter\":\"149335\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"23\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7", + "stateKeyHash": "e3BUygmgtbbu9RwYj5qaEWzxrbT3Ynu8up05xX4yDTY=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": 
"0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"12368271121\"},\"deposit_events\":{\"counter\":\"26238\",\"guid\":{\"id\":{\"addr\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"8\",\"guid\":{\"id\":{\"addr\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"transfer_events\":{\"counter\":\"10\",\"guid\":{\"id\":{\"addr\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[{\"key\":\"elemental\",\"value\":{\"type\":9,\"value\":\"0x0446495245\"}},{\"key\":\"category\",\"value\":{\"type\":9,\"value\":\"0x03646f67\"}},{\"key\":\"grade\",\"value\":{\"type\":9,\"value\":\"0x06436f6d6d6f6e\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0x4", + "module": "royalty", + "name": "Royalty" + }, + "typeStr": "0x4::royalty::Royalty", + "data": "{\"denominator\":\"100\",\"numerator\":\"5\",\"payee_address\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"},\"description\":\"I am a Sidekick, always standing by my Villain. 
When the bond between a Villain and me deepens, something special could happen!\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://public.vir.supervlabs.io/virweb/nft/sidekicks/shiba.png\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"157156\"},\"name\":{\"padding\":\"0x000000000000000000\",\"value\":\"Shiba #7141\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6", + "module": "gacha_rounds", + "name": "GachaRoundLog" + }, + "typeStr": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6::gacha_rounds::GachaRoundLog", + "data": "{\"current_round\":\"117\",\"selected_round\":\"0\",\"updated_at\":\"1718721391549743\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6", + "module": "random", + "name": "RandOutputLog" + }, + "typeStr": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6::random::RandOutputLog", + "data": "{\"list\":[{\"max\":\"100000000\",\"min\":\"0\",\"result\":\"41967892\"},{\"max\":\"13\",\"min\":\"1\",\"result\":\"1\"}]}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6", + "module": "sidekick", + "name": "Sidekick" + }, + "typeStr": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6::sidekick::Sidekick", + "data": "{\"salt\":\"9588\",\"updated_at\":\"1718721391\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3", + "module": "orm_object", + "name": "OrmObject" + }, + "typeStr": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3::orm_object::OrmObject", + "data": "{\"class\":{\"inner\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"},\"creator\":{\"inner\":\"0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6\"},\"delete_ref\":{\"vec\":[{\"self\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}]},\"extend_ref\":{\"vec\":[{\"self\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}]},\"transfer_ref\":{\"vec\":[{\"self\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}]}}" + } + }, 
+ { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d", + "stateKeyHash": "ONRyFv1LAFKMA5VH4tL7lV3lruUBZ/3Catz0W/6ZysM=", + "type": { + "address": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3", + "module": "orm_object", + "name": "OrmToken" + }, + "typeStr": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3::orm_object::OrmToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}]},\"property_mutator_ref\":{\"vec\":[{\"self\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}]},\"royalty_mutator_ref\":{\"vec\":[{\"inner\":{\"self\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": "{\"accept_collection_bid_events\":{\"counter\":\"30363\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"627\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"40618\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3852\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"113573\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11661\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "GziPDIxPViVKmcTdIO/LNrfAEGOLVL1z45HHLE9Rf8g=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "ListingStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::ListingStore", + "data": "{\"buy_events\":{\"counter\":\"60919\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"17\"}}},\"delete_listing_events\":{\"counter\":\"45632\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"16\"}}},\"insert_listing_events\":{\"counter\":\"133182\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"15\"}}}}" + } + }, + { 
+ "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"113819573377132500\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10140", + "blockHeight": "283774916", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 306, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 65, + "totalBytes": 225 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 66, + "totalBytes": 258 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 1427 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 93, + "valueBytes": 144 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e", + "sequenceNumber": "12", + "maxGasAmount": "40", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1738221583" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "biddings_v2" + }, + "name": "unlist_and_accept_token_bid" + }, + "arguments": [ + "{\"inner\":\"0x23eac30752bcdfdc610896e50abc56237234634802b8b7b8b7ba21dfe5390e6\"}", + "{\"inner\":\"0x142d4aaf554ebbebb41d6fab20964a6a64b5d9396f6bdacd2533479e589c4963\"}" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::unlist_and_accept_token_bid" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "wT6+JAv+YgeoKCF1m7bIRaOXGxfZ1koNm8GRbNTrwEE=", + "signature": "cC2td9GtpSUZKQW6z7LHzOPGzNrh4OHheFAaK1oz/tD8mSeoxZhfumwmSP1duLYd+iDk40QgK6KHNcbaIMfWDw==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d" + }, + "sequenceNumber": "8", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x23eac30752bcdfdc610896e50abc56237234634802b8b7b8b7ba21dfe5390e6\",\"object\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\",\"to\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\"}" + }, + { + "key": { + "creationNumber": "16", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "45631", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": 
"listings_v2", + "name": "DeleteListingEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::DeleteListingEvent", + "data": "{\"listing\":{\"inner\":\"0x23eac30752bcdfdc610896e50abc56237234634802b8b7b8b7ba21dfe5390e6\"},\"price\":\"9000000\",\"seller\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\",\"timestamp\":\"1738221494554632\",\"token\":{\"inner\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7" + }, + "sequenceNumber": "26237", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"150000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "149334", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"75000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e" + }, + "sequenceNumber": "4", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"2775000\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d" + }, + "sequenceNumber": "9", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\",\"object\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\",\"to\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\"}" + }, + { + "key": { + "creationNumber": "20", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "626", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "AcceptTokenBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::AcceptTokenBidEvent", + "data": "{\"bid\":{\"inner\":\"0x142d4aaf554ebbebb41d6fab20964a6a64b5d9396f6bdacd2533479e589c4963\"},\"bid_buyer\":\"0x36f08a98302079f5bd60d811c0187171a7edaa352ae9907ae21a7014ff2dbc60\",\"bid_seller\":\"0x43d8f99000e4be09b1145832e9b1a98725fc7b4f63fd1ba3726b6266365fc39e\",\"price\":\"3000000\",\"timestamp\":\"1738221494554632\",\"token\":{\"inner\":\"0xdb3751353dd2d7edda6a3443a71a2656c2c477caebb3bce0ae50941741fcf12d\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": 
"{\"execution_gas_units\":\"8\",\"io_gas_units\":\"13\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"112160\",\"total_charge_gas_units\":\"20\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2298838662_tradeport_v2_fill_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2298838662_tradeport_v2_fill_offer.json new file mode 100644 index 0000000000000..24dcc62d5e265 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2298838662_tradeport_v2_fill_offer.json @@ -0,0 +1,563 @@ +{ + "timestamp": { + "seconds": "1738263799", + "nanos": 318155000 + }, + "version": "2298838662", + "info": { + "hash": "VgSAZQzJPcRT5ZEEPfpPrqZN/ZlbhezhBBg4tzxpTaI=", + "stateChangeHash": "Z54NcLdBrFvWcXJG/o50EbOyEjtHmYUchRGyMLD7Gfw=", + "eventRootHash": "pMh9oGidH5bQQ5IWOx0xRqlktyQERko8GUhUJPKJSoM=", + "gasUsed": "20", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "Y06dhpAMg+F9XZX0r8XRGgYVW9bDS/lqOZCPLg38m6Q=", + "changes": [ + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x1b4a34336112158ef3d58708f1d54da931c1ed9786c4b1f074498e2c19be4719", + "stateKeyHash": "FqthjBXFLLP/TmZC+mT60i5uzYGPhWPrun/rD5Ttwb0=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x4d575e42589f77401881170b9038fa0332fc8a40dd77bf260bf10023591ae891\",\"transfer_events\":{\"counter\":\"7\",\"guid\":{\"id\":{\"addr\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[{\"key\":\"elemental\",\"value\":{\"type\":9,\"value\":\"0x04574f4f44\"}},{\"key\":\"category\",\"value\":{\"type\":9,\"value\":\"0x056661697279\"}},{\"key\":\"grade\",\"value\":{\"type\":9,\"value\":\"0x06436f6d6d6f6e\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0x4", + "module": "royalty", + "name": "Royalty" + }, + "typeStr": "0x4::royalty::Royalty", + "data": "{\"denominator\":\"100\",\"numerator\":\"5\",\"payee_address\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0x4", + "module": 
"token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"},\"description\":\"I am a Sidekick, always standing by my Villain. When the bond between a Villain and me deepens, something special could happen!\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://public.vir.supervlabs.io/virweb/nft/sidekicks/greeny.png\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"66118\"},\"name\":{\"padding\":\"0x0000000000000000\",\"value\":\"Greeny #3578\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6", + "module": "gacha_rounds", + "name": "GachaRoundLog" + }, + "typeStr": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6::gacha_rounds::GachaRoundLog", + "data": "{\"current_round\":\"68\",\"selected_round\":\"0\",\"updated_at\":\"1713254600311352\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6", + "module": "random", + "name": "RandLog" + }, + "typeStr": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6::random::RandLog", + "data": "{\"list\":[{\"i1\":\"0xad68439dd08605fd29881911f64c9ac4ed7ad02f005875bbe9eeb1bf90648be7\",\"i2\":\"1713254600311352\",\"i3\":\"100\",\"i4\":\"0x554d6cde3dc8f2c994af260a7a2085f81291f10d569ca732476d5a16260c4824\",\"max\":\"100000000\",\"min\":\"0\"},{\"i1\":\"0xad68439dd08605fd29881911f64c9ac4ed7ad02f005875bbe9eeb1bf90648be7\",\"i2\":\"1713254600311352\",\"i3\":\"877\",\"i4\":\"0x554d6cde3dc8f2c994af260a7a2085f81291f10d569ca732476d5a16260c4824\",\"max\":\"13\",\"min\":\"1\"}]}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6", + "module": "sidekick", + "name": "Sidekick" + }, + "typeStr": "0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6::sidekick::Sidekick", + "data": "{\"salt\":\"100\",\"updated_at\":\"1713254600\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3", + "module": "orm_object", + "name": "OrmObject" + }, + "typeStr": 
"0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3::orm_object::OrmObject", + "data": "{\"class\":{\"inner\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\"},\"creator\":{\"inner\":\"0x9d518b9b84f327eafc5f6632200ea224a818a935ffd6be5d78ada250bbc44a6\"},\"delete_ref\":{\"vec\":[{\"self\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}]},\"extend_ref\":{\"vec\":[{\"self\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}]},\"transfer_ref\":{\"vec\":[{\"self\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f", + "stateKeyHash": "WDhAmkHG4vQjye5WlpB57hIJCbf76nKLzej0pIMBPOE=", + "type": { + "address": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3", + "module": "orm_object", + "name": "OrmToken" + }, + "typeStr": "0xfacedd48d64a2ee04cbdf5c17608bd7a5ea9144fa1fe65320c588fffea131de3::orm_object::OrmToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}]},\"property_mutator_ref\":{\"vec\":[{\"self\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}]},\"royalty_mutator_ref\":{\"vec\":[{\"inner\":{\"self\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72", + "stateKeyHash": "zK7ddVtDKFmYjpVmV/5JZ0Mz9FkHcsXtM1ln5JEZkhc=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"208065570\"},\"deposit_events\":{\"counter\":\"388\",\"guid\":{\"id\":{\"addr\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"2203\",\"guid\":{\"id\":{\"addr\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72", + "stateKeyHash": "Jyr6V4gr+I2XpvcJql/GdzPetJjxe8uBY3IQaSdXG9M=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\",\"coin_register_events\":{\"counter\":\"33\",\"guid\":{\"id\":{\"addr\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"1286\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"1270\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": 
"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4", + "stateKeyHash": "ia5CJAqvJ4lnS7nWhVMNNxhYGforZgoP0qtHcM/2+OM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"11011043110\"},\"deposit_events\":{\"counter\":\"149389\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"23\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7", + "stateKeyHash": "e3BUygmgtbbu9RwYj5qaEWzxrbT3Ynu8up05xX4yDTY=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"12374967220\"},\"deposit_events\":{\"counter\":\"26255\",\"guid\":{\"id\":{\"addr\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"8\",\"guid\":{\"id\":{\"addr\":\"0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xdb7392368c123f65922ba55c82cafab598aeea93ba83f8d217ef3a0093be54bf", + "stateKeyHash": "I1hG4NGDNmAcB5RDSmYjlBFA37Ws2z+9KIB6TV5f/CA=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": 
"{\"accept_collection_bid_events\":{\"counter\":\"30379\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"630\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"40624\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3852\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"113581\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11665\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "GziPDIxPViVKmcTdIO/LNrfAEGOLVL1z45HHLE9Rf8g=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "ListingStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::ListingStore", + "data": "{\"buy_events\":{\"counter\":\"60946\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"17\"}}},\"delete_listing_events\":{\"counter\":\"45650\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"16\"}}},\"insert_listing_events\":{\"counter\":\"133250\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"15\"}}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"113810766359769605\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10146", + "blockHeight": "283990492", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 306, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 65, + "totalBytes": 225 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 66, + "totalBytes": 258 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87 + }, + { + "keyBytes": 87, + "valueBytes": 1571 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 93, + "valueBytes": 144 + }, + { + 
"keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72", + "sequenceNumber": "1269", + "maxGasAmount": "40", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1738263888" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "biddings_v2" + }, + "name": "unlist_and_accept_token_bid" + }, + "arguments": [ + "{\"inner\":\"0x1b4a34336112158ef3d58708f1d54da931c1ed9786c4b1f074498e2c19be4719\"}", + "{\"inner\":\"0xdb7392368c123f65922ba55c82cafab598aeea93ba83f8d217ef3a0093be54bf\"}" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::unlist_and_accept_token_bid" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "SoRAJvPZNYM+urrYSi/D1rmbVEqoO+FSQ5E8s8TAuPI=", + "signature": "TEi5O+2IRwQndW8O8elebO4gHHMMjm0UBSpazyu2tJAGyqCRG0fFRLCnViofPvyvgRHJaRaF+TEgC1pUFsDyBA==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f" + }, + "sequenceNumber": "5", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x1b4a34336112158ef3d58708f1d54da931c1ed9786c4b1f074498e2c19be4719\",\"object\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\",\"to\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\"}" + }, + { + "key": { + "creationNumber": "16", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "45649", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "DeleteListingEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::DeleteListingEvent", + "data": "{\"listing\":{\"inner\":\"0x1b4a34336112158ef3d58708f1d54da931c1ed9786c4b1f074498e2c19be4719\"},\"price\":\"50000000\",\"seller\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\",\"timestamp\":\"1738263799318155\",\"token\":{\"inner\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x9d9ae026d65ad917bffcc6984370468e751ec3e9cd7a69f114c8a58c34d408b7" + }, + "sequenceNumber": "26254", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"500000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "149388", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"250000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72" + }, + "sequenceNumber": "387", + "type": { + "type": 
"MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"9250000\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f" + }, + "sequenceNumber": "6", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\",\"object\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\",\"to\":\"0x4d575e42589f77401881170b9038fa0332fc8a40dd77bf260bf10023591ae891\"}" + }, + { + "key": { + "creationNumber": "20", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "629", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "AcceptTokenBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::AcceptTokenBidEvent", + "data": "{\"bid\":{\"inner\":\"0xdb7392368c123f65922ba55c82cafab598aeea93ba83f8d217ef3a0093be54bf\"},\"bid_buyer\":\"0x4d575e42589f77401881170b9038fa0332fc8a40dd77bf260bf10023591ae891\",\"bid_seller\":\"0x5c85d808ce379ba504a346508c878f72080ded0aa8b0df48b8eb2966d47a0a72\",\"price\":\"10000000\",\"timestamp\":\"1738263799318155\",\"token\":{\"inner\":\"0x496627a8924237cbcf4ac7efd743eee5e6004ab35bbad6c64a9280fa6ebea26f\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"8\",\"io_gas_units\":\"13\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"112160\",\"total_charge_gas_units\":\"20\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2313248448_wapal_fill_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2313248448_wapal_fill_offer.json new file mode 100644 index 0000000000000..c537fdf6dbd17 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2313248448_wapal_fill_offer.json @@ -0,0 +1,545 @@ +{ + "timestamp": { + "seconds": "1738517570", + "nanos": 466815000 + }, + "version": "2313248448", + "info": { + "hash": "QwW/k6+7Wey4WQvPX4X024mevfiuf2h7HuSOqfZVzlg=", + "stateChangeHash": "9r33rTcrOLhks60eG5skep8lxn6LVJw6PBQbZqugGh8=", + "eventRootHash": "9ctX1/kHT4Tjh2riLd6VBT5UfhIRmCUL9zeLur8qrSk=", + "gasUsed": "30", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "nSK4Xn8kHl5nGB3QmNd4935jl2x8GOcMm1c/Xm0PzGg=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x22113f16f9b7c6761ef14df757c016b8736a9023e8881cd5e11579b0b98ef562", + "stateKeyHash": "fvjBO5IEqzNiVObviU8o0AcxtAqfZK4sUvsLkp1CzS0=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" 
+ } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"999640005\"},\"deposit_events\":{\"counter\":\"30\",\"guid\":{\"id\":{\"addr\":\"0x22113f16f9b7c6761ef14df757c016b8736a9023e8881cd5e11579b0b98ef562\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"51\",\"guid\":{\"id\":{\"addr\":\"0x22113f16f9b7c6761ef14df757c016b8736a9023e8881cd5e11579b0b98ef562\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x268d4a7a2ad93274edf6116f9f20ad8455223a7ab5fc73154f687e7dbc3e3ec6", + "stateKeyHash": "EMqTMQksaw5v/er1udU4S/BFYuz2Ov0YBKb63F/2uGY=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"28859565319\"},\"deposit_events\":{\"counter\":\"3639\",\"guid\":{\"id\":{\"addr\":\"0x268d4a7a2ad93274edf6116f9f20ad8455223a7ab5fc73154f687e7dbc3e3ec6\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"61\",\"guid\":{\"id\":{\"addr\":\"0x268d4a7a2ad93274edf6116f9f20ad8455223a7ab5fc73154f687e7dbc3e3ec6\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "stateKeyHash": "CnIoX/rU8s/MEWBmiMymIKAzkKuqzEhHPNEaXeZekPM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"42813802003\"},\"deposit_events\":{\"counter\":\"2495573\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"25\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": 
"{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"27576\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"77512\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"153879\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"73460\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"691082\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1770085\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9047\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77808\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": 
"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x9d14c489b6f56ac55e8707022400c23bb83bd0b0cd486c862defccf6241a219e", + "stateKeyHash": "/DTBy6biPnGSeHv/rQkM0XX26vmAgUHqPjvMnfc0OMU=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14", + "stateKeyHash": "ptGzC3WgeGpmQ+ufD2RK21sdAc+zXBgEJvJigOXurzM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"3216164648\"},\"deposit_events\":{\"counter\":\"17\",\"guid\":{\"id\":{\"addr\":\"0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"20\",\"guid\":{\"id\":{\"addr\":\"0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14", + "stateKeyHash": "5tYuOAAcvEMS78Vt6pXOsj8sT1fWVHQvPIqVgrZtRP4=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14\",\"coin_register_events\":{\"counter\":\"2\",\"guid\":{\"id\":{\"addr\":\"0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"22\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"74\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126", + "stateKeyHash": "9RqY26JbTo63xdxhhbTWbO5NYwQMyvUgsJj79715VoA=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": 
"{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x22113f16f9b7c6761ef14df757c016b8736a9023e8881cd5e11579b0b98ef562\",\"transfer_events\":{\"counter\":\"28\",\"guid\":{\"id\":{\"addr\":\"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126", + "stateKeyHash": "9RqY26JbTo63xdxhhbTWbO5NYwQMyvUgsJj79715VoA=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126\"}]},\"property_mutator_ref\":{\"self\":\"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126", + "stateKeyHash": "9RqY26JbTo63xdxhhbTWbO5NYwQMyvUgsJj79715VoA=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126", + "stateKeyHash": "9RqY26JbTo63xdxhhbTWbO5NYwQMyvUgsJj79715VoA=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"},\"description\":\"The Loonies is a collection of 5000 human PFPs. 
It's the official NFT collection by Wapal, the leading community-based NFT marketplace and launchpad on Aptos.\\n\\nDITCH LIMITS ⚡️\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://arweave.net/p_hpKSyXV3YXXhdDPOXQGsAYuI5YKcKitOm9DO9xAuI/399.json\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126", + "stateKeyHash": "9RqY26JbTo63xdxhhbTWbO5NYwQMyvUgsJj79715VoA=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"1891\"},\"name\":{\"padding\":\"0x00000000\",\"value\":\"The Loonies #399\"}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"113842588164654499\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10182", + "blockHeight": "285278421", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 313, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 63, + "totalBytes": 359 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 87, + "valueBytes": 870 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14", + "sequenceNumber": "73", + "maxGasAmount": "60", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1738517660" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "name": "token_offer" + }, + "name": "sell_tokenv2" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "{\"inner\":\"0x9d14c489b6f56ac55e8707022400c23bb83bd0b0cd486c862defccf6241a219e\"}" + ], + "entryFunctionIdStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::token_offer::sell_tokenv2" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "uK9WDzFH5bPIaObVg+RiA4pDXiMzeB0maYco7fcDees=", + "signature": "tqwhIyCdDcZTNVAP+w7h3xyOLEQvyov3haKGlccqkmnCrtPHk9zMZqqUARr+83WP1BvdKW5xvyGPotH0UO0lCQ==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": 
"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126" + }, + "sequenceNumber": "27", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14\",\"object\":\"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126\",\"to\":\"0x22113f16f9b7c6761ef14df757c016b8736a9023e8881cd5e11579b0b98ef562\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x268d4a7a2ad93274edf6116f9f20ad8455223a7ab5fc73154f687e7dbc3e3ec6" + }, + "sequenceNumber": "3638", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"142800000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9" + }, + "sequenceNumber": "2495572", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"51000000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14" + }, + "sequenceNumber": "16", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"3206200000\"}" + }, + { + "key": { + "creationNumber": "1125899906842635", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "599", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "TokenOfferFilledEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::TokenOfferFilledEvent", + "data": "{\"commission\":\"51000000\",\"price\":\"3400000000\",\"purchaser\":\"0x22113f16f9b7c6761ef14df757c016b8736a9023e8881cd5e11579b0b98ef562\",\"royalties\":\"142800000\",\"seller\":\"0xc60f124dc24f4ea97232bc5ead5f37252b7cbee47f48ef05932998050c414d14\",\"token_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"}]},\"collection_name\":\"The Loonies\",\"creator_address\":\"0xf54f8f7ffc2b779d81b721b3d42fe9a53f96e1d3459a8001934307783d493725\",\"property_version\":{\"vec\":[]},\"token\":{\"vec\":[{\"inner\":\"0xc821b5c1712fca97553c85830b91dc212cd2fcdd2a2490b65f945ed901d9f126\"}]},\"token_name\":\"The Loonies #399\"},\"token_offer\":\"0x9d14c489b6f56ac55e8707022400c23bb83bd0b0cd486c862defccf6241a219e\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x22113f16f9b7c6761ef14df757c016b8736a9023e8881cd5e11579b0b98ef562" + }, + "sequenceNumber": "29", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"0\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": 
"0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"8\",\"io_gas_units\":\"23\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"63160\",\"total_charge_gas_units\":\"30\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2381742315_wapal_cancel_listing.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2381742315_wapal_cancel_listing.json new file mode 100644 index 0000000000000..9de637714a0f0 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2381742315_wapal_cancel_listing.json @@ -0,0 +1,377 @@ +{ + "timestamp": { + "seconds": "1739898952", + "nanos": 960045000 + }, + "version": "2381742315", + "info": { + "hash": "2UQ2DoppaZkgYq3srOjz+PI3GFV8d5HZ675y9n1UrXE=", + "stateChangeHash": "BUzJXtEm8TXDSMgt11iDoGX6CSaN7qtCR0IaohZ6cuA=", + "eventRootHash": "Mjz4qFSGWKrk5eokDDA9E6yJCKdqGSnpmad1J4QJ7ms=", + "gasUsed": "14", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "bWvP4BN3OPoB0UfzBmuwZmmKr4noLnWccG6NozLkgcc=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293", + "stateKeyHash": "hDtQiei/k4yNgRlJmtYF3cpxy4EtQoquj19vhTTf0fI=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e\",\"transfer_events\":{\"counter\":\"11\",\"guid\":{\"id\":{\"addr\":\"0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293", + "stateKeyHash": "hDtQiei/k4yNgRlJmtYF3cpxy4EtQoquj19vhTTf0fI=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293\"}]},\"property_mutator_ref\":{\"self\":\"0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293", + "stateKeyHash": "hDtQiei/k4yNgRlJmtYF3cpxy4EtQoquj19vhTTf0fI=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293", + "stateKeyHash": "hDtQiei/k4yNgRlJmtYF3cpxy4EtQoquj19vhTTf0fI=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": 
"{\"collection\":{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"},\"description\":\"The Loonies is a collection of 5000 human PFPs. It's the official NFT collection by Wapal, the leading community-based NFT marketplace and launchpad on Aptos.\\n\\nDITCH LIMITS ⚡️\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://arweave.net/p_hpKSyXV3YXXhdDPOXQGsAYuI5YKcKitOm9DO9xAuI/3210.json\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293", + "stateKeyHash": "hDtQiei/k4yNgRlJmtYF3cpxy4EtQoquj19vhTTf0fI=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"1351\"},\"name\":{\"padding\":\"0x000000\",\"value\":\"The Loonies #3210\"}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x560197dcdc27af1cadc1cc75b51d9f0e3a0f40d7a761397c13bfdb4097924c1f", + "stateKeyHash": "OVL21jmU2HHxcnUxCQtaRFQH9Co9kMiZl0SOEkQvfPQ=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": 
"{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28184\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78048\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154728\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74311\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692261\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773300\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9075\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77859\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": 
"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e", + "stateKeyHash": "ogHDIkWjeyR9OvNcu0982yW6p1O2r/U4Joileq2fiyg=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"17784903\"},\"deposit_events\":{\"counter\":\"10\",\"guid\":{\"id\":{\"addr\":\"0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"5\",\"guid\":{\"id\":{\"addr\":\"0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e", + "stateKeyHash": "v9CRc6lWl+PSrmuF/JoB6YJ2Jjd0KQpWmBZjenGYSq8=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e\",\"coin_register_events\":{\"counter\":\"5\",\"guid\":{\"id\":{\"addr\":\"0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"15\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"194\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114060193964301574\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10374", + "blockHeight": "292395841", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 317, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 62, + "totalBytes": 323 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87, + 
"valueBytes": 871 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e", + "sequenceNumber": "193", + "maxGasAmount": "28", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739899042" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "name": "coin_listing" + }, + "name": "end_fixed_price" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "{\"inner\":\"0x560197dcdc27af1cadc1cc75b51d9f0e3a0f40d7a761397c13bfdb4097924c1f\"}" + ], + "entryFunctionIdStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::coin_listing::end_fixed_price" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "MVUQCW2GGstiVSBD99QdWIqTpnwtkQLFI+HCerNpAs0=", + "signature": "VclecH5C6jq7VCx5oHjrvQoxJZXl9BkW8WZsAtTufEKxMFbioAFr56a/Z9NB7HajbgnKHgzYZw9JZ4NAiQqWBA==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x00a8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293" + }, + "sequenceNumber": "10", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x560197dcdc27af1cadc1cc75b51d9f0e3a0f40d7a761397c13bfdb4097924c1f\",\"object\":\"0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293\",\"to\":\"0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e\"}" + }, + { + "key": { + "creationNumber": "1125899906842628", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "74310", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "ListingCanceledEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::ListingCanceledEvent", + "data": "{\"listing\":\"0x560197dcdc27af1cadc1cc75b51d9f0e3a0f40d7a761397c13bfdb4097924c1f\",\"price\":\"3200000000\",\"seller\":\"0xecd896bfa7eae31fb5085dca8e2f3c88ea3577bd54fafeaeb4ad6ede1e13e81e\",\"token_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"}]},\"collection_name\":\"The Loonies\",\"creator_address\":\"0xf54f8f7ffc2b779d81b721b3d42fe9a53f96e1d3459a8001934307783d493725\",\"property_version\":{\"vec\":[]},\"token\":{\"vec\":[{\"inner\":\"0xa8b76ee68f7574dafb6f19988880c16571ccd10ac159a8684067a9fc0df293\"}]},\"token_name\":\"The Loonies #3210\"},\"type\":\"fixed price\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": 
"{\"execution_gas_units\":\"6\",\"io_gas_units\":\"8\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"62640\",\"total_charge_gas_units\":\"14\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2381810159_wapal_cancel_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2381810159_wapal_cancel_offer.json new file mode 100644 index 0000000000000..94988e8b10419 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2381810159_wapal_cancel_offer.json @@ -0,0 +1,303 @@ +{ + "timestamp": { + "seconds": "1739900115", + "nanos": 692708000 + }, + "version": "2381810159", + "info": { + "hash": "MG+F8DhmoVBjgt8PfnSyDr7vADgJlYl6cxh9JyEmcjo=", + "stateChangeHash": "1Z8OxWFz2tATl+HGaQpJvvNCALsAcwnY8x98QlWZUv8=", + "eventRootHash": "EuB5AwnM/hLmcwSHaRxTcfbvBfsPNI/p/EdsdXci88Q=", + "gasUsed": "16", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "Sukr33AJB9dKqGQetU/br84wRLHVSwYaptw/C4UQi5c=", + "changes": [ + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x6132638f6078196e0a2c554cf195f08f73b56e672134c429cdda7f04272c3b13", + "stateKeyHash": "zwqNfbxj51JIyaK3jZ1MRoRgu8a3Qk0IVzBZhcXaA94=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a", + "stateKeyHash": "d+XtQCwiMzFkcm6d61f74jrLW7JkcBH1/qUZYMp6HBM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"30097930219\"},\"deposit_events\":{\"counter\":\"2189\",\"guid\":{\"id\":{\"addr\":\"0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"2319\",\"guid\":{\"id\":{\"addr\":\"0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a", + "stateKeyHash": "+U88S6d+WvvLdwXhIE/tizv1Q8COIfN9raV8YUtilPM=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a\",\"coin_register_events\":{\"counter\":\"4\",\"guid\":{\"id\":{\"addr\":\"0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"1085\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"5797\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": 
"auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": "{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28184\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78048\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154728\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74311\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692261\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773300\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9078\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77859\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": 
"{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114059809480760161\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10374", + "blockHeight": "292401684", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 307, + "eventSizeInfo": [ + { + "typeTagBytes": 65, + "totalBytes": 314 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a", + "sequenceNumber": "5796", + "maxGasAmount": "20", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739900203" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "name": "token_offer" + }, + "name": "cancel" + }, + "typeArguments": 
[ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "{\"inner\":\"0x6132638f6078196e0a2c554cf195f08f73b56e672134c429cdda7f04272c3b13\"}" + ], + "entryFunctionIdStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::token_offer::cancel" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "3ljDucX/Hr+Fkwogem+SamNccf64m4DVjhhSCstUiCg=", + "signature": "ue4qM65zil8dd+P2OKTWRJZ0rVXEevt0TBdV/rc1xdFGsr1NTy2I+19t+JD1KVX+D0NoZs2p3L3Be17oZcHUDw==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842634", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "9077", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "TokenOfferCanceledEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::TokenOfferCanceledEvent", + "data": "{\"price\":\"2800000000\",\"purchaser\":\"0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a\",\"token_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"}]},\"collection_name\":\"The Loonies\",\"creator_address\":\"0xf54f8f7ffc2b779d81b721b3d42fe9a53f96e1d3459a8001934307783d493725\",\"property_version\":{\"vec\":[]},\"token\":{\"vec\":[{\"inner\":\"0x5eebc18e7f35cc5db4abd90631f35d0454cf0181fbd0042c023e858372b2e2a8\"}]},\"token_name\":\"The Loonies #1768\"},\"token_offer\":\"0x6132638f6078196e0a2c554cf195f08f73b56e672134c429cdda7f04272c3b13\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6ce9d41971de334c94541fcdd8f4cd2a69456057d444192e1257dacfc528ca0a" + }, + "sequenceNumber": "2188", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"2800000000\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"6\",\"io_gas_units\":\"10\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"63160\",\"total_charge_gas_units\":\"16\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382219668_wapal_fill_collection_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382219668_wapal_fill_collection_offer.json new file mode 100644 index 0000000000000..ca08fb35ca472 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382219668_wapal_fill_collection_offer.json @@ -0,0 +1,546 @@ +{ + "timestamp": { + "seconds": "1739908306", + "nanos": 707377000 + }, + "version": "2382219668", + "info": { + "hash": "+MKB6Qj8OiS19jwtMhGw5m9r47+9gNAZYsTBr8TrmDE=", + "stateChangeHash": "I8VT3jh9YPG34kGuWf1HD+b7+YAbsC0vhnm5IZy62to=", + "eventRootHash": "buSvd8Je9CaTvm4W5h5VHnKjlG3761NOEyEyk0GIbCQ=", + "gasUsed": "28", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": 
"yTp/aFGBASzPT5rfYRqmdhZQQXGc+ajAJwa9DsH6MAI=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x268d4a7a2ad93274edf6116f9f20ad8455223a7ab5fc73154f687e7dbc3e3ec6", + "stateKeyHash": "EMqTMQksaw5v/er1udU4S/BFYuz2Ov0YBKb63F/2uGY=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"17040483549\"},\"deposit_events\":{\"counter\":\"3747\",\"guid\":{\"id\":{\"addr\":\"0x268d4a7a2ad93274edf6116f9f20ad8455223a7ab5fc73154f687e7dbc3e3ec6\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"64\",\"guid\":{\"id\":{\"addr\":\"0x268d4a7a2ad93274edf6116f9f20ad8455223a7ab5fc73154f687e7dbc3e3ec6\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "stateKeyHash": "CnIoX/rU8s/MEWBmiMymIKAzkKuqzEhHPNEaXeZekPM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"43281703969\"},\"deposit_events\":{\"counter\":\"2500623\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"26\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2", + "stateKeyHash": "itoiDuFCtmfSP99PbS3l4EdeiQsxFiLiy64mY29NGr8=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"2453838459\"},\"deposit_events\":{\"counter\":\"209\",\"guid\":{\"id\":{\"addr\":\"0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"495\",\"guid\":{\"id\":{\"addr\":\"0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2", + "stateKeyHash": "oZeuTwgXWy+0nfNAJ8jTCLJo7xd35ZEISCLeLgWdPGY=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": 
"{\"authentication_key\":\"0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2\",\"coin_register_events\":{\"counter\":\"38\",\"guid\":{\"id\":{\"addr\":\"0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"159\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"1254\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": "{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28185\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78049\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154729\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74311\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692269\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773310\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9078\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77859\",\"guid\":{\"id\":{\"
addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x8eb23351a1e2a9ed5d5516d6a592ec8fe8e3beeaedd7a7de9cdfc1cc8d9a0d73", + "stateKeyHash": "t0yeYU2yLxg3Vo5NTssJbigV2Umsbf5AIDdSg6Vg6l8=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xd098ed4c8a4fecfd082149216a1029ff1ed3d4dffd628348b018ed00036eec22", + "stateKeyHash": "lsMQkeGa+zqCLJ+m0TGLUNxxvWPi3OPTWBOhKXKwtqo=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": 
"{\"coin\":{\"value\":\"5172029151\"},\"deposit_events\":{\"counter\":\"3931\",\"guid\":{\"id\":{\"addr\":\"0xd098ed4c8a4fecfd082149216a1029ff1ed3d4dffd628348b018ed00036eec22\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"3424\",\"guid\":{\"id\":{\"addr\":\"0xd098ed4c8a4fecfd082149216a1029ff1ed3d4dffd628348b018ed00036eec22\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229", + "stateKeyHash": "7SwsaRZxhJCswnE26sxDg3YTLXwctifP/Fj3a6uPIMs=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0xd098ed4c8a4fecfd082149216a1029ff1ed3d4dffd628348b018ed00036eec22\",\"transfer_events\":{\"counter\":\"22\",\"guid\":{\"id\":{\"addr\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229", + "stateKeyHash": "7SwsaRZxhJCswnE26sxDg3YTLXwctifP/Fj3a6uPIMs=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}]},\"property_mutator_ref\":{\"self\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229", + "stateKeyHash": "7SwsaRZxhJCswnE26sxDg3YTLXwctifP/Fj3a6uPIMs=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229", + "stateKeyHash": "7SwsaRZxhJCswnE26sxDg3YTLXwctifP/Fj3a6uPIMs=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"},\"description\":\"The Loonies is a collection of 5000 human PFPs. 
It's the official NFT collection by Wapal, the leading community-based NFT marketplace and launchpad on Aptos.\\n\\nDITCH LIMITS ⚡️\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://arweave.net/p_hpKSyXV3YXXhdDPOXQGsAYuI5YKcKitOm9DO9xAuI/1339.json\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229", + "stateKeyHash": "7SwsaRZxhJCswnE26sxDg3YTLXwctifP/Fj3a6uPIMs=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"1487\"},\"name\":{\"padding\":\"0x000000\",\"value\":\"The Loonies #1339\"}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114062601547091801\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10375", + "blockHeight": "292443982", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 351, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 68, + "totalBytes": 365 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 871 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2", + "sequenceNumber": "1253", + "maxGasAmount": "56", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739908397" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "name": "collection_offer" + }, + "name": "sell_tokenv2" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "{\"inner\":\"0x8eb23351a1e2a9ed5d5516d6a592ec8fe8e3beeaedd7a7de9cdfc1cc8d9a0d73\"}", + "{\"inner\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}" + ], + "entryFunctionIdStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::collection_offer::sell_tokenv2" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "Qs7FhKPbf/RNzbM1bNjiazhvB3PjEtSJ20y4dHOq0vg=", + "signature": "3GbharcvKQrsnxC8oaAgNHHAGcD0ZsqZ9Er2TOVEP0NikfGtK0/M3ExiGCGyZIPIQHDbXFp9mWVWpV00r6HQBw==" + } 
+ } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229" + }, + "sequenceNumber": "21", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2\",\"object\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\",\"to\":\"0xd098ed4c8a4fecfd082149216a1029ff1ed3d4dffd628348b018ed00036eec22\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x268d4a7a2ad93274edf6116f9f20ad8455223a7ab5fc73154f687e7dbc3e3ec6" + }, + "sequenceNumber": "3746", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"107121420\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9" + }, + "sequenceNumber": "2500622", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"38257650\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2" + }, + "sequenceNumber": "208", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"2405130930\"}" + }, + { + "key": { + "creationNumber": "1125899906842632", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "78048", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "CollectionOfferFilledEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::CollectionOfferFilledEvent", + "data": "{\"collection_offer\":\"0x8eb23351a1e2a9ed5d5516d6a592ec8fe8e3beeaedd7a7de9cdfc1cc8d9a0d73\",\"commission\":\"38257650\",\"price\":\"2550510000\",\"purchaser\":\"0xd098ed4c8a4fecfd082149216a1029ff1ed3d4dffd628348b018ed00036eec22\",\"royalties\":\"107121420\",\"seller\":\"0x67d1d1e79787a57df644b192f67959be26bb0330a5b8d782f13a324d86ee91b2\",\"token_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"}]},\"collection_name\":\"The Loonies\",\"creator_address\":\"0xf54f8f7ffc2b779d81b721b3d42fe9a53f96e1d3459a8001934307783d493725\",\"property_version\":{\"vec\":[]},\"token\":{\"vec\":[{\"inner\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}]},\"token_name\":\"The Loonies #1339\"}}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0xd098ed4c8a4fecfd082149216a1029ff1ed3d4dffd628348b018ed00036eec22" + }, + "sequenceNumber": "3930", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"0\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": 
"0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"8\",\"io_gas_units\":\"21\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"64480\",\"total_charge_gas_units\":\"28\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382221134_wapal_fill_listing.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382221134_wapal_fill_listing.json new file mode 100644 index 0000000000000..27133c45ab067 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382221134_wapal_fill_listing.json @@ -0,0 +1,1068 @@ +{ + "timestamp": { + "seconds": "1739908340", + "nanos": 290288000 + }, + "version": "2382221134", + "info": { + "hash": "o00Rn5rOAba9n5bT1228VC2fG45E6g/UhVywONacfY8=", + "stateChangeHash": "bB6knofg8ZJ+X8/STy7IVG1I1LlzJ1RxuPog7MfbeOw=", + "eventRootHash": "/ox3zDtxc7GJZHTyDNGcsP9UG+xILHdbC1+arg6E6yA=", + "gasUsed": "1493", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "sExdM9XKpPF0stElF06s2QPiqQz8Vh2+1EJYjSpst9E=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4", + "stateKeyHash": "kSJiqPFCska3SlG2fNyd46mssC9QXZVqf1lwRkrXuCM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"36521137143\"},\"deposit_events\":{\"counter\":\"7157\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"7235\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "stateKeyHash": "CnIoX/rU8s/MEWBmiMymIKAzkKuqzEhHPNEaXeZekPM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"43287672469\"},\"deposit_events\":{\"counter\":\"2500624\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"26\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x5aa4efe9703ffba6082906721756da68b9f7e8d256ed719cf49c40cab3938650", + "stateKeyHash": "jrLGIAldjKLN0xPzrpNQXv1b0WzZqLZ1ZONOMlpgat0=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + 
} + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"13725967085\"},\"deposit_events\":{\"counter\":\"14406\",\"guid\":{\"id\":{\"addr\":\"0x5aa4efe9703ffba6082906721756da68b9f7e8d256ed719cf49c40cab3938650\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"24\",\"guid\":{\"id\":{\"addr\":\"0x5aa4efe9703ffba6082906721756da68b9f7e8d256ed719cf49c40cab3938650\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4", + "stateKeyHash": "ia5CJAqvJ4lnS7nWhVMNNxhYGforZgoP0qtHcM/2+OM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"11614494730\"},\"deposit_events\":{\"counter\":\"150764\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"24\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": 
"{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28185\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78049\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154729\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74311\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692270\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773310\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9078\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77859\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": 
"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726", + "stateKeyHash": "ht3PjI67UKgO0puYF4UkTR7CD79lMale6wrkehknQUw=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"919936045\"},\"deposit_events\":{\"counter\":\"525\",\"guid\":{\"id\":{\"addr\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"835\",\"guid\":{\"id\":{\"addr\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726", + "stateKeyHash": "Nn23UmPeaegsjXCSgXInfZQyAs9hE8t1NzHNfVCdAoE=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"coin_register_events\":{\"counter\":\"92\",\"guid\":{\"id\":{\"addr\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"233\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"971\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726", + "stateKeyHash": "qdvdLgJUTcYsaIjKEb/cDXw3T88Q/PUAgYRAm0CTE4A=", + "type": { + "address": "0x3", + "module": "token", + "name": "TokenStore" + }, + "typeStr": "0x3::token::TokenStore", + "data": 
"{\"burn_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"creation_num\":\"6\"}}},\"deposit_events\":{\"counter\":\"132\",\"guid\":{\"id\":{\"addr\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"creation_num\":\"4\"}}},\"direct_transfer\":false,\"mutate_token_property_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"creation_num\":\"7\"}}},\"tokens\":{\"handle\":\"0xaadb0671fadd64716243e40ca76a42e2147827be09ea16a9e99d795b37b463d1\"},\"withdraw_events\":{\"counter\":\"129\",\"guid\":{\"id\":{\"addr\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"creation_num\":\"5\"}}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x7d618afb24770a0491e128c2d4db80598471795d76ed7a5bc0b88067b5f28d0d", + "stateKeyHash": "Z4B/6tZXvmuKL9X3KXJ0no3vWC1DQqH3uGkGuo8C0bI=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b", + "stateKeyHash": "5b0VaahYzzDgHb/FFnkMnUEShP6MG5h2KtgHmd1TO7s=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"1234594428\"},\"deposit_events\":{\"counter\":\"973\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"1065\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "yqjAt6XexkjhclahvRVH6qR4Q/72t7jBQVorKxeECtI=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings", + "name": "ListingStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings::ListingStore", + "data": "{\"buy_events\":{\"counter\":\"20669\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"7\"}}},\"delete_listing_events\":{\"counter\":\"40260\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"6\"}}},\"insert_listing_events\":{\"counter\":\"72981\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"4\"}}},\"listings\":{\"handle\":\"0xefdd9245f9b45b09ca9cc0fa7dbe5214cb44b7dbac49d9eafeacedc9b5fec21e\"},\"update_listing_events\":{\"counter\":\"1136\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"5\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xed4f5679c427845c8695a6a3ef7b31af448cf90c0d1b3e35653d8acf2fffd010", + "stateKeyHash": "zKuNaYlml9QP9p1H23M3PI1vCE1sp6mY4WNMnC7uhBs=", + "type": { + "address": "0x1", + 
"module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"855878250\"},\"deposit_events\":{\"counter\":\"1757\",\"guid\":{\"id\":{\"addr\":\"0xed4f5679c427845c8695a6a3ef7b31af448cf90c0d1b3e35653d8acf2fffd010\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"2209\",\"guid\":{\"id\":{\"addr\":\"0xed4f5679c427845c8695a6a3ef7b31af448cf90c0d1b3e35653d8acf2fffd010\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xf244211ddd471bd7151410e4b4ecd8d22bb70eb3fee2736fdd3fdfe77bc6c4fd", + "stateKeyHash": "TR997ODfknzVuDOYGDUpuHUc6MxfuOouwJThxBC+PrE=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114062610335615490\"", + "valueType": "u128" + } + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "sYYoTPTUcPVsm11JZH4I7PSvFZYNKXrwljb1Dk14I5M=", + "handle": "0xaadb0671fadd64716243e40ca76a42e2147827be09ea16a9e99d795b37b463d1", + "key": "0x043ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef20a427275682042656172730f4272756820426561722023333432380100000000000000", + "data": { + "key": "{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3428\"}}", + "keyType": "0x3::token::TokenId", + "value": "{\"amount\":\"1\",\"id\":{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3428\"}},\"token_properties\":{\"map\":{\"data\":[{\"key\":\"level\",\"value\":{\"type\":\"u64\",\"value\":\"0xd201000000000000\"}}]}}}", + "valueType": "0x3::token::Token" + } + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "akdpyEl/CHmID5XuEK+pQL//vVNrm9Bq7sE8znqMRg4=", + "handle": "0xaadb0671fadd64716243e40ca76a42e2147827be09ea16a9e99d795b37b463d1", + "key": "0x043ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef20a427275682042656172730f4272756820426561722023333737300100000000000000", + "data": { + "key": "{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3770\"}}", + "keyType": "0x3::token::TokenId", + "value": "{\"amount\":\"1\",\"id\":{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3770\"}},\"token_properties\":{\"map\":{\"data\":[{\"key\":\"level\",\"value\":{\"type\":\"u64\",\"value\":\"0x4800000000000000\"}}]}}}", + "valueType": "0x3::token::Token" + } + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + 
"writeTableItem": { + "stateKeyHash": "Ox1dOHO+BJeab6+IMjlAutgjKiFBi1bgpOUAGfBHjWw=", + "handle": "0xaadb0671fadd64716243e40ca76a42e2147827be09ea16a9e99d795b37b463d1", + "key": "0x043ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef20a427275682042656172730f4272756820426561722023343038340100000000000000", + "data": { + "key": "{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #4084\"}}", + "keyType": "0x3::token::TokenId", + "value": "{\"amount\":\"1\",\"id\":{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #4084\"}},\"token_properties\":{\"map\":{\"data\":[{\"key\":\"level\",\"value\":{\"type\":\"u64\",\"value\":\"0x7402000000000000\"}}]}}}", + "valueType": "0x3::token::Token" + } + } + }, + { + "type": "TYPE_DELETE_TABLE_ITEM", + "deleteTableItem": { + "stateKeyHash": "UC+KzK4XJpqSZO4uuxvyqGRpz/u0p37Mr6HiL6hmnts=", + "handle": "0xefdd9245f9b45b09ca9cc0fa7dbe5214cb44b7dbac49d9eafeacedc9b5fec21e", + "key": "0x043ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef20a427275682042656172730f4272756820426561722023333432380100000000000000", + "data": { + "key": "{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3428\"}}", + "keyType": "0x3::token::TokenId" + } + } + }, + { + "type": "TYPE_DELETE_TABLE_ITEM", + "deleteTableItem": { + "stateKeyHash": "DtpEl4VIE/GMpuCrS3yRq00mgCysYfcbsU9nUyC7j2s=", + "handle": "0xefdd9245f9b45b09ca9cc0fa7dbe5214cb44b7dbac49d9eafeacedc9b5fec21e", + "key": "0x043ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef20a427275682042656172730f4272756820426561722023333737300100000000000000", + "data": { + "key": "{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3770\"}}", + "keyType": "0x3::token::TokenId" + } + } + } + ] + }, + "epoch": "10375", + "blockHeight": "292444152", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 760, + "eventSizeInfo": [ + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 176 + }, + { + "typeTagBytes": 52, + "totalBytes": 247 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 53, + "totalBytes": 176 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 60, + "totalBytes": 310 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 176 + }, + { + "typeTagBytes": 52, + "totalBytes": 247 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + 
"typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 85, + "valueBytes": 225 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 90, + "valueBytes": 224 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 66, + "valueBytes": 16 + }, + { + "keyBytes": 101, + "valueBytes": 95 + }, + { + "keyBytes": 101, + "valueBytes": 95 + }, + { + "keyBytes": 101, + "valueBytes": 95 + }, + { + "keyBytes": 101 + }, + { + "keyBytes": 101 + } + ] + }, + "user": { + "request": { + "sender": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726", + "sequenceNumber": "970", + "maxGasAmount": "2986", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739908430" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "markets_v2" + }, + "name": "buy_tokens_v2" + }, + "arguments": [ + "[\"1\",\"1\",\"1\"]", + "[\"TradePort\",\"Wapal\",\"TradePort\"]", + "[\"0xed4f5679c427845c8695a6a3ef7b31af448cf90c0d1b3e35653d8acf2fffd010\",\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\"]", + "[\"398000000\",\"397900000\",\"398000000\"]", + "[\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\"]", + "[\"Bruh Bears\",\"Bruh Bears\",\"Bruh Bears\"]", + "[\"Bruh Bear #3428\",\"Bruh Bear #4084\",\"Bruh Bear #3770\"]", + "[\"1\",\"1\",\"1\"]", + "[\"0\",\"0\",\"0\"]", + "[\"0x0\",\"0x7d618afb24770a0491e128c2d4db80598471795d76ed7a5bc0b88067b5f28d0d\",\"0x0\"]" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::markets_v2::buy_tokens_v2" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "gQPVGQ+FdanHp4wNkHWdFD66oCkhdV699h4aXkIfwrs=", + "signature": "skqZkrqXeQJPqj72OAPxi+VWBdwaB4KhnDnQLl2edYB6MnqbQxrl1Zh/eNPBy6/M/VHN5FwVjUHsGHRr/Ur1Bg==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "3", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "827", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"21890000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x5aa4efe9703ffba6082906721756da68b9f7e8d256ed719cf49c40cab3938650" + }, + "sequenceNumber": "14403", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"21890000\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": 
"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "828", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"9950000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "150761", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"9950000\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "829", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"366160000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0xed4f5679c427845c8695a6a3ef7b31af448cf90c0d1b3e35653d8acf2fffd010" + }, + "sequenceNumber": "1756", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"366160000\"}" + }, + { + "key": { + "creationNumber": "4", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "129", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x3", + "module": "token", + "name": "DepositEvent" + } + }, + "typeStr": "0x3::token::DepositEvent", + "data": "{\"amount\":\"1\",\"id\":{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3428\"}}}" + }, + { + "key": { + "creationNumber": "7", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "20667", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings", + "name": "BuyEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings::BuyEvent", + "data": "{\"buyer\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"owner\":\"0xed4f5679c427845c8695a6a3ef7b31af448cf90c0d1b3e35653d8acf2fffd010\",\"price\":\"398000000\",\"timestamp\":\"1739908340290288\",\"token_id\":{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3428\"}}}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "830", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"397900000\"}" + }, + { + "key": { + "creationNumber": "4", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "130", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x3", + "module": "token", 
+ "name": "DepositEvent" + } + }, + "typeStr": "0x3::token::DepositEvent", + "data": "{\"amount\":\"1\",\"id\":{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #4084\"}}}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x5aa4efe9703ffba6082906721756da68b9f7e8d256ed719cf49c40cab3938650" + }, + "sequenceNumber": "14404", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"21884500\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9" + }, + "sequenceNumber": "2500623", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"5968500\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b" + }, + "sequenceNumber": "972", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"370047000\"}" + }, + { + "key": { + "creationNumber": "1125899906842629", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "692269", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "ListingFilledEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::ListingFilledEvent", + "data": "{\"commission\":\"5968500\",\"listing\":\"0x7d618afb24770a0491e128c2d4db80598471795d76ed7a5bc0b88067b5f28d0d\",\"price\":\"397900000\",\"purchaser\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"royalties\":\"21884500\",\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"token_metadata\":{\"collection\":{\"vec\":[]},\"collection_name\":\"Bruh Bears\",\"creator_address\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"property_version\":{\"vec\":[\"1\"]},\"token\":{\"vec\":[]},\"token_name\":\"Bruh Bear #4084\"},\"type\":\"fixed price\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "831", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"21890000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x5aa4efe9703ffba6082906721756da68b9f7e8d256ed719cf49c40cab3938650" + }, + "sequenceNumber": "14405", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"21890000\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "832", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + 
"address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"9950000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "150762", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"9950000\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "833", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"366160000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x08c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4" + }, + "sequenceNumber": "7156", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"366160000\"}" + }, + { + "key": { + "creationNumber": "4", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "131", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x3", + "module": "token", + "name": "DepositEvent" + } + }, + "typeStr": "0x3::token::DepositEvent", + "data": "{\"amount\":\"1\",\"id\":{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3770\"}}}" + }, + { + "key": { + "creationNumber": "7", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "20668", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings", + "name": "BuyEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings::BuyEvent", + "data": "{\"buyer\":\"0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726\",\"owner\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"price\":\"398000000\",\"timestamp\":\"1739908340290288\",\"token_id\":{\"property_version\":\"1\",\"token_data_id\":{\"collection\":\"Bruh Bears\",\"creator\":\"0x43ec2cb158e3569842d537740fd53403e992b9e7349cc5d3dfaa5aff8faaef2\",\"name\":\"Bruh Bear #3770\"}}}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x735507953f702ddad6dbf5a98de6fd3f57f50b89da9c68672414d8431f103726" + }, + "sequenceNumber": "834", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"4000000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "150763", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"4000000\"}" + }, + { + "key": { + "accountAddress": "0x0" + 
}, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"32\",\"io_gas_units\":\"29\",\"storage_fee_octas\":\"143280\",\"storage_fee_refund_octas\":\"219200\",\"total_charge_gas_units\":\"1493\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382251863_wapal_place_listing.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382251863_wapal_place_listing.json new file mode 100644 index 0000000000000..6b206856d82d9 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382251863_wapal_place_listing.json @@ -0,0 +1,400 @@ +{ + "timestamp": { + "seconds": "1739909050", + "nanos": 543248000 + }, + "version": "2382251863", + "info": { + "hash": "JcHXGOiJXFo9AAaA9A8n0mT5Uph8XJNpPlHDQ14rq30=", + "stateChangeHash": "CajDBNRT8l38Zt+ZTcd8ApSr8rfgUlSUnMzpnj4+IBo=", + "eventRootHash": "/BnkRhbfb9h7XHEzL2j9sU7zHdtE0Svzzhjze4v7qGs=", + "gasUsed": "19", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "q5Zjd5QX1+dBGkFxJ5uo4UVc5a9qIMdDpU4XpLcZdQI=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": 
"{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28188\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78049\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154731\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74311\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692270\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773321\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9078\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77859\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": 
"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x7d3092ed4eab5a0ebffb73868748f06d15cae1b3d82d6bbaeebd33f5acd36502", + "stateKeyHash": "Kp8OqvsQgmrFHTKNWLRhmpMheWF8Qd5ZlvGv27SQbic=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":false,\"guid_creation_num\":\"1125899906842625\",\"owner\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x7d3092ed4eab5a0ebffb73868748f06d15cae1b3d82d6bbaeebd33f5acd36502\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x7d3092ed4eab5a0ebffb73868748f06d15cae1b3d82d6bbaeebd33f5acd36502", + "stateKeyHash": "Kp8OqvsQgmrFHTKNWLRhmpMheWF8Qd5ZlvGv27SQbic=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "coin_listing", + "name": "FixedPriceListing", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::coin_listing::FixedPriceListing<0x1::aptos_coin::AptosCoin>", + "data": "{\"price\":\"459900000\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x7d3092ed4eab5a0ebffb73868748f06d15cae1b3d82d6bbaeebd33f5acd36502", + "stateKeyHash": "Kp8OqvsQgmrFHTKNWLRhmpMheWF8Qd5ZlvGv27SQbic=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "listing", + "name": "Listing" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::listing::Listing", + "data": "{\"delete_ref\":{\"self\":\"0x7d3092ed4eab5a0ebffb73868748f06d15cae1b3d82d6bbaeebd33f5acd36502\"},\"extend_ref\":{\"self\":\"0x7d3092ed4eab5a0ebffb73868748f06d15cae1b3d82d6bbaeebd33f5acd36502\"},\"fee_schedule\":{\"inner\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"object\":{\"inner\":\"0xd88b304033e2b615d5fae295a9986313d3ea713f3a625d06ae686c26181dcc7e\"},\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xa67bf73aee5f50029a94fcfadc751b4818b42dd9123721e9c6872bdd3fd0b505", + "stateKeyHash": "t9jRzZsZI/pIMB5ho5/yNKGdltNrLY06mUKM/K6PKcg=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": 
"{\"allow_ungated_transfer\":false,\"guid_creation_num\":\"1125899906842625\",\"owner\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xa67bf73aee5f50029a94fcfadc751b4818b42dd9123721e9c6872bdd3fd0b505\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xa67bf73aee5f50029a94fcfadc751b4818b42dd9123721e9c6872bdd3fd0b505", + "stateKeyHash": "t9jRzZsZI/pIMB5ho5/yNKGdltNrLY06mUKM/K6PKcg=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "coin_listing", + "name": "FixedPriceListing", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::coin_listing::FixedPriceListing<0x1::aptos_coin::AptosCoin>", + "data": "{\"price\":\"459900000\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xa67bf73aee5f50029a94fcfadc751b4818b42dd9123721e9c6872bdd3fd0b505", + "stateKeyHash": "t9jRzZsZI/pIMB5ho5/yNKGdltNrLY06mUKM/K6PKcg=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "listing", + "name": "Listing" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::listing::Listing", + "data": "{\"delete_ref\":{\"self\":\"0xa67bf73aee5f50029a94fcfadc751b4818b42dd9123721e9c6872bdd3fd0b505\"},\"extend_ref\":{\"self\":\"0xa67bf73aee5f50029a94fcfadc751b4818b42dd9123721e9c6872bdd3fd0b505\"},\"fee_schedule\":{\"inner\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"object\":{\"inner\":\"0xbc600af43ac14abe414c209456a995baefadccf78cc1241b2119b6a8ebe98b06\"},\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b", + "stateKeyHash": "5b0VaahYzzDgHb/FFnkMnUEShP6MG5h2KtgHmd1TO7s=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"1234460328\"},\"deposit_events\":{\"counter\":\"973\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"1065\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b", + "stateKeyHash": "/OOIo15/S8NhwWU8pHHCbdtrTv5XnSvbwSv3IlygeJE=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": 
"{\"authentication_key\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"coin_register_events\":{\"counter\":\"16\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"1342\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"2407\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114062610299697248\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10375", + "blockHeight": "292447858", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 383, + "eventSizeInfo": [ + { + "typeTagBytes": 60, + "totalBytes": 320 + }, + { + "typeTagBytes": 60, + "totalBytes": 319 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 87, + "valueBytes": 481 + }, + { + "keyBytes": 87, + "valueBytes": 481 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b", + "sequenceNumber": "2406", + "maxGasAmount": "38", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739909141" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "name": "marketplace_scripts" + }, + "name": "update_fixed_price_many" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "[\"0xa67bf73aee5f50029a94fcfadc751b4818b42dd9123721e9c6872bdd3fd0b505\",\"0x7d3092ed4eab5a0ebffb73868748f06d15cae1b3d82d6bbaeebd33f5acd36502\"]", + "[\"459900000\",\"459900000\"]" + ], + "entryFunctionIdStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::marketplace_scripts::update_fixed_price_many" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "fGvkKHnGZkyumjtIC85RbEostHPoXoKtf1R9WQZ1soE=", + "signature": "sRNmWhPFrkSgVlT/V8GEi8wLMhQ8f3yZ+KO2DrBziqRvhO36AiOy8Qwze8F63D7OqnChFp6VZNtOWTsw/AGMBw==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842627", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "1773319", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "ListingPlacedEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::ListingPlacedEvent", + "data": 
"{\"listing\":\"0xa67bf73aee5f50029a94fcfadc751b4818b42dd9123721e9c6872bdd3fd0b505\",\"price\":\"459900000\",\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"token_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0x53858a7a4b6faf7cb2d198efe80351abc658d9cefcd2e95587f3d473cc90a041\"}]},\"collection_name\":\"Sloth balls\",\"creator_address\":\"0x7bc33fea1f7f5e7dc023c5cb041d0aadd664c8f738f619829095c19a9cc34e0\",\"property_version\":{\"vec\":[]},\"token\":{\"vec\":[{\"inner\":\"0xbc600af43ac14abe414c209456a995baefadccf78cc1241b2119b6a8ebe98b06\"}]},\"token_name\":\"Sloth Ball #1938\"},\"type\":\"fixed price\"}" + }, + { + "key": { + "creationNumber": "1125899906842627", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "1773320", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "ListingPlacedEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::ListingPlacedEvent", + "data": "{\"listing\":\"0x7d3092ed4eab5a0ebffb73868748f06d15cae1b3d82d6bbaeebd33f5acd36502\",\"price\":\"459900000\",\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"token_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0x53858a7a4b6faf7cb2d198efe80351abc658d9cefcd2e95587f3d473cc90a041\"}]},\"collection_name\":\"Sloth balls\",\"creator_address\":\"0x7bc33fea1f7f5e7dc023c5cb041d0aadd664c8f738f619829095c19a9cc34e0\",\"property_version\":{\"vec\":[]},\"token\":{\"vec\":[{\"inner\":\"0xd88b304033e2b615d5fae295a9986313d3ea713f3a625d06ae686c26181dcc7e\"}]},\"token_name\":\"Sloth Ball #109\"},\"type\":\"fixed price\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"7\",\"io_gas_units\":\"12\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"0\",\"total_charge_gas_units\":\"19\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382313982_wapal_place_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382313982_wapal_place_offer.json new file mode 100644 index 0000000000000..d0f25bc84c62e --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382313982_wapal_place_offer.json @@ -0,0 +1,468 @@ +{ + "timestamp": { + "seconds": "1739910634", + "nanos": 289382000 + }, + "version": "2382313982", + "info": { + "hash": "vdW1lCRfMO7VSSqO+OVzgCoW0B+AUSuFxbK9lQEHV3w=", + "stateChangeHash": "sACKl4LOdku0IprpfquIJXSkYIIOn4rfsFPOY9t4FRk=", + "eventRootHash": "J9w/Fx+lxC5gU+DHLLW1DdsUNgcVWso7qITolyLjsHY=", + "gasUsed": "1278", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "6JbmIPw5IOxtztzh0kEKUtrtvdNOJMqygG/HKkQVTzk=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdd69203952afa9962f3277f2be027fad3d21d57b986a61d674279d5e395323e", + "stateKeyHash": "cpNLbASMsACy94Kkrj1vHP1HjAvK3rfGQaloXHLTD6Q=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + 
"data": "{\"allow_ungated_transfer\":false,\"guid_creation_num\":\"1125899906842625\",\"owner\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xdd69203952afa9962f3277f2be027fad3d21d57b986a61d674279d5e395323e\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdd69203952afa9962f3277f2be027fad3d21d57b986a61d674279d5e395323e", + "stateKeyHash": "cpNLbASMsACy94Kkrj1vHP1HjAvK3rfGQaloXHLTD6Q=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "token_offer", + "name": "CoinOffer", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::token_offer::CoinOffer<0x1::aptos_coin::AptosCoin>", + "data": "{\"coins\":{\"value\":\"25000000\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdd69203952afa9962f3277f2be027fad3d21d57b986a61d674279d5e395323e", + "stateKeyHash": "cpNLbASMsACy94Kkrj1vHP1HjAvK3rfGQaloXHLTD6Q=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "token_offer", + "name": "TokenOffer" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::token_offer::TokenOffer", + "data": "{\"delete_ref\":{\"self\":\"0xdd69203952afa9962f3277f2be027fad3d21d57b986a61d674279d5e395323e\"},\"expiration_time\":\"1739923200\",\"fee_schedule\":{\"inner\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"item_price\":\"25000000\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xdd69203952afa9962f3277f2be027fad3d21d57b986a61d674279d5e395323e", + "stateKeyHash": "cpNLbASMsACy94Kkrj1vHP1HjAvK3rfGQaloXHLTD6Q=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "token_offer", + "name": "TokenOfferTokenV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::token_offer::TokenOfferTokenV1", + "data": "{\"collection_name\":\"Aptos Dogs\",\"creator_address\":\"0xee814d743d2c3b4b1b8b30f3e0c84c7017df3154bda84c31958785f1d5b70e61\",\"property_version\":\"0\",\"token_name\":\"AptosDogs #1596\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "stateKeyHash": "CnIoX/rU8s/MEWBmiMymIKAzkKuqzEhHPNEaXeZekPM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"43287672469\"},\"deposit_events\":{\"counter\":\"2500630\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"26\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370", + 
"stateKeyHash": "IyRYVbJUBMkMuQaFggZjn111qLTWbH/eXoWTSRLZxVU=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"785074663\"},\"deposit_events\":{\"counter\":\"130\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"198\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370", + "stateKeyHash": "kpXwtHWZmkJREq5YDLqGVbaPLJ8ud7xjUhSL55k+sJE=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"coin_register_events\":{\"counter\":\"44\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"122\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"396\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370", + "stateKeyHash": "6jrw+W8XldFnMNc9FfPoUsIyLs0OR6vP+JKFQCsXQbE=", + "type": { + "address": "0x3", + "module": "token", + "name": "TokenStore" + }, + "typeStr": "0x3::token::TokenStore", + "data": "{\"burn_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"14\"}}},\"deposit_events\":{\"counter\":\"10\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"12\"}}},\"direct_transfer\":true,\"mutate_token_property_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"15\"}}},\"tokens\":{\"handle\":\"0xd14836610466b216bb9ca3eb09fbf3fec6d3219b038b515ff2915d539192beda\"},\"withdraw_events\":{\"counter\":\"1\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"13\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370", + "stateKeyHash": "QHBVUTmMZACoSRvtLhOTg1vXmhatuv6WMLU3RqenNM4=", + "type": { + "address": "0x3", + "module": "token_event_store", + "name": "TokenEventStoreV1" + }, + "typeStr": "0x3::token_event_store::TokenEventStoreV1", + "data": 
"{\"collection_description_mutate_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"115\"}}},\"collection_maximum_mutate_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"114\"}}},\"collection_uri_mutate_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"113\"}}},\"default_property_mutate_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"118\"}}},\"description_mutate_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"119\"}}},\"extension\":{\"vec\":[]},\"maximum_mutate_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"121\"}}},\"opt_in_events\":{\"counter\":\"1\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"116\"}}},\"royalty_mutate_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"120\"}}},\"uri_mutate_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"creation_num\":\"117\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": 
"{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28188\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78049\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154731\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74311\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692270\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773321\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9078\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77860\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": 
"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114062714855835894\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10375", + "blockHeight": "292455993", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 412, + "eventSizeInfo": [ + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 71, + "totalBytes": 120 + }, + { + "typeTagBytes": 63, + "totalBytes": 253 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87, + "valueBytes": 529 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 85, + "valueBytes": 225 + }, + { + "keyBytes": 104, + "valueBytes": 433 + }, + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370", + "sequenceNumber": "395", + "maxGasAmount": "2556", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739910724" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "name": "token_offer" + }, + "name": "init_for_tokenv1_entry" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "\"0xee814d743d2c3b4b1b8b30f3e0c84c7017df3154bda84c31958785f1d5b70e61\"", + "\"Aptos Dogs\"", + "\"AptosDogs #1596\"", + "\"0\"", + "{\"inner\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"}", + "\"25000000\"", + "\"1739923200\"" + ], + "entryFunctionIdStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::token_offer::init_for_tokenv1_entry" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "acCkjysfpSQy+nIQYG5FS906GeTnUafhxvZmFWNtlpk=", + "signature": "ex3BRinrrf+T5Dbo+DLaq8xy1jfdEsV1z31ktzU89KbaWUDNfbjQMKTzoz73SSfLjobBgFvkQMPpg1hnQFqSDg==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "2", + 
"accountAddress": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9" + }, + "sequenceNumber": "2500629", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"0\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370" + }, + "sequenceNumber": "197", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"25000000\"}" + }, + { + "key": { + "creationNumber": "116", + "accountAddress": "0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x3", + "module": "token_event_store", + "name": "OptInTransferEvent" + } + }, + "typeStr": "0x3::token_event_store::OptInTransferEvent", + "data": "{\"opt_in\":true}" + }, + { + "key": { + "creationNumber": "1125899906842633", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "77859", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "TokenOfferPlacedEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::TokenOfferPlacedEvent", + "data": "{\"price\":\"25000000\",\"purchaser\":\"0x62928b3712d452190346090807d5cfb40dabb54740cf1d2acfc5b4d3d9e0b370\",\"token_metadata\":{\"collection\":{\"vec\":[]},\"collection_name\":\"Aptos Dogs\",\"creator_address\":\"0xee814d743d2c3b4b1b8b30f3e0c84c7017df3154bda84c31958785f1d5b70e61\",\"property_version\":{\"vec\":[\"0\"]},\"token\":{\"vec\":[]},\"token_name\":\"AptosDogs #1596\"},\"token_offer\":\"0xdd69203952afa9962f3277f2be027fad3d21d57b986a61d674279d5e395323e\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"7\",\"io_gas_units\":\"12\",\"storage_fee_octas\":\"125960\",\"storage_fee_refund_octas\":\"0\",\"total_charge_gas_units\":\"1278\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382373209_wapal_place_collection_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382373209_wapal_place_collection_offer.json new file mode 100644 index 0000000000000..ffdc58daecf9c --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382373209_wapal_place_collection_offer.json @@ -0,0 +1,410 @@ +{ + "timestamp": { + "seconds": "1739912240", + "nanos": 932749000 + }, + "version": "2382373209", + "info": { + "hash": "+2O2t8eCytS4YpEySEPyxRl56vzjJzhgfVmZ46y3sek=", + "stateChangeHash": "CsZvmLuuX9CcLjTmQncWIox78lRJ9o0DzkQCw4SAE0I=", + "eventRootHash": "5OJluPga8kSuxjHYE1vjf637j9I0JJSVGceadUgsl+s=", + "gasUsed": "661", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "o7E7OyXpLAeo4F8FKnZSnsV0Lm+AMPClKTx9BfpMHX0=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", 
+ "writeResource": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "stateKeyHash": "CnIoX/rU8s/MEWBmiMymIKAzkKuqzEhHPNEaXeZekPM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"43287672469\"},\"deposit_events\":{\"counter\":\"2500632\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"26\",\"guid\":{\"id\":{\"addr\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": 
"{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28189\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78049\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154733\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74311\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692270\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773321\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9078\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77860\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": 
"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c", + "stateKeyHash": "KsqT1JN2+xPyls4Z6XQ0VQ5qGiPo7TPIVShXAPtTPOk=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"186342221\"},\"deposit_events\":{\"counter\":\"502\",\"guid\":{\"id\":{\"addr\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"861\",\"guid\":{\"id\":{\"addr\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c", + "stateKeyHash": "TSgc7JUH/xPSo1Mjho2il59yJ6Rdy2zwGV0fB8LIxKo=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"coin_register_events\":{\"counter\":\"27\",\"guid\":{\"id\":{\"addr\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"721\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"1253\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xff2ba0969dfe349d37cbabc28f922201b25ecd8102cb2960d62f9a4b64756de9", + "stateKeyHash": "4L8IVIqmTSuPIgcdhGHN3i0ZOfBNcHc46s/GyJMOGGI=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":false,\"guid_creation_num\":\"1125899906842625\",\"owner\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xff2ba0969dfe349d37cbabc28f922201b25ecd8102cb2960d62f9a4b64756de9\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xff2ba0969dfe349d37cbabc28f922201b25ecd8102cb2960d62f9a4b64756de9", + 
"stateKeyHash": "4L8IVIqmTSuPIgcdhGHN3i0ZOfBNcHc46s/GyJMOGGI=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "collection_offer", + "name": "CoinOffer", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::collection_offer::CoinOffer<0x1::aptos_coin::AptosCoin>", + "data": "{\"coins\":{\"value\":\"97600000\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xff2ba0969dfe349d37cbabc28f922201b25ecd8102cb2960d62f9a4b64756de9", + "stateKeyHash": "4L8IVIqmTSuPIgcdhGHN3i0ZOfBNcHc46s/GyJMOGGI=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "collection_offer", + "name": "CollectionOffer" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::collection_offer::CollectionOffer", + "data": "{\"delete_ref\":{\"self\":\"0xff2ba0969dfe349d37cbabc28f922201b25ecd8102cb2960d62f9a4b64756de9\"},\"expiration_time\":\"1740700800\",\"fee_schedule\":{\"inner\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"item_price\":\"24400000\",\"remaining\":\"4\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xff2ba0969dfe349d37cbabc28f922201b25ecd8102cb2960d62f9a4b64756de9", + "stateKeyHash": "4L8IVIqmTSuPIgcdhGHN3i0ZOfBNcHc46s/GyJMOGGI=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "collection_offer", + "name": "CollectionOfferTokenV2" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::collection_offer::CollectionOfferTokenV2", + "data": "{\"collection\":{\"inner\":\"0xd82f5841196bf66232316dc61188947583f418346b72758c0f45827cc5838617\"}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114062480303824399\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10375", + "blockHeight": "292464304", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 388, + "eventSizeInfo": [ + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 68, + "totalBytes": 273 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 87, + "valueBytes": 527 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c", + "sequenceNumber": "1252", + "maxGasAmount": "1322", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739912331" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": 
"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "name": "collection_offer" + }, + "name": "init_for_tokenv2_entry" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "{\"inner\":\"0xd82f5841196bf66232316dc61188947583f418346b72758c0f45827cc5838617\"}", + "{\"inner\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"}", + "\"24400000\"", + "\"4\"", + "\"1740700800\"" + ], + "entryFunctionIdStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::collection_offer::init_for_tokenv2_entry" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "jzTNxSbl1tG2PYddDaytN3LlJtYxugmmeV8TRRHmqh0=", + "signature": "TN8LiCIkTpf6Tq1iPdsIGD0QH153vnqgTBB+1wBLR9FWa5Y73wb7MlpmPLspukLmVQlXfgjwEVXntqph7V3zCQ==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "2", + "accountAddress": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9" + }, + "sequenceNumber": "2500631", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"0\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c" + }, + "sequenceNumber": "860", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"97600000\"}" + }, + { + "key": { + "creationNumber": "1125899906842630", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "154732", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "CollectionOfferPlacedEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::CollectionOfferPlacedEvent", + "data": "{\"collection_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0xd82f5841196bf66232316dc61188947583f418346b72758c0f45827cc5838617\"}]},\"collection_name\":\"Cool Sloths\",\"creator_address\":\"0x7bc33fea1f7f5e7dc023c5cb041d0aadd664c8f738f619829095c19a9cc34e0\"},\"collection_offer\":\"0xff2ba0969dfe349d37cbabc28f922201b25ecd8102cb2960d62f9a4b64756de9\",\"price\":\"24400000\",\"purchaser\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"token_amount\":\"4\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"7\",\"io_gas_units\":\"10\",\"storage_fee_octas\":\"64480\",\"storage_fee_refund_octas\":\"0\",\"total_charge_gas_units\":\"661\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382373978_wapal_cancel_collection_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382373978_wapal_cancel_collection_offer.json new file mode 100644 index 0000000000000..ae6bd289b13c5 --- /dev/null +++ 
b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2382373978_wapal_cancel_collection_offer.json @@ -0,0 +1,303 @@ +{ + "timestamp": { + "seconds": "1739912259", + "nanos": 38045000 + }, + "version": "2382373978", + "info": { + "hash": "dAtgKnR+kjQXAVxrJbgxBclzHjlySJevgg2RVC2cHbE=", + "stateChangeHash": "x3u7psiaoQTqD24rOh5bp2sLqXpxrNOpg6+2eEPuBnA=", + "eventRootHash": "VrJ2PzYWXE52klyDeAhjZCu38iU5TbzVW+3r/24bb/o=", + "gasUsed": "14", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "XIEfsIt/OdIBe9Dw16ivNb1krRpaAvx0+GzM+F5G21Y=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": "{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28190\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78049\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154733\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74311\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692270\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773321\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9078\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77860
\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c", + "stateKeyHash": "KsqT1JN2+xPyls4Z6XQ0VQ5qGiPo7TPIVShXAPtTPOk=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"283205301\"},\"deposit_events\":{\"counter\":\"503\",\"guid\":{\"id\":{\"addr\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"861\",\"guid\":{\"id\":{\"addr\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": 
"TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c", + "stateKeyHash": "TSgc7JUH/xPSo1Mjho2il59yJ6Rdy2zwGV0fB8LIxKo=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"coin_register_events\":{\"counter\":\"27\",\"guid\":{\"id\":{\"addr\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"721\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"1254\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xeece745b73ee7615fc6634f2851eb84f07aa16f6a59f9343f395ca7ffafec899", + "stateKeyHash": "Mmj67e/Cp3NzgCZXa1dRIyZvVj5SU27I+t7nztLaVo4=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114062480294585364\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10375", + "blockHeight": "292464403", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 312, + "eventSizeInfo": [ + { + "typeTagBytes": 70, + "totalBytes": 275 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c", + "sequenceNumber": "1253", + "maxGasAmount": "28", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739912349" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "name": "collection_offer" + }, + "name": "cancel" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "{\"inner\":\"0xeece745b73ee7615fc6634f2851eb84f07aa16f6a59f9343f395ca7ffafec899\"}" + ], + "entryFunctionIdStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::collection_offer::cancel" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "jzTNxSbl1tG2PYddDaytN3LlJtYxugmmeV8TRRHmqh0=", + "signature": "hCLjxPrdmO5jfbsZA4kUoT+bJBMVLQAgylH5oAj1hKu2bRQF11oCkycYdu23DC3BnNh6sv6k5ALvSlRfwaygBA==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842631", + "accountAddress": 
"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "28189", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "CollectionOfferCanceledEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::CollectionOfferCanceledEvent", + "data": "{\"collection_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0xd82f5841196bf66232316dc61188947583f418346b72758c0f45827cc5838617\"}]},\"collection_name\":\"Cool Sloths\",\"creator_address\":\"0x7bc33fea1f7f5e7dc023c5cb041d0aadd664c8f738f619829095c19a9cc34e0\"},\"collection_offer\":\"0xeece745b73ee7615fc6634f2851eb84f07aa16f6a59f9343f395ca7ffafec899\",\"price\":\"24200000\",\"purchaser\":\"0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c\",\"remaining_token_amount\":\"4\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0xc8a05b0e489a00c2137a7b78de208fa678d87d4664dccf5b2848ad8a4425152c" + }, + "sequenceNumber": "502", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"96800000\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"6\",\"io_gas_units\":\"9\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"64480\",\"total_charge_gas_units\":\"14\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386021136_tradeport_v2_fill_collection_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386021136_tradeport_v2_fill_collection_offer.json new file mode 100644 index 0000000000000..0c5482ef55c71 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386021136_tradeport_v2_fill_collection_offer.json @@ -0,0 +1,434 @@ +{ + "timestamp": { + "seconds": "1739967219", + "nanos": 744142000 + }, + "version": "2386021136", + "info": { + "hash": "Ghrvk7yoDX2i96oBcavk+TtTSyybz5K2eVk+fqdAOn4=", + "stateChangeHash": "HJk3/5F0h/zislhrbjh+SZq2lf4nshpChG1Yf+tUFTg=", + "eventRootHash": "XNwLfssTF3QHCRoVjz1PJGoY3xkiB+Q0eS6MSSkBNKI=", + "gasUsed": "33", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "0EScfN5k1JT1q3rlYEjre0785E1k4vVJAFG50yJGIn0=", + "changes": [ + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xe3c39dbed236c7ddcf847f9734baf8168911babd8384f17538102b17ab156bd", + "stateKeyHash": "17SYqUbyEjho5Gj3NVQWzLPGr/dpUOrVgCiSth5enwg=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d", + "stateKeyHash": "gJqWi0CJ+MuK3VXAnlp0S29arFczqUUedtYC8860U94=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": 
"{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"transfer_events\":{\"counter\":\"6\",\"guid\":{\"id\":{\"addr\":\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d", + "stateKeyHash": "gJqWi0CJ+MuK3VXAnlp0S29arFczqUUedtYC8860U94=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\"}]},\"property_mutator_ref\":{\"self\":\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d", + "stateKeyHash": "gJqWi0CJ+MuK3VXAnlp0S29arFczqUUedtYC8860U94=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[{\"key\":\"Eyes\",\"value\":{\"type\":9,\"value\":\"0x054861707079\"}},{\"key\":\"Headwear\",\"value\":{\"type\":9,\"value\":\"0x07427261696e6579\"}},{\"key\":\"Outfits\",\"value\":{\"type\":9,\"value\":\"0x05476f726564\"}},{\"key\":\"Skins\",\"value\":{\"type\":9,\"value\":\"0x0354616e\"}},{\"key\":\"Items\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Backgrounds\",\"value\":{\"type\":9,\"value\":\"0x05437265616d\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d", + "stateKeyHash": "gJqWi0CJ+MuK3VXAnlp0S29arFczqUUedtYC8860U94=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0xae91e12aadaaa2d0e45a2f3dea52befe42002977e038edf53d15dbc298d64d89\"},\"description\":\"10,000 BITs, A key to the future of Eon Labs. 
Collect, Create, Vibe.\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"ipfs://QmV6ZywNPnNXtkQZQF82ND3Az3FzpuTBd9614BxKKtJNjV\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d", + "stateKeyHash": "gJqWi0CJ+MuK3VXAnlp0S29arFczqUUedtYC8860U94=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"1059\"},\"name\":{\"padding\":\"0x0000000000000000000000\",\"value\":\"BIT #3524\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4", + "stateKeyHash": "ia5CJAqvJ4lnS7nWhVMNNxhYGforZgoP0qtHcM/2+OM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"11973951477\"},\"deposit_events\":{\"counter\":\"150819\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"24\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90", + "stateKeyHash": "Tmognmf+Q297fd/j+zVu31WGUdSpfwPCdeqhXBej9RQ=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"48116540\"},\"deposit_events\":{\"counter\":\"291\",\"guid\":{\"id\":{\"addr\":\"0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"738\",\"guid\":{\"id\":{\"addr\":\"0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90", + "stateKeyHash": "bwvp098DFe4UeA6PX+tVBOy+qXPdOe3zcs+oVeUFRKM=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90\",\"coin_register_events\":{\"counter\":\"25\",\"guid\":{\"id\":{\"addr\":\"0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"426\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"1389\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + 
"type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xd456a5938d68b004c5b6341dbfb7f8e258ab3f0df08a978d785ee421b6ec9fa2", + "stateKeyHash": "usVY0WzXU52BSarHRVpsa0yE3FgmlhHFfdjkJFA2lZU=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"12030463\"},\"deposit_events\":{\"counter\":\"1065\",\"guid\":{\"id\":{\"addr\":\"0xd456a5938d68b004c5b6341dbfb7f8e258ab3f0df08a978d785ee421b6ec9fa2\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"32\",\"guid\":{\"id\":{\"addr\":\"0xd456a5938d68b004c5b6341dbfb7f8e258ab3f0df08a978d785ee421b6ec9fa2\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": "{\"accept_collection_bid_events\":{\"counter\":\"30950\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"630\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"41264\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3873\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"114639\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11692\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114084854019894234\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10385", + "blockHeight": "292748902", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 438, + "eventSizeInfo": [ + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 71, + "totalBytes": 263 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87 + }, + { + "keyBytes": 87, + "valueBytes": 832 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + 
"keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90", + "sequenceNumber": "1388", + "maxGasAmount": "66", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739967310" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x7ccf0e6e871977c354c331aa0fccdffb562d9fceb27e3d7f61f8e12e470358e9", + "name": "aggregator" + }, + "name": "instant_sell" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "[\"0xe3c39dbed236c7ddcf847f9734baf8168911babd8384f17538102b17ab156bd\"]", + "[\"mercato\"]", + "[\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\"]", + "[\"Bits\"]", + "[\"BIT #3524\"]", + "\"1\"", + "[\"0\"]", + "{\"inner\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"}", + "[\"1\"]" + ], + "entryFunctionIdStr": "0x7ccf0e6e871977c354c331aa0fccdffb562d9fceb27e3d7f61f8e12e470358e9::aggregator::instant_sell" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "OczGUbSkenxKAd/z4U2l2C7A059nfnUSrf3nJR7irXo=", + "signature": "WeVdoSAoPRoX3daPxP6ZVDHH896gJ/GKuywi+7CvJ4wBQCdSO0jT1QvFSnSG3qlkzA04SwP3/By1fI+Ye4UmDA==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "2", + "accountAddress": "0xd456a5938d68b004c5b6341dbfb7f8e258ab3f0df08a978d785ee421b6ec9fa2" + }, + "sequenceNumber": "1064", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"1031500\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "150818", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"515750\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90" + }, + "sequenceNumber": "290", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"19082750\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d" + }, + "sequenceNumber": "5", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90\",\"object\":\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\",\"to\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\"}" + }, + { + "key": { + "creationNumber": "23", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "30949", + "type": { + "type": 
"MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "AcceptCollectionBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::AcceptCollectionBidEvent", + "data": "{\"bid\":{\"inner\":\"0xe3c39dbed236c7ddcf847f9734baf8168911babd8384f17538102b17ab156bd\"},\"bid_buyer\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"bid_seller\":\"0x756e65ad425f2baf916afb562c21a259ad3bf65f7bee439a562e03251859cf90\",\"price\":\"20630000\",\"timestamp\":\"1739967219744142\",\"token\":{\"inner\":\"0x1cad42fedce28de2e45b1de11e26abe0d540edae729341135388cdbec4b8262d\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"20\",\"io_gas_units\":\"14\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"55640\",\"total_charge_gas_units\":\"33\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386133936_tradeport_v2_place_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386133936_tradeport_v2_place_offer.json new file mode 100644 index 0000000000000..1cf3b4c4bc296 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386133936_tradeport_v2_place_offer.json @@ -0,0 +1,240 @@ +{ + "timestamp": { + "seconds": "1739969888", + "nanos": 265903000 + }, + "version": "2386133936", + "info": { + "hash": "mNsFYqf1mOd2FQtrbtT1ZtpLZoUFpN93VARA3UyBSfA=", + "stateChangeHash": "bLkjtION2RdAaGrLWMgeWzUH3Y7YnfdKeh5TGXJQEtM=", + "eventRootHash": "Yu642k8pDy/xQjHZq4W7OQqHU2o7fpMwjRzw+tQ7koo=", + "gasUsed": "568", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "C7im/ed+QVKMa7FL3d+1UhHQihSSO/G4qSqmweivBfk=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x892b542fbc8724a065e3c1138b637712ab063ab8db92680ef2533121b8fba172", + "stateKeyHash": "2CRij/kdJGtiu99L7xYmREtUtkxQQLMPMi0/laaasSs=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":false,\"guid_creation_num\":\"1125899906842625\",\"owner\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x892b542fbc8724a065e3c1138b637712ab063ab8db92680ef2533121b8fba172\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x892b542fbc8724a065e3c1138b637712ab063ab8db92680ef2533121b8fba172", + "stateKeyHash": "2CRij/kdJGtiu99L7xYmREtUtkxQQLMPMi0/laaasSs=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "TokenBid" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::TokenBid", + "data": 
"{\"bid_buyer\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"coins\":{\"value\":\"3000000000\"},\"delete_ref\":{\"self\":\"0x892b542fbc8724a065e3c1138b637712ab063ab8db92680ef2533121b8fba172\"},\"token\":{\"inner\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": "{\"accept_collection_bid_events\":{\"counter\":\"30950\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"630\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"41265\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3873\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"114737\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11693\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202", + "stateKeyHash": "ng9+mevGUfoc5AR3vftd7PqgkTKm2V/50+aYfbQZf2c=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"260780793\"},\"deposit_events\":{\"counter\":\"89\",\"guid\":{\"id\":{\"addr\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"135\",\"guid\":{\"id\":{\"addr\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202", + "stateKeyHash": "6bxz90Ogj9/Y37uaza8hgFOVDXjI3C/OTi459ELPWgo=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": 
"{\"authentication_key\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"coin_register_events\":{\"counter\":\"33\",\"guid\":{\"id\":{\"addr\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"90\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"300\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114085190601556105\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10385", + "blockHeight": "292762673", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 264, + "eventSizeInfo": [ + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 66, + "totalBytes": 226 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87, + "valueBytes": 301 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202", + "sequenceNumber": "299", + "maxGasAmount": "1136", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739969976" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "biddings_v2" + }, + "name": "token_bid" + }, + "arguments": [ + "{\"inner\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}", + "\"3000000000\"" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::token_bid" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "4V6RdMWQI1vJnHxZUqGIrK5p5miGZpn3vFZGoeUiHPg=", + "signature": "Dx8vWxWm2aXihaCgnGceyva6+wWRfN2dHbj4i87IA+aWcq7yFNDrBwmEVaRNCX20BeyGyjQ0DbfK/viPyWtuBg==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "3", + "accountAddress": "0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202" + }, + "sequenceNumber": "134", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"3000000000\"}" + }, + { + "key": { + "creationNumber": "18", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "11692", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "InsertTokenBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::InsertTokenBidEvent", + "data": 
"{\"bid\":{\"inner\":\"0x892b542fbc8724a065e3c1138b637712ab063ab8db92680ef2533121b8fba172\"},\"bid_buyer\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"price\":\"3000000000\",\"timestamp\":\"1739969888265903\",\"token\":{\"inner\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"7\",\"io_gas_units\":\"7\",\"storage_fee_octas\":\"55440\",\"storage_fee_refund_octas\":\"0\",\"total_charge_gas_units\":\"568\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386142672_tradeport_v2_cancel_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386142672_tradeport_v2_cancel_offer.json new file mode 100644 index 0000000000000..cca324ec71cd0 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386142672_tradeport_v2_cancel_offer.json @@ -0,0 +1,223 @@ +{ + "timestamp": { + "seconds": "1739970113", + "nanos": 672146000 + }, + "version": "2386142672", + "info": { + "hash": "kiwhMc/v8l8o5DmBexSVCT5935XIEly+As03na/ptNA=", + "stateChangeHash": "qf1cm9iNTlNhcwH3vwAxM6cne5zwcWn5Jrsr78QrYmA=", + "eventRootHash": "qSJCWS2ebZOTaD1S3xELbRxSPB1yN5RpWN19936mUuY=", + "gasUsed": "10", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "nU0FhJKYBPYpdNVbMV4cpSkoVxUyu7asu0M51uzEVFw=", + "changes": [ + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x892b542fbc8724a065e3c1138b637712ab063ab8db92680ef2533121b8fba172", + "stateKeyHash": "2CRij/kdJGtiu99L7xYmREtUtkxQQLMPMi0/laaasSs=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": 
"{\"accept_collection_bid_events\":{\"counter\":\"30950\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"630\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"41265\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3874\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"114738\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11693\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202", + "stateKeyHash": "ng9+mevGUfoc5AR3vftd7PqgkTKm2V/50+aYfbQZf2c=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"3260835233\"},\"deposit_events\":{\"counter\":\"90\",\"guid\":{\"id\":{\"addr\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"135\",\"guid\":{\"id\":{\"addr\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202", + "stateKeyHash": "6bxz90Ogj9/Y37uaza8hgFOVDXjI3C/OTi459ELPWgo=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"coin_register_events\":{\"counter\":\"33\",\"guid\":{\"id\":{\"addr\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"90\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"301\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114085195631212555\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10385", + "blockHeight": "292763848", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 262, + 
"eventSizeInfo": [ + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 66, + "totalBytes": 226 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202", + "sequenceNumber": "300", + "maxGasAmount": "20", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739970201" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "biddings_v2" + }, + "name": "cancel_token_bid" + }, + "arguments": [ + "{\"inner\":\"0x892b542fbc8724a065e3c1138b637712ab063ab8db92680ef2533121b8fba172\"}" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::cancel_token_bid" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "4V6RdMWQI1vJnHxZUqGIrK5p5miGZpn3vFZGoeUiHPg=", + "signature": "SYbOHKaBVkj+uBKIdhdxxpq4PLyLCqChgmp7Abjoum6R+/gIG7eUjpDSbBhvTjVQpb0slr7Jz7LOQnf7Dh9FBA==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "2", + "accountAddress": "0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202" + }, + "sequenceNumber": "89", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"3000000000\"}" + }, + { + "key": { + "creationNumber": "19", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "3873", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "DeleteTokenBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::DeleteTokenBidEvent", + "data": "{\"bid\":{\"inner\":\"0x892b542fbc8724a065e3c1138b637712ab063ab8db92680ef2533121b8fba172\"},\"bid_buyer\":\"0xf05f4dbe6ad3dfe4b626751b2bd7e5b9da81fe8b4dc63e56adb03d329559a202\",\"price\":\"3000000000\",\"timestamp\":\"1739970113672146\",\"token\":{\"inner\":\"0xfa0f4628e8737fe9149070e0aa5801a4c0c872f519fa622c363c7f397cf38229\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"5\",\"io_gas_units\":\"5\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"55440\",\"total_charge_gas_units\":\"10\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386455218_tradeport_v2_fill_listing.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386455218_tradeport_v2_fill_listing.json new file mode 100644 index 0000000000000..d380e45471e0b --- /dev/null +++ 
b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386455218_tradeport_v2_fill_listing.json @@ -0,0 +1,1298 @@ +{ + "timestamp": { + "seconds": "1739978001", + "nanos": 822811000 + }, + "version": "2386455218", + "info": { + "hash": "01VnT77scqp+82AT6PwZrB2O5RbYAJUAV+HwYUU2ITU=", + "stateChangeHash": "FdKOx8uDjKvb1BbHbNrCLCRb3p+DaTT899iByYY+o7A=", + "eventRootHash": "tTg7N+4LfW+gotTe83vWE8fHepTgi2GB8iLH/h395ew=", + "gasUsed": "65", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "zyWGO88waYcV9Ek3lfL2necoscf7e1UlHRV+9oziYEE=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4", + "stateKeyHash": "kSJiqPFCska3SlG2fNyd46mssC9QXZVqf1lwRkrXuCM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"56421315284\"},\"deposit_events\":{\"counter\":\"7194\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"7260\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x189365d87b89fece2ba6b9411ce750a05cb1d3e4715107836117137da2d24fe3", + "stateKeyHash": "CpR9agEo6WJOyRGiXtQooOeImZhoDwLMDCKz3/QN9SM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"2030623591\"},\"deposit_events\":{\"counter\":\"3698\",\"guid\":{\"id\":{\"addr\":\"0x189365d87b89fece2ba6b9411ce750a05cb1d3e4715107836117137da2d24fe3\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"19\",\"guid\":{\"id\":{\"addr\":\"0x189365d87b89fece2ba6b9411ce750a05cb1d3e4715107836117137da2d24fe3\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x266aec1520a09e07ca8b831bbe6da9d419e259e8cc84e79e3d44c2051403c8cb", + "stateKeyHash": "4iwGYUW6bnSme6+Nie3LWIUNJMWB4sU+VSelnPoRPUI=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374", + "stateKeyHash": "mYXpq0onTfLDNxYuJjxD6xUxeSPtSTi6KgUyT/lMTvU=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": 
"{\"coin\":{\"value\":\"117976684\"},\"deposit_events\":{\"counter\":\"810\",\"guid\":{\"id\":{\"addr\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"2326\",\"guid\":{\"id\":{\"addr\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374", + "stateKeyHash": "Js4o4egqpmrW14o4GAz1FOCvB4jmYJqbNNQg4ADzX/Y=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"coin_register_events\":{\"counter\":\"55\",\"guid\":{\"id\":{\"addr\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"605\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"2572\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5", + "stateKeyHash": "Nx/tbjMtJhjeIwDE9rrFQczUki+Pi4d2GL3jtlxSqtA=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"transfer_events\":{\"counter\":\"14\",\"guid\":{\"id\":{\"addr\":\"0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5", + "stateKeyHash": "Nx/tbjMtJhjeIwDE9rrFQczUki+Pi4d2GL3jtlxSqtA=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5\"}]},\"property_mutator_ref\":{\"self\":\"0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5", + "stateKeyHash": "Nx/tbjMtJhjeIwDE9rrFQczUki+Pi4d2GL3jtlxSqtA=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[{\"key\":\"Background\",\"value\":{\"type\":9,\"value\":\"0x0441757261\"}},{\"key\":\"Effect BEHIND\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Effect 
FRONT\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Wings\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Mask\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Horn\",\"value\":{\"type\":9,\"value\":\"0x085265696e64656572\"}},{\"key\":\"Earrings\",\"value\":{\"type\":9,\"value\":\"0x045365616c\"}},{\"key\":\"Headwear\",\"value\":{\"type\":9,\"value\":\"0x09576869746562656172\"}},{\"key\":\"Mouth\",\"value\":{\"type\":9,\"value\":\"0x04436f6f6c\"}},{\"key\":\"Glasses\",\"value\":{\"type\":9,\"value\":\"0x0e4d617463686120476c6173736573\"}},{\"key\":\"Eyes\",\"value\":{\"type\":9,\"value\":\"0x044861696c\"}},{\"key\":\"Clothes\",\"value\":{\"type\":9,\"value\":\"0x08436172646967616e\"}},{\"key\":\"Base\",\"value\":{\"type\":9,\"value\":\"0x054e61697665\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5", + "stateKeyHash": "Nx/tbjMtJhjeIwDE9rrFQczUki+Pi4d2GL3jtlxSqtA=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0xdf7175d95f236ac3f9fb581645498857bc42f90ff030527bcefe30c1558a6eac\"},\"description\":\"Ice Blue is a hidden resort for people to heal and find relaxation and peace after a difficult period.\",\"index\":\"697\",\"mutation_events\":{\"counter\":\"1\",\"guid\":{\"id\":{\"addr\":\"0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"Ice Blue #1397\",\"uri\":\"ipfs://QmaNKKTo8CAyLC2YrdK1jvmfjxJE6Y5SpgRU5nL7kfnRXw\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4", + "stateKeyHash": "ia5CJAqvJ4lnS7nWhVMNNxhYGforZgoP0qtHcM/2+OM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"12793006216\"},\"deposit_events\":{\"counter\":\"150846\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"24\",\"guid\":{\"id\":{\"addr\":\"0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885", + "stateKeyHash": "RGVGiGAgJJAM1fbTg7d4qvejhdVakutf4M+u32WxpFY=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"transfer_events\":{\"counter\":\"12\",\"guid\":{\"id\":{\"addr\":\"0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885", + "stateKeyHash": "RGVGiGAgJJAM1fbTg7d4qvejhdVakutf4M+u32WxpFY=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": 
"AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885\"}]},\"property_mutator_ref\":{\"self\":\"0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885", + "stateKeyHash": "RGVGiGAgJJAM1fbTg7d4qvejhdVakutf4M+u32WxpFY=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[{\"key\":\"Mouth\",\"value\":{\"type\":9,\"value\":\"0x0a536865657220636f6c64\"}},{\"key\":\"Headwear\",\"value\":{\"type\":9,\"value\":\"0x09576869746562656172\"}},{\"key\":\"Earrings\",\"value\":{\"type\":9,\"value\":\"0x045365616c\"}},{\"key\":\"Effect BEHIND\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Effect FRONT\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Mask\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Glasses\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Wings\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Horn\",\"value\":{\"type\":9,\"value\":\"0x085265696e64656572\"}},{\"key\":\"Background\",\"value\":{\"type\":9,\"value\":\"0x0453616e64\"}},{\"key\":\"Base\",\"value\":{\"type\":9,\"value\":\"0x054e61697665\"}},{\"key\":\"Clothes\",\"value\":{\"type\":9,\"value\":\"0x08436172646967616e\"}},{\"key\":\"Eyes\",\"value\":{\"type\":9,\"value\":\"0x044861696c\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885", + "stateKeyHash": "RGVGiGAgJJAM1fbTg7d4qvejhdVakutf4M+u32WxpFY=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0xdf7175d95f236ac3f9fb581645498857bc42f90ff030527bcefe30c1558a6eac\"},\"description\":\"Ice Blue is a hidden resort for people to heal and find relaxation and peace after a difficult period.\",\"index\":\"1311\",\"mutation_events\":{\"counter\":\"1\",\"guid\":{\"id\":{\"addr\":\"0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"Ice Blue #1890\",\"uri\":\"ipfs://Qmf64CGfCejXvybCQZRVhQrkhjrvWVv2WZr7zRyr5P1AFS\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xb4576a3ec891f0508aedf9b907391e8aa2e467704f321a615cae8c8c730db76f", + "stateKeyHash": "3ZbMhWRlkhW59j52NY024K1iy5MrEAozJ52pCM/mZgo=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": 
"{\"coin\":{\"value\":\"1214445864\"},\"deposit_events\":{\"counter\":\"1646\",\"guid\":{\"id\":{\"addr\":\"0xb4576a3ec891f0508aedf9b907391e8aa2e467704f321a615cae8c8c730db76f\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"2085\",\"guid\":{\"id\":{\"addr\":\"0xb4576a3ec891f0508aedf9b907391e8aa2e467704f321a615cae8c8c730db76f\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35", + "stateKeyHash": "oSinGY+QHFV+wDApq7q162uzO4zdHdeFmxx4zKjnwXM=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"transfer_events\":{\"counter\":\"12\",\"guid\":{\"id\":{\"addr\":\"0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35", + "stateKeyHash": "oSinGY+QHFV+wDApq7q162uzO4zdHdeFmxx4zKjnwXM=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35\"}]},\"property_mutator_ref\":{\"self\":\"0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35", + "stateKeyHash": "oSinGY+QHFV+wDApq7q162uzO4zdHdeFmxx4zKjnwXM=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[{\"key\":\"Mouth\",\"value\":{\"type\":9,\"value\":\"0x05416d757365\"}},{\"key\":\"Background\",\"value\":{\"type\":9,\"value\":\"0x044a616465\"}},{\"key\":\"Base\",\"value\":{\"type\":9,\"value\":\"0x054e61697665\"}},{\"key\":\"Clothes\",\"value\":{\"type\":9,\"value\":\"0x05477265656e\"}},{\"key\":\"Eyes\",\"value\":{\"type\":9,\"value\":\"0x044861696c\"}},{\"key\":\"Glasses\",\"value\":{\"type\":9,\"value\":\"0x0d536b696920676c617373657320\"}},{\"key\":\"Headwear\",\"value\":{\"type\":9,\"value\":\"0x09576869746562656172\"}},{\"key\":\"Earrings\",\"value\":{\"type\":9,\"value\":\"0x045365616c\"}},{\"key\":\"Horn\",\"value\":{\"type\":9,\"value\":\"0x0646726f737479\"}},{\"key\":\"Mask\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Wings\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Effect FRONT\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Effect BEHIND\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35", + "stateKeyHash": "oSinGY+QHFV+wDApq7q162uzO4zdHdeFmxx4zKjnwXM=", + "type": { + "address": "0x4", + "module": "token", + 
"name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0xdf7175d95f236ac3f9fb581645498857bc42f90ff030527bcefe30c1558a6eac\"},\"description\":\"Ice Blue is a hidden resort for people to heal and find relaxation and peace after a difficult period.\",\"index\":\"2275\",\"mutation_events\":{\"counter\":\"1\",\"guid\":{\"id\":{\"addr\":\"0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"Ice Blue #2577\",\"uri\":\"ipfs://QmXm8X5paXaC72RoZCYcnE1xFZkMXy6er9vjgotDJarFNv\"}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xd14be2c5e5a6f5b0c1181c4d2f28b84511a1abed28eb49c88a7c67b248bd2d9d", + "stateKeyHash": "kdNKZEpYYw+h5DFoiyDVTAHFMwDunL4DshXTDxhoDuk=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59", + "stateKeyHash": "Q9PbX53c7nWDm1eXzv309w6JbzA+9B8RJHCxN6xq/fA=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"transfer_events\":{\"counter\":\"10\",\"guid\":{\"id\":{\"addr\":\"0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59", + "stateKeyHash": "Q9PbX53c7nWDm1eXzv309w6JbzA+9B8RJHCxN6xq/fA=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59\"}]},\"property_mutator_ref\":{\"self\":\"0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59", + "stateKeyHash": "Q9PbX53c7nWDm1eXzv309w6JbzA+9B8RJHCxN6xq/fA=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[{\"key\":\"Horn\",\"value\":{\"type\":9,\"value\":\"0x064d6f6f727365\"}},{\"key\":\"Mask\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Wings\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Mouth\",\"value\":{\"type\":9,\"value\":\"0x05416d757365\"}},{\"key\":\"Eyes\",\"value\":{\"type\":9,\"value\":\"0x044861696c\"}},{\"key\":\"Clothes\",\"value\":{\"type\":9,\"value\":\"0x08536e6f7720667572\"}},{\"key\":\"Base\",\"value\":{\"type\":9,\"value\":\"0x054e61697665\"}},{\"key\":\"Background\",\"value\":{\"type\":9,\"value\":\"0x0553756e6e79\"}},{\"key\":\"Glasses\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Effect 
BEHIND\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Headwear\",\"value\":{\"type\":9,\"value\":\"0x09537472696e67686174\"}},{\"key\":\"Earrings\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}},{\"key\":\"Effect FRONT\",\"value\":{\"type\":9,\"value\":\"0x044e6f6e65\"}}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59", + "stateKeyHash": "Q9PbX53c7nWDm1eXzv309w6JbzA+9B8RJHCxN6xq/fA=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0xdf7175d95f236ac3f9fb581645498857bc42f90ff030527bcefe30c1558a6eac\"},\"description\":\"Ice Blue is a hidden resort for people to heal and find relaxation and peace after a difficult period.\",\"index\":\"22\",\"mutation_events\":{\"counter\":\"1\",\"guid\":{\"id\":{\"addr\":\"0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"Ice Blue #1388\",\"uri\":\"ipfs://QmP5rRPGW3zy4qBa7vG83R2MGynwp5tyQ6Bjbq4NCsaRVK\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "GziPDIxPViVKmcTdIO/LNrfAEGOLVL1z45HHLE9Rf8g=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "ListingStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::ListingStore", + "data": "{\"buy_events\":{\"counter\":\"61372\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"17\"}}},\"delete_listing_events\":{\"counter\":\"46143\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"16\"}}},\"insert_listing_events\":{\"counter\":\"135597\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"15\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xeea7e965045cd02bed0649694c2c7e1e60a68ffc600c55a8af7df5f27157c0e7", + "stateKeyHash": "uBySsGcAUVoBmrzgTqQvbtp7/zgfnptbTfObVm9oMtM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"726963530\"},\"deposit_events\":{\"counter\":\"8587\",\"guid\":{\"id\":{\"addr\":\"0xeea7e965045cd02bed0649694c2c7e1e60a68ffc600c55a8af7df5f27157c0e7\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"12128\",\"guid\":{\"id\":{\"addr\":\"0xeea7e965045cd02bed0649694c2c7e1e60a68ffc600c55a8af7df5f27157c0e7\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xf0d4384157e8c7caac07e0866c4c65fa9df8151c329edf58b825ff7e9b72baf8", + "stateKeyHash": "XViv45qWMchsvksXVNEmiTgJcsUZ/DJr6d0YoWClErU=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": 
"0xf0d4a40c5c2e9806e464b45e5c0d04545a52f44459568cbc163bec1211aaf7df", + "stateKeyHash": "PzJr43NuiFQBWhXL0RMj6h+mcUZZ83oeNBLBWgl9rfo=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114085797854695479\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10386", + "blockHeight": "292804244", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 627, + "eventSizeInfo": [ + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 55, + "totalBytes": 247 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 55, + "totalBytes": 247 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 55, + "totalBytes": 247 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 55, + "totalBytes": 247 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 87, + "valueBytes": 920 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 916 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87, + "valueBytes": 915 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 87, + "valueBytes": 910 + }, + { + "keyBytes": 93, + "valueBytes": 144 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374", + "sequenceNumber": "2571", + "maxGasAmount": "130", + "gasUnitPrice": "100", + 
"expirationTimestampSecs": { + "seconds": "1739978091" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0x7ccf0e6e871977c354c331aa0fccdffb562d9fceb27e3d7f61f8e12e470358e9", + "name": "aggregator" + }, + "name": "purchase_many" + }, + "typeArguments": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ], + "arguments": [ + "[\"0xf0d4a40c5c2e9806e464b45e5c0d04545a52f44459568cbc163bec1211aaf7df\",\"0x266aec1520a09e07ca8b831bbe6da9d419e259e8cc84e79e3d44c2051403c8cb\",\"0xf0d4384157e8c7caac07e0866c4c65fa9df8151c329edf58b825ff7e9b72baf8\",\"0xd14be2c5e5a6f5b0c1181c4d2f28b84511a1abed28eb49c88a7c67b248bd2d9d\"]", + "[\"mercato\",\"mercato\",\"mercato\",\"mercato\"]", + "\"0x791991536b698ef3bc3270624748bbfc79a17793ec5472fbeb864d2df63cfebc\"", + "\"Ice Blue\"", + "[\"Ice Blue #1397\",\"Ice Blue #2577\",\"Ice Blue #1890\",\"Ice Blue #1388\"]", + "[\"0\",\"0\",\"0\",\"0\"]", + "[\"164860000\",\"164900000\",\"165000000\",\"166240000\"]", + "\"1\"" + ], + "entryFunctionIdStr": "0x7ccf0e6e871977c354c331aa0fccdffb562d9fceb27e3d7f61f8e12e470358e9::aggregator::purchase_many" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "DjXLB6aMmOL84UA7t1zMRhTEftK0qs4nb8PqiFT36+w=", + "signature": "FP/Xym1/fdo93vzejZ5WjVh4+aOqKWCkmHEYgs2sHv53OzOKkuekijSGA4LbzVYQRZPoqvyR5gq4GDJp3f6+DA==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2314", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"13188800\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x189365d87b89fece2ba6b9411ce750a05cb1d3e4715107836117137da2d24fe3" + }, + "sequenceNumber": "3694", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"13188800\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2315", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"4121500\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "150842", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"4121500\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2316", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"147549700\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0xb4576a3ec891f0508aedf9b907391e8aa2e467704f321a615cae8c8c730db76f" + }, + "sequenceNumber": "1645", + 
"type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"147549700\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5" + }, + "sequenceNumber": "13", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0xf0d4a40c5c2e9806e464b45e5c0d04545a52f44459568cbc163bec1211aaf7df\",\"object\":\"0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5\",\"to\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\"}" + }, + { + "key": { + "creationNumber": "17", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "61368", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "BuyEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::BuyEvent", + "data": "{\"buyer\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"listing\":{\"inner\":\"0xf0d4a40c5c2e9806e464b45e5c0d04545a52f44459568cbc163bec1211aaf7df\"},\"price\":\"164860000\",\"seller\":\"0xb4576a3ec891f0508aedf9b907391e8aa2e467704f321a615cae8c8c730db76f\",\"timestamp\":\"1739978001822811\",\"token\":{\"inner\":\"0x2d3df7ab1e81459bf92e65cace8b8986da3c91be8d10029d1cd156edfb2317d5\"}}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2317", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"13192000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x189365d87b89fece2ba6b9411ce750a05cb1d3e4715107836117137da2d24fe3" + }, + "sequenceNumber": "3695", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"13192000\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2318", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"4122500\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "150843", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"4122500\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2319", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": 
"0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"147585500\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0xeea7e965045cd02bed0649694c2c7e1e60a68ffc600c55a8af7df5f27157c0e7" + }, + "sequenceNumber": "8586", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"147585500\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35" + }, + "sequenceNumber": "11", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x266aec1520a09e07ca8b831bbe6da9d419e259e8cc84e79e3d44c2051403c8cb\",\"object\":\"0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35\",\"to\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\"}" + }, + { + "key": { + "creationNumber": "17", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "61369", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "BuyEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::BuyEvent", + "data": "{\"buyer\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"listing\":{\"inner\":\"0x266aec1520a09e07ca8b831bbe6da9d419e259e8cc84e79e3d44c2051403c8cb\"},\"price\":\"164900000\",\"seller\":\"0xeea7e965045cd02bed0649694c2c7e1e60a68ffc600c55a8af7df5f27157c0e7\",\"timestamp\":\"1739978001822811\",\"token\":{\"inner\":\"0xc473007e12bdeec420c383127a77e83236731aa22174f2af94a1476183644f35\"}}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2320", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"13200000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x189365d87b89fece2ba6b9411ce750a05cb1d3e4715107836117137da2d24fe3" + }, + "sequenceNumber": "3696", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"13200000\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2321", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"4125000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "150844", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"4125000\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": 
"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2322", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"147675000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x08c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4" + }, + "sequenceNumber": "7192", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"147675000\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885" + }, + "sequenceNumber": "11", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0xf0d4384157e8c7caac07e0866c4c65fa9df8151c329edf58b825ff7e9b72baf8\",\"object\":\"0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885\",\"to\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\"}" + }, + { + "key": { + "creationNumber": "17", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "61370", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "BuyEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::BuyEvent", + "data": "{\"buyer\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"listing\":{\"inner\":\"0xf0d4384157e8c7caac07e0866c4c65fa9df8151c329edf58b825ff7e9b72baf8\"},\"price\":\"165000000\",\"seller\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"timestamp\":\"1739978001822811\",\"token\":{\"inner\":\"0x9e5d1a8c276e1153bacfc46a94b417b9d2effb62baca30000d5e419ad7e53885\"}}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2323", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"13299200\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x189365d87b89fece2ba6b9411ce750a05cb1d3e4715107836117137da2d24fe3" + }, + "sequenceNumber": "3697", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"13299200\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2324", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"4156000\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x6a03eb973cd9385d62fc2842d02a4dd6b70e52f5da77a0689e57e48d93fae1b4" + }, + "sequenceNumber": "150845", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { 
+ "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"4156000\"}" + }, + { + "key": { + "creationNumber": "3", + "accountAddress": "0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374" + }, + "sequenceNumber": "2325", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"148784800\"}" + }, + { + "key": { + "creationNumber": "2", + "accountAddress": "0x08c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4" + }, + "sequenceNumber": "7193", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"148784800\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59" + }, + "sequenceNumber": "9", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0xd14be2c5e5a6f5b0c1181c4d2f28b84511a1abed28eb49c88a7c67b248bd2d9d\",\"object\":\"0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59\",\"to\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\"}" + }, + { + "key": { + "creationNumber": "17", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "61371", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "BuyEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::BuyEvent", + "data": "{\"buyer\":\"0x27e8e9a9ebadb462322bca29352c3a0d24f5b80caa03a529cb71a69b6a09c374\",\"listing\":{\"inner\":\"0xd14be2c5e5a6f5b0c1181c4d2f28b84511a1abed28eb49c88a7c67b248bd2d9d\"},\"price\":\"166240000\",\"seller\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"timestamp\":\"1739978001822811\",\"token\":{\"inner\":\"0xe0ad60b5be1a160e19257812c19a0a347d443500bdf8ec442cfe5f820169de59\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"35\",\"io_gas_units\":\"30\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"226880\",\"total_charge_gas_units\":\"65\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386716658_tradeport_v2_cancel_listing.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386716658_tradeport_v2_cancel_listing.json new file mode 100644 index 0000000000000..4f8a7c048bdc5 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386716658_tradeport_v2_cancel_listing.json @@ -0,0 +1,325 @@ +{ + "timestamp": { + "seconds": "1739985031", + "nanos": 402031000 + }, + "version": "2386716658", + "info": { + "hash": 
"NlOlWkMfv3AdxUvg0076Br5hhSzShzpYoAuJaNnset8=", + "stateChangeHash": "fhYJWYIXFie3x9LFhaIwyfpqApfdgxyFfsNKYuzSXpk=", + "eventRootHash": "pScJskK8i51LmErG6GOWcDleQj44bGrdqSkl7IVARiM=", + "gasUsed": "10", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "q7pjuv2nTcwavOTALw6ydrUtvuymgyWLtSmBvjiYu68=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce", + "stateKeyHash": "K+lQ4IfGoax6BsDXyU0WDzQ/ELh/LPIo+97Q1pKGOKE=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"1307595\"},\"deposit_events\":{\"counter\":\"19604\",\"guid\":{\"id\":{\"addr\":\"0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"22052\",\"guid\":{\"id\":{\"addr\":\"0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce", + "stateKeyHash": "dbnVjRIZ5dnLDP1GnUgYBv0NbJTew0ELQhZBqJiJQn4=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce\",\"coin_register_events\":{\"counter\":\"25\",\"guid\":{\"id\":{\"addr\":\"0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"5039\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"52946\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "GziPDIxPViVKmcTdIO/LNrfAEGOLVL1z45HHLE9Rf8g=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "ListingStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::ListingStore", + "data": "{\"buy_events\":{\"counter\":\"61372\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"17\"}}},\"delete_listing_events\":{\"counter\":\"46146\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"16\"}}},\"insert_listing_events\":{\"counter\":\"135603\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"15\"}}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0xe2e50b7366c7a5e5f7b6ec024465b4444f02bd0b3eed5c9d4fdfe98236f85555", + "stateKeyHash": "yzS9G1EDglfNCleiambCXBJgQ1NzVoGsDoisDNpS9B8=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + 
"type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218", + "stateKeyHash": "2nWy3jH1lfZtVtDENX6yYOVI5iKA5S4ui77jKecH7nc=", + "type": { + "address": "0x1", + "module": "fungible_asset", + "name": "FungibleStore" + }, + "typeStr": "0x1::fungible_asset::FungibleStore", + "data": "{\"balance\":\"122377704693\",\"frozen\":true,\"metadata\":{\"inner\":\"0x2ebb2ccac5e027a87fa0e2e5f656a3a4238d6a48d93ec9b610d570fc0aa0df12\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218", + "stateKeyHash": "2nWy3jH1lfZtVtDENX6yYOVI5iKA5S4ui77jKecH7nc=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce\",\"transfer_events\":{\"counter\":\"3\",\"guid\":{\"id\":{\"addr\":\"0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218", + "stateKeyHash": "2nWy3jH1lfZtVtDENX6yYOVI5iKA5S4ui77jKecH7nc=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0x30e2f18b1f9c447e7dadd7a05966e721ab6512b81ee977cb053edb86cc1b1d65\"},\"description\":\"NFT representing voting power in Cellana corresponding to $CELL locked up\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"1\",\"guid\":{\"id\":{\"addr\":\"0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://api.cellana.finance/api/v1/ve-nft/uri/@0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218", + "stateKeyHash": "2nWy3jH1lfZtVtDENX6yYOVI5iKA5S4ui77jKecH7nc=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"383712\"},\"name\":{\"padding\":\"0x0000000000000000000000000000\",\"value\":\"veCELL\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218", + "stateKeyHash": "2nWy3jH1lfZtVtDENX6yYOVI5iKA5S4ui77jKecH7nc=", + "type": { + "address": "0x4bf51972879e3b95c4781a5cdcb9e1ee24ef483e7d22f2d903626f126df62bd1", + "module": "voting_escrow", + "name": "VeCellanaDeleteRef" + }, + "typeStr": "0x4bf51972879e3b95c4781a5cdcb9e1ee24ef483e7d22f2d903626f126df62bd1::voting_escrow::VeCellanaDeleteRef", + "data": "{\"delete_ref\":{\"self\":\"0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218", + "stateKeyHash": "2nWy3jH1lfZtVtDENX6yYOVI5iKA5S4ui77jKecH7nc=", + "type": { + "address": "0x4bf51972879e3b95c4781a5cdcb9e1ee24ef483e7d22f2d903626f126df62bd1", + "module": "voting_escrow", + "name": "VeCellanaToken" + }, + "typeStr": 
"0x4bf51972879e3b95c4781a5cdcb9e1ee24ef483e7d22f2d903626f126df62bd1::voting_escrow::VeCellanaToken", + "data": "{\"end_epoch\":\"2980\",\"locked_amount\":\"122377704693\",\"next_rebase_epoch\":\"2876\",\"snapshots\":{\"big_vec\":{\"vec\":[]},\"bucket_size\":{\"vec\":[]},\"inline_capacity\":{\"vec\":[]},\"inline_vec\":[{\"end_epoch\":\"2980\",\"epoch\":\"2876\",\"locked_amount\":\"122377704693\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218", + "stateKeyHash": "2nWy3jH1lfZtVtDENX6yYOVI5iKA5S4ui77jKecH7nc=", + "type": { + "address": "0x4bf51972879e3b95c4781a5cdcb9e1ee24ef483e7d22f2d903626f126df62bd1", + "module": "voting_escrow", + "name": "VeCellanaTokenRefs" + }, + "typeStr": "0x4bf51972879e3b95c4781a5cdcb9e1ee24ef483e7d22f2d903626f126df62bd1::voting_escrow::VeCellanaTokenRefs", + "data": "{\"burn_ref\":{\"inner\":{\"vec\":[{\"self\":\"0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218\"}]},\"self\":{\"vec\":[]}},\"transfer_ref\":{\"self\":\"0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218\"}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114087966002510937\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10387", + "blockHeight": "292840561", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 258, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 65, + "totalBytes": 225 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 93, + "valueBytes": 144 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 87, + "valueBytes": 1004 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce", + "sequenceNumber": "52945", + "maxGasAmount": "12", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739985107" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "listings_v2" + }, + "name": "unlist_token" + }, + "arguments": [ + "{\"inner\":\"0xe2e50b7366c7a5e5f7b6ec024465b4444f02bd0b3eed5c9d4fdfe98236f85555\"}" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::unlist_token" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "noZyne/i0eyr3+hoixjrQpCa5O5SzQUqNWE+OiViO4Q=", + "signature": "yFMy0yTknkCijL8PQ3kdavpC184erHe68sRgWdI46BstS289DzfPgrPY9m9k//y/WC+l0VAL+KL/ZHEqDFYSBA==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218" + }, + "sequenceNumber": "2", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + 
"typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0xe2e50b7366c7a5e5f7b6ec024465b4444f02bd0b3eed5c9d4fdfe98236f85555\",\"object\":\"0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218\",\"to\":\"0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce\"}" + }, + { + "key": { + "creationNumber": "16", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "46145", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "DeleteListingEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::DeleteListingEvent", + "data": "{\"listing\":{\"inner\":\"0xe2e50b7366c7a5e5f7b6ec024465b4444f02bd0b3eed5c9d4fdfe98236f85555\"},\"price\":\"300000000\",\"seller\":\"0x7650b060223321d2a7a409abaca386b7e104910878bb5c463f0cbd727561f1ce\",\"timestamp\":\"1739985031402031\",\"token\":{\"inner\":\"0xf8ad2d07e1df7dfb81a63784c102996c391b040a62b8bb6e181fd649ef688218\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"5\",\"io_gas_units\":\"5\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"56720\",\"total_charge_gas_units\":\"10\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386809975_tradeport_v2_place_listing.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386809975_tradeport_v2_place_listing.json new file mode 100644 index 0000000000000..3cac5aa1f0b3e --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386809975_tradeport_v2_place_listing.json @@ -0,0 +1,616 @@ +{ + "timestamp": { + "seconds": "1739988091", + "nanos": 944133000 + }, + "version": "2386809975", + "info": { + "hash": "YhkN3JCbGTBcjf1SfGHBux/Ybl/yz4Cxn8kSFEpL8LA=", + "stateChangeHash": "kshVTtgrHuRm3Cz/0mVrLz0iIjBlHibWAJQwISN4+fs=", + "eventRootHash": "VW4peTuNKUKy6Ga4ViUU+gXqZDHx8jRH3nTgVIdyetI=", + "gasUsed": "1168", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "On8e62sOuOxxsz5cDTf/UPqeDd3/4sW8YFl6Kh40v58=", + "changes": [ + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x254d2eccf3a4736a62f4e9eb8685fa54bb4c98a450c7c71fd4da1c6cb203855f", + "stateKeyHash": "wcpCKl8yR83C0FXuuEulbmFkfA8TrS3xWbQxIeYWSsU=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x3929540e32fb063ddf7fecb764c914493d2844978621767b9f013326093258eb", + "stateKeyHash": "hOAQsoouQp7V/58t2Y7VkhYNiwwek5ghGodWSQo9qVs=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": 
"{\"allow_ungated_transfer\":false,\"guid_creation_num\":\"1125899906842625\",\"owner\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x3929540e32fb063ddf7fecb764c914493d2844978621767b9f013326093258eb\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x3929540e32fb063ddf7fecb764c914493d2844978621767b9f013326093258eb", + "stateKeyHash": "hOAQsoouQp7V/58t2Y7VkhYNiwwek5ghGodWSQo9qVs=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "Listing" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::Listing", + "data": "{\"delete_ref\":{\"self\":\"0x3929540e32fb063ddf7fecb764c914493d2844978621767b9f013326093258eb\"},\"extend_ref\":{\"self\":\"0x3929540e32fb063ddf7fecb764c914493d2844978621767b9f013326093258eb\"},\"price\":\"119000000\",\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"token\":{\"inner\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6db18abc2ba8fb386af8bc9e6d35ef88256a25fcc9cbbfffb0d785be1f64fa8c", + "stateKeyHash": "UsaAk/CETGvo7nwt82uDRtM123aprkkOwJejlsJ32mw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":false,\"guid_creation_num\":\"1125899906842625\",\"owner\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x6db18abc2ba8fb386af8bc9e6d35ef88256a25fcc9cbbfffb0d785be1f64fa8c\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x6db18abc2ba8fb386af8bc9e6d35ef88256a25fcc9cbbfffb0d785be1f64fa8c", + "stateKeyHash": "UsaAk/CETGvo7nwt82uDRtM123aprkkOwJejlsJ32mw=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "Listing" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::Listing", + "data": "{\"delete_ref\":{\"self\":\"0x6db18abc2ba8fb386af8bc9e6d35ef88256a25fcc9cbbfffb0d785be1f64fa8c\"},\"extend_ref\":{\"self\":\"0x6db18abc2ba8fb386af8bc9e6d35ef88256a25fcc9cbbfffb0d785be1f64fa8c\"},\"price\":\"119000000\",\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"token\":{\"inner\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842636\",\"owner\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": 
"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "EventsV1" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::EventsV1", + "data": "{\"auction_bid_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842626\"}}},\"collection_offer_canceled_events\":{\"counter\":\"28213\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842631\"}}},\"collection_offer_filled_events\":{\"counter\":\"78064\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842632\"}}},\"collection_offer_placed_events\":{\"counter\":\"154760\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842630\"}}},\"listing_canceled_events\":{\"counter\":\"74388\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842628\"}}},\"listing_filled_events\":{\"counter\":\"692349\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842629\"}}},\"listing_placed_events\":{\"counter\":\"1773587\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842627\"}}},\"token_offer_canceled_events\":{\"counter\":\"9078\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842634\"}}},\"token_offer_filled_events\":{\"counter\":\"600\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842635\"}}},\"token_offer_placed_events\":{\"counter\":\"77862\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842633\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FeeSchedule" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FeeSchedule", + "data": "{\"extend_ref\":{\"self\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\"},\"fee_address\":\"0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08\",\"creation_num\":\"1125899906842625\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateBiddingFee" + }, + 
"typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateBiddingFee", + "data": "{\"bidding_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "FixedRateListingFee" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::FixedRateListingFee", + "data": "{\"listing_fee\":\"0\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08", + "stateKeyHash": "auABzXqPl+Vs2d6YJuBQ5tt+4LBOBje/7ssPbckNThw=", + "type": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "fee_schedule", + "name": "PercentageRateCommission" + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::fee_schedule::PercentageRateCommission", + "data": "{\"denominator\":\"1000\",\"numerator\":\"15\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf", + "stateKeyHash": "6WA49u1ciQXkmqpQ30B39Lsk1M4RBAcNdtE0O36L3W0=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x6db18abc2ba8fb386af8bc9e6d35ef88256a25fcc9cbbfffb0d785be1f64fa8c\",\"transfer_events\":{\"counter\":\"8\",\"guid\":{\"id\":{\"addr\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf", + "stateKeyHash": "6WA49u1ciQXkmqpQ30B39Lsk1M4RBAcNdtE0O36L3W0=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"}]},\"property_mutator_ref\":{\"self\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf", + "stateKeyHash": "6WA49u1ciQXkmqpQ30B39Lsk1M4RBAcNdtE0O36L3W0=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf", + "stateKeyHash": "6WA49u1ciQXkmqpQ30B39Lsk1M4RBAcNdtE0O36L3W0=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": 
"{\"collection\":{\"inner\":\"0x7b4cd01cc85280139fbf2a11dd929a073febeba767d74904d0c691d68b74f321\"},\"description\":\"The Baptmen created by Baptman and Gawthim Labz.\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://arweave.net/ExSxSo31Ht7qfET0CWCSpx6nvo8ueDERTjVpBI7nwkk/1745.json\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf", + "stateKeyHash": "6WA49u1ciQXkmqpQ30B39Lsk1M4RBAcNdtE0O36L3W0=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"1302\"},\"name\":{\"padding\":\"0x000000\",\"value\":\"The Baptmen #1745\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7", + "stateKeyHash": "oQiMzNFyb/vCe0ljzi23hxzkgNpOSeyE1MWtcjlPlBk=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":true,\"guid_creation_num\":\"1125899906842626\",\"owner\":\"0x3929540e32fb063ddf7fecb764c914493d2844978621767b9f013326093258eb\",\"transfer_events\":{\"counter\":\"3\",\"guid\":{\"id\":{\"addr\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7", + "stateKeyHash": "oQiMzNFyb/vCe0ljzi23hxzkgNpOSeyE1MWtcjlPlBk=", + "type": { + "address": "0x4", + "module": "aptos_token", + "name": "AptosToken" + }, + "typeStr": "0x4::aptos_token::AptosToken", + "data": "{\"burn_ref\":{\"vec\":[{\"inner\":{\"vec\":[{\"self\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\"}]},\"self\":{\"vec\":[]}}]},\"mutator_ref\":{\"vec\":[{\"self\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\"}]},\"property_mutator_ref\":{\"self\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\"},\"transfer_ref\":{\"vec\":[{\"self\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\"}]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7", + "stateKeyHash": "oQiMzNFyb/vCe0ljzi23hxzkgNpOSeyE1MWtcjlPlBk=", + "type": { + "address": "0x4", + "module": "property_map", + "name": "PropertyMap" + }, + "typeStr": "0x4::property_map::PropertyMap", + "data": "{\"inner\":{\"data\":[]}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7", + "stateKeyHash": "oQiMzNFyb/vCe0ljzi23hxzkgNpOSeyE1MWtcjlPlBk=", + "type": { + "address": "0x4", + "module": "token", + "name": "Token" + }, + "typeStr": "0x4::token::Token", + "data": "{\"collection\":{\"inner\":\"0x7b4cd01cc85280139fbf2a11dd929a073febeba767d74904d0c691d68b74f321\"},\"description\":\"The Baptmen created by Baptman and Gawthim 
Labz.\",\"index\":\"0\",\"mutation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\",\"creation_num\":\"1125899906842625\"}}},\"name\":\"\",\"uri\":\"https://arweave.net/ExSxSo31Ht7qfET0CWCSpx6nvo8ueDERTjVpBI7nwkk/1385.json\"}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7", + "stateKeyHash": "oQiMzNFyb/vCe0ljzi23hxzkgNpOSeyE1MWtcjlPlBk=", + "type": { + "address": "0x4", + "module": "token", + "name": "TokenIdentifiers" + }, + "typeStr": "0x4::token::TokenIdentifiers", + "data": "{\"index\":{\"value\":\"348\"},\"name\":{\"padding\":\"0x000000\",\"value\":\"The Baptmen #1385\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b", + "stateKeyHash": "5b0VaahYzzDgHb/FFnkMnUEShP6MG5h2KtgHmd1TO7s=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"2241957998\"},\"deposit_events\":{\"counter\":\"980\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"1072\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b", + "stateKeyHash": "/OOIo15/S8NhwWU8pHHCbdtrTv5XnSvbwSv3IlygeJE=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"coin_register_events\":{\"counter\":\"16\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"1347\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"2421\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "GziPDIxPViVKmcTdIO/LNrfAEGOLVL1z45HHLE9Rf8g=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "ListingStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::ListingStore", + "data": 
"{\"buy_events\":{\"counter\":\"61372\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"17\"}}},\"delete_listing_events\":{\"counter\":\"46146\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"16\"}}},\"insert_listing_events\":{\"counter\":\"135605\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"15\"}}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114089276232385059\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10388", + "blockHeight": "292856727", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 684, + "eventSizeInfo": [ + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 65, + "totalBytes": 225 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 62, + "totalBytes": 323 + }, + { + "typeTagBytes": 55, + "totalBytes": 199 + }, + { + "typeTagBytes": 65, + "totalBytes": 225 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 87 + }, + { + "keyBytes": 87, + "valueBytes": 333 + }, + { + "keyBytes": 87, + "valueBytes": 333 + }, + { + "keyBytes": 87, + "valueBytes": 1082 + }, + { + "keyBytes": 87, + "valueBytes": 739 + }, + { + "keyBytes": 87, + "valueBytes": 739 + }, + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 93, + "valueBytes": 144 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b", + "sequenceNumber": "2420", + "maxGasAmount": "2336", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739988182" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "markets_v2" + }, + "name": "list_tokens_v2" + }, + "arguments": [ + "[\"2\",\"2\"]", + "[\"\",\"Wapal\"]", + "[\"0x7b4cd01cc85280139fbf2a11dd929a073febeba767d74904d0c691d68b74f321\",\"0x7b4cd01cc85280139fbf2a11dd929a073febeba767d74904d0c691d68b74f321\"]", + "[\"The Baptmen\",\"The Baptmen\"]", + "[\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\",\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"]", + "[\"0\",\"0\"]", + "[\"119000000\",\"119000000\"]", + "[\"0\",\"0\"]", + "[\"0x0\",\"0x254d2eccf3a4736a62f4e9eb8685fa54bb4c98a450c7c71fd4da1c6cb203855f\"]", + "[\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\",\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"]" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::markets_v2::list_tokens_v2" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "fGvkKHnGZkyumjtIC85RbEostHPoXoKtf1R9WQZ1soE=", + "signature": 
"Bxtc/IT9I1nIpVFiZ5qGxY+8o8PL9WoidB7bJkGBsNrSNSmoc4MC4M1WROxIRZ5aeWkavxrbiRsvCCtGk2DUDQ==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7" + }, + "sequenceNumber": "2", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"object\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\",\"to\":\"0x3929540e32fb063ddf7fecb764c914493d2844978621767b9f013326093258eb\"}" + }, + { + "key": { + "creationNumber": "15", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "135603", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "InsertListingEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::InsertListingEvent", + "data": "{\"listing\":{\"inner\":\"0x3929540e32fb063ddf7fecb764c914493d2844978621767b9f013326093258eb\"},\"price\":\"119000000\",\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"timestamp\":\"1739988091944133\",\"token\":{\"inner\":\"0x826cb695c5748ae84e9ed87902e9b214058d7b7589b32bcc48c30bdfe89f03e7\"}}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf" + }, + "sequenceNumber": "6", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0x254d2eccf3a4736a62f4e9eb8685fa54bb4c98a450c7c71fd4da1c6cb203855f\",\"object\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\",\"to\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\"}" + }, + { + "key": { + "creationNumber": "1125899906842628", + "accountAddress": "0x71f7c94805c33d32a7f9560c95f02e9d3b5bc49884a883916f03abe6da11ac08" + }, + "sequenceNumber": "74387", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9", + "module": "events", + "name": "ListingCanceledEvent" + } + }, + "typeStr": "0x584b50b999c78ade62f8359c91b5165ff390338d45f8e55969a04e65d76258c9::events::ListingCanceledEvent", + "data": "{\"listing\":\"0x254d2eccf3a4736a62f4e9eb8685fa54bb4c98a450c7c71fd4da1c6cb203855f\",\"price\":\"154500000\",\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"token_metadata\":{\"collection\":{\"vec\":[{\"inner\":\"0x7b4cd01cc85280139fbf2a11dd929a073febeba767d74904d0c691d68b74f321\"}]},\"collection_name\":\"The Baptmen\",\"creator_address\":\"0x777368fcbcc8d6c4de5dd13278436b519d7d8917646151dbc56c9d3a58bf51b6\",\"property_version\":{\"vec\":[]},\"token\":{\"vec\":[{\"inner\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"}]},\"token_name\":\"The Baptmen #1745\"},\"type\":\"fixed price\"}" + }, + { + "key": { + "creationNumber": "1125899906842624", + "accountAddress": "0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf" + }, + "sequenceNumber": "7", + "type": { + "type": "MOVE_TYPES_STRUCT", + 
"struct": { + "address": "0x1", + "module": "object", + "name": "TransferEvent" + } + }, + "typeStr": "0x1::object::TransferEvent", + "data": "{\"from\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"object\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\",\"to\":\"0x6db18abc2ba8fb386af8bc9e6d35ef88256a25fcc9cbbfffb0d785be1f64fa8c\"}" + }, + { + "key": { + "creationNumber": "15", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "135604", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "listings_v2", + "name": "InsertListingEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::listings_v2::InsertListingEvent", + "data": "{\"listing\":{\"inner\":\"0x6db18abc2ba8fb386af8bc9e6d35ef88256a25fcc9cbbfffb0d785be1f64fa8c\"},\"price\":\"119000000\",\"seller\":\"0xc24b9ea285e953bbf7470309b879bc60d5a68e71e2f2c17be047c3f74664e82b\",\"timestamp\":\"1739988091944133\",\"token\":{\"inner\":\"0x78d80871a136e75a0cde6b3ff0f45bc8e4c78f29e990b58947e49a1eecaaf6bf\"}}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"19\",\"io_gas_units\":\"15\",\"storage_fee_octas\":\"113440\",\"storage_fee_refund_octas\":\"62640\",\"total_charge_gas_units\":\"1168\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386889884_tradeport_v2_cancel_collection_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386889884_tradeport_v2_cancel_collection_offer.json new file mode 100644 index 0000000000000..82c40bc748418 --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386889884_tradeport_v2_cancel_collection_offer.json @@ -0,0 +1,223 @@ +{ + "timestamp": { + "seconds": "1739990687", + "nanos": 880613000 + }, + "version": "2386889884", + "info": { + "hash": "9iheHnWuyBq0BrLKJTcpwzRBffvtzmrONiR2pn3VtbQ=", + "stateChangeHash": "PLNL7KrmOeReA8EwxjaJ+snl8+ZKfiQ00iw2xLZY1Cc=", + "eventRootHash": "qjH7uoVTF1FcKuWsiu5XfnESnToLGgZl8DjDAQxV5TU=", + "gasUsed": "10", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "o0ExwQXGOdZtJhIvEXx8c0WqaJYyUE6y9S5amguxg94=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4", + "stateKeyHash": "kSJiqPFCska3SlG2fNyd46mssC9QXZVqf1lwRkrXuCM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": 
"{\"coin\":{\"value\":\"58786128904\"},\"deposit_events\":{\"counter\":\"7198\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"7264\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4", + "stateKeyHash": "sGxUIhBizy+RgNIG5xaVsDlZM1aKr/4wnYoa1MAFoQQ=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x08c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"coin_register_events\":{\"counter\":\"4\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"6063\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"42466\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_DELETE_RESOURCE", + "deleteResource": { + "address": "0x2b040f0a6dcf7a2dc0bf2c0e0cfe962983038ab3df4211daba31d5f8d7a005d7", + "stateKeyHash": "F7ZKvQ53/VFAbtkHmNzUedWGwUFC+O/JrFCw2W8cgVI=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectGroup" + }, + "typeStr": "0x1::object::ObjectGroup" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": "{\"accept_collection_bid_events\":{\"counter\":\"30950\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"630\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"41275\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3874\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"114748\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11693\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": 
"\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114089042018835901\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10388", + "blockHeight": "292870330", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 267, + "eventSizeInfo": [ + { + "typeTagBytes": 52, + "totalBytes": 108 + }, + { + "typeTagBytes": 71, + "totalBytes": 231 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 87 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4", + "sequenceNumber": "42465", + "maxGasAmount": "100000", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739991287" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "biddings_v2" + }, + "name": "cancel_collection_bid" + }, + "arguments": [ + "{\"inner\":\"0x2b040f0a6dcf7a2dc0bf2c0e0cfe962983038ab3df4211daba31d5f8d7a005d7\"}" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::cancel_collection_bid" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "EKGqbmjxRW+jOGpkqaNnq+z8syHRlO+/jAx3Zhrcpec=", + "signature": "G38gFofPQt9g7gVSmTmP6sm8Nchs/MtOFTzw7x+HSoRLFeh7IMdunZkRFtGXVQYzEqnRdEkniWzOnwFvAQ27CQ==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "2", + "accountAddress": "0x08c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4" + }, + "sequenceNumber": "7197", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "DepositEvent" + } + }, + "typeStr": "0x1::coin::DepositEvent", + "data": "{\"amount\":\"2726630000\"}" + }, + { + "key": { + "creationNumber": "22", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "41274", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "DeleteCollectionBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::DeleteCollectionBidEvent", + "data": "{\"bid\":{\"inner\":\"0x2b040f0a6dcf7a2dc0bf2c0e0cfe962983038ab3df4211daba31d5f8d7a005d7\"},\"bid_buyer\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"collection\":{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"},\"price\":\"2726630000\",\"timestamp\":\"1739990687880613\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"5\",\"io_gas_units\":\"5\",\"storage_fee_octas\":\"0\",\"storage_fee_refund_octas\":\"55640\",\"total_charge_gas_units\":\"10\"}" + } + ] + } +} \ No newline at end of file diff --git 
a/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386891051_tradeport_v2_place_collection_offer.json b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386891051_tradeport_v2_place_collection_offer.json new file mode 100644 index 0000000000000..e58aa382eb46e --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-test-transactions/src/json_transactions/imported_mainnet_txns/2386891051_tradeport_v2_place_collection_offer.json @@ -0,0 +1,241 @@ +{ + "timestamp": { + "seconds": "1739990720", + "nanos": 621178000 + }, + "version": "2386891051", + "info": { + "hash": "Jvce9khSZR0dmFxyRrgZSJmjTAT/6cTajh8har+Y8No=", + "stateChangeHash": "/vpugO6ZsAAuwoJSR2ILpKUC+CRRPLCm4HLRMUIeasI=", + "eventRootHash": "uyHUcc78fzF1yp5Mo9L2lSPEqg5lTOeX7scV41TfCaE=", + "gasUsed": "570", + "success": true, + "vmStatus": "Executed successfully", + "accumulatorRootHash": "zKGNbHn2/d9nsbrIe+jVIbUFml9tBeIdM56kQ5/lnPc=", + "changes": [ + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4", + "stateKeyHash": "kSJiqPFCska3SlG2fNyd46mssC9QXZVqf1lwRkrXuCM=", + "type": { + "address": "0x1", + "module": "coin", + "name": "CoinStore", + "genericTypeParams": [ + { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "aptos_coin", + "name": "AptosCoin" + } + } + ] + }, + "typeStr": "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>", + "data": "{\"coin\":{\"value\":\"56026081904\"},\"deposit_events\":{\"counter\":\"7198\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"2\"}}},\"frozen\":false,\"withdraw_events\":{\"counter\":\"7265\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"3\"}}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4", + "stateKeyHash": "sGxUIhBizy+RgNIG5xaVsDlZM1aKr/4wnYoa1MAFoQQ=", + "type": { + "address": "0x1", + "module": "account", + "name": "Account" + }, + "typeStr": "0x1::account::Account", + "data": "{\"authentication_key\":\"0x08c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"coin_register_events\":{\"counter\":\"4\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"0\"}}},\"guid_creation_num\":\"6064\",\"key_rotation_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"creation_num\":\"1\"}}},\"rotation_capability_offer\":{\"for\":{\"vec\":[]}},\"sequence_number\":\"42467\",\"signer_capability_offer\":{\"for\":{\"vec\":[]}}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe06d17630ff3fd295fcc1b83dc023187de07bb79118998ea17d0873a34150f84", + "stateKeyHash": "Hz/oHbg8iya9KaPUZfPrCqebdcX0OwFGtCyRt9kUj80=", + "type": { + "address": "0x1", + "module": "object", + "name": "ObjectCore" + }, + "typeStr": "0x1::object::ObjectCore", + "data": "{\"allow_ungated_transfer\":false,\"guid_creation_num\":\"1125899906842625\",\"owner\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"transfer_events\":{\"counter\":\"0\",\"guid\":{\"id\":{\"addr\":\"0xe06d17630ff3fd295fcc1b83dc023187de07bb79118998ea17d0873a34150f84\",\"creation_num\":\"1125899906842624\"}}}}" + } + }, + { + "type": 
"TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe06d17630ff3fd295fcc1b83dc023187de07bb79118998ea17d0873a34150f84", + "stateKeyHash": "Hz/oHbg8iya9KaPUZfPrCqebdcX0OwFGtCyRt9kUj80=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "CollectionBid" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::CollectionBid", + "data": "{\"bid_buyer\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"coins\":{\"value\":\"2759990000\"},\"collection\":{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"},\"delete_ref\":{\"self\":\"0xe06d17630ff3fd295fcc1b83dc023187de07bb79118998ea17d0873a34150f84\"}}" + } + }, + { + "type": "TYPE_WRITE_RESOURCE", + "writeResource": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "stateKeyHash": "YyxEM2ISNprB8ZNNbepmRFb5Yl7G+bHX/jJlJ/MnbD8=", + "type": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "BidStore" + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::BidStore", + "data": "{\"accept_collection_bid_events\":{\"counter\":\"30950\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"23\"}}},\"accept_token_bid_events\":{\"counter\":\"630\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"20\"}}},\"delete_collection_bid_events\":{\"counter\":\"41275\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"22\"}}},\"delete_token_bid_events\":{\"counter\":\"3874\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"19\"}}},\"insert_collection_bid_events\":{\"counter\":\"114749\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"21\"}}},\"insert_token_bid_events\":{\"counter\":\"11693\",\"guid\":{\"id\":{\"addr\":\"0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26\",\"creation_num\":\"18\"}}}}" + } + }, + { + "type": "TYPE_WRITE_TABLE_ITEM", + "writeTableItem": { + "stateKeyHash": "bkso1A+YoQamUWNTCSTA3LQME0nTqpFdEItNbPwd2xk=", + "handle": "0x1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca", + "key": "0x0619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935", + "data": { + "key": "\"0x619dc29a0aac8fa146714058e8dd6d2d0f3bdf5f6331907bf91f3acd81e6935\"", + "keyType": "address", + "value": "\"114088996445803003\"", + "valueType": "u128" + } + } + } + ] + }, + "epoch": "10388", + "blockHeight": "292870507", + "type": "TRANSACTION_TYPE_USER", + "sizeInfo": { + "transactionBytes": 279, + "eventSizeInfo": [ + { + "typeTagBytes": 53, + "totalBytes": 109 + }, + { + "typeTagBytes": 71, + "totalBytes": 231 + }, + { + "typeTagBytes": 63, + "totalBytes": 103 + } + ], + "writeOpSizeInfo": [ + { + "keyBytes": 138, + "valueBytes": 105 + }, + { + "keyBytes": 84, + "valueBytes": 147 + }, + { + "keyBytes": 87, + "valueBytes": 306 + }, + { + "keyBytes": 89, + "valueBytes": 288 + }, + { + "keyBytes": 66, + "valueBytes": 16 + } + ] + }, + "user": { + "request": { + "sender": "0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4", + "sequenceNumber": "42466", + 
"maxGasAmount": "100000", + "gasUnitPrice": "100", + "expirationTimestampSecs": { + "seconds": "1739991320" + }, + "payload": { + "type": "TYPE_ENTRY_FUNCTION_PAYLOAD", + "entryFunctionPayload": { + "function": { + "module": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "name": "biddings_v2" + }, + "name": "collection_bids" + }, + "arguments": [ + "{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"}", + "\"2759990000\"", + "\"1\"" + ], + "entryFunctionIdStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::collection_bids" + } + }, + "signature": { + "type": "TYPE_ED25519", + "ed25519": { + "publicKey": "EKGqbmjxRW+jOGpkqaNnq+z8syHRlO+/jAx3Zhrcpec=", + "signature": "IAfJy5F72RzRb896WO3DxY11VrEICX2HoSHqKLxd436+aYvY4VXGFIG9Uq+g7hqEXDITyYpf+w7Mecv9ry1qBQ==" + } + } + }, + "events": [ + { + "key": { + "creationNumber": "3", + "accountAddress": "0x08c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4" + }, + "sequenceNumber": "7264", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "coin", + "name": "WithdrawEvent" + } + }, + "typeStr": "0x1::coin::WithdrawEvent", + "data": "{\"amount\":\"2759990000\"}" + }, + { + "key": { + "creationNumber": "21", + "accountAddress": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26" + }, + "sequenceNumber": "114748", + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26", + "module": "biddings_v2", + "name": "InsertCollectionBidEvent" + } + }, + "typeStr": "0xe11c12ec495f3989c35e1c6a0af414451223305b579291fc8f3d9d0575a23c26::biddings_v2::InsertCollectionBidEvent", + "data": "{\"bid\":{\"inner\":\"0xe06d17630ff3fd295fcc1b83dc023187de07bb79118998ea17d0873a34150f84\"},\"bid_buyer\":\"0x8c557bb0a12d47c1eda90dd4883b44674111b915fa39ff862e6a0a39140dcd4\",\"collection\":{\"inner\":\"0xa2485c3b392d211770ed161e73a1097d21016c7dd41f53592434380b2aa14cba\"},\"price\":\"2759990000\",\"timestamp\":\"1739990720621178\"}" + }, + { + "key": { + "accountAddress": "0x0" + }, + "type": { + "type": "MOVE_TYPES_STRUCT", + "struct": { + "address": "0x1", + "module": "transaction_fee", + "name": "FeeStatement" + } + }, + "typeStr": "0x1::transaction_fee::FeeStatement", + "data": "{\"execution_gas_units\":\"7\",\"io_gas_units\":\"7\",\"storage_fee_octas\":\"55640\",\"storage_fee_refund_octas\":\"0\",\"total_charge_gas_units\":\"570\"}" + } + ] + } +} \ No newline at end of file diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/imported_transactions/imported_transactions.yaml b/ecosystem/indexer-grpc/indexer-transaction-generator/imported_transactions/imported_transactions.yaml index 1e64e7879aa8a..745084ebfc150 100644 --- a/ecosystem/indexer-grpc/indexer-transaction-generator/imported_transactions/imported_transactions.yaml +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/imported_transactions/imported_transactions.yaml @@ -119,3 +119,31 @@ mainnet: # Fungible asset migration 1680592683: 1680592683_fa_migration_coin_info 1957950162: 1957950162_fa_migration_v2_store_only + + # Nft Aggregator tradeport + 2386133936: 2386133936_tradeport_v2_place_offer + 2298838662: 2298838662_tradeport_v2_fill_offer + 2386142672: 2386142672_tradeport_v2_cancel_offer + 2386889884: 2386889884_tradeport_v2_cancel_collection_offer + 2386021136: 2386021136_tradeport_v2_fill_collection_offer + 2386891051: 
2386891051_tradeport_v2_place_collection_offer + 2386716658: 2386716658_tradeport_v2_cancel_listing + 2386809975: 2386809975_tradeport_v2_place_listing + 2386455218: 2386455218_tradeport_v2_fill_listing + + 2296149225: 2296149225_tradeport_v2_accept_token_delist + 2296098846: 2296098846_tradeport_v2_accept_token_delist2 + 2277018899: 2277018899_tradeport_v2_accept_token_delist_same_token_data_id + + # wapal + 2381810159: 2381810159_wapal_cancel_offer + 2313248448: 2313248448_wapal_fill_offer + 2382313982: 2382313982_wapal_place_offer + 2382373209: 2382373209_wapal_place_collection_offer + 2382373978: 2382373978_wapal_cancel_collection_offer + 2382219668: 2382219668_wapal_fill_collection_offer + 2382221134: 2382221134_wapal_fill_listing + 2381742315: 2381742315_wapal_cancel_listing + 2382251863: 2382251863_wapal_place_listing + + # diff --git a/ecosystem/indexer-grpc/indexer-transaction-generator/src/config_template.yaml b/ecosystem/indexer-grpc/indexer-transaction-generator/src/config_template.yaml index bb5780d45384e..62a515622d186 100644 --- a/ecosystem/indexer-grpc/indexer-transaction-generator/src/config_template.yaml +++ b/ecosystem/indexer-grpc/indexer-transaction-generator/src/config_template.yaml @@ -1,6 +1,6 @@ import_config: testnet: - # Transaction Stream endpoint addresss. + # Transaction Stream endpoint address. transaction_stream_endpoint: https://grpc.testnet.aptoslabs.com:443 # (Optional) The key to use with developers.aptoslabs.com api_key: YOUR_KEY_HERE @@ -8,21 +8,21 @@ import_config: versions_to_import: 123: testnet_v1.json # mainnet: - # # Transaction Stream endpoint addresss. + # # Transaction Stream endpoint address. # transaction_stream_endpoint: https://grpc.mainnet.aptoslabs.com:443 # # (Optional) The key to use with developers.aptoslabs.com # api_key: YOUR_KEY_HERE # versions_to_import: # 123: mainnet_v1.json # devnet: - # # Transaction Stream endpoint addresss. + # # Transaction Stream endpoint address. # transaction_stream_endpoint: https://grpc.devnet.aptoslabs.com:443 # # (Optional) The key to use with developers.aptoslabs.com # api_key: YOUR_KEY_HERE # versions_to_import: # 123: devnet_v1.json # custom: - # # Transaction Stream endpoint addresss. + # # Transaction Stream endpoint address.
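For context, the `import_config` layout documented in `config_template.yaml` — a Transaction Stream endpoint, an optional API key, and a `versions_to_import` map from transaction version to output file stem — deserializes naturally with serde. A minimal sketch, assuming `serde`/`serde_yaml` and hypothetical struct names that mirror the template rather than the generator's actual config types:

```rust
use serde::Deserialize;
use std::collections::BTreeMap;

// Hypothetical mirror of one network entry in config_template.yaml;
// the transaction generator's real config types may differ.
#[derive(Debug, Deserialize)]
struct NetworkImportConfig {
    transaction_stream_endpoint: String,
    api_key: Option<String>,
    // Transaction version -> output file stem, e.g. 123 -> "testnet_v1.json".
    versions_to_import: BTreeMap<u64, String>,
}

#[derive(Debug, Deserialize)]
struct ImportConfig {
    import_config: BTreeMap<String, NetworkImportConfig>,
}

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
import_config:
  testnet:
    transaction_stream_endpoint: https://grpc.testnet.aptoslabs.com:443
    api_key: YOUR_KEY_HERE
    versions_to_import:
      123: testnet_v1.json
"#;
    let cfg: ImportConfig = serde_yaml::from_str(yaml)?;
    println!("{:?}", cfg.import_config["testnet"].versions_to_import);
    Ok(())
}
```

Numeric YAML keys such as `123:` map onto a `BTreeMap<u64, String>`, which also keeps the versions sorted during iteration.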
# transaction_stream_endpoint: YOUR_CUSTOM_ENDPOINT # versions_to_import: - # 123: custom_v1.json \ No newline at end of file + # 123: custom_v1.json diff --git a/execution/executor-benchmark/src/block_preparation.rs b/execution/executor-benchmark/src/block_preparation.rs index c58e428214b30..c80ab728a7c4a 100644 --- a/execution/executor-benchmark/src/block_preparation.rs +++ b/execution/executor-benchmark/src/block_preparation.rs @@ -94,7 +94,11 @@ impl BlockPreparationStage { let partitioned_txns = partitioner.partition(analyzed_transactions, self.num_executor_shards); timer.stop_and_record(); - ExecutableBlock::new(block_id, ExecutableTransactions::Sharded(partitioned_txns)) + ExecutableBlock::new( + block_id, + ExecutableTransactions::Sharded(partitioned_txns), + vec![], + ) }, }; self.num_blocks_processed += 1; diff --git a/execution/executor-benchmark/src/db_access.rs b/execution/executor-benchmark/src/db_access.rs index 36598d57b5c47..c704f74f52c4c 100644 --- a/execution/executor-benchmark/src/db_access.rs +++ b/execution/executor-benchmark/src/db_access.rs @@ -200,7 +200,7 @@ impl DbAccessUtil { struct_tag .type_args .iter() - .map(|v| v.to_string()) + .map(|v| v.to_canonical_string()) .join(", ") ) .to_string() diff --git a/execution/executor-benchmark/src/lib.rs b/execution/executor-benchmark/src/lib.rs index f2d7348946975..4dab03e5fdc4a 100644 --- a/execution/executor-benchmark/src/lib.rs +++ b/execution/executor-benchmark/src/lib.rs @@ -56,7 +56,9 @@ pub struct SingleRunResults { } pub fn default_benchmark_features() -> Features { - Features::default() + let mut features = Features::default(); + features.disable(FeatureFlag::CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION); + features } pub fn init_db(config: &NodeConfig) -> DbReaderWriter { @@ -656,7 +658,10 @@ mod tests { account_address::AccountAddress, on_chain_config::{FeatureFlag, Features}, state_store::state_key::inner::StateKeyInner, - transaction::{Transaction, TransactionPayload}, + transaction::{ + signature_verified_transaction::into_signature_verified_block, Transaction, + TransactionPayload, + }, }; use aptos_vm::{aptos_vm::AptosVMBlockExecutor, AptosVM, VMBlockExecutor}; use itertools::Itertools; @@ -758,7 +763,7 @@ mod tests { let block_id = HashValue::random(); vm_executor .execute_and_update_state( - (block_id, vec![txn.clone()]).into(), + (block_id, into_signature_verified_block(vec![txn.clone()])).into(), parent_block_id, BENCHMARKS_BLOCK_EXECUTOR_ONCHAIN_CONFIG, ) @@ -779,7 +784,7 @@ mod tests { let block_id = HashValue::random(); other_executor .execute_and_update_state( - (block_id, vec![txn]).into(), + (block_id, into_signature_verified_block(vec![txn])).into(), parent_block_id, BENCHMARKS_BLOCK_EXECUTOR_ONCHAIN_CONFIG, ) diff --git a/execution/executor-benchmark/src/native/aptos_vm_uncoordinated.rs b/execution/executor-benchmark/src/native/aptos_vm_uncoordinated.rs index 56ebb80fa6c54..5faa9edabe1c0 100644 --- a/execution/executor-benchmark/src/native/aptos_vm_uncoordinated.rs +++ b/execution/executor-benchmark/src/native/aptos_vm_uncoordinated.rs @@ -44,10 +44,18 @@ impl VMBlockExecutor for AptosVMParallelUncoordinatedBlockExecutor { let env = AptosEnvironment::new(state_view); let vm = AptosVM::new(&env, state_view); + let block_epilogue_txn = Transaction::block_epilogue_v0( + transaction_slice_metadata + .append_state_checkpoint_to_block() + .unwrap(), + BlockEndInfo::new_empty(), + ); + let transaction_outputs = NATIVE_EXECUTOR_POOL.install(|| { txn_provider .get_txns() .par_iter() + 
.chain(vec![block_epilogue_txn.clone().into()].par_iter()) .enumerate() .map(|(txn_idx, txn)| { let log_context = AdapterLogSchema::new(state_view.id(), txn_idx); @@ -68,13 +76,6 @@ impl VMBlockExecutor for AptosVMParallelUncoordinatedBlockExecutor { .collect::, _>>() })?; - let block_epilogue_txn = Transaction::block_epilogue( - transaction_slice_metadata - .append_state_checkpoint_to_block() - .unwrap(), - BlockEndInfo::new_empty(), - ); - Ok(BlockOutput::new( transaction_outputs, Some(block_epilogue_txn), diff --git a/execution/executor-benchmark/src/native/native_transaction.rs b/execution/executor-benchmark/src/native/native_transaction.rs index 8c1ccbd9b89ed..b80e1b800a48a 100644 --- a/execution/executor-benchmark/src/native/native_transaction.rs +++ b/execution/executor-benchmark/src/native/native_transaction.rs @@ -35,6 +35,7 @@ pub enum NativeTransaction { fail_on_recipient_account_existing: bool, fail_on_recipient_account_missing: bool, }, + BlockEpilogue, } impl NativeTransaction { @@ -115,6 +116,7 @@ impl NativeTransaction { _ => unimplemented!(), } }, + aptos_types::transaction::Transaction::BlockEpilogue(_) => Self::BlockEpilogue, _ => unimplemented!(), } } diff --git a/execution/executor-benchmark/src/native/native_vm.rs b/execution/executor-benchmark/src/native/native_vm.rs index 6e8d703646846..c239f1a721c47 100644 --- a/execution/executor-benchmark/src/native/native_vm.rs +++ b/execution/executor-benchmark/src/native/native_vm.rs @@ -149,20 +149,19 @@ impl ExecutorTask for NativeVMExecutorTask { txn: &SignatureVerifiedTransaction, _txn_idx: TxnIndex, ) -> ExecutionStatus { - let gas_units = 4; - match self.execute_transaction_impl( executor_with_group_view, txn, - gas_units, self.fa_migration_complete, ) { - Ok(change_set) => ExecutionStatus::Success(AptosTransactionOutput::new(VMOutput::new( - change_set, - ModuleWriteSet::empty(), - FeeStatement::new(gas_units, gas_units, 0, 0, 0), - TransactionStatus::Keep(aptos_types::transaction::ExecutionStatus::Success), - ))), + Ok((change_set, gas_units)) => { + ExecutionStatus::Success(AptosTransactionOutput::new(VMOutput::new( + change_set, + ModuleWriteSet::empty(), + FeeStatement::new(gas_units, gas_units, 0, 0, 0), + TransactionStatus::Keep(aptos_types::transaction::ExecutionStatus::Success), + ))) + }, Err(_) => ExecutionStatus::SpeculativeExecutionAbortError("something".to_string()), } } @@ -185,9 +184,9 @@ impl NativeVMExecutorTask { &self, view: &(impl ExecutorView + ResourceGroupView), txn: &SignatureVerifiedTransaction, - gas_units: u64, fa_migration_complete: bool, - ) -> Result { + ) -> Result<(VMChangeSet, u64), ()> { + let gas_units = 4; let gas = gas_units * 100; let mut resource_write_set = BTreeMap::new(); @@ -352,6 +351,7 @@ impl NativeVMExecutorTask { } } }, + NativeTransaction::BlockEpilogue => return Ok((VMChangeSet::empty(), 0)), }; events.push(( @@ -361,12 +361,15 @@ impl NativeVMExecutorTask { None, )); - Ok(VMChangeSet::new( - resource_write_set, - events, - delayed_field_change_set, - aggregator_v1_write_set, - aggregator_v1_delta_set, + Ok(( + VMChangeSet::new( + resource_write_set, + events, + delayed_field_change_set, + aggregator_v1_write_set, + aggregator_v1_delta_set, + ), + gas_units, )) } diff --git a/execution/executor-benchmark/src/native/parallel_uncoordinated_block_executor.rs b/execution/executor-benchmark/src/native/parallel_uncoordinated_block_executor.rs index 1966dbc9ac76c..52d2df18582ff 100644 --- a/execution/executor-benchmark/src/native/parallel_uncoordinated_block_executor.rs +++ 
b/execution/executor-benchmark/src/native/parallel_uncoordinated_block_executor.rs @@ -46,7 +46,6 @@ use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterato use std::{ cell::Cell, collections::BTreeMap, - hash::RandomState, sync::atomic::{AtomicU64, Ordering}, }; use thread_local::ThreadLocal; @@ -86,13 +85,21 @@ impl VMBlockExecutor _onchain_config: BlockExecutorConfigFromOnchain, transaction_slice_metadata: TransactionSliceMetadata, ) -> Result, VMStatus> { - let native_transactions = NATIVE_EXECUTOR_POOL.install(|| { + let block_epilogue_txn = Transaction::block_epilogue_v0( + transaction_slice_metadata + .append_state_checkpoint_to_block() + .unwrap(), + BlockEndInfo::new_empty(), + ); + + let mut native_transactions = NATIVE_EXECUTOR_POOL.install(|| { txn_provider .get_txns() .par_iter() .map(NativeTransaction::parse) .collect::>() }); + native_transactions.push(NativeTransaction::parse(&block_epilogue_txn.clone().into())); let _timer = BLOCK_EXECUTOR_INNER_EXECUTE_BLOCK.start_timer(); @@ -112,13 +119,6 @@ impl VMBlockExecutor ) })?; - let block_epilogue_txn = Transaction::block_epilogue( - transaction_slice_metadata - .append_state_checkpoint_to_block() - .unwrap(), - BlockEndInfo::new_empty(), - ); - Ok(BlockOutput::new( transaction_outputs, Some(block_epilogue_txn), @@ -140,11 +140,13 @@ impl IncrementalOutput { } fn into_success_output(mut self, gas: u64) -> Result { - self.events.push( - FeeStatement::new(gas, gas, 0, 0, 0) - .create_event_v2() - .expect("Creating FeeStatement should always succeed"), - ); + if gas != 0 { + self.events.push( + FeeStatement::new(gas, gas, 0, 0, 0) + .create_event_v2() + .expect("Creating FeeStatement should always succeed"), + ); + } Ok(TransactionOutput::new( WriteSetMut::new(self.write_set).freeze()?, @@ -425,6 +427,7 @@ impl RawTransactionExecutor for T { } } }, + NativeTransaction::BlockEpilogue => return output.into_success_output(0), }; self.reduce_apt_supply(fa_migration_complete, gas, state_view, &mut output)?; @@ -1023,7 +1026,7 @@ impl NativeValueCacheRawTransactionExecutor { &'a self, key: &StateKey, init_value: impl FnOnce(&StateKey) -> CachedResource, - ) -> Ref<'a, StateKey, CachedResource, RandomState> { + ) -> Ref<'a, StateKey, CachedResource> { // Data in cache is going to be the hot path, so short-circuit here to avoid cloning the key. if let Some(ref_mut) = self.cache.get(key) { return ref_mut; @@ -1039,7 +1042,7 @@ impl NativeValueCacheRawTransactionExecutor { &'a self, key: &StateKey, init_value: impl FnOnce(&StateKey) -> CachedResource, - ) -> RefMut<'a, StateKey, CachedResource, RandomState> { + ) -> RefMut<'a, StateKey, CachedResource> { // Data in cache is going to be the hot path, so short-circuit here to avoid cloning the key. 
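The two cache accessors above keep the same get-before-insert short circuit so the common hit path never clones the key; only the explicit `RandomState` hasher parameter disappears from the `Ref`/`RefMut` signatures. A standalone sketch of that pattern under simplified assumptions — a `DashMap<String, u64>` standing in for the benchmark's `StateKey`-keyed cache:

```rust
use dashmap::{mapref::one::Ref, DashMap};

// Hypothetical cache keyed by String; the benchmark keys by StateKey.
fn get_or_init<'a>(
    cache: &'a DashMap<String, u64>,
    key: &str,
    init_value: impl FnOnce(&str) -> u64,
) -> Ref<'a, String, u64> {
    // Hot path: the entry already exists, so return it without cloning the key.
    if let Some(existing) = cache.get(key) {
        return existing;
    }
    // Cold path: clone the key once to insert, then hand back a read guard.
    // The temporary RefMut from or_insert_with is dropped at the end of the
    // statement, so the subsequent get() does not deadlock on the shard lock.
    cache
        .entry(key.to_string())
        .or_insert_with(|| init_value(key));
    cache.get(key).expect("just inserted")
}

fn main() {
    let cache = DashMap::new();
    assert_eq!(*get_or_init(&cache, "balance", |_| 42), 42);
    // Second lookup hits the hot path and ignores the init closure.
    assert_eq!(*get_or_init(&cache, "balance", |_| 0), 42);
}
```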
if let Some(ref_mut) = self.cache.get_mut(key) { return ref_mut; @@ -1241,6 +1244,7 @@ impl RawTransactionExecutor for NativeNoStorageRawTransactionExecutor { } (sender, sequence_number) }, + NativeTransaction::BlockEpilogue => return output.into_success_output(0), }; self.seq_nums.insert(sender, sequence_number); diff --git a/execution/executor-service/src/test_utils.rs b/execution/executor-service/src/test_utils.rs index 3934e676b4e95..7f28cc2746234 100644 --- a/execution/executor-service/src/test_utils.rs +++ b/execution/executor-service/src/test_utils.rs @@ -148,7 +148,7 @@ pub fn test_sharded_block_executor_no_conflict Vec { + block + .iter() + .map(|txn| { + txn.borrow_into_inner().try_as_signed_user_txn().map_or( + AuxiliaryInfo::new_empty(), + |_| { + AuxiliaryInfo::new( + PersistedAuxiliaryInfo::None, + Some(EphemeralAuxiliaryInfo { proposer_index: 0 }), + ) + }, + ) + }) + .collect() +} diff --git a/execution/executor-types/src/execution_output.rs b/execution/executor-types/src/execution_output.rs index b500dfe5a818d..f21a6614d660d 100644 --- a/execution/executor-types/src/execution_output.rs +++ b/execution/executor-types/src/execution_output.rs @@ -178,12 +178,12 @@ impl Inner { let aborts = self .to_commit .iter() - .flat_map(|(txn, output)| match output.status().status() { + .flat_map(|(txn, output, aux_info)| match output.status().status() { Ok(execution_status) => { if execution_status.is_success() { None } else { - Some(format!("{:?}: {:?}", txn, output.status())) + Some(format!("{txn:?}: {:?} {aux_info:?}", output.status())) } }, Err(_) => None, @@ -194,13 +194,13 @@ impl Inner { .to_discard .iter() .take(3) - .map(|(txn, output)| format!("{:?}: {:?}", txn, output.status())) + .map(|(txn, output, aux_info)| format!("{txn:?}: {:?} {aux_info:?}", output.status())) .collect::>(); let retries_3 = self .to_retry .iter() .take(3) - .map(|(txn, output)| format!("{:?}: {:?}", txn, output.status())) + .map(|(txn, output, aux_info)| format!("{txn:?}: {:?} {aux_info:?}", output.status())) .collect::>(); if !aborts.is_empty() || !discards_3.is_empty() || !retries_3.is_empty() { diff --git a/execution/executor-types/src/lib.rs b/execution/executor-types/src/lib.rs index 8bb4b3d756216..e72fab1477fdc 100644 --- a/execution/executor-types/src/lib.rs +++ b/execution/executor-types/src/lib.rs @@ -15,8 +15,8 @@ use aptos_types::{ ledger_info::LedgerInfoWithSignatures, state_store::state_key::StateKey, transaction::{ - Transaction, TransactionInfo, TransactionListWithProof, TransactionOutputListWithProof, - Version, + PersistedAuxiliaryInfo, Transaction, TransactionInfo, TransactionListWithProof, + TransactionOutputListWithProof, Version, }, write_set::WriteSet, }; @@ -248,6 +248,7 @@ pub trait TransactionReplayer: Send { fn enqueue_chunks( &self, transactions: Vec, + persisted_info: Vec, transaction_infos: Vec, write_sets: Vec, event_vecs: Vec>, diff --git a/execution/executor-types/src/state_compute_result.rs b/execution/executor-types/src/state_compute_result.rs index ec370fd5ecfb8..5664ed902136c 100644 --- a/execution/executor-types/src/state_compute_result.rs +++ b/execution/executor-types/src/state_compute_result.rs @@ -159,6 +159,7 @@ impl StateComputeResult { ChunkToCommit { first_version: self.ledger_update_output.first_version(), transactions: &self.execution_output.to_commit.transactions, + persisted_info: &self.execution_output.to_commit.persisted_info, transaction_outputs: &self.execution_output.to_commit.transaction_outputs, transaction_infos: 
&self.ledger_update_output.transaction_infos, state: &self.execution_output.result_state, diff --git a/execution/executor-types/src/transactions_with_output.rs b/execution/executor-types/src/transactions_with_output.rs index 051c8f21b64a5..88a9e5f1980b3 100644 --- a/execution/executor-types/src/transactions_with_output.rs +++ b/execution/executor-types/src/transactions_with_output.rs @@ -5,7 +5,7 @@ use crate::metrics::TIMER; use anyhow::{ensure, Result}; use aptos_metrics_core::TimerHelper; use aptos_storage_interface::state_store::state_update_refs::StateUpdateRefs; -use aptos_types::transaction::{Transaction, TransactionOutput, Version}; +use aptos_types::transaction::{PersistedAuxiliaryInfo, Transaction, TransactionOutput, Version}; use itertools::izip; use std::{ fmt::{Debug, Formatter}, @@ -16,17 +16,21 @@ use std::{ pub struct TransactionsWithOutput { pub transactions: Vec, pub transaction_outputs: Vec, + pub persisted_info: Vec, } impl TransactionsWithOutput { pub fn new( transactions: Vec, transaction_outputs: Vec, + persisted_info: Vec, ) -> Self { assert_eq!(transactions.len(), transaction_outputs.len()); + assert_eq!(transactions.len(), persisted_info.len()); Self { transactions, transaction_outputs, + persisted_info, } } @@ -34,9 +38,15 @@ impl TransactionsWithOutput { Self::default() } - pub fn push(&mut self, transaction: Transaction, transaction_output: TransactionOutput) { + pub fn push( + &mut self, + transaction: Transaction, + transaction_output: TransactionOutput, + persisted_info: PersistedAuxiliaryInfo, + ) { self.transactions.push(transaction); self.transaction_outputs.push(transaction_output); + self.persisted_info.push(persisted_info); } pub fn len(&self) -> usize { @@ -47,8 +57,14 @@ impl TransactionsWithOutput { self.transactions.is_empty() } - pub fn iter(&self) -> impl Iterator { - izip!(self.transactions.iter(), self.transaction_outputs.iter(),) + pub fn iter( + &self, + ) -> impl Iterator { + izip!( + self.transactions.iter(), + self.transaction_outputs.iter(), + self.persisted_info.iter() + ) } } @@ -96,19 +112,21 @@ impl TransactionsToKeep { first_version: Version, transactions: Vec, transaction_outputs: Vec, + auxiliary_info: Vec, is_reconfig: bool, ) -> Self { - let txns_with_output = TransactionsWithOutput::new(transactions, transaction_outputs); + let txns_with_output = + TransactionsWithOutput::new(transactions, transaction_outputs, auxiliary_info); Self::index(first_version, txns_with_output, is_reconfig) } pub fn new_empty() -> Self { - Self::make(0, vec![], vec![], false) + Self::make(0, vec![], vec![], vec![], false) } pub fn new_dummy_success(txns: Vec) -> Self { let txn_outputs = vec![TransactionOutput::new_empty_success(); txns.len()]; - Self::make(0, txns, txn_outputs, false) + Self::make(0, txns, txn_outputs, vec![], false) } pub fn is_reconfig(&self) -> bool { diff --git a/execution/executor/src/block_executor/mod.rs b/execution/executor/src/block_executor/mod.rs index 2195c36ab1a47..0b0fd25b89484 100644 --- a/execution/executor/src/block_executor/mod.rs +++ b/execution/executor/src/block_executor/mod.rs @@ -189,6 +189,7 @@ where let ExecutableBlock { block_id, transactions, + auxiliary_info, } = block; let mut block_vec = self .block_tree @@ -232,6 +233,7 @@ where DoGetExecutionOutput::by_transaction_execution( &self.block_executor, transactions, + auxiliary_info, parent_output.result_state(), state_view, onchain_config.clone(), diff --git a/execution/executor/src/chunk_executor/mod.rs b/execution/executor/src/chunk_executor/mod.rs index 
b0ca02943edb1..826d06776ada6 100644 --- a/execution/executor/src/chunk_executor/mod.rs +++ b/execution/executor/src/chunk_executor/mod.rs @@ -39,9 +39,10 @@ use aptos_types::{ ledger_info::LedgerInfoWithSignatures, state_store::StateViewId, transaction::{ - signature_verified_transaction::SignatureVerifiedTransaction, Transaction, - TransactionAuxiliaryData, TransactionInfo, TransactionListWithProof, TransactionOutput, - TransactionOutputListWithProof, TransactionStatus, Version, + signature_verified_transaction::SignatureVerifiedTransaction, AuxiliaryInfo, + PersistedAuxiliaryInfo, Transaction, TransactionAuxiliaryData, TransactionInfo, + TransactionListWithProof, TransactionOutput, TransactionOutputListWithProof, + TransactionStatus, Version, }, write_set::WriteSet, }; @@ -176,11 +177,16 @@ impl ChunkExecutorTrait for ChunkExecutor { first_transaction_output_version: v, proof: txn_infos_with_proof, } = txn_output_list_with_proof; - let (transactions, transaction_outputs) = transactions_and_outputs.into_iter().unzip(); + let (transactions, transaction_outputs): (Vec<_>, Vec<_>) = + transactions_and_outputs.into_iter().unzip(); + // TODO(grao): Support PersistedAuxiliaryInfo in state sync. + let mut persisted_info = Vec::new(); + persisted_info.resize(transactions.len(), PersistedAuxiliaryInfo::None); let chunk = ChunkToApply { transactions, transaction_outputs, + persisted_info, first_version: v.ok_or_else(|| anyhow!("first version is None"))?, }; let chunk_verifier = Arc::new(StateSyncChunkVerifier { @@ -407,6 +413,7 @@ impl TransactionReplayer for ChunkExecutor { fn enqueue_chunks( &self, transactions: Vec, + persisted_info: Vec, transaction_infos: Vec, write_sets: Vec, event_vecs: Vec>, @@ -421,6 +428,7 @@ impl TransactionReplayer for ChunkExecutor { .expect("not reset") .enqueue_chunks( transactions, + persisted_info, transaction_infos, write_sets, event_vecs, @@ -439,6 +447,7 @@ impl ChunkExecutorInner { fn enqueue_chunks( &self, mut transactions: Vec, + mut persisted_info: Vec, mut transaction_infos: Vec, mut write_sets: Vec, mut event_vecs: Vec>, @@ -468,6 +477,7 @@ impl ChunkExecutorInner { for (begin, end) in epochs { chunks_enqueued += self.remove_and_replay_epoch( &mut transactions, + &mut persisted_info, &mut transaction_infos, &mut write_sets, &mut event_vecs, @@ -508,6 +518,7 @@ impl ChunkExecutorInner { fn remove_and_replay_epoch( &self, transactions: &mut Vec, + persisted_info: &mut Vec, transaction_infos: &mut Vec, write_sets: &mut Vec, event_vecs: &mut Vec>, @@ -530,6 +541,7 @@ impl ChunkExecutorInner { // batch_end is a known broken version that won't pass execution verification self.remove_and_apply( transactions, + persisted_info, transaction_infos, write_sets, event_vecs, @@ -562,6 +574,7 @@ impl ChunkExecutorInner { }; self.remove_and_apply( transactions, + persisted_info, transaction_infos, write_sets, event_vecs, @@ -595,10 +608,14 @@ impl ChunkExecutorInner { .map(|t| t.into()) .collect::>(); + let mut auxiliary_info = Vec::new(); + // TODO(grao): Pass in persisted auxiliary info. + auxiliary_info.resize(txns.len(), AuxiliaryInfo::new_empty()); // State sync executor shouldn't have block gas limit. 
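The replay paths above fill the new auxiliary-info column with empty placeholders (the TODO(grao) markers) because AuxiliaryInfo is split into a persisted half, which is committed to storage, and an ephemeral half that exists only while a block is executed live. A minimal sketch of that split with simplified stand-in types; the real variants and payloads in aptos-types may differ:

```rust
// Sketch only: stand-ins for aptos_types::transaction::AuxiliaryInfo
// and PersistedAuxiliaryInfo; the V1 payload here is hypothetical.
#[derive(Clone, Copy, Debug, PartialEq)]
enum PersistedAuxiliaryInfo {
    None,
    V1 { transaction_index: u32 },
}

#[derive(Clone, Copy, Debug)]
struct EphemeralAuxiliaryInfo {
    proposer_index: u64,
}

#[derive(Clone, Copy, Debug)]
struct AuxiliaryInfo {
    persisted: PersistedAuxiliaryInfo,
    ephemeral: Option<EphemeralAuxiliaryInfo>,
}

impl AuxiliaryInfo {
    fn new(persisted: PersistedAuxiliaryInfo, ephemeral: Option<EphemeralAuxiliaryInfo>) -> Self {
        Self { persisted, ephemeral }
    }

    // What the replay/state-sync placeholders above expand to.
    fn new_empty() -> Self {
        Self::new(PersistedAuxiliaryInfo::None, None)
    }

    // Dropping the ephemeral half before anything reaches storage.
    fn into_persisted_info(self) -> PersistedAuxiliaryInfo {
        self.persisted
    }
}

fn main() {
    // A replayed chunk only has the persisted half, so the ephemeral
    // slot is None for every transaction.
    let persisted = vec![PersistedAuxiliaryInfo::None; 3];
    let full: Vec<AuxiliaryInfo> = persisted
        .into_iter()
        .map(|info| AuxiliaryInfo::new(info, None))
        .collect();
    assert!(full.iter().all(|i| i.ephemeral.is_none()));
    assert_eq!(full[0].into_persisted_info(), PersistedAuxiliaryInfo::None);

    // A live block could carry consensus-time data in the ephemeral half.
    let live = AuxiliaryInfo::new(
        PersistedAuxiliaryInfo::V1 { transaction_index: 0 },
        Some(EphemeralAuxiliaryInfo { proposer_index: 7 }),
    );
    assert_eq!(AuxiliaryInfo::new_empty().into_persisted_info(), PersistedAuxiliaryInfo::None);
    println!("{live:?}");
}
```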
let execution_output = DoGetExecutionOutput::by_transaction_execution::( &V::new(), txns.into(), + auxiliary_info, &parent_state, state_view, BlockExecutorConfigFromOnchain::new_no_block_limit(), @@ -635,6 +652,7 @@ impl ChunkExecutorInner { fn remove_and_apply( &self, transactions: &mut Vec, + persisted_info: &mut Vec, transaction_infos: &mut Vec, write_sets: &mut Vec, event_vecs: &mut Vec>, @@ -643,15 +661,17 @@ impl ChunkExecutorInner { ) -> Result<()> { let num_txns = (end_version - begin_version) as usize; let txn_infos: Vec<_> = transaction_infos.drain(..num_txns).collect(); - let (transactions, transaction_outputs) = multizip(( + let (transactions, persisted_info, transaction_outputs) = multizip(( transactions.drain(..num_txns), + persisted_info.drain(..num_txns), txn_infos.iter(), write_sets.drain(..num_txns), event_vecs.drain(..num_txns), )) - .map(|(txn, txn_info, write_set, events)| { + .map(|(txn, persisted_info, txn_info, write_set, events)| { ( txn, + persisted_info, TransactionOutput::new( write_set, events, @@ -661,11 +681,12 @@ impl ChunkExecutorInner { ), ) }) - .unzip(); + .multiunzip(); let chunk = ChunkToApply { transactions, transaction_outputs, + persisted_info, first_version: begin_version, }; let chunk_verifier = Arc::new(ReplayChunkVerifier { diff --git a/execution/executor/src/chunk_executor/transaction_chunk.rs b/execution/executor/src/chunk_executor/transaction_chunk.rs index eb749734dd919..288564c5c9670 100644 --- a/execution/executor/src/chunk_executor/transaction_chunk.rs +++ b/execution/executor/src/chunk_executor/transaction_chunk.rs @@ -17,7 +17,7 @@ use aptos_types::{ config::BlockExecutorConfigFromOnchain, transaction_slice_metadata::TransactionSliceMetadata, }, - transaction::{Transaction, TransactionOutput, Version}, + transaction::{AuxiliaryInfo, PersistedAuxiliaryInfo, Transaction, TransactionOutput, Version}, }; use aptos_vm::VMBlockExecutor; use once_cell::sync::Lazy; @@ -90,9 +90,13 @@ impl TransactionChunk for ChunkToExecute { }; let _timer = VM_EXECUTE_CHUNK.start_timer(); + let mut auxiliary_info = Vec::new(); + // TODO(grao): Pass in persisted auxiliary info. 
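remove_and_apply above drains one epoch's rows off the front of each input column, rebuilds a TransactionOutput per row, and splits the result back into per-column Vecs. With persisted info as a third column, the old two-way unzip becomes Itertools::multiunzip. A toy version of that drain-zip-unzip pattern, assuming the itertools crate and stand-in row types:

```rust
// Sketch of the drain -> multizip -> map -> multiunzip pattern.
use itertools::{multizip, Itertools};

fn main() {
    let mut txns = vec!["t0", "t1", "t2", "t3"];
    let mut persisted_info = vec![0u64, 1, 2, 3];
    let mut write_sets = vec!["w0", "w1", "w2", "w3"];

    // Rows belonging to the epoch currently being replayed.
    let num_txns = 2;

    let (txns_out, info_out, outputs): (Vec<_>, Vec<_>, Vec<_>) = multizip((
        txns.drain(..num_txns),
        persisted_info.drain(..num_txns),
        write_sets.drain(..num_txns),
    ))
    .map(|(txn, info, write_set)| {
        // Stand-in for rebuilding a TransactionOutput from the stored parts.
        (txn, info, format!("output({write_set})"))
    })
    .multiunzip();

    assert_eq!(txns_out, vec!["t0", "t1"]);
    assert_eq!(info_out, vec![0, 1]);
    assert_eq!(outputs.len(), 2);
    // The undrained tail stays behind for the next epoch's chunk.
    assert_eq!(txns.len(), 2);
}
```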
+ auxiliary_info.resize(sig_verified_txns.len(), AuxiliaryInfo::new_empty()); DoGetExecutionOutput::by_transaction_execution::( &V::new(), sig_verified_txns.into(), + auxiliary_info, parent_state, state_view, BlockExecutorConfigFromOnchain::new_no_block_limit(), @@ -104,6 +108,7 @@ impl TransactionChunk for ChunkToExecute { pub struct ChunkToApply { pub transactions: Vec, pub transaction_outputs: Vec, + pub persisted_info: Vec, pub first_version: Version, } @@ -124,12 +129,17 @@ impl TransactionChunk for ChunkToApply { let Self { transactions, transaction_outputs, + persisted_info, first_version: _, } = self; DoGetExecutionOutput::by_transaction_output( transactions, transaction_outputs, + persisted_info + .into_iter() + .map(|info| AuxiliaryInfo::new(info, None)) + .collect(), parent_state, state_view, ) diff --git a/execution/executor/src/db_bootstrapper/mod.rs b/execution/executor/src/db_bootstrapper/mod.rs index 260d23994337c..10103046afd5e 100644 --- a/execution/executor/src/db_bootstrapper/mod.rs +++ b/execution/executor/src/db_bootstrapper/mod.rs @@ -27,7 +27,7 @@ use aptos_types::{ on_chain_config::ConfigurationResource, state_store::{state_key::StateKey, StateViewId, TStateView}, timestamp::TimestampResource, - transaction::Transaction, + transaction::{AuxiliaryInfo, Transaction}, waypoint::Waypoint, }; use aptos_vm::VMBlockExecutor; @@ -137,6 +137,9 @@ pub fn calculate_genesis( let execution_output = DoGetExecutionOutput::by_transaction_execution::( &V::new(), vec![genesis_txn.clone().into()].into(), + // TODO(grao): Do we need any auxiliary info for hard fork? Not now, but maybe one day we + // will need it. + vec![AuxiliaryInfo::new_empty()], &ledger_summary.state, base_state_view, BlockExecutorConfigFromOnchain::new_no_block_limit(), diff --git a/execution/executor/src/tests/mock_vm/mock_vm_test.rs b/execution/executor/src/tests/mock_vm/mock_vm_test.rs index 86a68f1254a49..bda60fc1c88f0 100644 --- a/execution/executor/src/tests/mock_vm/mock_vm_test.rs +++ b/execution/executor/src/tests/mock_vm/mock_vm_test.rs @@ -26,7 +26,8 @@ fn test_mock_vm_different_senders() { txns.push(encode_mint_transaction(gen_address(i), amount)); } - let txn_provider = DefaultTxnProvider::new(into_signature_verified_block(txns.clone())); + let txn_provider = + DefaultTxnProvider::new_without_info(into_signature_verified_block(txns.clone())); let outputs = MockVM::new() .execute_block_no_limit(&txn_provider, &MockStateView::empty()) .expect("MockVM should not fail to start"); @@ -64,7 +65,7 @@ fn test_mock_vm_same_sender() { txns.push(encode_mint_transaction(sender, amount)); } - let txn_provider = DefaultTxnProvider::new(into_signature_verified_block(txns)); + let txn_provider = DefaultTxnProvider::new_without_info(into_signature_verified_block(txns)); let outputs = MockVM::new() .execute_block_no_limit(&txn_provider, &MockStateView::empty()) .expect("MockVM should not fail to start"); @@ -100,7 +101,7 @@ fn test_mock_vm_payment() { encode_transfer_transaction(gen_address(0), gen_address(1), 50), ]; - let txn_provider = DefaultTxnProvider::new(into_signature_verified_block(txns)); + let txn_provider = DefaultTxnProvider::new_without_info(into_signature_verified_block(txns)); let output = MockVM::new() .execute_block_no_limit(&txn_provider, &MockStateView::empty()) .expect("MockVM should not fail to start"); diff --git a/execution/executor/src/tests/mock_vm/mod.rs b/execution/executor/src/tests/mock_vm/mod.rs index 32729310754ce..2d5f2d31ef369 100644 --- a/execution/executor/src/tests/mock_vm/mod.rs 
+++ b/execution/executor/src/tests/mock_vm/mod.rs @@ -201,10 +201,11 @@ impl VMBlockExecutor for MockVM { let mut block_epilogue_txn = None; if !skip_rest { if let Some(block_id) = transaction_slice_metadata.append_state_checkpoint_to_block() { - block_epilogue_txn = Some(Transaction::block_epilogue( + block_epilogue_txn = Some(Transaction::block_epilogue_v0( block_id, BlockEndInfo::new_empty(), )); + outputs.push(TransactionOutput::new_empty_success()); } } diff --git a/execution/executor/src/tests/mod.rs b/execution/executor/src/tests/mod.rs index 7b718c997ccba..f9da8a06891ac 100644 --- a/execution/executor/src/tests/mod.rs +++ b/execution/executor/src/tests/mod.rs @@ -30,10 +30,12 @@ use aptos_types::{ state_store::{state_key::StateKey, state_value::StateValue, StateViewId}, test_helpers::transaction_test_helpers::{block, TEST_BLOCK_EXECUTOR_ONCHAIN_CONFIG}, transaction::{ - signature_verified_transaction::SignatureVerifiedTransaction, BlockEndInfo, - ExecutionStatus, RawTransaction, Script, SignedTransaction, Transaction, - TransactionAuxiliaryData, TransactionListWithProof, TransactionOutput, TransactionPayload, - TransactionStatus, Version, + signature_verified_transaction::{ + into_signature_verified_block, SignatureVerifiedTransaction, + }, + AuxiliaryInfo, BlockEndInfo, ExecutionStatus, PersistedAuxiliaryInfo, RawTransaction, + Script, SignedTransaction, Transaction, TransactionAuxiliaryData, TransactionListWithProof, + TransactionOutput, TransactionPayload, TransactionStatus, Version, }, write_set::{WriteOp, WriteSet, WriteSetMut}, }; @@ -387,7 +389,7 @@ fn create_blocks_and_chunks( let block_id = gen_block_id(version); let output = block_executor .execute_block( - (block_id, txns.clone()).into(), + (block_id, into_signature_verified_block(txns.clone())).into(), parent_block_id, TEST_BLOCK_EXECUTOR_ONCHAIN_CONFIG, ) @@ -460,7 +462,7 @@ fn apply_transaction_by_writeset( ) { let ledger_summary: LedgerSummary = db.reader.get_pre_committed_ledger_summary().unwrap(); - let (txns, txn_outs) = transactions_and_writesets + let (txns, txn_outs): (Vec<_>, Vec<_>) = transactions_and_writesets .iter() .map(|(txn, write_set)| { ( @@ -492,9 +494,11 @@ fn apply_transaction_by_writeset( ledger_summary.state.latest().clone(), ) .unwrap(); + let aux_info = txns.iter().map(|_| AuxiliaryInfo::new_empty()).collect(); let chunk_output = DoGetExecutionOutput::by_transaction_output( txns, txn_outs, + aux_info, &ledger_summary.state, state_view, ) @@ -696,6 +700,7 @@ fn run_transactions_naive( let out = DoGetExecutionOutput::by_transaction_execution( &MockVM::new(), vec![txn].into(), + vec![AuxiliaryInfo::new_empty()], &ledger_summary.state, state_view, block_executor_onchain_config.clone(), @@ -801,6 +806,7 @@ proptest! { let replayer = chunk_executor_tests::TestExecutor::new(); let chunks_enqueued = replayer.executor.enqueue_chunks( txn_list.transactions, + txn_infos.iter().map(|_| PersistedAuxiliaryInfo::None).collect(), txn_infos, write_sets, event_vecs, @@ -867,9 +873,9 @@ proptest! 
{ let expected_root_hash = run_transactions_naive({ let mut txns = vec![]; txns.extend(block_a.txns.iter().cloned()); - txns.push(SignatureVerifiedTransaction::Valid(Transaction::block_epilogue(block_a.id, BlockEndInfo::new_empty()))); + txns.push(SignatureVerifiedTransaction::Valid(Transaction::block_epilogue_v0(block_a.id, BlockEndInfo::new_empty()))); txns.extend(block_b.txns.iter().cloned()); - txns.push(SignatureVerifiedTransaction::Valid(Transaction::block_epilogue(block_b.id, BlockEndInfo::new_empty()))); + txns.push(SignatureVerifiedTransaction::Valid(Transaction::block_epilogue_v0(block_b.id, BlockEndInfo::new_empty()))); txns }, TEST_BLOCK_EXECUTOR_ONCHAIN_CONFIG); diff --git a/execution/executor/src/workflow/do_get_execution_output.rs b/execution/executor/src/workflow/do_get_execution_output.rs index 85057bedbde9c..5a048edc51354 100644 --- a/execution/executor/src/workflow/do_get_execution_output.rs +++ b/execution/executor/src/workflow/do_get_execution_output.rs @@ -42,8 +42,8 @@ use aptos_types::{ TStateView, }, transaction::{ - signature_verified_transaction::SignatureVerifiedTransaction, BlockOutput, Transaction, - TransactionOutput, TransactionStatus, Version, + signature_verified_transaction::SignatureVerifiedTransaction, AuxiliaryInfo, BlockOutput, + PersistedAuxiliaryInfo, Transaction, TransactionOutput, TransactionStatus, Version, }, write_set::{TransactionWrite, WriteSet}, }; @@ -57,6 +57,7 @@ impl DoGetExecutionOutput { pub fn by_transaction_execution( executor: &V, transactions: ExecutableTransactions, + auxiliary_info: Vec, parent_state: &LedgerState, state_view: CachedStateView, onchain_config: BlockExecutorConfigFromOnchain, @@ -67,6 +68,7 @@ impl DoGetExecutionOutput { Self::by_transaction_execution_unsharded::( executor, txns, + auxiliary_info, parent_state, state_view, onchain_config, @@ -75,6 +77,7 @@ impl DoGetExecutionOutput { }, ExecutableTransactions::Sharded(txns) => Self::by_transaction_execution_sharded::( txns, + auxiliary_info, parent_state, state_view, onchain_config, @@ -100,12 +103,13 @@ impl DoGetExecutionOutput { fn by_transaction_execution_unsharded( executor: &V, transactions: Vec, + auxiliary_info: Vec, parent_state: &LedgerState, state_view: CachedStateView, onchain_config: BlockExecutorConfigFromOnchain, transaction_slice_metadata: TransactionSliceMetadata, ) -> Result { - let txn_provider = DefaultTxnProvider::new(transactions); + let txn_provider = DefaultTxnProvider::new(transactions, auxiliary_info); let block_output = Self::execute_block::( executor, &txn_provider, @@ -113,30 +117,23 @@ impl DoGetExecutionOutput { onchain_config, transaction_slice_metadata, )?; - let (mut transaction_outputs, block_epilogue_txn) = block_output.into_inner(); - let mut transactions: Vec<_> = txn_provider - .txns + let (transaction_outputs, block_epilogue_txn) = block_output.into_inner(); + let (transactions, mut auxiliary_info) = txn_provider.into_inner(); + let mut transactions = transactions .into_iter() .map(|t| t.into_inner()) - .collect(); + .collect_vec(); if let Some(block_epilogue_txn) = block_epilogue_txn { transactions.push(block_epilogue_txn); - // TODO(HotState): there are three possible paths where the block epilogue - // output is passed to the DB: - // 1. a block from consensus is executed: the VM outputs the block end info - // and the block epilogue transaction and output are generated here. - // 2. 
a chunk re-executed: The VM will see the block epilogue transaction and - // should output the transaction output by looking at the block end info - // embedded in the epilogue transaction (and maybe the state view). - // 3. a chunk replayed by transaction output: we get the transaction output - // directly. - transaction_outputs.push(TransactionOutput::new_empty_success()); + // TODO(grao): Double check if we want to put anything into AuxiliaryInfo here. + auxiliary_info.push(AuxiliaryInfo::new_empty()); } Parser::parse( state_view.next_version(), transactions, transaction_outputs, + auxiliary_info, parent_state, state_view, false, // prime_state_cache @@ -148,6 +145,7 @@ impl DoGetExecutionOutput { pub fn by_transaction_execution_sharded( transactions: PartitionedTransactions, + auxiliary_info: Vec, parent_state: &LedgerState, state_view: CachedStateView, onchain_config: BlockExecutorConfigFromOnchain, @@ -160,6 +158,8 @@ impl DoGetExecutionOutput { onchain_config, )?; + // TODO(Manu): Handle state checkpoint here. + // TODO(skedia) add logic to emit counters per shard instead of doing it globally. // Unwrapping here is safe because the execution has finished and it is guaranteed that @@ -172,6 +172,7 @@ impl DoGetExecutionOutput { .map(|t| t.into_txn().into_inner()) .collect(), transaction_outputs, + auxiliary_info, parent_state, state_view, false, // prime_state_cache @@ -182,6 +183,7 @@ impl DoGetExecutionOutput { pub fn by_transaction_output( transactions: Vec, transaction_outputs: Vec, + auxiliary_info: Vec, parent_state: &LedgerState, state_view: CachedStateView, ) -> Result { @@ -189,6 +191,7 @@ impl DoGetExecutionOutput { state_view.next_version(), transactions, transaction_outputs, + auxiliary_info, parent_state, state_view, true, // prime state cache @@ -301,6 +304,7 @@ impl Parser { first_version: Version, mut transactions: Vec, mut transaction_outputs: Vec, + auxiliary_info: Vec, parent_state: &LedgerState, base_state_view: CachedStateView, prime_state_cache: bool, @@ -318,12 +322,17 @@ impl Parser { .collect_vec() }; - // Isolate retries. - let (to_retry, has_reconfig) = - Self::extract_retries(&mut transactions, &mut transaction_outputs); + let mut persisted_auxiliary_info = auxiliary_info + .into_iter() + .map(|info| info.into_persisted_info()) + .collect(); - // Isolate discards. - let to_discard = Self::extract_discards(&mut transactions, &mut transaction_outputs); + // Isolate retries and discards. + let (to_retry, to_discard, has_reconfig) = Self::extract_retries_and_discards( + &mut transactions, + &mut transaction_outputs, + &mut persisted_auxiliary_info, + ); let mut block_end_info = None; if is_block && !has_reconfig { @@ -332,10 +341,15 @@ impl Parser { ensure!(statuses_for_input_txns.pop().is_some()); } } + // The rest is to be committed, attach block epilogue as needed and optionally get next EpochState. 
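In the unsharded execution path above, the transaction provider now owns the auxiliary-info column alongside the transactions and returns both from into_inner(); when the VM emits a block epilogue transaction, the executor pushes a matching empty entry so the columns stay the same length. A stripped-down sketch of that contract; the names mirror the diff but the types are stand-ins:

```rust
#[derive(Clone, Debug)]
enum AuxInfo {
    Empty,
}

struct TxnProvider<T> {
    txns: Vec<T>,
    aux_info: Vec<AuxInfo>,
}

impl<T> TxnProvider<T> {
    // Mirrors DefaultTxnProvider::new(transactions, auxiliary_info):
    // both columns must arrive with the same length.
    fn new(txns: Vec<T>, aux_info: Vec<AuxInfo>) -> Self {
        assert_eq!(txns.len(), aux_info.len());
        Self { txns, aux_info }
    }

    fn into_inner(self) -> (Vec<T>, Vec<AuxInfo>) {
        (self.txns, self.aux_info)
    }
}

fn main() {
    let provider = TxnProvider::new(vec!["t0", "t1"], vec![AuxInfo::Empty; 2]);

    // ... execution runs; the VM may hand back an epilogue txn ...
    let block_epilogue_txn = Some("epilogue");

    let (mut txns, mut aux_info) = provider.into_inner();
    if let Some(epilogue) = block_epilogue_txn {
        txns.push(epilogue);
        // Keep the columns aligned, as the executor does above.
        aux_info.push(AuxInfo::Empty);
    }
    assert_eq!(txns.len(), aux_info.len());
}
```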
let to_commit = { let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__to_commit"]); - let to_commit = TransactionsWithOutput::new(transactions, transaction_outputs); + let to_commit = TransactionsWithOutput::new( + transactions, + transaction_outputs, + persisted_auxiliary_info, + ); TransactionsToKeep::index(first_version, to_commit, has_reconfig) }; let next_epoch_state = { @@ -387,11 +401,12 @@ impl Parser { .collect_vec() } - fn extract_retries( + fn extract_retries_and_discards( transactions: &mut Vec, transaction_outputs: &mut Vec, - ) -> (TransactionsWithOutput, bool) { - let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__retries"]); + persisted_info: &mut Vec, + ) -> (TransactionsWithOutput, TransactionsWithOutput, bool) { + let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__retries_and_discards"]); let last_non_retry = transaction_outputs .iter() @@ -402,41 +417,39 @@ impl Parser { false }; - let first_retry = last_non_retry.map_or(0, |pos| pos + 1); - let to_retry = TransactionsWithOutput::new( - transactions.drain(first_retry..).collect(), - transaction_outputs.drain(first_retry..).collect(), - ); - - (to_retry, is_reconfig) - } - - fn extract_discards( - transactions: &mut Vec, - transaction_outputs: &mut Vec, - ) -> TransactionsWithOutput { - let _timer = OTHER_TIMERS.timer_with(&["parse_raw_output__discards"]); - - let to_discard = { - let mut res = TransactionsWithOutput::new_empty(); - for idx in 0..transactions.len() { - if transaction_outputs[idx].status().is_discarded() { - res.push(transactions[idx].clone(), transaction_outputs[idx].clone()); - } else if !res.is_empty() { - transactions[idx - res.len()] = transactions[idx].clone(); - transaction_outputs[idx - res.len()] = transaction_outputs[idx].clone(); - } - } - if !res.is_empty() { - let remaining = transactions.len() - res.len(); - transactions.truncate(remaining); - transaction_outputs.truncate(remaining); + let mut to_discard = TransactionsWithOutput::new_empty(); + let mut to_retry = TransactionsWithOutput::new_empty(); + + let mut num_keep_txns = 0; + + for idx in 0..transactions.len() { + match transaction_outputs[idx].status() { + TransactionStatus::Keep(_) => { + if num_keep_txns != idx { + transactions[num_keep_txns] = transactions[idx].clone(); + transaction_outputs[num_keep_txns] = transaction_outputs[idx].clone(); + } + num_keep_txns += 1; + }, + TransactionStatus::Retry => to_retry.push( + transactions[idx].clone(), + transaction_outputs[idx].clone(), + persisted_info[idx], + ), + TransactionStatus::Discard(_) => to_discard.push( + transactions[idx].clone(), + transaction_outputs[idx].clone(), + persisted_info[idx], + ), } - res - }; + } + + transactions.truncate(num_keep_txns); + transaction_outputs.truncate(num_keep_txns); + persisted_info.truncate(num_keep_txns); // Sanity check transactions with the Discard status: - to_discard.iter().for_each(|(t, o)| { + to_discard.iter().for_each(|(t, o, _)| { // In case a new status other than Retry, Keep and Discard is added: if !matches!(o.status(), TransactionStatus::Discard(_)) { error!("Status other than Retry, Keep or Discard; Transaction discarded."); @@ -453,7 +466,7 @@ impl Parser { } }); - to_discard + (to_retry, to_discard, is_reconfig) } fn ensure_next_epoch_state(to_commit: &TransactionsWithOutput) -> Result { @@ -500,6 +513,7 @@ impl TStateView for WriteSetStateView<'_> { unreachable!("Not supposed to be called on WriteSetStateView.") } } + #[cfg(test)] mod tests { use super::Parser; @@ -509,9 +523,10 @@ mod tests { use 
aptos_types::{ contract_event::ContractEvent, transaction::{ - ExecutionStatus, Transaction, TransactionAuxiliaryData, TransactionOutput, - TransactionStatus, + AuxiliaryInfo, ExecutionStatus, PersistedAuxiliaryInfo, Transaction, + TransactionAuxiliaryData, TransactionOutput, TransactionStatus, }, + vm_status::StatusCode, write_set::WriteSet, }; @@ -543,11 +558,13 @@ mod tests { TransactionAuxiliaryData::default(), ), ]; + let aux_info = vec![AuxiliaryInfo::new_empty(), AuxiliaryInfo::new_empty()]; let state = LedgerState::new_empty(); let execution_output = Parser::parse( 0, txns, txn_outs, + aux_info, &state, CachedStateView::new_dummy(&state), false, @@ -559,4 +576,98 @@ mod tests { *execution_output.subscribable_events ); } + + #[test] + fn test_extract_retry_and_discard_no_reconfig() { + let mut txns = vec![ + Transaction::dummy(), + Transaction::dummy(), + Transaction::dummy(), + Transaction::dummy(), + ]; + let mut txn_outs = vec![ + TransactionOutput::new( + WriteSet::default(), + vec![], + 0, + TransactionStatus::Keep(ExecutionStatus::Success), + TransactionAuxiliaryData::default(), + ), + TransactionOutput::new( + WriteSet::default(), + vec![], + 0, + TransactionStatus::Discard(StatusCode::SEQUENCE_NUMBER_TOO_OLD), + TransactionAuxiliaryData::default(), + ), + TransactionOutput::new( + WriteSet::default(), + vec![], + 0, + TransactionStatus::Retry, + TransactionAuxiliaryData::default(), + ), + TransactionOutput::new( + WriteSet::default(), + vec![], + 0, + TransactionStatus::Keep(ExecutionStatus::Success), + TransactionAuxiliaryData::default(), + ), + ]; + let mut aux_info = txns.iter().map(|_| PersistedAuxiliaryInfo::None).collect(); + let (to_retry, to_discard, is_reconfig) = + Parser::extract_retries_and_discards(&mut txns, &mut txn_outs, &mut aux_info); + assert!(!is_reconfig); + assert_eq!(to_retry.len(), 1); + assert_eq!(to_discard.len(), 1); + assert_eq!(txns.len(), 2); + assert_eq!(txn_outs.len(), 2); + assert_eq!(aux_info.len(), 2); + } + + #[test] + fn test_extract_retry_and_discard_reconfig() { + let reconfig_event = ContractEvent::new_v2_with_type_tag_str( + "0x1::reconfiguration::NewEpochEvent", + b"".to_vec(), + ); + let mut txns = vec![ + Transaction::dummy(), + Transaction::dummy(), + Transaction::dummy(), + ]; + let mut txn_outs = vec![ + TransactionOutput::new( + WriteSet::default(), + vec![reconfig_event], + 0, + TransactionStatus::Keep(ExecutionStatus::Success), + TransactionAuxiliaryData::default(), + ), + TransactionOutput::new( + WriteSet::default(), + vec![], + 0, + TransactionStatus::Retry, + TransactionAuxiliaryData::default(), + ), + TransactionOutput::new( + WriteSet::default(), + vec![], + 0, + TransactionStatus::Retry, + TransactionAuxiliaryData::default(), + ), + ]; + let mut aux_info = txns.iter().map(|_| PersistedAuxiliaryInfo::None).collect(); + let (to_retry, to_discard, is_reconfig) = + Parser::extract_retries_and_discards(&mut txns, &mut txn_outs, &mut aux_info); + assert!(is_reconfig); + assert_eq!(to_retry.len(), 2); + assert_eq!(to_discard.len(), 0); + assert_eq!(txns.len(), 1); + assert_eq!(txn_outs.len(), 1); + assert_eq!(aux_info.len(), 1); + } } diff --git a/execution/executor/src/workflow/do_ledger_update.rs b/execution/executor/src/workflow/do_ledger_update.rs index f4b7b0247c806..0dcc829173f55 100644 --- a/execution/executor/src/workflow/do_ledger_update.rs +++ b/execution/executor/src/workflow/do_ledger_update.rs @@ -12,7 +12,7 @@ use aptos_experimental_runtimes::thread_manager::optimal_min_len; use aptos_metrics_core::TimerHelper; 
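The merged extract_retries_and_discards above (exercised by the two tests just shown) is a stable in-place compaction: a single pass copies kept rows toward the front while Retry and Discard rows are moved out to side buffers, and all three columns are truncated at the end. The same loop in isolation, with toy types standing in for transactions, outputs, and auxiliary info:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Status {
    Keep,
    Retry,
    Discard,
}

fn extract_retries_and_discards(
    txns: &mut Vec<u64>,
    statuses: &mut Vec<Status>,
    aux: &mut Vec<&'static str>,
) -> (Vec<u64>, Vec<u64>) {
    let mut to_retry = vec![];
    let mut to_discard = vec![];
    let mut num_keep_txns = 0;

    for idx in 0..txns.len() {
        match statuses[idx] {
            Status::Keep => {
                // Shift kept rows left over holes left by earlier
                // retries/discards; order of kept rows is preserved.
                if num_keep_txns != idx {
                    txns[num_keep_txns] = txns[idx];
                    statuses[num_keep_txns] = statuses[idx];
                    aux[num_keep_txns] = aux[idx];
                }
                num_keep_txns += 1;
            },
            Status::Retry => to_retry.push(txns[idx]),
            Status::Discard => to_discard.push(txns[idx]),
        }
    }

    // Drop the tail; everything in it was moved out or duplicated left.
    txns.truncate(num_keep_txns);
    statuses.truncate(num_keep_txns);
    aux.truncate(num_keep_txns);

    (to_retry, to_discard)
}

fn main() {
    let mut txns = vec![10, 11, 12, 13];
    let mut statuses = vec![Status::Keep, Status::Discard, Status::Retry, Status::Keep];
    let mut aux = vec!["a", "b", "c", "d"];

    let (to_retry, to_discard) =
        extract_retries_and_discards(&mut txns, &mut statuses, &mut aux);

    assert_eq!(txns, vec![10, 13]);
    assert_eq!(to_retry, vec![12]);
    assert_eq!(to_discard, vec![11]);
    assert_eq!(aux, vec!["a", "d"]);
}
```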
use aptos_types::{ proof::accumulator::{InMemoryEventAccumulator, InMemoryTransactionAccumulator}, - transaction::{TransactionInfo, TransactionOutput}, + transaction::{PersistedAuxiliaryInfo, TransactionInfo, TransactionOutput}, }; use itertools::{izip, Itertools}; use rayon::prelude::*; @@ -92,7 +92,12 @@ impl DoLedgerUpdate { writeset_hashes ) .map( - |((txn, txn_out), state_checkpoint_hash, event_root_hash, write_set_hash)| { + |( + (txn, txn_out, persisted_info), + state_checkpoint_hash, + event_root_hash, + write_set_hash, + )| { TransactionInfo::new( txn.hash(), write_set_hash, @@ -100,6 +105,11 @@ impl DoLedgerUpdate { state_checkpoint_hash, txn_out.gas_used(), txn_out.status().as_kept_status().expect("Already sorted."), + if matches!(persisted_info, PersistedAuxiliaryInfo::None) { + None + } else { + Some(CryptoHash::hash(persisted_info)) + }, ) }, ) diff --git a/execution/executor/tests/internal_indexer_test.rs b/execution/executor/tests/internal_indexer_test.rs index efe1812a8a5ff..6f4e3d7966486 100644 --- a/execution/executor/tests/internal_indexer_test.rs +++ b/execution/executor/tests/internal_indexer_test.rs @@ -353,6 +353,7 @@ fn test_db_indexer_data() { (false, "0x1::gas_schedule::GasScheduleV2"), (false, "0x1::jwks::SupportedOIDCProviders"), (false, "0x1::stake::AptosCoinCapabilities"), + (false, "0x1::stake::PendingTransactionFee"), (false, "0x1::reconfiguration_state::State"), (false, "0x1::version::SetVersionCapability"), (false, "0x1::storage_gas::StorageGasConfig"), diff --git a/experimental/bulk-txn-submit/src/event_lookup.rs b/experimental/bulk-txn-submit/src/event_lookup.rs index f2f8afa4a8937..692d3acb477eb 100644 --- a/experimental/bulk-txn-submit/src/event_lookup.rs +++ b/experimental/bulk-txn-submit/src/event_lookup.rs @@ -2,8 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{bail, Result}; -use aptos_sdk::types::{account_address::AccountAddress, contract_event::ContractEvent}; +use aptos_sdk::{ + move_types::language_storage::TypeTag, + types::{account_address::AccountAddress, contract_event::ContractEvent}, +}; use serde::{Deserialize, Serialize}; +use std::str::FromStr; #[derive(Debug, Serialize, Deserialize)] pub struct DepositMoveStruct { @@ -32,30 +36,26 @@ pub struct BurnMoveStruct { } pub fn get_mint_token_addr(events: &[ContractEvent]) -> Result { - let mint_event: MintMoveStruct = search_single_event_data( - events, - "0000000000000000000000000000000000000000000000000000000000000004::collection::Mint", - )?; + let mint_event: MintMoveStruct = + search_single_event_data(events, &TypeTag::from_str("0x4::collection::Mint")?)?; Ok(mint_event.token) } pub fn get_burn_token_addr(events: &[ContractEvent]) -> Result { - let burn_event: BurnMoveStruct = search_single_event_data( - events, - "0000000000000000000000000000000000000000000000000000000000000004::collection::Burn", - )?; + let burn_event: BurnMoveStruct = + search_single_event_data(events, &TypeTag::from_str("0x4::collection::Burn")?)?; Ok(burn_event.token) } -pub fn search_event(events: &[ContractEvent], type_tag: &str) -> Vec { +fn search_event(events: &[ContractEvent], type_tag: &TypeTag) -> Vec { events .iter() - .filter(|event| event.type_tag().to_canonical_string() == type_tag) + .filter(|event| event.type_tag() == type_tag) .cloned() .collect::>() } -pub fn search_single_event_data(events: &[ContractEvent], type_tag: &str) -> Result +fn search_single_event_data(events: &[ContractEvent], type_tag: &TypeTag) -> Result where T: serde::de::DeserializeOwned, { @@ -77,6 +77,9 @@ where } 
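The do_ledger_update hunk above gives TransactionInfo an optional auxiliary-info hash: it stays None whenever the persisted info is PersistedAuxiliaryInfo::None, so transaction-info hashes for existing data are unchanged, and is computed only for real variants. A sketch of that mapping; std's DefaultHasher stands in for the production CryptoHash and the V1 payload is hypothetical:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Hash)]
enum PersistedAuxiliaryInfo {
    None,
    V1 { transaction_index: u32 },
}

fn aux_info_hash(info: &PersistedAuxiliaryInfo) -> Option<u64> {
    match info {
        // No auxiliary info: leave the hash slot empty so older
        // transaction infos hash exactly as before.
        PersistedAuxiliaryInfo::None => None,
        other => {
            let mut hasher = DefaultHasher::new();
            other.hash(&mut hasher);
            Some(hasher.finish())
        },
    }
}

fn main() {
    assert_eq!(aux_info_hash(&PersistedAuxiliaryInfo::None), None);
    assert!(aux_info_hash(&PersistedAuxiliaryInfo::V1 { transaction_index: 7 }).is_some());
}
```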
pub fn get_deposit_dst(events: &[ContractEvent]) -> Result { - let deposit_event: DepositMoveStruct = search_single_event_data(events, "0000000000000000000000000000000000000000000000000000000000000001::coin::Deposit<0000000000000000000000000000000000000000000000000000000000000001::aptos_coin::AptosCoin>")?; + let deposit_event: DepositMoveStruct = search_single_event_data( + events, + &TypeTag::from_str("0x1::coin::Deposit<0x1::aptos_coin::AptosCoin>")?, + )?; Ok(deposit_event.account) } diff --git a/mempool/Cargo.toml b/mempool/Cargo.toml index dede31a831943..43c5894badfb3 100644 --- a/mempool/Cargo.toml +++ b/mempool/Cargo.toml @@ -31,6 +31,7 @@ aptos-runtimes = { workspace = true } aptos-short-hex-str = { workspace = true } aptos-storage-interface = { workspace = true } aptos-time-service = { workspace = true } +aptos-transaction-filters = { workspace = true } aptos-types = { workspace = true } aptos-vm-validator = { workspace = true } bcs = { workspace = true } @@ -55,6 +56,7 @@ aptos-id-generator = { workspace = true } aptos-network = { workspace = true, features = ["fuzzing"] } aptos-storage-interface = { workspace = true, features = ["fuzzing"] } aptos-time-service = { workspace = true, features = ["testing"] } +aptos-transaction-filters = { workspace = true, features = ["fuzzing"] } enum_dispatch = { workspace = true } proptest = { workspace = true } diff --git a/mempool/src/counters.rs b/mempool/src/counters.rs index 838506efd7fc2..330593bf97b9a 100644 --- a/mempool/src/counters.rs +++ b/mempool/src/counters.rs @@ -58,6 +58,7 @@ pub const REQUEST_SUCCESS_LABEL: &str = "success"; // Process txn breakdown type labels pub const FETCH_SEQ_NUM_LABEL: &str = "storage_fetch"; +pub const FILTER_TRANSACTIONS_LABEL: &str = "filter_transactions"; pub const VM_VALIDATION_LABEL: &str = "vm_validation"; // Txn process result labels diff --git a/mempool/src/logging.rs b/mempool/src/logging.rs index 342a3ec5a10d6..a2e5fbdac678d 100644 --- a/mempool/src/logging.rs +++ b/mempool/src/logging.rs @@ -129,6 +129,7 @@ pub struct LogSchema<'a> { message_id: Option<&'a MempoolMessageId>, backpressure: Option, num_txns: Option, + message: Option<&'a str>, } impl LogSchema<'_> { @@ -156,6 +157,7 @@ impl LogSchema<'_> { message_id: None, backpressure: None, num_txns: None, + message: None, } } } @@ -187,6 +189,7 @@ pub enum LogEntry { DBError, UnexpectedNetworkMsg, MempoolSnapshot, + TransactionFilter, } #[derive(Clone, Copy, Serialize)] @@ -209,5 +212,8 @@ pub enum LogEvent { SystemTTLExpiration, ClientExpiration, + // Transaction filter events + TransactionRejected, + Success, } diff --git a/mempool/src/shared_mempool/runtime.rs b/mempool/src/shared_mempool/runtime.rs index 7fedbb050d6f6..9253260f109e1 100644 --- a/mempool/src/shared_mempool/runtime.rs +++ b/mempool/src/shared_mempool/runtime.rs @@ -51,10 +51,12 @@ pub(crate) fn start_shared_mempool( ConfigProvider: OnChainConfigProvider, { let node_type = NodeType::extract_from_config(config); + let transaction_filter_config = config.transaction_filters.mempool_filter.clone(); let smp: SharedMempool, TransactionValidator> = SharedMempool::new( mempool.clone(), config.mempool.clone(), + transaction_filter_config, network_client, db, validator, diff --git a/mempool/src/shared_mempool/tasks.rs b/mempool/src/shared_mempool/tasks.rs index 60476c70e7420..99bf2ffd167c5 100644 --- a/mempool/src/shared_mempool/tasks.rs +++ b/mempool/src/shared_mempool/tasks.rs @@ -20,7 +20,7 @@ use crate::{ QuorumStoreRequest, QuorumStoreResponse, SubmissionStatus, }; use 
anyhow::Result; -use aptos_config::network_id::PeerNetworkId; +use aptos_config::{config::TransactionFilterConfig, network_id::PeerNetworkId}; use aptos_consensus_types::common::RejectedTransactionSummary; use aptos_crypto::HashValue; use aptos_infallible::{Mutex, RwLock}; @@ -302,7 +302,15 @@ where NetworkClient: NetworkClientInterface, TransactionValidator: TransactionValidation, { + // Filter out any disallowed transactions let mut statuses = vec![]; + let transactions = + filter_transactions(&smp.transaction_filter_config, transactions, &mut statuses); + + // If there are no transactions left after filtering, return early + if transactions.is_empty() { + return statuses; + } let start_storage_read = Instant::now(); let state_view = smp @@ -382,6 +390,68 @@ where statuses } +/// Filters transactions based on the transaction filter configuration. Any +/// transactions that are filtered out will have their statuses marked accordingly. +fn filter_transactions( + transaction_filter_config: &TransactionFilterConfig, + transactions: Vec<( + SignedTransaction, + Option, + Option, + )>, + statuses: &mut Vec<(SignedTransaction, (MempoolStatus, Option))>, +) -> Vec<( + SignedTransaction, + Option, + Option, +)> { + // If the filter is not enabled, return early + if !transaction_filter_config.is_enabled() { + return transactions; + } + + // Start the filter processing timer + let transaction_filter_timer = counters::PROCESS_TXN_BREAKDOWN_LATENCY + .with_label_values(&[counters::FILTER_TRANSACTIONS_LABEL]) + .start_timer(); + + // Filter the transactions and update the statuses accordingly + let transactions = transactions + .into_iter() + .filter_map(|(transaction, account_sequence_number, priority)| { + if transaction_filter_config + .transaction_filter() + .allows_transaction(&transaction) + { + Some((transaction, account_sequence_number, priority)) + } else { + info!(LogSchema::event_log( + LogEntry::TransactionFilter, + LogEvent::TransactionRejected + ) + .message(&format!( + "Transaction {} rejected by filter", + transaction.committed_hash() + ))); + + statuses.push(( + transaction.clone(), + ( + MempoolStatus::new(MempoolStatusCode::RejectedByFilter), + None, + ), + )); + None + } + }) + .collect(); + + // Update the filter processing latency metrics + transaction_filter_timer.stop_and_record(); + + transactions +} + /// Performs VM validation on the transactions and inserts those that pass /// validation into the mempool.
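The mempool change above inserts the filter before the storage read and VM validation, so a rejected transaction is turned into a RejectedByFilter status without costing a state-view lookup, and an empty post-filter batch returns early. A condensed model of that control flow with stand-in types (the real code uses SignedTransaction, MempoolStatusCode, and TransactionFilterConfig):

```rust
#[derive(Clone, Debug, PartialEq)]
struct Txn {
    sender: u64,
}

#[derive(Debug, PartialEq)]
enum Status {
    RejectedByFilter,
}

struct FilterConfig {
    enabled: bool,
    denied_senders: Vec<u64>,
}

impl FilterConfig {
    fn allows(&self, txn: &Txn) -> bool {
        !self.denied_senders.contains(&txn.sender)
    }
}

fn filter_transactions(
    config: &FilterConfig,
    transactions: Vec<Txn>,
    statuses: &mut Vec<(Txn, Status)>,
) -> Vec<Txn> {
    // Disabled filter: pass everything through untouched.
    if !config.enabled {
        return transactions;
    }
    transactions
        .into_iter()
        .filter_map(|txn| {
            if config.allows(&txn) {
                Some(txn)
            } else {
                // Rejected rows become statuses instead of candidates
                // for validation.
                statuses.push((txn, Status::RejectedByFilter));
                None
            }
        })
        .collect()
}

fn main() {
    let config = FilterConfig {
        enabled: true,
        denied_senders: vec![42],
    };
    let mut statuses = vec![];
    let kept = filter_transactions(
        &config,
        vec![Txn { sender: 1 }, Txn { sender: 42 }],
        &mut statuses,
    );
    assert_eq!(kept, vec![Txn { sender: 1 }]);
    assert_eq!(statuses, vec![(Txn { sender: 42 }, Status::RejectedByFilter)]);
}
```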
#[cfg(not(feature = "consensus-only-perf-test"))] @@ -709,3 +779,140 @@ pub(crate) async fn process_config_update( }, } } + +#[cfg(test)] +mod test { + use super::*; + use aptos_crypto::{ed25519::Ed25519PrivateKey, PrivateKey, SigningKey, Uniform}; + use aptos_transaction_filters::transaction_filter::TransactionFilter; + use aptos_types::{ + chain_id::ChainId, + transaction::{RawTransaction, Script, TransactionPayload}, + }; + + #[test] + fn test_filter_transactions() { + // Create test transactions + let mut transactions = vec![]; + for _ in 0..10 { + let transaction = create_signed_transaction(); + transactions.push((transaction, None, Some(BroadcastPeerPriority::Primary))); + } + + // Create a config with filtering enabled (the first and last transactions will be rejected) + let transaction_filter = TransactionFilter::empty() + .add_sender_filter(false, transactions[0].0.sender()) + .add_sender_filter(false, transactions[9].0.sender()); + let transaction_filter_config = TransactionFilterConfig::new(true, transaction_filter); + + // Filter the transactions + let mut statuses = vec![]; + let filtered_transactions = filter_transactions( + &transaction_filter_config, + transactions.clone(), + &mut statuses, + ); + + // Verify that the first and last transactions are filtered out + assert_eq!(filtered_transactions.len(), 8); + assert!(!filtered_transactions.contains(&transactions[0])); + assert!(!filtered_transactions.contains(&transactions[9])); + + // Verify the filtered transaction statuses + assert_eq!(statuses.len(), 2); + verify_rejected_status(statuses[0].clone(), transactions[0].0.clone()); + verify_rejected_status(statuses[1].clone(), transactions[9].0.clone()); + } + + #[test] + fn test_filter_transactions_disabled() { + // Create test transactions + let num_transactions = 10; + let mut transactions = vec![]; + for _ in 0..num_transactions { + let transaction = create_signed_transaction(); + transactions.push((transaction, None, Some(BroadcastPeerPriority::Primary))); + } + + // Create a config with filtering disabled + let transaction_filter = TransactionFilter::empty().add_all_filter(false); // Reject all transactions + let transaction_filter_config = TransactionFilterConfig::new(false, transaction_filter); + + // Filter the transactions + let mut statuses = vec![]; + let filtered_transactions = filter_transactions( + &transaction_filter_config, + transactions.clone(), + &mut statuses, + ); + + // Verify that all transactions are retained + assert_eq!(filtered_transactions.len(), num_transactions); + assert!(statuses.is_empty()); + for transaction in transactions { + assert!(filtered_transactions.contains(&transaction)); + } + } + + #[test] + fn test_filter_transactions_empty() { + // Create test transactions + let num_transactions = 10; + let mut transactions = vec![]; + for _ in 0..num_transactions { + let transaction = create_signed_transaction(); + transactions.push((transaction, None, Some(BroadcastPeerPriority::Primary))); + } + + // Create a config with filtering enabled (the filter is empty, so no transactions will be rejected) + let transaction_filter = TransactionFilter::empty(); // Allow all transactions + let transaction_filter_config = TransactionFilterConfig::new(true, transaction_filter); + + // Filter the transactions + let mut statuses = vec![]; + let filtered_transactions = filter_transactions( + &transaction_filter_config, + transactions.clone(), + &mut statuses, + ); + + // Verify that all transactions are retained + assert_eq!(filtered_transactions.len(), 
num_transactions); + assert!(statuses.is_empty()); + for transaction in transactions { + assert!(filtered_transactions.contains(&transaction)); + } + } + + fn create_raw_transaction() -> RawTransaction { + RawTransaction::new( + AccountAddress::random(), + 0, + TransactionPayload::Script(Script::new(vec![], vec![], vec![])), + 0, + 0, + 0, + ChainId::new(10), + ) + } + + fn create_signed_transaction() -> SignedTransaction { + let raw_transaction = create_raw_transaction(); + let private_key_1 = Ed25519PrivateKey::generate_for_testing(); + let signature = private_key_1.sign(&raw_transaction).unwrap(); + + SignedTransaction::new( + raw_transaction.clone(), + private_key_1.public_key(), + signature.clone(), + ) + } + + fn verify_rejected_status( + status: (SignedTransaction, (MempoolStatus, Option)), + transaction: SignedTransaction, + ) { + let rejected_status = MempoolStatus::new(MempoolStatusCode::RejectedByFilter); + assert_eq!(status, (transaction, (rejected_status, None))); + } +} diff --git a/mempool/src/shared_mempool/types.rs b/mempool/src/shared_mempool/types.rs index d50be22d6eed3..6de0f9c851b3c 100644 --- a/mempool/src/shared_mempool/types.rs +++ b/mempool/src/shared_mempool/types.rs @@ -10,7 +10,7 @@ use crate::{ }; use anyhow::Result; use aptos_config::{ - config::{MempoolConfig, NodeType}, + config::{MempoolConfig, NodeType, TransactionFilterConfig}, network_id::PeerNetworkId, }; use aptos_consensus_types::common::{ @@ -56,6 +56,7 @@ pub(crate) struct SharedMempool { pub subscribers: Vec>, pub broadcast_within_validator_network: Arc>, pub use_case_history: Arc>, + pub transaction_filter_config: TransactionFilterConfig, } impl< @@ -66,6 +67,7 @@ impl< pub fn new( mempool: Arc>, config: MempoolConfig, + transaction_filter_config: TransactionFilterConfig, network_client: NetworkClient, db: Arc, validator: Arc>, @@ -87,6 +89,7 @@ impl< subscribers, broadcast_within_validator_network: Arc::new(RwLock::new(true)), use_case_history: Arc::new(Mutex::new(use_case_history)), + transaction_filter_config, } } diff --git a/mempool/src/tests/fuzzing.rs b/mempool/src/tests/fuzzing.rs index d978ecaf6c4ec..838037d78fc8a 100644 --- a/mempool/src/tests/fuzzing.rs +++ b/mempool/src/tests/fuzzing.rs @@ -82,9 +82,11 @@ pub fn test_mempool_process_incoming_transactions_impl( HashMap::new(), PeersAndMetadata::new(&[NetworkId::Validator]), ); + let transaction_filter_config = config.transaction_filters.mempool_filter.clone(); let smp: SharedMempool, MockVMValidator> = SharedMempool::new( Arc::new(Mutex::new(CoreMempool::new(&config))), config.mempool.clone(), + transaction_filter_config, network_client, Arc::new(mock_db), vm_validator, diff --git a/state-sync/inter-component/event-notifications/src/lib.rs b/state-sync/inter-component/event-notifications/src/lib.rs index 965a43fedad10..c7fb9481a52a8 100644 --- a/state-sync/inter-component/event-notifications/src/lib.rs +++ b/state-sync/inter-component/event-notifications/src/lib.rs @@ -218,7 +218,7 @@ impl EventSubscriptionService { let maybe_subscription_ids = match event { ContractEvent::V1(evt) => self.event_key_subscriptions.get(evt.key()), ContractEvent::V2(evt) => { - let tag = evt.type_tag().to_string(); + let tag = evt.type_tag().to_canonical_string(); self.event_v2_tag_subscriptions.get(&tag) }, }; diff --git a/state-sync/inter-component/event-notifications/src/tests.rs b/state-sync/inter-component/event-notifications/src/tests.rs index 252e7640efdfd..66a8e9573e305 100644 --- a/state-sync/inter-component/event-notifications/src/tests.rs +++ 
b/state-sync/inter-component/event-notifications/src/tests.rs @@ -122,7 +122,7 @@ fn test_dynamic_subscribers() { // Add another subscriber for event_key_1 and the reconfig_event_key let mut event_listener_2 = event_service .subscribe_to_events(vec![event_key_1], vec![ - NEW_EPOCH_EVENT_V2_MOVE_TYPE_TAG.to_string() + NEW_EPOCH_EVENT_V2_MOVE_TYPE_TAG.to_canonical_string() ]) .unwrap(); @@ -170,7 +170,9 @@ fn test_event_and_reconfig_subscribers() { .subscribe_to_events(vec![event_key_1, event_key_2], vec![]) .unwrap(); let mut event_listener_3 = event_service - .subscribe_to_events(vec![], vec![NEW_EPOCH_EVENT_V2_MOVE_TYPE_TAG.to_string()]) + .subscribe_to_events(vec![], vec![ + NEW_EPOCH_EVENT_V2_MOVE_TYPE_TAG.to_canonical_string() + ]) .unwrap(); // Create reconfiguration subscribers diff --git a/state-sync/state-sync-driver/src/tests/utils.rs b/state-sync/state-sync-driver/src/tests/utils.rs index d4100b2723d00..979a414f7b75e 100644 --- a/state-sync/state-sync-driver/src/tests/utils.rs +++ b/state-sync/state-sync-driver/src/tests/utils.rs @@ -241,6 +241,7 @@ pub fn create_transaction_info() -> TransactionInfo { Some(HashValue::random()), 0, ExecutionStatus::Success, + Some(HashValue::random()), ) } diff --git a/storage/README.md b/storage/README.md index 4f4227c852ed7..162ce6d61affd 100644 --- a/storage/README.md +++ b/storage/README.md @@ -188,7 +188,7 @@ https://github.com/aptos-labs/aptos-core/blob/main/state-sync/README.md ### Continuously backing up to a cloud storage -The backup coordinator runs continously, talks to the backup service embedded +The backup coordinator runs continuously, talks to the backup service embedded inside an Aptos Node and writes backup data automatically to a configured cloud storage. diff --git a/storage/aptosdb/src/db/aptosdb_test.rs b/storage/aptosdb/src/db/aptosdb_test.rs index f1ff5353c5a99..b02a09577ff5a 100644 --- a/storage/aptosdb/src/db/aptosdb_test.rs +++ b/storage/aptosdb/src/db/aptosdb_test.rs @@ -199,6 +199,7 @@ fn test_get_latest_ledger_summary() { Some(state_hash), 0, ExecutionStatus::MiscellaneousError(None), + Some(HashValue::random()), ); let root_hash = txn_info.hash(); let mut txn_to_commit = TransactionToCommit::dummy(); @@ -321,6 +322,8 @@ proptest! { #![proptest_config(ProptestConfig::with_cases(10))] #[test] + #[ignore] + // TODO(grao): Fix this. fn test_state_merkle_pruning(input in arb_blocks_to_commit()) { aptos_logger::Logger::new().init(); test_state_merkle_pruning_impl(input); diff --git a/storage/aptosdb/src/db/include/aptosdb_internal.rs b/storage/aptosdb/src/db/include/aptosdb_internal.rs index 1ea266d0decf7..190837ba70cca 100644 --- a/storage/aptosdb/src/db/include/aptosdb_internal.rs +++ b/storage/aptosdb/src/db/include/aptosdb_internal.rs @@ -107,15 +107,23 @@ impl AptosDB { if !readonly { if let Some(version) = myself.get_synced_version()? { - myself.ledger_pruner + myself + .ledger_pruner .maybe_set_pruner_target_db_version(version); - myself.state_store + myself + .state_store .state_kv_pruner .maybe_set_pruner_target_db_version(version); } if let Some(version) = myself.get_latest_state_checkpoint_version()?
{ - myself.state_store.state_merkle_pruner.maybe_set_pruner_target_db_version(version); - myself.state_store.epoch_snapshot_pruner.maybe_set_pruner_target_db_version(version); + myself + .state_store + .state_merkle_pruner + .maybe_set_pruner_target_db_version(version); + myself + .state_store + .epoch_snapshot_pruner + .maybe_set_pruner_target_db_version(version); } } diff --git a/storage/aptosdb/src/db/include/aptosdb_reader.rs b/storage/aptosdb/src/db/include/aptosdb_reader.rs index f48b10daa8b2e..91d6c51d21b95 100644 --- a/storage/aptosdb/src/db/include/aptosdb_reader.rs +++ b/storage/aptosdb/src/db/include/aptosdb_reader.rs @@ -1,9 +1,9 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use aptos_storage_interface::state_store::state::State; -use aptos_storage_interface::state_store::state_summary::StateSummary; -use aptos_storage_interface::state_store::state_view::hot_state_view::HotStateView; +use aptos_storage_interface::state_store::{ + state::State, state_summary::StateSummary, state_view::hot_state_view::HotStateView, +}; use aptos_types::{block_info::BlockHeight, transaction::IndexedTransactionSummary}; impl DbReader for AptosDB { @@ -149,24 +149,23 @@ impl DbReader for AptosDB { let txn_summaries_iter = self .transaction_store - .get_account_transaction_summaries_iter(address, start_version, end_version, limit, ledger_version)? + .get_account_transaction_summaries_iter( + address, + start_version, + end_version, + limit, + ledger_version, + )? .map(|result| { let (_version, txn_summary) = result?; Ok(txn_summary) }); if start_version.is_some() { - txn_summaries_iter - .collect::>>() + txn_summaries_iter.collect::>>() } else { - let txn_summaries = txn_summaries_iter - .collect::>>()?; - Ok( - txn_summaries - .into_iter() - .rev() - .collect::>() - ) + let txn_summaries = txn_summaries_iter.collect::>>()?; + Ok(txn_summaries.into_iter().rev().collect::>()) } }) } @@ -556,11 +555,8 @@ impl DbReader for AptosDB { gauged_api("get_state_value_with_proof_by_version_ext", || { self.error_if_state_merkle_pruned("State merkle", version)?; - self.state_store.get_state_value_with_proof_by_version_ext( - key_hash, - version, - root_depth, - ) + self.state_store + .get_state_value_with_proof_by_version_ext(key_hash, version, root_depth) }) } @@ -579,7 +575,10 @@ impl DbReader for AptosDB { fn get_pre_committed_ledger_summary(&self) -> Result { gauged_api("get_pre_committed_ledger_summary", || { - let (state, state_summary) = self.state_store.current_state_locked().to_state_and_summary(); + let (state, state_summary) = self + .state_store + .current_state_locked() + .to_state_and_summary(); let num_txns = state.next_version(); let frozen_subtrees = self @@ -628,9 +627,7 @@ impl DbReader for AptosDB { for item in iter { let (_block_height, block_info) = item?; let first_version = block_info.first_version(); - if latest_version - .as_ref().is_some_and(|v| first_version <= *v) - { + if latest_version.as_ref().is_some_and(|v| first_version <= *v) { let event = self .ledger_db .event_db() @@ -681,7 +678,11 @@ impl DbReader for AptosDB { fn get_latest_state_checkpoint_version(&self) -> Result> { gauged_api("get_latest_state_checkpoint_version", || { - Ok(self.state_store.current_state_locked().last_checkpoint().version()) + Ok(self + .state_store + .current_state_locked() + .last_checkpoint() + .version()) }) } diff --git a/storage/aptosdb/src/db/include/aptosdb_testonly.rs b/storage/aptosdb/src/db/include/aptosdb_testonly.rs index 67fc0c027d456..2c21c31503cb7 100644 --- 
a/storage/aptosdb/src/db/include/aptosdb_testonly.rs +++ b/storage/aptosdb/src/db/include/aptosdb_testonly.rs @@ -1,11 +1,13 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use aptos_config::config::{BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD}; -use std::default::Default; -use aptos_types::transaction::{TransactionStatus, TransactionToCommit}; +use aptos_config::config::{ + BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, +}; use aptos_executor_types::transactions_with_output::TransactionsToKeep; use aptos_storage_interface::state_store::state_summary::ProvableStateSummary; +use aptos_types::transaction::{PersistedAuxiliaryInfo, TransactionStatus, TransactionToCommit}; +use std::default::Default; impl AptosDB { /// This opens db in non-readonly mode, without the pruner. @@ -44,13 +46,21 @@ impl AptosDB { /// This opens db in non-readonly mode, without the pruner and cache. pub fn new_for_test_no_cache + Clone>(db_root_path: P) -> Self { - Self::new_without_pruner(db_root_path, false, - BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, - 0, false, false) + Self::new_without_pruner( + db_root_path, + false, + BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, + 0, + false, + false, + ) } /// This opens db in non-readonly mode, without the pruner, and with the indexer - pub fn new_for_test_with_indexer + Clone>(db_root_path: P, enable_sharding: bool) -> Self { + pub fn new_for_test_with_indexer + Clone>( + db_root_path: P, + enable_sharding: bool, + ) -> Self { Self::new_without_pruner( db_root_path, false, @@ -103,13 +113,24 @@ impl AptosDB { ledger_info_with_sigs: Option<&LedgerInfoWithSignatures>, sync_commit: bool, ) -> Result<()> { - let (transactions, transaction_outputs, transaction_infos) = Self::disassemble_txns_to_commit(txns_to_commit); + let (transactions, transaction_outputs, transaction_infos) = + Self::disassemble_txns_to_commit(txns_to_commit); let is_reconfig = transaction_outputs .iter() .rev() .flat_map(TransactionOutput::events) .any(ContractEvent::is_new_epoch_event); - let transactions_to_keep = TransactionsToKeep::make(first_version, transactions, transaction_outputs, is_reconfig); + let auxiliary_info = transactions + .iter() + .map(|_| PersistedAuxiliaryInfo::None) + .collect(); + let transactions_to_keep = TransactionsToKeep::make( + first_version, + transactions, + transaction_outputs, + auxiliary_info, + is_reconfig, + ); let current = self.state_store.current_state_locked().clone(); let (hot_state, persisted_state) = self.state_store.get_persisted_state()?; @@ -128,6 +149,7 @@ impl AptosDB { let chunk = ChunkToCommit { first_version, transactions: &transactions_to_keep.transactions, + persisted_info: &transactions_to_keep.persisted_info, transaction_outputs: &transactions_to_keep.transaction_outputs, transaction_infos: &transaction_infos, state: &new_state, @@ -137,26 +159,42 @@ impl AptosDB { is_reconfig, }; - self.save_transactions( chunk, ledger_info_with_sigs, sync_commit) + self.save_transactions(chunk, ledger_info_with_sigs, sync_commit) } - fn disassemble_txns_to_commit(txns_to_commit: &[TransactionToCommit]) -> ( - Vec, Vec, Vec + fn disassemble_txns_to_commit( + txns_to_commit: &[TransactionToCommit], + ) -> ( + Vec, + Vec, + Vec, ) { - txns_to_commit.iter().map(|txn_to_commit| { - let TransactionToCommit { - transaction, transaction_info, write_set, events, is_reconfig: _, transaction_auxiliary_data - } = txn_to_commit; + txns_to_commit + .iter() + .map(|txn_to_commit| { + let 
TransactionToCommit { + transaction, + transaction_info, + write_set, + events, + is_reconfig: _, + transaction_auxiliary_data, + } = txn_to_commit; - let transaction_output = TransactionOutput::new( - write_set.clone(), - events.clone(), - transaction_info.gas_used(), - TransactionStatus::Keep(transaction_info.status().clone()), - transaction_auxiliary_data.clone(), - ); + let transaction_output = TransactionOutput::new( + write_set.clone(), + events.clone(), + transaction_info.gas_used(), + TransactionStatus::Keep(transaction_info.status().clone()), + transaction_auxiliary_data.clone(), + ); - (transaction.clone(), transaction_output, transaction_info.clone()) - }).multiunzip() + ( + transaction.clone(), + transaction_output, + transaction_info.clone(), + ) + }) + .multiunzip() } } diff --git a/storage/aptosdb/src/db/include/aptosdb_writer.rs b/storage/aptosdb/src/db/include/aptosdb_writer.rs index ef02303e12a1d..8c4efb13842e2 100644 --- a/storage/aptosdb/src/db/include/aptosdb_writer.rs +++ b/storage/aptosdb/src/db/include/aptosdb_writer.rs @@ -1,15 +1,11 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use itertools::Itertools; use aptos_storage_interface::chunk_to_commit::ChunkToCommit; +use itertools::Itertools; impl DbWriter for AptosDB { - fn pre_commit_ledger( - &self, - chunk: ChunkToCommit, - sync_commit: bool, - ) -> Result<()> { + fn pre_commit_ledger(&self, chunk: ChunkToCommit, sync_commit: bool) -> Result<()> { gauged_api("pre_commit_ledger", || { // Pre-committing and committing in concurrency is allowed but not pre-committing at the // same time from multiple threads, the same for committing. @@ -21,13 +17,15 @@ impl DbWriter for AptosDB { .expect("Concurrent committing detected."); let _timer = OTHER_TIMERS_SECONDS.timer_with(&["pre_commit_ledger"]); - chunk.state_summary.latest().global_state_summary.log_generation("db_save"); + chunk + .state_summary + .latest() + .global_state_summary + .log_generation("db_save"); self.pre_commit_validation(&chunk)?; - let _new_root_hash = self.calculate_and_commit_ledger_and_state_kv( - &chunk, - self.skip_index_and_usage, - )?; + let _new_root_hash = + self.calculate_and_commit_ledger_and_state_kv(&chunk, self.skip_index_and_usage)?; let _timer = OTHER_TIMERS_SECONDS.timer_with(&["save_transactions__others"]); @@ -73,16 +71,10 @@ impl DbWriter for AptosDB { self.ledger_db.metadata_db().write_schemas(ledger_batch)?; // Notify the pruners, invoke the indexer, and update in-memory ledger info. - self.post_commit( - old_committed_ver, - version, - ledger_info_with_sigs, - chunk_opt, - ) + self.post_commit(old_committed_ver, version, ledger_info_with_sigs, chunk_opt) }) } - fn get_state_snapshot_receiver( &self, version: Version, @@ -212,10 +204,7 @@ impl DbWriter for AptosDB { } impl AptosDB { - fn pre_commit_validation( - &self, - chunk: &ChunkToCommit, - ) -> Result<()> { + fn pre_commit_validation(&self, chunk: &ChunkToCommit) -> Result<()> { let _timer = OTHER_TIMERS_SECONDS.timer_with(&["save_transactions_validation"]); ensure!(!chunk.is_empty(), "chunk is empty, nothing to save."); @@ -247,30 +236,38 @@ impl AptosDB { // // TODO(grao): Consider propagating the error instead of panic, if necessary. 
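The writer hunk below adds the persisted-auxiliary-info commit as one more task in the existing rayon scope, so the new column is written in parallel with events, write sets, transactions, and transaction infos, and the scope joins them all before the function returns. A toy model of that shape, assuming the rayon crate; the closure body stands in for the real sub-DB commits:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let committed = AtomicUsize::new(0);
    // Stand-in for e.g.
    //   ledger_db.persisted_auxiliary_info_db()
    //       .commit_auxiliary_info(first_version, persisted_info)
    let commit = |_sub_db: &str| {
        committed.fetch_add(1, Ordering::SeqCst);
    };

    rayon::scope(|s| {
        s.spawn(|_| commit("events"));
        s.spawn(|_| commit("write_sets"));
        s.spawn(|_| commit("transactions"));
        s.spawn(|_| commit("persisted_auxiliary_info")); // the new task
        s.spawn(|_| commit("transaction_infos"));
    });

    // rayon::scope does not return until every spawned task finished.
    assert_eq!(committed.load(Ordering::SeqCst), 5);
}
```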
s.spawn(|_| { - self.commit_events(chunk.first_version, chunk.transaction_outputs, skip_index_and_usage) - .unwrap() + self.commit_events( + chunk.first_version, + chunk.transaction_outputs, + skip_index_and_usage, + ) + .unwrap() }); s.spawn(|_| { self.ledger_db .write_set_db() - .commit_write_sets( + .commit_write_sets(chunk.first_version, chunk.transaction_outputs) + .unwrap() + }); + s.spawn(|_| { + self.ledger_db + .transaction_db() + .commit_transactions( chunk.first_version, - chunk.transaction_outputs, + chunk.transactions, + skip_index_and_usage, ) .unwrap() }); s.spawn(|_| { self.ledger_db - .transaction_db() - .commit_transactions(chunk.first_version, chunk.transactions, skip_index_and_usage) + .persisted_auxiliary_info_db() + .commit_auxiliary_info(chunk.first_version, chunk.persisted_info) .unwrap() }); s.spawn(|_| { - self.commit_state_kv_and_ledger_metadata( - chunk, - skip_index_and_usage, - ) - .unwrap() + self.commit_state_kv_and_ledger_metadata(chunk, skip_index_and_usage) + .unwrap() }); s.spawn(|_| { self.commit_transaction_infos(chunk.first_version, chunk.transaction_infos) @@ -329,7 +326,8 @@ impl AptosDB { ) .unwrap(); - let _timer = OTHER_TIMERS_SECONDS.timer_with(&["commit_state_kv_and_ledger_metadata___commit"]); + let _timer = + OTHER_TIMERS_SECONDS.timer_with(&["commit_state_kv_and_ledger_metadata___commit"]); rayon::scope(|s| { s.spawn(|_| { self.ledger_db @@ -339,11 +337,7 @@ impl AptosDB { }); s.spawn(|_| { self.state_kv_db - .commit( - chunk.expect_last_version(), - None, - sharded_state_kv_batches, - ) + .commit(chunk.expect_last_version(), None, sharded_state_kv_batches) .unwrap(); }); }); @@ -402,11 +396,7 @@ impl AptosDB { let root_hash = self .ledger_db .transaction_accumulator_db() - .put_transaction_accumulator( - first_version, - transaction_infos, - &mut batch, - )?; + .put_transaction_accumulator(first_version, transaction_infos, &mut batch)?; let _timer = OTHER_TIMERS_SECONDS.timer_with(&["commit_transaction_accumulator___commit"]); self.ledger_db @@ -414,8 +404,7 @@ impl AptosDB { .write_schemas(batch)?; let mut batch = SchemaBatch::new(); - let all_versions: Vec<_> = - (first_version..first_version + num_txns).collect(); + let all_versions: Vec<_> = (first_version..first_version + num_txns).collect(); THREAD_MANAGER .get_non_exe_cpu_pool() .install(|| -> Result<()> { @@ -465,7 +454,8 @@ impl AptosDB { Ok(()) })?; - let _timer = OTHER_TIMERS_SECONDS.timer_with(&["commit_transaction_auxiliary_data___commit"]); + let _timer = + OTHER_TIMERS_SECONDS.timer_with(&["commit_transaction_auxiliary_data___commit"]); self.ledger_db .transaction_auxiliary_data_db() .write_schemas(batch) @@ -484,11 +474,7 @@ impl AptosDB { .enumerate() .try_for_each(|(i, txn_info)| -> Result<()> { let version = first_version + i as u64; - TransactionInfoDb::put_transaction_info( - version, - txn_info, - &mut batch, - )?; + TransactionInfoDb::put_transaction_info(version, txn_info, &mut batch)?; Ok(()) })?; @@ -497,10 +483,7 @@ impl AptosDB { self.ledger_db.transaction_info_db().write_schemas(batch) } - fn get_and_check_commit_range( - &self, - version_to_commit: Version, - ) -> Result> { + fn get_and_check_commit_range(&self, version_to_commit: Version) -> Result> { let old_committed_ver = self.ledger_db.metadata_db().get_synced_version()?; let pre_committed_ver = self.state_store.current_state_locked().version(); ensure!( @@ -522,7 +505,7 @@ impl AptosDB { &self, version: Version, ledger_info_with_sig: &LedgerInfoWithSignatures, - ledger_batch: &mut SchemaBatch + 
ledger_batch: &mut SchemaBatch, ) -> Result<(), AptosDbError> { let ledger_info = ledger_info_with_sig.ledger_info(); @@ -535,8 +518,13 @@ impl AptosDB { ); // Verify the root hash. - let db_root_hash = self.ledger_db.transaction_accumulator_db().get_root_hash(version)?; - let li_root_hash = ledger_info_with_sig.ledger_info().transaction_accumulator_hash(); + let db_root_hash = self + .ledger_db + .transaction_accumulator_db() + .get_root_hash(version)?; + let li_root_hash = ledger_info_with_sig + .ledger_info() + .transaction_accumulator_hash(); ensure!( db_root_hash == li_root_hash, "Root hash pre-committed doesn't match LedgerInfo. pre-commited: {:?} vs in LedgerInfo: {:?}", @@ -592,9 +580,9 @@ impl AptosDB { COMMITTED_TXNS.inc_by(num_txns); LATEST_TXN_VERSION.set(version as i64); if let Some(update_sender) = &self.update_subscriber { - update_sender.send( - (Instant::now(), version) - ).map_err(| err | { + update_sender + .send((Instant::now(), version)) + .map_err(|err| { AptosDbError::Other(format!("Failed to send update to subscriber: {}", err)) })?; } @@ -614,10 +602,20 @@ impl AptosDB { // n.b. txns_to_commit can be partial, when the control was handed over from consensus to state sync // where state sync won't send the pre-committed part to the DB again. if chunk_opt.is_some() && chunk_opt.as_ref().unwrap().len() == num_txns as usize { - let write_sets = chunk_opt.as_ref().unwrap().transaction_outputs.iter().map(|t| t.write_set()).collect_vec(); + let write_sets = chunk_opt + .as_ref() + .unwrap() + .transaction_outputs + .iter() + .map(|t| t.write_set()) + .collect_vec(); indexer.index(self.state_store.clone(), first_version, &write_sets)?; } else { - let write_sets: Vec<_> = self.ledger_db.write_set_db().get_write_set_iter(first_version, num_txns as usize)?.try_collect()?; + let write_sets: Vec<_> = self + .ledger_db + .write_set_db() + .get_write_set_iter(first_version, num_txns as usize)? + .try_collect()?; let write_set_refs = write_sets.iter().collect_vec(); indexer.index(self.state_store.clone(), first_version, &write_set_refs)?; }; diff --git a/storage/aptosdb/src/db/test_helper.rs b/storage/aptosdb/src/db/test_helper.rs index b3e552684403b..c5810ab7a589a 100644 --- a/storage/aptosdb/src/db/test_helper.rs +++ b/storage/aptosdb/src/db/test_helper.rs @@ -21,8 +21,8 @@ use aptos_types::{ proptest_types::{AccountInfoUniverse, BlockGen}, state_store::{state_key::StateKey, state_value::StateValue}, transaction::{ - ReplayProtector, Transaction, TransactionAuxiliaryData, TransactionInfo, - TransactionToCommit, Version, + PersistedAuxiliaryInfo, ReplayProtector, Transaction, TransactionAuxiliaryData, + TransactionInfo, TransactionToCommit, Version, }, write_set::TransactionWrite, }; @@ -181,6 +181,8 @@ prop_compose! { state_checkpoint_hash, placeholder_txn_info.gas_used(), placeholder_txn_info.status().clone(), + // TODO(grao): Consider making a real one? 
+ None, ); txn_accumulator = txn_accumulator.append(&[txn_info.hash()]); txn.set_transaction_info(txn_info); @@ -930,6 +932,17 @@ pub fn verify_committed_transactions( ); } +pub fn put_persisted_auxiliary_info( + db: &AptosDB, + version: Version, + persisted_info: &[PersistedAuxiliaryInfo], +) { + db.ledger_db + .persisted_auxiliary_info_db() + .commit_auxiliary_info(version, persisted_info) + .unwrap() +} + pub fn put_transaction_infos( db: &AptosDB, version: Version, diff --git a/storage/aptosdb/src/db_options.rs b/storage/aptosdb/src/db_options.rs index 0d59eaad0caf2..131ae3a66b747 100644 --- a/storage/aptosdb/src/db_options.rs +++ b/storage/aptosdb/src/db_options.rs @@ -22,6 +22,7 @@ pub(super) fn ledger_db_column_families() -> Vec<ColumnFamilyName> { EVENT_BY_VERSION_CF_NAME, EVENT_CF_NAME, LEDGER_INFO_CF_NAME, + PERSISTED_AUXILIARY_INFO_CF_NAME, STALE_STATE_VALUE_INDEX_CF_NAME, STATE_VALUE_CF_NAME, TRANSACTION_CF_NAME, @@ -49,6 +50,14 @@ pub(super) fn event_db_column_families() -> Vec<ColumnFamilyName> { ] } +pub(super) fn persisted_auxiliary_info_db_column_families() -> Vec<ColumnFamilyName> { + vec![ + /* empty cf */ DEFAULT_COLUMN_FAMILY_NAME, + DB_METADATA_CF_NAME, + PERSISTED_AUXILIARY_INFO_CF_NAME, + ] +} + pub(super) fn transaction_accumulator_db_column_families() -> Vec<ColumnFamilyName> { vec![ /* empty cf */ DEFAULT_COLUMN_FAMILY_NAME, @@ -191,6 +200,13 @@ pub(super) fn gen_event_cfds(rocksdb_config: &RocksdbConfig) -> Vec<ColumnFamilyDescriptor> { +pub(super) fn gen_persisted_auxiliary_info_cfds( + rocksdb_config: &RocksdbConfig, +) -> Vec<ColumnFamilyDescriptor> { + let cfs = persisted_auxiliary_info_db_column_families(); + gen_cfds(rocksdb_config, cfs, |_, _| {}) +} + pub(super) fn gen_transaction_accumulator_cfds( rocksdb_config: &RocksdbConfig, ) -> Vec<ColumnFamilyDescriptor> { diff --git a/storage/aptosdb/src/ledger_db/mod.rs b/storage/aptosdb/src/ledger_db/mod.rs index afc68ad8858c2..d37db75f71d6d 100644 --- a/storage/aptosdb/src/ledger_db/mod.rs +++ b/storage/aptosdb/src/ledger_db/mod.rs @@ -7,16 +7,17 @@ use crate::{ db_options::{ event_db_column_families, gen_event_cfds, gen_ledger_cfds, gen_ledger_metadata_cfds, - gen_transaction_accumulator_cfds, gen_transaction_auxiliary_data_cfds, - gen_transaction_cfds, gen_transaction_info_cfds, gen_write_set_cfds, - ledger_db_column_families, ledger_metadata_db_column_families, - transaction_accumulator_db_column_families, transaction_auxiliary_data_db_column_families, - transaction_db_column_families, transaction_info_db_column_families, - write_set_db_column_families, + gen_persisted_auxiliary_info_cfds, gen_transaction_accumulator_cfds, + gen_transaction_auxiliary_data_cfds, gen_transaction_cfds, gen_transaction_info_cfds, + gen_write_set_cfds, ledger_db_column_families, ledger_metadata_db_column_families, + persisted_auxiliary_info_db_column_families, transaction_accumulator_db_column_families, + transaction_auxiliary_data_db_column_families, transaction_db_column_families, + transaction_info_db_column_families, write_set_db_column_families, }, event_store::EventStore, ledger_db::{ event_db::EventDb, ledger_metadata_db::LedgerMetadataDb, + persisted_auxiliary_info_db::PersistedAuxiliaryInfoDb, transaction_accumulator_db::TransactionAccumulatorDb, transaction_auxiliary_data_db::TransactionAuxiliaryDataDb, transaction_db::TransactionDb, transaction_info_db::TransactionInfoDb, write_set_db::WriteSetDb, @@ -41,6 +42,9 @@ mod event_db_test; pub(crate) mod ledger_metadata_db; #[cfg(test)] mod ledger_metadata_db_test; +pub(crate) mod persisted_auxiliary_info_db; +#[cfg(test)] +mod persisted_auxiliary_info_db_test; pub(crate) mod transaction_accumulator_db; pub(crate) mod transaction_auxiliary_data_db; #[cfg(test)] @@ -59,6 +63,7 @@ pub const 
LEDGER_DB_FOLDER_NAME: &str = "ledger_db"; pub const LEDGER_DB_NAME: &str = "ledger_db"; pub const LEDGER_METADATA_DB_NAME: &str = "ledger_metadata_db"; pub const EVENT_DB_NAME: &str = "event_db"; +pub const PERSISTED_AUXILIARY_INFO_DB_NAME: &str = "persisted_auxiliary_info_db"; pub const TRANSACTION_ACCUMULATOR_DB_NAME: &str = "transaction_accumulator_db"; pub const TRANSACTION_AUXILIARY_DATA_DB_NAME: &str = "transaction_auxiliary_data_db"; pub const TRANSACTION_DB_NAME: &str = "transaction_db"; @@ -69,6 +74,7 @@ pub const WRITE_SET_DB_NAME: &str = "write_set_db"; pub struct LedgerDbSchemaBatches { pub ledger_metadata_db_batches: SchemaBatch, pub event_db_batches: SchemaBatch, + pub persisted_auxiliary_info_db_batches: SchemaBatch, pub transaction_accumulator_db_batches: SchemaBatch, pub transaction_auxiliary_data_db_batches: SchemaBatch, pub transaction_db_batches: SchemaBatch, @@ -81,6 +87,7 @@ impl Default for LedgerDbSchemaBatches { Self { ledger_metadata_db_batches: SchemaBatch::new(), event_db_batches: SchemaBatch::new(), + persisted_auxiliary_info_db_batches: SchemaBatch::new(), transaction_accumulator_db_batches: SchemaBatch::new(), transaction_auxiliary_data_db_batches: SchemaBatch::new(), transaction_db_batches: SchemaBatch::new(), @@ -100,6 +107,7 @@ impl LedgerDbSchemaBatches { pub struct LedgerDb { ledger_metadata_db: LedgerMetadataDb, event_db: EventDb, + persisted_auxiliary_info_db: PersistedAuxiliaryInfoDb, transaction_accumulator_db: TransactionAccumulatorDb, transaction_auxiliary_data_db: TransactionAuxiliaryDataDb, transaction_db: TransactionDb, @@ -141,6 +149,9 @@ impl LedgerDb { Arc::clone(&ledger_metadata_db), EventStore::new(Arc::clone(&ledger_metadata_db)), ), + persisted_auxiliary_info_db: PersistedAuxiliaryInfoDb::new(Arc::clone( + &ledger_metadata_db, + )), transaction_accumulator_db: TransactionAccumulatorDb::new(Arc::clone( &ledger_metadata_db, )), @@ -157,6 +168,7 @@ impl LedgerDb { let ledger_db_folder = db_root_path.as_ref().join(LEDGER_DB_FOLDER_NAME); let mut event_db = None; + let mut persisted_auxiliary_info_db = None; let mut transaction_accumulator_db = None; let mut transaction_auxiliary_data_db = None; let mut transaction_db = None; @@ -178,6 +190,17 @@ impl LedgerDb { EventStore::new(event_db_raw), )); }); + s.spawn(|_| { + persisted_auxiliary_info_db = Some(PersistedAuxiliaryInfoDb::new(Arc::new( + Self::open_rocksdb( + ledger_db_folder.join(PERSISTED_AUXILIARY_INFO_DB_NAME), + PERSISTED_AUXILIARY_INFO_DB_NAME, + &rocksdb_configs.ledger_db_config, + readonly, + ) + .unwrap(), + ))); + }); s.spawn(|_| { transaction_accumulator_db = Some(TransactionAccumulatorDb::new(Arc::new( Self::open_rocksdb( @@ -240,6 +263,7 @@ impl LedgerDb { Ok(Self { ledger_metadata_db: LedgerMetadataDb::new(ledger_metadata_db), event_db: event_db.unwrap(), + persisted_auxiliary_info_db: persisted_auxiliary_info_db.unwrap(), transaction_accumulator_db: transaction_accumulator_db.unwrap(), transaction_auxiliary_data_db: transaction_auxiliary_data_db.unwrap(), transaction_db: transaction_db.unwrap(), @@ -294,6 +318,9 @@ impl LedgerDb { ledger_db .event_db() .create_checkpoint(cp_ledger_db_folder.join(EVENT_DB_NAME))?; + ledger_db + .persisted_auxiliary_info_db() + .create_checkpoint(cp_ledger_db_folder.join(PERSISTED_AUXILIARY_INFO_DB_NAME))?; ledger_db .transaction_accumulator_db() .create_checkpoint(cp_ledger_db_folder.join(TRANSACTION_ACCUMULATOR_DB_NAME))?; @@ -318,6 +345,8 @@ impl LedgerDb { pub(crate) fn write_pruner_progress(&self, version: Version) -> Result<()> { 
info!("Fast sync is done, writing pruner progress {version} for all ledger sub pruners."); self.event_db.write_pruner_progress(version)?; + self.persisted_auxiliary_info_db + .write_pruner_progress(version)?; self.transaction_accumulator_db .write_pruner_progress(version)?; self.transaction_auxiliary_data_db @@ -348,6 +377,10 @@ impl LedgerDb { self.event_db.db() } + pub(crate) fn persisted_auxiliary_info_db(&self) -> &PersistedAuxiliaryInfoDb { + &self.persisted_auxiliary_info_db + } + pub(crate) fn transaction_accumulator_db(&self) -> &TransactionAccumulatorDb { &self.transaction_accumulator_db } @@ -421,6 +454,7 @@ impl LedgerDb { LEDGER_DB_NAME => ledger_db_column_families(), LEDGER_METADATA_DB_NAME => ledger_metadata_db_column_families(), EVENT_DB_NAME => event_db_column_families(), + PERSISTED_AUXILIARY_INFO_DB_NAME => persisted_auxiliary_info_db_column_families(), TRANSACTION_ACCUMULATOR_DB_NAME => transaction_accumulator_db_column_families(), TRANSACTION_AUXILIARY_DATA_DB_NAME => transaction_auxiliary_data_db_column_families(), TRANSACTION_DB_NAME => transaction_db_column_families(), @@ -435,6 +469,7 @@ impl LedgerDb { LEDGER_DB_NAME => gen_ledger_cfds(db_config), LEDGER_METADATA_DB_NAME => gen_ledger_metadata_cfds(db_config), EVENT_DB_NAME => gen_event_cfds(db_config), + PERSISTED_AUXILIARY_INFO_DB_NAME => gen_persisted_auxiliary_info_cfds(db_config), TRANSACTION_ACCUMULATOR_DB_NAME => gen_transaction_accumulator_cfds(db_config), TRANSACTION_AUXILIARY_DATA_DB_NAME => gen_transaction_auxiliary_data_cfds(db_config), TRANSACTION_DB_NAME => gen_transaction_cfds(db_config), @@ -460,6 +495,8 @@ impl LedgerDb { .write_schemas(schemas.transaction_info_db_batches)?; self.transaction_db .write_schemas(schemas.transaction_db_batches)?; + self.persisted_auxiliary_info_db + .write_schemas(schemas.persisted_auxiliary_info_db_batches)?; self.event_db.write_schemas(schemas.event_db_batches)?; self.transaction_accumulator_db .write_schemas(schemas.transaction_accumulator_db_batches)?; diff --git a/storage/aptosdb/src/ledger_db/persisted_auxiliary_info_db.rs b/storage/aptosdb/src/ledger_db/persisted_auxiliary_info_db.rs new file mode 100644 index 0000000000000..8ada4875cd4a7 --- /dev/null +++ b/storage/aptosdb/src/ledger_db/persisted_auxiliary_info_db.rs @@ -0,0 +1,127 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + metrics::OTHER_TIMERS_SECONDS, + schema::{ + db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, + persisted_auxiliary_info::PersistedAuxiliaryInfoSchema, + }, + utils::iterators::ExpectContinuousVersions, +}; +use aptos_metrics_core::TimerHelper; +use aptos_schemadb::{batch::SchemaBatch, DB}; +use aptos_storage_interface::Result; +use aptos_types::transaction::{PersistedAuxiliaryInfo, Version}; +use std::{path::Path, sync::Arc}; + +#[derive(Debug)] +pub(crate) struct PersistedAuxiliaryInfoDb { + db: Arc, +} + +impl PersistedAuxiliaryInfoDb { + pub(super) fn new(db: Arc) -> Self { + Self { db } + } + + pub(super) fn create_checkpoint(&self, path: impl AsRef) -> Result<()> { + self.db.create_checkpoint(path) + } + + pub(super) fn write_pruner_progress(&self, version: Version) -> Result<()> { + self.db.put::( + &DbMetadataKey::PersistedAuxiliaryInfoPrunerProgress, + &DbMetadataValue::Version(version), + ) + } + + pub(super) fn db(&self) -> &DB { + &self.db + } + + pub(crate) fn write_schemas(&self, batch: SchemaBatch) -> Result<()> { + self.db.write_schemas(batch) + } + + pub(crate) fn get_persisted_auxiliary_info( + &self, + 
version: Version, + ) -> Result<Option<PersistedAuxiliaryInfo>> { + self.db.get::<PersistedAuxiliaryInfoSchema>(&version) + } + + /// Returns an iterator that yields `num_persisted_auxiliary_info` persisted_auxiliary_info + /// starting from `start_version`. + /// + /// Requires the caller to not query the data beyond the latest version. + pub(crate) fn get_persisted_auxiliary_info_iter( + &self, + start_version: Version, + num_persisted_auxiliary_info: usize, + ) -> Result<Box<dyn Iterator<Item = Result<PersistedAuxiliaryInfo>> + '_>> { + let mut iter = self.db.iter::<PersistedAuxiliaryInfoSchema>()?; + iter.seek(&start_version)?; + let mut iter = iter.peekable(); + let item = iter.peek(); + let version = if item.is_some() { + item.unwrap().as_ref().map_err(|e| e.clone())?.0 + } else { + let mut iter = self.db.iter::<PersistedAuxiliaryInfoSchema>()?; + iter.seek_to_last(); + if iter.next().transpose()?.is_some() { + return Ok(Box::new(std::iter::empty())); + } + // Note in this case we return all Nones. We rely on the caller to not query future + // data when the DB is empty. + // TODO(grao): This will be unreachable in the future, consider make it an error later. + start_version + num_persisted_auxiliary_info as u64 + }; + let num_none = std::cmp::min( + num_persisted_auxiliary_info, + version.saturating_sub(start_version) as usize, + ); + let none_iter = itertools::repeat_n(Ok(PersistedAuxiliaryInfo::None), num_none); + Ok(Box::new(none_iter.chain(iter.expect_continuous_versions( + start_version + num_none as u64, + num_persisted_auxiliary_info - num_none, + )?))) + } + + pub(crate) fn commit_auxiliary_info( + &self, + first_version: Version, + persisted_auxiliary_info: &[PersistedAuxiliaryInfo], + ) -> Result<()> { + let _timer = OTHER_TIMERS_SECONDS.timer_with(&["commit_auxiliary_info"]); + + let mut batch = SchemaBatch::new(); + persisted_auxiliary_info.iter().enumerate().try_for_each( + |(i, aux_info)| -> Result<()> { + let version = first_version + i as u64; + Self::put_persisted_auxiliary_info(version, aux_info, &mut batch) + }, + )?; + + { + let _timer = OTHER_TIMERS_SECONDS.timer_with(&["commit_auxiliary_info___commit"]); + self.write_schemas(batch) + } + } + + pub(crate) fn put_persisted_auxiliary_info( + version: Version, + persisted_info: &PersistedAuxiliaryInfo, + batch: &mut SchemaBatch, + ) -> Result<()> { + batch.put::<PersistedAuxiliaryInfoSchema>(&version, persisted_info) + } + + /// Deletes the persisted auxiliary info for versions in the range [begin, end). + pub(crate) fn prune(begin: Version, end: Version, batch: &mut SchemaBatch) -> Result<()> { + for version in begin..end { + batch.delete::<PersistedAuxiliaryInfoSchema>(&version)?; + } + Ok(()) + } +} diff --git a/storage/aptosdb/src/ledger_db/persisted_auxiliary_info_db_test.rs b/storage/aptosdb/src/ledger_db/persisted_auxiliary_info_db_test.rs new file mode 100644 index 0000000000000..b97a9480a6523 --- /dev/null +++ b/storage/aptosdb/src/ledger_db/persisted_auxiliary_info_db_test.rs @@ -0,0 +1,102 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{db::test_helper::put_persisted_auxiliary_info, AptosDB}; +use aptos_temppath::TempPath; +use aptos_types::transaction::PersistedAuxiliaryInfo; +use proptest::{collection::vec, prelude::*}; + +fn get_persisted_auxiliary_info( + db: &AptosDB, + start_version: u64, + count: usize, +) -> Vec<PersistedAuxiliaryInfo> { + db.ledger_db + .persisted_auxiliary_info_db() + .get_persisted_auxiliary_info_iter(start_version, count) + .unwrap() + .collect::<Result<Vec<_>, _>>() + .unwrap() +} + +#[test] +pub fn test_iterator() { + let tmp_dir = TempPath::new(); + let db = AptosDB::new_for_test(&tmp_dir); + let v1_info = PersistedAuxiliaryInfo::V1 { + transaction_index: 0, + }; + + { + assert_eq!( + get_persisted_auxiliary_info(&db, 90, 20), + vec![PersistedAuxiliaryInfo::None; 20] + ); + } + + let persisted_info = vec![v1_info; 100]; + put_persisted_auxiliary_info(&db, 100, &persisted_info); + + { + let mut expected = vec![]; + for _ in 0..10 { + expected.push(PersistedAuxiliaryInfo::None); + } + for _ in 0..10 { + expected.push(v1_info); + } + assert_eq!(get_persisted_auxiliary_info(&db, 90, 20), expected); + } + + assert_eq!( + get_persisted_auxiliary_info(&db, 0, 20), + vec![PersistedAuxiliaryInfo::None; 20] + ); + + assert_eq!(get_persisted_auxiliary_info(&db, 100, 20), vec![ + v1_info; + 20 + ]); + + assert_eq!(get_persisted_auxiliary_info(&db, 190, 20), vec![ + v1_info; + 10 + ]); + + assert_eq!(get_persisted_auxiliary_info(&db, 200, 20), vec![]); +} + +proptest! { + #![proptest_config(ProptestConfig::with_cases(10))] + + #[test] + fn test_persisted_auxiliary_info_get_iterator( + (persisted_info, start_version, num_transactions) in + vec(any::<PersistedAuxiliaryInfo>(), 1..100) + .prop_flat_map(|info| { + let num_txns = info.len() as u64; + (Just(info), 0..num_txns) + }) + .prop_flat_map(|(info, start_version)| { + let num_txns = info.len() as u64; + (Just(info), Just(start_version), 0..num_txns as usize * 2) + }) + ) { + let tmp_dir = TempPath::new(); + let db = AptosDB::new_for_test(&tmp_dir); + put_persisted_auxiliary_info(&db, 0, &persisted_info); + + let iter = db.ledger_db.persisted_auxiliary_info_db() + .get_persisted_auxiliary_info_iter(start_version, num_transactions) + .unwrap(); + prop_assert_eq!( + persisted_info + .into_iter() + .skip(start_version as usize) + .take(num_transactions) + .collect::<Vec<_>>(), + iter.collect::<Result<Vec<_>, _>>().unwrap() + ); + } +} diff --git a/storage/aptosdb/src/pruner/ledger_pruner/mod.rs b/storage/aptosdb/src/pruner/ledger_pruner/mod.rs index 1bd766c93efe8..09e522e1596b8 100644 --- a/storage/aptosdb/src/pruner/ledger_pruner/mod.rs +++ b/storage/aptosdb/src/pruner/ledger_pruner/mod.rs @@ -4,6 +4,7 @@ mod event_store_pruner; mod ledger_metadata_pruner; pub(crate) mod ledger_pruner_manager; +mod persisted_auxiliary_info_pruner; mod transaction_accumulator_pruner; mod transaction_auxiliary_data_pruner; mod transaction_info_pruner; @@ -18,6 +19,7 @@ use crate::{ db_sub_pruner::DBSubPruner, ledger_pruner::{ event_store_pruner::EventStorePruner, ledger_metadata_pruner::LedgerMetadataPruner, + persisted_auxiliary_info_pruner::PersistedAuxiliaryInfoPruner, transaction_accumulator_pruner::TransactionAccumulatorPruner, transaction_auxiliary_data_pruner::TransactionAuxiliaryDataPruner, transaction_info_pruner::TransactionInfoPruner, transaction_pruner::TransactionPruner, @@ -138,6 +140,10 @@ impl LedgerPruner { metadata_progress, internal_indexer_db.clone(), )?); + let persisted_auxiliary_info_pruner = Box::new(PersistedAuxiliaryInfoPruner::new( + Arc::clone(&ledger_db), 
metadata_progress, + )?); let transaction_accumulator_pruner = Box::new(TransactionAccumulatorPruner::new( Arc::clone(&ledger_db), metadata_progress, @@ -169,6 +175,7 @@ impl LedgerPruner { ledger_metadata_pruner, sub_pruners: vec![ event_store_pruner, + persisted_auxiliary_info_pruner, transaction_accumulator_pruner, transaction_auxiliary_data_pruner, transaction_info_pruner, diff --git a/storage/aptosdb/src/pruner/ledger_pruner/persisted_auxiliary_info_pruner.rs b/storage/aptosdb/src/pruner/ledger_pruner/persisted_auxiliary_info_pruner.rs new file mode 100644 index 0000000000000..9ae27bbf5b933 --- /dev/null +++ b/storage/aptosdb/src/pruner/ledger_pruner/persisted_auxiliary_info_pruner.rs @@ -0,0 +1,60 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + ledger_db::{persisted_auxiliary_info_db::PersistedAuxiliaryInfoDb, LedgerDb}, + pruner::{db_sub_pruner::DBSubPruner, pruner_utils::get_or_initialize_subpruner_progress}, + schema::db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, +}; +use aptos_logger::info; +use aptos_schemadb::batch::SchemaBatch; +use aptos_storage_interface::Result; +use aptos_types::transaction::Version; +use std::sync::Arc; + +#[derive(Debug)] +pub struct PersistedAuxiliaryInfoPruner { + ledger_db: Arc<LedgerDb>, +} + +impl DBSubPruner for PersistedAuxiliaryInfoPruner { + fn name(&self) -> &str { + "PersistedAuxiliaryInfoPruner" + } + + fn prune(&self, current_progress: Version, target_version: Version) -> Result<()> { + let mut batch = SchemaBatch::new(); + PersistedAuxiliaryInfoDb::prune(current_progress, target_version, &mut batch)?; + batch.put::<DbMetadataSchema>( + &DbMetadataKey::PersistedAuxiliaryInfoPrunerProgress, + &DbMetadataValue::Version(target_version), + )?; + self.ledger_db + .persisted_auxiliary_info_db() + .write_schemas(batch) + } +} + +impl PersistedAuxiliaryInfoPruner { + pub(in crate::pruner) fn new( + ledger_db: Arc<LedgerDb>, + metadata_progress: Version, + ) -> Result<Self> { + let progress = get_or_initialize_subpruner_progress( + ledger_db.write_set_db_raw(), + &DbMetadataKey::PersistedAuxiliaryInfoPrunerProgress, + metadata_progress, + )?; + + let myself = PersistedAuxiliaryInfoPruner { ledger_db }; + + info!( + progress = progress, + metadata_progress = metadata_progress, + "Catching up PersistedAuxiliaryInfoPruner." 
+ ); + myself.prune(progress, metadata_progress)?; + + Ok(myself) + } +} diff --git a/storage/aptosdb/src/pruner/ledger_pruner/transaction_pruner.rs b/storage/aptosdb/src/pruner/ledger_pruner/transaction_pruner.rs index 8bde59726d604..25a3da4381907 100644 --- a/storage/aptosdb/src/pruner/ledger_pruner/transaction_pruner.rs +++ b/storage/aptosdb/src/pruner/ledger_pruner/transaction_pruner.rs @@ -49,6 +49,8 @@ impl DBSubPruner for TransactionPruner { target_version, &mut batch, )?; + self.transaction_store + .prune_transaction_summaries_by_account(&candidate_transactions, &mut batch)?; batch.put::<DbMetadataSchema>( &DbMetadataKey::TransactionPrunerProgress, &DbMetadataValue::Version(target_version), diff --git a/storage/aptosdb/src/schema/db_metadata/mod.rs b/storage/aptosdb/src/schema/db_metadata/mod.rs index b76a423ba0ef9..682f9d18c9411 100644 --- a/storage/aptosdb/src/schema/db_metadata/mod.rs +++ b/storage/aptosdb/src/schema/db_metadata/mod.rs @@ -68,6 +68,7 @@ pub enum DbMetadataKey { StateKvShardPrunerProgress(ShardId), StateMerkleShardRestoreProgress(ShardId, Version), TransactionAuxiliaryDataPrunerProgress, + PersistedAuxiliaryInfoPrunerProgress, } define_schema!( diff --git a/storage/aptosdb/src/schema/mod.rs b/storage/aptosdb/src/schema/mod.rs index e4a6f62eed35a..ee5c681cd8d52 100644 --- a/storage/aptosdb/src/schema/mod.rs +++ b/storage/aptosdb/src/schema/mod.rs @@ -15,6 +15,7 @@ pub(crate) mod event_accumulator; pub(crate) mod hot_state_value_by_key_hash; pub(crate) mod jellyfish_merkle_node; pub(crate) mod ledger_info; +pub(crate) mod persisted_auxiliary_info; pub(crate) mod stale_node_index; pub(crate) mod stale_node_index_cross_epoch; pub(crate) mod stale_state_value_index; @@ -45,6 +46,7 @@ pub const EVENT_CF_NAME: ColumnFamilyName = "event"; pub const HOT_STATE_VALUE_BY_KEY_HASH_CF_NAME: ColumnFamilyName = "hot_state_value_by_key_hash"; pub const JELLYFISH_MERKLE_NODE_CF_NAME: ColumnFamilyName = "jellyfish_merkle_node"; pub const LEDGER_INFO_CF_NAME: ColumnFamilyName = "ledger_info"; +pub const PERSISTED_AUXILIARY_INFO_CF_NAME: ColumnFamilyName = "persisted_auxiliary_info"; pub const STALE_NODE_INDEX_CF_NAME: ColumnFamilyName = "stale_node_index"; pub const STALE_NODE_INDEX_CROSS_EPOCH_CF_NAME: ColumnFamilyName = "stale_node_index_cross_epoch"; pub const STALE_STATE_VALUE_INDEX_CF_NAME: ColumnFamilyName = "stale_state_value_index"; @@ -103,6 +105,9 @@ pub mod fuzzing { ); assert_no_panic_decoding::<super::jellyfish_merkle_node::JellyfishMerkleNodeSchema>(data); assert_no_panic_decoding::<super::ledger_info::LedgerInfoSchema>(data); + assert_no_panic_decoding::<super::persisted_auxiliary_info::PersistedAuxiliaryInfoSchema>( + data, + ); assert_no_panic_decoding::<super::stale_node_index::StaleNodeIndexSchema>(data); assert_no_panic_decoding::< super::stale_node_index_cross_epoch::StaleNodeIndexCrossEpochSchema, diff --git a/storage/aptosdb/src/schema/persisted_auxiliary_info/mod.rs b/storage/aptosdb/src/schema/persisted_auxiliary_info/mod.rs new file mode 100644 index 0000000000000..abc632663eb01 --- /dev/null +++ b/storage/aptosdb/src/schema/persisted_auxiliary_info/mod.rs @@ -0,0 +1,54 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! This module defines physical storage schema for PersistedAuxiliaryInfo structure. +//! +//! Serialized PersistedAuxiliaryInfo bytes keyed by transaction version. +//! ```text +//! |<--key-->|<------------value------------->| +//! | version | persisted_auxiliary_info bytes | +//! ``` +//! +//! `Version` is serialized in big endian so that records in RocksDB will be in order of its +//! numeric value. 
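A quick aside on the big-endian point in the doc comment above: RocksDB orders keys byte-wise, so the encoding must make byte order coincide with numeric order, which `to_be_bytes` does and a little-endian encoding would not. A minimal standalone sketch of that property (editorial illustration, assuming only the Rust standard library; not part of the change itself):

```rust
fn main() {
    let (a, b): (u64, u64) = (255, 256);

    // Big-endian: most significant byte first, so byte-wise (lexicographic)
    // comparison agrees with numeric comparison.
    assert!(a.to_be_bytes() < b.to_be_bytes()); // [..,0,255] < [..,1,0]

    // Little-endian would interleave versions out of order in the column
    // family: 255 encodes as [255,0,..], which sorts after 256's [0,1,0,..].
    assert!(a.to_le_bytes() > b.to_le_bytes());
}
```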
+ +use crate::schema::{ensure_slice_len_eq, PERSISTED_AUXILIARY_INFO_CF_NAME}; +use anyhow::Result; +use aptos_schemadb::{ + define_schema, + schema::{KeyCodec, ValueCodec}, +}; +use aptos_types::transaction::{PersistedAuxiliaryInfo, Version}; +use byteorder::{BigEndian, ReadBytesExt}; +use std::mem::size_of; + +define_schema!( + PersistedAuxiliaryInfoSchema, + Version, + PersistedAuxiliaryInfo, + PERSISTED_AUXILIARY_INFO_CF_NAME +); + +impl KeyCodec<PersistedAuxiliaryInfoSchema> for Version { + fn encode_key(&self) -> Result<Vec<u8>> { + Ok(self.to_be_bytes().to_vec()) + } + + fn decode_key(mut data: &[u8]) -> Result<Self> { + ensure_slice_len_eq(data, size_of::<Version>())?; + Ok(data.read_u64::<BigEndian>()?) + } +} + +impl ValueCodec<PersistedAuxiliaryInfoSchema> for PersistedAuxiliaryInfo { + fn encode_value(&self) -> Result<Vec<u8>> { + bcs::to_bytes(self).map_err(Into::into) + } + + fn decode_value(data: &[u8]) -> Result<Self> { + bcs::from_bytes(data).map_err(Into::into) + } +} + +#[cfg(test)] +mod test; diff --git a/storage/aptosdb/src/schema/persisted_auxiliary_info/test.rs b/storage/aptosdb/src/schema/persisted_auxiliary_info/test.rs new file mode 100644 index 0000000000000..05ef4b1cd8d00 --- /dev/null +++ b/storage/aptosdb/src/schema/persisted_auxiliary_info/test.rs @@ -0,0 +1,16 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::*; +use aptos_schemadb::{schema::fuzzing::assert_encode_decode, test_no_panic_decoding}; +use aptos_types::transaction::{PersistedAuxiliaryInfo, Version}; +use proptest::prelude::*; + +proptest! { + #[test] + fn test_encode_decode(version in any::<Version>(), info in any::<PersistedAuxiliaryInfo>()) { + assert_encode_decode::<PersistedAuxiliaryInfoSchema>(&version, &info); + } +} + +test_no_panic_decoding!(PersistedAuxiliaryInfoSchema); diff --git a/storage/aptosdb/src/state_store/hot_state.rs b/storage/aptosdb/src/state_store/hot_state.rs index bb100445662c5..88b6632a87d7b 100644 --- a/storage/aptosdb/src/state_store/hot_state.rs +++ b/storage/aptosdb/src/state_store/hot_state.rs @@ -9,35 +9,47 @@ use aptos_metrics_core::{IntCounterHelper, IntGaugeHelper, TimerHelper}; use aptos_storage_interface::state_store::{ state::State, state_view::hot_state_view::HotStateView, }; -use aptos_types::{ - state_store::{state_key::StateKey, state_slot::StateSlot, StateViewResult}, - transaction::Version, -}; +use aptos_types::state_store::{state_key::StateKey, state_slot::StateSlot, StateViewResult}; use dashmap::DashMap; -use std::{ - collections::BTreeSet, - sync::{ - mpsc::{Receiver, SyncSender, TryRecvError}, - Arc, - }, +use std::sync::{ + mpsc::{Receiver, SyncSender, TryRecvError}, + Arc, }; const MAX_HOT_STATE_COMMIT_BACKLOG: usize = 10; #[derive(Debug)] -pub struct HotStateBase { +struct Entry<K, V> { + data: V, + /// The key that is slightly newer than the current entry. `None` for the newest entry. + prev: Option<K>, + /// The key that is slightly older than the current entry. `None` for the oldest entry. + next: Option<K>, +} + +#[derive(Debug)] +pub struct HotStateBase<K = StateKey, V = StateSlot> +where + K: Eq + std::hash::Hash, +{ /// After committing a new batch to `inner`, items are evicted so that /// 1. total number of items doesn't exceed this number max_items: usize, /// 2. total number of bytes, incl. 
both keys and values doesn't exceed this number + #[allow(dead_code)] // TODO(HotState): not enforced for now max_bytes: usize, /// No item is accepted to `inner` if the size of the value exceeds this number + #[allow(dead_code)] // TODO(HotState): not enforced for now max_single_value_bytes: usize, - inner: DashMap<StateKey, StateSlot>, + inner: DashMap<K, Entry<K, V>>, } -impl HotStateBase { +impl<K, V> HotStateBase<K, V> +where + K: Eq + std::hash::Hash, + V: Clone, +{ fn new_empty(max_items: usize, max_bytes: usize, max_single_value_bytes: usize) -> Self { Self { max_items, @@ -47,12 +59,12 @@ impl HotStateBase { } } - fn get(&self, key: &StateKey) -> Option<StateSlot> { - self.inner.get(key).map(|val| val.clone()) + fn get(&self, key: &K) -> Option<V> { + self.inner.get(key).map(|val| val.data.clone()) } } -impl HotStateView for HotStateBase { +impl HotStateView for HotStateBase<StateKey, StateSlot> { fn get_state_slot(&self, state_key: &StateKey) -> StateViewResult<Option<StateSlot>> { Ok(self.get(state_key)) } @@ -111,9 +123,12 @@ pub struct Committer { base: Arc<HotStateBase>, committed: Arc<Mutex<State>>, rx: Receiver<State>, - key_by_hot_since_version: BTreeSet<(Version, StateKey)>, total_key_bytes: usize, total_value_bytes: usize, + /// Points to the newest entry. `None` if empty. + head: Option<StateKey>, + /// Points to the oldest entry. `None` if empty. + tail: Option<StateKey>, } impl Committer { @@ -129,9 +144,10 @@ impl Committer { base, committed, rx, - key_by_hot_since_version: BTreeSet::new(), total_key_bytes: 0, total_value_bytes: 0, + head: None, + tail: None, } } @@ -140,11 +156,8 @@ impl Committer { while let Some(to_commit) = self.next_to_commit() { self.commit(&to_commit); - self.evict(); *self.committed.lock() = to_commit; - assert_eq!(self.key_by_hot_since_version.len(), self.base.inner.len()); - GAUGE.set_with(&["hot_state_items"], self.base.inner.len() as i64); GAUGE.set_with(&["hot_state_key_bytes"], self.total_key_bytes as i64); GAUGE.set_with(&["hot_state_value_bytes"], self.total_value_bytes as i64); @@ -185,18 +198,28 @@ impl Committer { let _timer = OTHER_TIMERS_SECONDS.timer_with(&["hot_state_commit"]); let mut n_delete = 0; - let mut n_too_large = 0; + let n_too_large = 0; // TODO(HotState): enforce this later. let mut n_update = 0; let mut n_insert = 0; let delta = to_commit.make_delta(&self.committed.lock()); - for (key, slot) in delta.shards.iter().flat_map(|shard| shard.iter()) { + let mut all_updates = delta + .shards + .iter() + .flat_map(|shard| shard.iter()) + .collect::<Vec<_>>(); + // We will update the LRU next. Here we put the deletions at the + // beginning, then the older updates, and the newest updates are at the + // end. 
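The ordering described in the comment above is produced by the `map_or` sort key right below: a deletion has no `hot_since_version`, maps to -1, and sorts first; live updates follow in ascending version order so the newest is applied to the LRU last. A toy sketch of the same trick, with `Option<u64>` standing in for `hot_since_version_opt()` (editorial illustration, not part of the change):

```rust
fn main() {
    // (key, hot_since_version); None plays the role of a deletion.
    let mut updates = vec![("k1", Some(7u64)), ("k2", None), ("k3", Some(3))];

    // Deletions map to -1 and sort first; live updates follow oldest-to-newest.
    updates.sort_unstable_by_key(|(_k, ver)| ver.map_or(-1, |v| v as i64));

    let order: Vec<&str> = updates.iter().map(|(k, _)| *k).collect();
    assert_eq!(order, ["k2", "k3", "k1"]);
}
```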
+ all_updates.sort_unstable_by_key(|(_key, slot)| { + slot.hot_since_version_opt().map_or(-1, |v| v as i64) + }); + + let mut updater = LRUUpdater::new(Arc::clone(&self.base), &mut self.head, &mut self.tail); for (key, slot) in all_updates { let has_old_entry = if let Some(old_slot) = self.base.get(&key) { self.total_key_bytes -= key.size(); self.total_value_bytes -= old_slot.size(); - - self.key_by_hot_since_version - .remove(&(old_slot.expect_hot_since_version(), key.clone())); true } else { false @@ -206,14 +229,8 @@ // deletion if has_old_entry { n_delete += 1; + updater.delete(&key); } - - self.base.inner.remove(&key); - } else if slot.size() > self.base.max_single_value_bytes { - // item too large to hold in memory - n_too_large += 1; - - self.base.inner.remove(&key); } else { if has_old_entry { n_update += 1; @@ -224,9 +241,7 @@ self.total_key_bytes += key.size(); self.total_value_bytes += slot.size(); - self.key_by_hot_since_version - .insert((slot.expect_hot_since_version(), key.clone())); - self.base.inner.insert(key, slot); + updater.insert(key, slot); } } @@ -234,46 +249,229 @@ COUNTER.inc_with_by(&["hot_state_too_large"], n_too_large); COUNTER.inc_with_by(&["hot_state_update"], n_update); COUNTER.inc_with_by(&["hot_state_insert"], n_insert); + + { + let _timer = OTHER_TIMERS_SECONDS.timer_with(&["hot_state_evict"]); + + let evicted = updater.evict(); + if evicted.is_empty() { + return; + } + for (key, slot) in &evicted { + self.total_key_bytes -= key.size(); + self.total_value_bytes -= slot.size(); + } + + let head = self + .head + .as_ref() + .expect("LRU must not be empty when eviction has happened."); + let latest_version = self + .base + .inner + .get(head) + .expect("Head must exist.") + .data + .expect_hot_since_version(); + let max_evicted_version = evicted.last().unwrap().1.expect_hot_since_version(); + GAUGE.set_with( + &["hot_state_item_evict_age_versions"], + (latest_version - max_evicted_version) as i64, + ); + COUNTER.inc_with_by(&["hot_state_evict"], evicted.len() as u64); + } } +} - fn evict(&mut self) { - let _timer = OTHER_TIMERS_SECONDS.timer_with(&["hot_state_evict"]); +struct LRUUpdater<'a, K, V> +where + K: Eq + std::hash::Hash, +{ + base: Arc<HotStateBase<K, V>>, + head: &'a mut Option<K>, + tail: &'a mut Option<K>, +} - let latest_version = match self.key_by_hot_since_version.last() { +impl<'a, K, V> LRUUpdater<'a, K, V> +where + K: Clone + std::fmt::Debug + Eq + std::hash::Hash, + V: Clone + std::fmt::Debug, +{ + fn new( + base: Arc<HotStateBase<K, V>>, + head: &'a mut Option<K>, + tail: &'a mut Option<K>, + ) -> Self { + Self { base, head, tail } + } + + fn insert(&mut self, key: K, value: V) { + if self.base.inner.contains_key(&key) { + self.delete(&key); + } + self.insert_to_front(key, value); + } + + /// Deletes and returns the oldest entry. + fn delete_lru(&mut self) -> Option<(K, V)> { + let key = match &self.tail { + Some(k) => k.clone(), + None => return None, + }; + let value = self.delete(&key).expect("Tail must exist."); + Some((key, value)) + } + + fn delete(&mut self, key: &K) -> Option<V> { + let old_entry = match self.base.inner.remove(key) { + Some((_k, e)) => e, + None => return None, + }; + + match &old_entry.prev { + Some(prev_key) => { + let mut prev_entry = self + .base + .inner + .get_mut(prev_key) + .expect("The previous key must exist"); + prev_entry.next = old_entry.next.clone(); + }, None => { - // hot state is empty - return; 
+ *self.head = old_entry.next.clone(); }, - Some((hot_since_version, _key)) => *hot_since_version, - }; - let mut max_evicted_version = 0; - let mut num_evicted = 0; + } - while self.should_evict() { - let (ver, key) = self - .key_by_hot_since_version - .pop_first() - .expect("Known Non-empty."); - let (key, slot) = self.base.inner.remove(&key).expect("Known to exist."); + match &old_entry.next { + Some(next_key) => { + let mut next_entry = self + .base + .inner + .get_mut(next_key) + .expect("The next key must exist."); + next_entry.prev = old_entry.prev; + }, + None => { + // There is no older entry. The current key was the tail. + *self.tail = old_entry.prev; + }, + } - self.total_key_bytes -= key.size(); - self.total_value_bytes -= slot.size(); + Some(old_entry.data) + } - num_evicted += 1; - max_evicted_version = ver; + fn insert_to_front(&mut self, key: K, value: V) { + assert_eq!(self.head.is_some(), self.tail.is_some()); + match self.head.take() { + Some(head) => { + { + // Release the reference to the old entry ASAP to avoid deadlock when inserting + // the new entry below. + let mut old_head_entry = + self.base.inner.get_mut(&head).expect("Head must exist."); + old_head_entry.prev = Some(key.clone()); + } + let entry = Entry { + data: value, + prev: None, + next: Some(head), + }; + self.base.inner.insert(key.clone(), entry); + *self.head = Some(key); + }, + None => { + let entry = Entry { + data: value, + prev: None, + next: None, + }; + self.base.inner.insert(key.clone(), entry); + *self.head = Some(key.clone()); + *self.tail = Some(key); + }, } + } - if num_evicted > 0 { - GAUGE.set_with( - &["hot_state_item_evict_age_versions"], - (latest_version - max_evicted_version) as i64, - ); - COUNTER.inc_with_by(&["hot_state_evict"], num_evicted as u64); + fn evict(&mut self) -> Vec<(K, V)> { + if !self.should_evict() { + return Vec::new(); + } + + let mut items = Vec::with_capacity(self.base.inner.len() - self.base.max_items); + while self.should_evict() { + items.push(self.delete_lru().unwrap()); } + items } fn should_evict(&self) -> bool { self.base.inner.len() > self.base.max_items - || self.total_key_bytes + self.total_value_bytes > self.base.max_bytes + } + + #[cfg(test)] + fn collect_all(&self) -> Vec<(K, V)> { + assert_eq!(self.head.is_some(), self.tail.is_some()); + + let mut keys = Vec::new(); + let mut values = Vec::new(); + + let mut current_key = self.head.clone(); + while let Some(key) = current_key { + let entry = self.base.inner.get(&key).unwrap(); + assert_eq!(entry.prev, keys.last().cloned()); + keys.push(key); + values.push(entry.data.clone()); + current_key = entry.next.clone(); + } + itertools::zip_eq(keys, values).collect() + } +} + +#[cfg(test)] +mod tests { + use super::{HotStateBase, LRUUpdater}; + use lru::LruCache; + use proptest::{collection::vec, option, prelude::*}; + use std::sync::Arc; + + const MAX_BYTES: usize = 10000; + const MAX_SINGLE_VALUE_BYTES: usize = 100; + + proptest! 
{ + #[test] + fn test_hot_state_lru( + max_items in 1..10usize, + updates in vec((0..20u64, option::weighted(0.8, 0..1000u64)), 1..50), + ) { + let base = Arc::new(HotStateBase::new_empty( + max_items, + MAX_BYTES, + MAX_SINGLE_VALUE_BYTES, + )); + let mut head = None; + let mut tail = None; + + let mut updater = LRUUpdater::new(base, &mut head, &mut tail); + let mut cache = LruCache::new(max_items); + + for (key, value_opt) in updates { + match value_opt { + Some(value) => { + updater.insert(key, value); + cache.put(key, value); + } + None => { + updater.delete(&key); + cache.pop(&key); + } + } + updater.evict(); + + prop_assert_eq!(updater.base.inner.len(), cache.len()); + let items = updater.collect_all(); + prop_assert_eq!(items, cache.iter().map(|(k, v)| (*k, *v)).collect::<Vec<_>>()); + } + } } } diff --git a/storage/aptosdb/src/transaction_store/mod.rs b/storage/aptosdb/src/transaction_store/mod.rs index d58fb1344b28f..5eed6fc5aaa82 100644 --- a/storage/aptosdb/src/transaction_store/mod.rs +++ b/storage/aptosdb/src/transaction_store/mod.rs @@ -146,13 +146,24 @@ impl TransactionStore { transactions: &[(Version, Transaction)], db_batch: &mut SchemaBatch, ) -> Result<()> { - for (version, transaction) in transactions { + for (_, transaction) in transactions { if let Some(txn) = transaction.try_as_signed_user_txn() { if let ReplayProtector::SequenceNumber(seq_num) = txn.replay_protector() { db_batch .delete::<TransactionByAccountSchema>(&(txn.sender(), seq_num))?; } - // TODO[Orderless]: Check where else transactions summaries need to be pruned + } + } + Ok(()) + } + + pub fn prune_transaction_summaries_by_account( + &self, + transactions: &[(Version, Transaction)], + db_batch: &mut SchemaBatch, + ) -> Result<()> { + for (version, transaction) in transactions { + if let Some(txn) = transaction.try_as_signed_user_txn() { db_batch .delete::<TransactionSummariesByAccountSchema>(&(txn.sender(), *version))?; } diff --git a/storage/aptosdb/src/utils/truncation_helper.rs b/storage/aptosdb/src/utils/truncation_helper.rs index 6b9129fe1643a..057f76ef7a059 100644 --- a/storage/aptosdb/src/utils/truncation_helper.rs +++ b/storage/aptosdb/src/utils/truncation_helper.rs @@ -385,6 +385,7 @@ fn delete_transaction_index_data( .zip(transactions) .collect::<Vec<_>>(); transaction_store.prune_transaction_by_account(&transactions, batch)?; + transaction_store.prune_transaction_summaries_by_account(&transactions, batch)?; } Ok(()) diff --git a/storage/backup/backup-cli/src/backup_types/transaction/restore.rs b/storage/backup/backup-cli/src/backup_types/transaction/restore.rs index cfb0481c877c1..40767e71958df 100644 --- a/storage/backup/backup-cli/src/backup_types/transaction/restore.rs +++ b/storage/backup/backup-cli/src/backup_types/transaction/restore.rs @@ -35,7 +35,9 @@ use aptos_types::{ contract_event::ContractEvent, ledger_info::LedgerInfoWithSignatures, proof::{TransactionAccumulatorRangeProof, TransactionInfoListWithProof}, - transaction::{Transaction, TransactionInfo, TransactionListWithProof, Version}, + transaction::{ + PersistedAuxiliaryInfo, Transaction, TransactionInfo, TransactionListWithProof, Version, + }, write_set::WriteSet, }; use aptos_vm::{aptos_vm::AptosVMBlockExecutor, AptosVM}; @@ -610,6 +612,11 @@ impl TransactionRestoreBatchController { tokio::task::spawn_blocking(move || { chunk_replayer.enqueue_chunks( txns, + // TODO(grao): Support PersistedAuxiliaryInfo in restore. 
+ txn_infos + .iter() + .map(|_| PersistedAuxiliaryInfo::None) + .collect(), txn_infos, write_sets, events, diff --git a/storage/db-tool/src/replay_on_archive.rs b/storage/db-tool/src/replay_on_archive.rs index 78c98703b9ae6..0b93c0b4f00e5 100644 --- a/storage/db-tool/src/replay_on_archive.rs +++ b/storage/db-tool/src/replay_on_archive.rs @@ -2,7 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use anyhow::{Error, Ok, Result}; +use anyhow::{bail, Error, Ok, Result}; use aptos_backup_cli::utils::{ReplayConcurrencyLevelOpt, RocksdbOpt}; use aptos_block_executor::txn_provider::default::DefaultTxnProvider; use aptos_config::config::{ @@ -79,11 +79,9 @@ impl Opt { let all_errors = verifier.run()?; if !all_errors.is_empty() { error!("{} failed transactions", all_errors.len()); - /* errors were printed as found. for e in all_errors { error!("Failed: {}", e); } - */ process::exit(2); } Ok(()) @@ -196,7 +194,7 @@ impl Verifier { let res = ranges .par_iter() .map(|(start, limit)| self.verify(*start, *limit)) - .collect::<Vec<Result<Vec<Error>>>>(); + .collect::<Vec<_>>(); let mut all_failed_txns = Vec::new(); for iter in res.into_iter() { all_failed_txns.extend(iter?); } @@ -219,12 +217,12 @@ impl Verifier { // timeout check if let Some(duration) = self.timeout_secs { if self.replay_stat.get_elapsed_secs() >= duration { - error!( - "Verify timeout: {}s elapsed. Deadline: {}s", + bail!( + "Verify timeout: {}s elapsed. Deadline: {}s. Failed txns count: {}", self.replay_stat.get_elapsed_secs(), - duration + duration, + total_failed_txns.len(), ); - return Ok(total_failed_txns); } } @@ -313,7 +311,8 @@ impl Verifier { .iter() .map(|txn| SignatureVerifiedTransaction::from(txn.clone())) .collect::<Vec<_>>(); - let txns_provider = DefaultTxnProvider::new(txns); + // TODO(grao): Pass in persisted info. 
+ let txns_provider = DefaultTxnProvider::new_without_info(txns); let executed_outputs = AptosVMBlockExecutor::new().execute_block_no_limit( &txns_provider, &self diff --git a/storage/db-tool/src/tests.rs b/storage/db-tool/src/tests.rs index 3368026aca9a2..efd8457e11591 100644 --- a/storage/db-tool/src/tests.rs +++ b/storage/db-tool/src/tests.rs @@ -285,16 +285,13 @@ mod dbtool_tests { backup_dir: PathBuf, old_db_dir: PathBuf, new_db_dir: PathBuf, - force_sharding: bool, ) -> (Runtime, String) { use aptos_config::config::{ RocksdbConfigs, StorageDirPaths, BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, NO_OP_STORAGE_PRUNER_CONFIG, }; - use aptos_db::utils::iterators::PrefixedStateValueIterator; use aptos_db_indexer::utils::PrefixedStateValueIterator as IndexerPrefixedStateValueIterator; use aptos_indexer_grpc_table_info::internal_indexer_db_service::InternalIndexerDBService; - use itertools::zip_eq; let db = test_execution_with_storage_impl_inner(false, old_db_dir.as_path()); let (rt, port) = start_local_backup_service(Arc::clone(&db)); let server_addr = format!("http://localhost:{}", port); @@ -446,132 +443,77 @@ mod dbtool_tests { "--local-fs-dir".to_string(), backup_dir.as_path().to_str().unwrap().to_string(), ]; - if force_sharding { - let additional_args = vec!["--enable-storage-sharding", "--enable-state-indices"] - .into_iter() - .map(|s| s.to_string()) - .collect::<Vec<_>>(); - restore_args.extend(additional_args); - } + let additional_args = vec!["--enable-storage-sharding", "--enable-state-indices"] + .into_iter() + .map(|s| s.to_string()) + .collect::<Vec<_>>(); + restore_args.extend(additional_args); rt.block_on(DBTool::try_parse_from(restore_args).unwrap().run()) .unwrap(); - // verify the new DB has the same data as the original DB - let db_config = if !force_sharding { - RocksdbConfigs::default() - } else { - RocksdbConfigs { - enable_storage_sharding: true, - ..Default::default() - } - }; - // assert the kv are the same in db and new_db // current all the kv are still stored in the ledger db // - if !force_sharding { - let (_ledger_db, tree_db, state_kv_db) = - AptosDB::open_dbs(&StorageDirPaths::from_path(new_db_dir), db_config, false, 0) - .unwrap(); - for ver in start..=end { - let new_iter = PrefixedStateValueIterator::new( - &state_kv_db, - StateKeyPrefix::new(AccessPath, b"".to_vec()), - None, - ver, - ) - .unwrap(); - - let old_iter = db - .deref() - .get_prefixed_state_value_iterator( - &StateKeyPrefix::new(AccessPath, b"".to_vec()), - None, - ver, - ) - .unwrap(); - - zip_eq(new_iter, old_iter).for_each(|(new, old)| { - let (new_key, new_value) = new.unwrap(); - let (old_key, old_value) = old.unwrap(); - assert_eq!(new_key, old_key); - assert_eq!(new_value, old_value); - }); - // first snapshot tree not recovered - assert!( - tree_db.get_root_hash(0).is_err() || tree_db.get_leaf_count(0).unwrap() == 0, - "tree at version 0 should not be restored" - ); - // second snapshot tree recovered - let second_snapshot_version: Version = 13; - assert!( - tree_db.get_root_hash(second_snapshot_version).is_ok(), - "root hash at version {} doesn't exist", - second_snapshot_version, - ); - } + let internal_indexer_db = + InternalIndexerDBService::get_indexer_db_for_restore(new_db_dir.as_path()).unwrap(); + + let aptos_db: Arc<AptosDB> = Arc::new( + AptosDB::open( + StorageDirPaths::from_path(new_db_dir), + false, + NO_OP_STORAGE_PRUNER_CONFIG, + RocksdbConfigs::default(), + false, + BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, + 1000, + Some(internal_indexer_db.clone()), + ) + .unwrap(), + ); + + // Only state key at 
and by the snapshot version are restored in internal indexer + let snapshot_version = if start == 0 { + 0 + } else if start > 0 && start < 15 { + 1 } else { - let internal_indexer_db = - InternalIndexerDBService::get_indexer_db_for_restore(new_db_dir.as_path()).unwrap(); - - let aptos_db: Arc<AptosDB> = Arc::new( - AptosDB::open( - StorageDirPaths::from_path(new_db_dir), - false, - NO_OP_STORAGE_PRUNER_CONFIG, - db_config, - false, - BUFFERED_STATE_TARGET_ITEMS_FOR_TEST, - 1000, - Some(internal_indexer_db.clone()), - ) - .unwrap(), - ); - - // Only state key at and by the snapshot version are restored in internal indexer - let snapshot_version = if start == 0 { - 0 - } else if start > 0 && start < 15 { - 1 - } else { - 15 - }; + 15 + }; - let new_iter = IndexerPrefixedStateValueIterator::new( - aptos_db.clone(), - internal_indexer_db.get_inner_db_ref(), - StateKeyPrefix::new(AccessPath, b"".to_vec()), + let new_iter = IndexerPrefixedStateValueIterator::new( + aptos_db.clone(), + internal_indexer_db.get_inner_db_ref(), + StateKeyPrefix::new(AccessPath, b"".to_vec()), + None, + snapshot_version, + ) + .unwrap(); + + let old_iter = db + .deref() + .get_prefixed_state_value_iterator( + &StateKeyPrefix::new(AccessPath, b"".to_vec()), None, snapshot_version, ) .unwrap(); - let old_iter = db - .deref() - .get_prefixed_state_value_iterator( - &StateKeyPrefix::new(AccessPath, b"".to_vec()), - None, - snapshot_version, - ) - .unwrap(); - - // collect all the keys in the new_iter - let mut new_keys = new_iter.map(|e| e.unwrap().0).collect::<Vec<_>>(); - new_keys.sort(); - let mut old_keys = old_iter.map(|e| e.unwrap().0).collect::<Vec<_>>(); - old_keys.sort(); - assert_eq!(new_keys, old_keys); - - let ledger_version = aptos_db.get_latest_ledger_info_version().unwrap(); - for ver in start..=ledger_version { - let old_block_res = db.get_block_info_by_version(ver); - let new_block_res = aptos_db.get_block_info_by_version(ver); - let (old_block_version, old_block_height, _) = old_block_res.unwrap(); - let (new_block_version, new_block_height, _) = new_block_res.unwrap(); - assert_eq!(old_block_version, new_block_version); - assert_eq!(old_block_height, new_block_height); - } + // collect all the keys in the new_iter + let mut new_keys = new_iter.map(|e| e.unwrap().0).collect::<Vec<_>>(); + new_keys.sort(); + let mut old_keys = old_iter.map(|e| e.unwrap().0).collect::<Vec<_>>(); + old_keys.sort(); + assert_eq!(new_keys, old_keys); + + let ledger_version = aptos_db.get_latest_ledger_info_version().unwrap(); + for ver in start..=ledger_version { + let old_block_res = db.get_block_info_by_version(ver); + let new_block_res = aptos_db.get_block_info_by_version(ver); + let (old_block_version, old_block_height, _) = old_block_res.unwrap(); + let (new_block_version, new_block_height, _) = new_block_res.unwrap(); + assert_eq!(old_block_version, new_block_version); + assert_eq!(old_block_height, new_block_height); } (rt, server_addr) @@ -589,7 +531,6 @@ { PathBuf::from(backup_dir.path()), PathBuf::from(old_db_dir.path()), PathBuf::from(new_db_dir.path()), - false, ); let backup_size = dir_size(backup_dir.path()); let db_size = dir_size(new_db_dir.path()); @@ -614,7 +555,6 @@ { PathBuf::from(backup_dir.path()), PathBuf::from(old_db_dir.path()), PathBuf::from(new_db_dir.path()), - false, ); rt.shutdown_timeout(Duration::from_secs(1)); } @@ -633,7 +573,6 @@ { PathBuf::from(backup_dir.path()), PathBuf::from(old_db_dir.path()), PathBuf::from(new_db_dir.path()), - false, ); // bootstrap a historical DB 
starting from version 1 to version 18 // This only replays the txn from txn 17 to 18 @@ -658,38 +597,6 @@ rt.shutdown_timeout(Duration::from_secs(1)); } - #[test] - fn test_restore_with_sharded_db() { - let backup_dir = TempPath::new(); - backup_dir.create_as_dir().unwrap(); - let new_db_dir = TempPath::new(); - let old_db_dir = TempPath::new(); - - let (rt, _) = db_restore_test_setup( - 0, - 16, - PathBuf::from(backup_dir.path()), - PathBuf::from(old_db_dir.path()), - PathBuf::from(new_db_dir.path()), - true, - ); - let backup_size = dir_size(backup_dir.path()); - let db_size = dir_size(new_db_dir.path()); - let old_db_size = dir_size(old_db_dir.path()); - println!( - "backup size: {}, old db size: {}, new db size: {}", - backup_size, old_db_size, db_size - ); - - println!( - "backup size: {:?}, old db size: {:?}, new db size: {:?}", - backup_dir.path(), - old_db_dir.path(), - new_db_dir.path() - ); - rt.shutdown_timeout(Duration::from_secs(1)); - } - fn dir_size<P: AsRef<Path>>(path: P) -> u64 { let mut size = 0; diff --git a/storage/schemadb/src/lib.rs b/storage/schemadb/src/lib.rs index 7b55878d9d036..51baf3b7162a4 100644 --- a/storage/schemadb/src/lib.rs +++ b/storage/schemadb/src/lib.rs @@ -30,16 +30,15 @@ use crate::{ }; use anyhow::format_err; use aptos_logger::prelude::*; -use aptos_metrics_core::TimerHelper; use aptos_storage_interface::{AptosDbError, Result as DbResult}; use batch::{IntoRawBatch, NativeBatch, WriteBatch}; use iterator::{ScanDirection, SchemaIterator}; -use rocksdb::ErrorKind; /// Type alias to `rocksdb::ReadOptions`. See [`rocksdb doc`](https://github.com/pingcap/rust-rocksdb/blob/master/src/rocksdb_options.rs) pub use rocksdb::{ BlockBasedOptions, Cache, ColumnFamilyDescriptor, DBCompressionType, Options, ReadOptions, SliceTransform, DEFAULT_COLUMN_FAMILY_NAME, }; +use rocksdb::{ErrorKind, WriteOptions}; use std::{collections::HashSet, fmt::Debug, iter::Iterator, path::Path}; pub type ColumnFamilyName = &'static str; @@ -265,15 +264,16 @@ impl DB { self.iter_with_direction::<S>(opts, ScanDirection::Backward) } - /// Writes a group of records wrapped in a [`SchemaBatch`]. - pub fn write_schemas(&self, batch: impl IntoRawBatch) -> DbResult<()> { - let _timer = APTOS_SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS.timer_with(&[&self.name]); + fn write_schemas_inner(&self, batch: impl IntoRawBatch, option: &WriteOptions) -> DbResult<()> { + let _timer = APTOS_SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS + .with_label_values(&[&self.name]) + .start_timer(); let raw_batch = batch.into_raw_batch(self)?; let serialized_size = raw_batch.inner.size_in_bytes(); self.inner - .write_opt(raw_batch.inner, &default_write_options()) + .write_opt(raw_batch.inner, option) .into_db_res()?; raw_batch.stats.commit(); @@ -284,6 +284,20 @@ impl DB { Ok(()) } + /// Writes a group of records wrapped in a [`SchemaBatch`]. + pub fn write_schemas(&self, batch: impl IntoRawBatch) -> DbResult<()> { + self.write_schemas_inner(batch, &sync_write_option()) + } + + /// Writes without sync flag in write option. + /// If this flag is false, and the machine crashes, some recent + /// writes may be lost. Note that if it is just the process that + /// crashes (i.e., the machine does not reboot), no writes will be + /// lost even if sync==false. 
+ pub fn write_schemas_relaxed(&self, batch: impl IntoRawBatch) -> DbResult<()> { + self.write_schemas_inner(batch, &WriteOptions::default()) + } + fn get_cf_handle(&self, cf_name: &str) -> DbResult<&rocksdb::ColumnFamily> { self.inner .cf_handle(cf_name) @@ -338,7 +352,7 @@ impl Drop for DB { /// For now we always use synchronous writes. This makes sure that once the operation returns /// `Ok(())` the data is persisted even if the machine crashes. In the future we might consider /// selectively turning this off for some non-critical writes to improve performance. -fn default_write_options() -> rocksdb::WriteOptions { +fn sync_write_option() -> rocksdb::WriteOptions { let mut opts = rocksdb::WriteOptions::default(); opts.set_sync(true); opts diff --git a/storage/storage-interface/src/chunk_to_commit.rs b/storage/storage-interface/src/chunk_to_commit.rs index d8d5109477653..745a8684fd993 100644 --- a/storage/storage-interface/src/chunk_to_commit.rs +++ b/storage/storage-interface/src/chunk_to_commit.rs @@ -8,12 +8,15 @@ use crate::state_store::{ state_view::cached_state_view::ShardedStateCache, state_with_summary::{LedgerStateWithSummary, StateWithSummary}, }; -use aptos_types::transaction::{Transaction, TransactionInfo, TransactionOutput, Version}; +use aptos_types::transaction::{ + PersistedAuxiliaryInfo, Transaction, TransactionInfo, TransactionOutput, Version, +}; #[derive(Clone)] pub struct ChunkToCommit<'a> { pub first_version: Version, pub transactions: &'a [Transaction], + pub persisted_info: &'a [PersistedAuxiliaryInfo], pub transaction_outputs: &'a [TransactionOutput], pub transaction_infos: &'a [TransactionInfo], pub state: &'a LedgerState, diff --git a/storage/storage-interface/src/errors.rs b/storage/storage-interface/src/errors.rs index 7acb9e202024e..d472d025e51df 100644 --- a/storage/storage-interface/src/errors.rs +++ b/storage/storage-interface/src/errors.rs @@ -8,7 +8,7 @@ use std::sync::mpsc::RecvError; use thiserror::Error; /// This enum defines errors commonly used among `AptosDB` APIs. -#[derive(Debug, Error)] +#[derive(Clone, Debug, Error)] pub enum AptosDbError { /// A requested item is not found. #[error("{0} not found.")] diff --git a/storage/storage-interface/src/state_store/state.rs b/storage/storage-interface/src/state_store/state.rs index e3a4d60ebf958..f0d89f1ff37dc 100644 --- a/storage/storage-interface/src/state_store/state.rs +++ b/storage/storage-interface/src/state_store/state.rs @@ -184,7 +184,7 @@ impl State { // otherwise we can't calculate the correct usage. let old_slot = overlay .get(k) - .or_else(|| cache.get(k).map(|entry| entry.value().clone())) + .or_else(|| cache.get(*k).map(|entry| entry.value().clone())) .expect("Must cache read"); if old_slot.is_occupied() { items_delta -= 1; diff --git a/storage/storage-interface/src/state_store/versioned_state_value.rs b/storage/storage-interface/src/state_store/versioned_state_value.rs index a890c3427fe77..b359a7a7afe6a 100644 --- a/storage/storage-interface/src/state_store/versioned_state_value.rs +++ b/storage/storage-interface/src/state_store/versioned_state_value.rs @@ -49,6 +49,20 @@ impl StateUpdateRef<'_> { hot_since_version: self.version, }, }, + BaseStateOp::Eviction { prev_slot } => match prev_slot { + StateSlot::HotVacant { .. } => StateSlot::ColdVacant, + StateSlot::HotOccupied { + value_version, + value, + .. + } => StateSlot::ColdOccupied { + value_version, + value, + }, + StateSlot::ColdVacant | StateSlot::ColdOccupied { .. 
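A usage note on the schemadb split above: `write_schemas` keeps the historical fsync-on-commit guarantee, while the new `write_schemas_relaxed` trades machine-crash durability for write throughput, so the choice belongs at the call site. A hedged sketch of such a call site (editorial illustration; the helper and its `critical` flag are hypothetical, the two `DB` methods are the ones introduced above):

```rust
use aptos_schemadb::{batch::SchemaBatch, DB};
use aptos_storage_interface::Result;

// Hypothetical helper: pick the write path based on whether the data could
// be re-derived if the machine (not just the process) crashed.
fn commit_batch(db: &DB, batch: SchemaBatch, critical: bool) -> Result<()> {
    if critical {
        db.write_schemas(batch) // sync == true: fsync before returning
    } else {
        db.write_schemas_relaxed(batch) // sync == false: process-crash safe only
    }
}
```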
} => { + unreachable!("only hot slots can be evicted") + }, + }, } } diff --git a/testsuite/forge-cli/src/suites/dag.rs b/testsuite/forge-cli/src/suites/dag.rs index b9554a8eaebae..3c791b3b36538 100644 --- a/testsuite/forge-cli/src/suites/dag.rs +++ b/testsuite/forge-cli/src/suites/dag.rs @@ -119,6 +119,10 @@ fn dag_realistic_env_max_load_test( config_v5.block_gas_limit_type = BlockGasLimitType::NoLimit; config_v5.transaction_shuffler_type = TransactionShufflerType::default_for_genesis(); } + OnChainExecutionConfig::V6(config_v6) => { + config_v6.block_gas_limit_type = BlockGasLimitType::NoLimit; + config_v6.transaction_shuffler_type = TransactionShufflerType::default_for_genesis(); + } } helm_values["chain"]["on_chain_execution_config"] = serde_yaml::to_value(on_chain_execution_config).expect("must serialize"); @@ -223,6 +227,9 @@ fn dag_reconfig_enable_test() -> ForgeConfig { OnChainExecutionConfig::V5(config_v5) => { config_v5.block_gas_limit_type = BlockGasLimitType::NoLimit; } + OnChainExecutionConfig::V6(config_v6) => { + config_v6.block_gas_limit_type = BlockGasLimitType::NoLimit; + } } helm_values["chain"]["on_chain_execution_config"] = serde_yaml::to_value(on_chain_execution_config).expect("must serialize"); diff --git a/testsuite/forge-cli/src/suites/realistic_environment.rs b/testsuite/forge-cli/src/suites/realistic_environment.rs index babfc18ee22da..78228da2a862b 100644 --- a/testsuite/forge-cli/src/suites/realistic_environment.rs +++ b/testsuite/forge-cli/src/suites/realistic_environment.rs @@ -316,8 +316,8 @@ pub(crate) fn realistic_env_max_load_test( // Give at least 60s for catchup, give 10% of the run for longer durations. (duration.as_secs() / 10).max(60), ) - .add_latency_threshold(3.4, LatencyType::P50) - .add_latency_threshold(4.5, LatencyType::P70) + .add_latency_threshold(3.6, LatencyType::P50) + .add_latency_threshold(4.8, LatencyType::P70) .add_chain_progress(StateProgressThreshold { max_non_epoch_no_progress_secs: 15.0, max_epoch_no_progress_secs: 16.0, @@ -335,7 +335,7 @@ pub(crate) fn realistic_env_max_load_test( LatencyBreakdownThreshold::new_with_breach_pct( vec![ // quorum store backpressure is relaxed, so queueing happens here - (LatencyBreakdownSlice::MempoolToBlockCreation, 0.35 + 2.5), + (LatencyBreakdownSlice::MempoolToBlockCreation, 0.35 + 3.0), // can be adjusted down if less backpressure (LatencyBreakdownSlice::ConsensusProposalToOrdered, 0.85), // can be adjusted down if less backpressure @@ -378,14 +378,17 @@ pub(crate) fn realistic_env_max_load_test( serde_yaml::to_value(OnChainExecutionConfig::default_for_genesis()) .expect("must serialize"); })) + .with_validator_override_node_config_fn(Arc::new(|config, _| { + config.consensus.enable_optimistic_proposal_tx = true; + })) .with_fullnode_override_node_config_fn(Arc::new(|config, _| { // Increase the consensus observer fallback thresholds config .consensus_observer - .observer_fallback_progress_threshold_ms = 20_000; // 20 seconds + .observer_fallback_progress_threshold_ms = 30_000; // 30 seconds config .consensus_observer - .observer_fallback_sync_lag_threshold_ms = 30_000; // 30 seconds + .observer_fallback_sync_lag_threshold_ms = 45_000; // 45 seconds })) // First start higher gas-fee traffic, to not cause issues with TxnEmitter setup - account creation .with_emit_job( @@ -468,6 +471,14 @@ pub(crate) fn realistic_network_tuned_for_throughput_test() -> ForgeConfig { user_use_case_spread_factor: 0, }; } + OnChainExecutionConfig::V6(config_v6) => { + config_v6.block_gas_limit_type = 
BlockGasLimitType::NoLimit; + config_v6.transaction_shuffler_type = TransactionShufflerType::UseCaseAware { + sender_spread_factor: 256, + platform_use_case_spread_factor: 0, + user_use_case_spread_factor: 0, + }; + } } helm_values["chain"]["on_chain_execution_config"] = serde_yaml::to_value(on_chain_execution_config).expect("must serialize"); diff --git a/testsuite/forge/src/backend/k8s_deployer/constants.rs b/testsuite/forge/src/backend/k8s_deployer/constants.rs index a4b27f15f1b41..0347e132e5146 100644 --- a/testsuite/forge/src/backend/k8s_deployer/constants.rs +++ b/testsuite/forge/src/backend/k8s_deployer/constants.rs @@ -10,7 +10,7 @@ pub const INDEXER_GRPC_DOCKER_IMAGE_REPO: &str = "us-docker.pkg.dev/aptos-registry/docker/indexer-grpc"; /// The version of the forge deployer image to use. -pub const DEFAULT_FORGE_DEPLOYER_IMAGE_TAG: &str = "90865ea9b15feb0e1fc234f6e08bc3f8db98c4b7"; // default to the latest stable build from the main branch +pub const DEFAULT_FORGE_DEPLOYER_IMAGE_TAG: &str = "f5937ed393eb5214997d1cae0da75d2392cdbc70"; // default to the latest stable build from the main branch /// This is the service account name that the deployer will use to deploy the forge components. It may require extra permissions and additional setup pub const FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME: &str = "forge"; diff --git a/testsuite/forge/src/config.rs b/testsuite/forge/src/config.rs index 8213544687888..d5fa72fb685ba 100644 --- a/testsuite/forge/src/config.rs +++ b/testsuite/forge/src/config.rs @@ -246,6 +246,11 @@ impl ForgeConfig { // override consensus observer refresh latency helm_values["fullnode"]["config"]["consensus_observer"] ["subscription_peer_change_interval_ms"] = 5_000.into(); + + // enable opt proposal + // TODO(ibalajiarun): + // helm_values["validator"]["config"]["consensus"]["enable_optimistic_proposal_tx"] = + // true.into(); })) } diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/type_tag_to_string.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/type_tag_to_string.rs index 281ac4945aa35..6e3a4a1929a5c 100644 --- a/testsuite/fuzzer/fuzz/fuzz_targets/move/type_tag_to_string.rs +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/type_tag_to_string.rs @@ -5,6 +5,7 @@ use arbitrary::Arbitrary; use libfuzzer_sys::{fuzz_target, Corpus}; use move_core_types::{ability::AbilitySet, identifier::Identifier, language_storage::TypeTag}; + mod utils; #[derive(Arbitrary, Debug)] @@ -24,8 +25,14 @@ fn is_valid_type_tag(type_tag: &TypeTag) -> bool { TypeTag::Vector(inner_type_tag) => is_valid_type_tag(inner_type_tag), TypeTag::Function(function_tag) => { function_tag.abilities.into_u8() <= AbilitySet::ALL.into_u8() - && function_tag.args.iter().all(is_valid_type_tag) - && function_tag.results.iter().all(is_valid_type_tag) + && function_tag + .args + .iter() + .all(|t| is_valid_type_tag(t.inner_tag())) + && function_tag + .results + .iter() + .all(|t| is_valid_type_tag(t.inner_tag())) }, _ => true, // Primitive types are always valid } @@ -60,16 +67,15 @@ fuzz_target!(|data: FuzzData| -> Corpus { tdbg!( "a_type:{:?}\na_string:{}\nserialized:{:?}", data.a.clone(), - data.a.to_string(), + data.a.to_canonical_string(), bcs::to_bytes(&data.a).unwrap() ); tdbg!( "b_type:{:?}\nb_string:{}\nserialized:{:?}", data.b.clone(), - data.b.to_string(), + data.b.to_canonical_string(), bcs::to_bytes(&data.b).unwrap() ); - assert!(data.a.to_string() != data.b.to_string()); assert!(data.a.to_canonical_string() != data.b.to_canonical_string()); } diff --git 
a/testsuite/fuzzer/fuzz/fuzz_targets/move/value_deserialize.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/value_deserialize.rs index fcd5cf6ebb645..c898c2b99837a 100644 --- a/testsuite/fuzzer/fuzz/fuzz_targets/move/value_deserialize.rs +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/value_deserialize.rs @@ -19,6 +19,5 @@ fuzz_target!(|fuzz_data: FuzzData| { if fuzz_data.data.is_empty() || !is_valid_layout(&fuzz_data.layout) { return; } - // TODO: How do we fuzz function resolution? - let _ = ValueSerDeContext::new().deserialize(&fuzz_data.data, &fuzz_data.layout); + let _ = ValueSerDeContext::new(None).deserialize(&fuzz_data.data, &fuzz_data.layout); }); diff --git a/testsuite/generate-format/src/api.rs b/testsuite/generate-format/src/api.rs index d519dfda1f180..2d09009e37d5c 100644 --- a/testsuite/generate-format/src/api.rs +++ b/testsuite/generate-format/src/api.rs @@ -97,6 +97,7 @@ pub fn get_registry() -> Result { // 2. Trace the main entry point(s) + every enum separately. // stdlib types tracer.trace_type::(&samples)?; + tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; diff --git a/testsuite/generate-format/src/aptos.rs b/testsuite/generate-format/src/aptos.rs index 9760854c559aa..8abeebf52b4ec 100644 --- a/testsuite/generate-format/src/aptos.rs +++ b/testsuite/generate-format/src/aptos.rs @@ -91,6 +91,7 @@ pub fn get_registry() -> Result { // 2. Trace the main entry point(s) + every enum separately. tracer.trace_type::(&samples)?; + tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; diff --git a/testsuite/generate-format/src/consensus.rs b/testsuite/generate-format/src/consensus.rs index f79142cca9317..ebcb5f2da86b3 100644 --- a/testsuite/generate-format/src/consensus.rs +++ b/testsuite/generate-format/src/consensus.rs @@ -90,6 +90,7 @@ pub fn get_registry() -> Result { // 2. Trace the main entry point(s) + every enum separately. tracer.trace_type::(&samples)?; + tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; diff --git a/testsuite/generate-format/src/move_abi.rs b/testsuite/generate-format/src/move_abi.rs index f07d8b27fb45e..26dffb6ea0ae3 100644 --- a/testsuite/generate-format/src/move_abi.rs +++ b/testsuite/generate-format/src/move_abi.rs @@ -19,6 +19,7 @@ pub fn get_registry() -> Result { // 2. Trace the main entry point(s) + every enum separately. 
tracer.trace_type::(&samples)?; + tracer.trace_type::(&samples)?; tracer.trace_type::(&samples)?; // aliases within StructTag diff --git a/testsuite/generate-format/tests/staged/api.yaml b/testsuite/generate-format/tests/staged/api.yaml index bfcaceda30d2d..57ed6db0bd4a1 100644 --- a/testsuite/generate-format/tests/staged/api.yaml +++ b/testsuite/generate-format/tests/staged/api.yaml @@ -159,6 +159,15 @@ BlockEpiloguePayload: TYPENAME: HashValue - block_end_info: TYPENAME: BlockEndInfo + 1: + V1: + STRUCT: + - block_id: + TYPENAME: HashValue + - block_end_info: + TYPENAME: BlockEndInfo + - fee_distribution: + TYPENAME: FeeDistribution BlockMetadata: STRUCT: - id: @@ -343,6 +352,29 @@ FederatedKeylessPublicKey: TYPENAME: AccountAddress - pk: TYPENAME: KeylessPublicKey +FeeDistribution: + ENUM: + 0: + V0: + STRUCT: + - amount: + MAP: + KEY: U64 + VALUE: U64 +FunctionParamOrReturnTag: + ENUM: + 0: + Reference: + NEWTYPE: + TYPENAME: TypeTag + 1: + MutableReference: + NEWTYPE: + TYPENAME: TypeTag + 2: + Value: + NEWTYPE: + TYPENAME: TypeTag FunctionInfo: STRUCT: - module_address: @@ -353,10 +385,10 @@ FunctionTag: STRUCT: - args: SEQ: - TYPENAME: TypeTag + TYPENAME: FunctionParamOrReturnTag - results: SEQ: - TYPENAME: TypeTag + TYPENAME: FunctionParamOrReturnTag - abilities: TYPENAME: AbilitySet G1Bytes: @@ -759,7 +791,7 @@ TransactionInfoV0: - state_checkpoint_hash: OPTION: TYPENAME: HashValue - - state_cemetery_hash: + - auxiliary_info_hash: OPTION: TYPENAME: HashValue TransactionOnChainData: diff --git a/testsuite/generate-format/tests/staged/aptos.yaml b/testsuite/generate-format/tests/staged/aptos.yaml index 731db6591d837..4a9c5b303a51d 100644 --- a/testsuite/generate-format/tests/staged/aptos.yaml +++ b/testsuite/generate-format/tests/staged/aptos.yaml @@ -147,6 +147,15 @@ BlockEpiloguePayload: TYPENAME: HashValue - block_end_info: TYPENAME: BlockEndInfo + 1: + V1: + STRUCT: + - block_id: + TYPENAME: HashValue + - block_end_info: + TYPENAME: BlockEndInfo + - fee_distribution: + TYPENAME: FeeDistribution BlockMetadata: STRUCT: - id: @@ -289,6 +298,29 @@ FederatedKeylessPublicKey: TYPENAME: AccountAddress - pk: TYPENAME: KeylessPublicKey +FeeDistribution: + ENUM: + 0: + V0: + STRUCT: + - amount: + MAP: + KEY: U64 + VALUE: U64 +FunctionParamOrReturnTag: + ENUM: + 0: + Reference: + NEWTYPE: + TYPENAME: TypeTag + 1: + MutableReference: + NEWTYPE: + TYPENAME: TypeTag + 2: + Value: + NEWTYPE: + TYPENAME: TypeTag FunctionInfo: STRUCT: - module_address: @@ -299,10 +331,10 @@ FunctionTag: STRUCT: - args: SEQ: - TYPENAME: TypeTag + TYPENAME: FunctionParamOrReturnTag - results: SEQ: - TYPENAME: TypeTag + TYPENAME: FunctionParamOrReturnTag - abilities: TYPENAME: AbilitySet G1Bytes: diff --git a/testsuite/generate-format/tests/staged/consensus.yaml b/testsuite/generate-format/tests/staged/consensus.yaml index 69ddf73287285..84a56b6296b5a 100644 --- a/testsuite/generate-format/tests/staged/consensus.yaml +++ b/testsuite/generate-format/tests/staged/consensus.yaml @@ -226,6 +226,15 @@ BlockEpiloguePayload: TYPENAME: HashValue - block_end_info: TYPENAME: BlockEndInfo + 1: + V1: + STRUCT: + - block_id: + TYPENAME: HashValue + - block_end_info: + TYPENAME: BlockEndInfo + - fee_distribution: + TYPENAME: FeeDistribution BlockInfo: STRUCT: - epoch: U64 @@ -345,6 +354,10 @@ BlockType: ProposalExt: NEWTYPE: TYPENAME: ProposalExt + 4: + OptimisticProposal: + NEWTYPE: + TYPENAME: OptBlockBody ChainId: NEWTYPESTRUCT: U8 ChangeSet: @@ -467,6 +480,10 @@ ConsensusMsg: BlockRetrievalRequest: NEWTYPE: TYPENAME: 
BlockRetrievalRequest + 21: + OptProposalMsg: + NEWTYPE: + TYPENAME: OptProposalMsg ContractEvent: ENUM: 0: @@ -582,6 +599,29 @@ FederatedKeylessPublicKey: TYPENAME: AccountAddress - pk: TYPENAME: KeylessPublicKey +FeeDistribution: + ENUM: + 0: + V0: + STRUCT: + - amount: + MAP: + KEY: U64 + VALUE: U64 +FunctionParamOrReturnTag: + ENUM: + 0: + Reference: + NEWTYPE: + TYPENAME: TypeTag + 1: + MutableReference: + NEWTYPE: + TYPENAME: TypeTag + 2: + Value: + NEWTYPE: + TYPENAME: TypeTag FunctionInfo: STRUCT: - module_address: @@ -592,10 +632,10 @@ FunctionTag: STRUCT: - args: SEQ: - TYPENAME: TypeTag + TYPENAME: FunctionParamOrReturnTag - results: SEQ: - TYPENAME: TypeTag + TYPENAME: FunctionParamOrReturnTag - abilities: TYPENAME: AbilitySet G1Bytes: @@ -722,6 +762,35 @@ OpenIdSig: TYPENAME: Pepper - idc_aud_val: OPTION: STR +OptBlockBody: + ENUM: + 0: + V0: + STRUCT: + - validator_txns: + SEQ: + TYPENAME: ValidatorTransaction + - payload: + TYPENAME: Payload + - author: + TYPENAME: AccountAddress + - grandparent_qc: + TYPENAME: QuorumCert +OptBlockData: + STRUCT: + - epoch: U64 + - round: U64 + - timestamp_usecs: U64 + - parent: + TYPENAME: BlockInfo + - block_body: + TYPENAME: OptBlockBody +OptProposalMsg: + STRUCT: + - block_data: + TYPENAME: OptBlockData + - sync_info: + TYPENAME: SyncInfo OptQuorumStorePayload: ENUM: 0: diff --git a/testsuite/generate-format/tests/staged/move_abi.yaml b/testsuite/generate-format/tests/staged/move_abi.yaml index 9919ebb6a1fdf..bb27b2a0cd51a 100644 --- a/testsuite/generate-format/tests/staged/move_abi.yaml +++ b/testsuite/generate-format/tests/staged/move_abi.yaml @@ -21,14 +21,28 @@ EntryABI: EntryFunction: NEWTYPE: TYPENAME: ScriptFunctionABI +FunctionParamOrReturnTag: + ENUM: + 0: + Reference: + NEWTYPE: + TYPENAME: TypeTag + 1: + MutableReference: + NEWTYPE: + TYPENAME: TypeTag + 2: + Value: + NEWTYPE: + TYPENAME: TypeTag FunctionTag: STRUCT: - args: SEQ: - TYPENAME: TypeTag + TYPENAME: FunctionParamOrReturnTag - results: SEQ: - TYPENAME: TypeTag + TYPENAME: FunctionParamOrReturnTag - abilities: TYPENAME: AbilitySet Identifier: diff --git a/testsuite/module-publish/src/packages/experimental_usecases/sources/order_book_example.move b/testsuite/module-publish/src/packages/experimental_usecases/sources/order_book_example.move index ec9ce4c966b07..5787c380c2274 100644 --- a/testsuite/module-publish/src/packages/experimental_usecases/sources/order_book_example.move +++ b/testsuite/module-publish/src/packages/experimental_usecases/sources/order_book_example.move @@ -2,10 +2,10 @@ module 0xABCD::order_book_example { use std::signer; use std::error; use std::option; + use std::vector; - use aptos_experimental::active_order_book::{Self, ActiveOrderBook}; use aptos_experimental::order_book::{Self, OrderBook}; - use aptos_experimental::order_book_types; + use aptos_experimental::order_book_types::{Self, OrderIdType}; const ENOT_AUTHORIZED: u64 = 1; // Resource being modified doesn't exist @@ -13,10 +13,6 @@ module 0xABCD::order_book_example { struct Empty has store, copy, drop {} - struct ActiveOnly has key { - active_only: ActiveOrderBook, - } - struct Dex has key { order_book: OrderBook, } @@ -29,55 +25,69 @@ module 0xABCD::order_book_example { ENOT_AUTHORIZED, ); - move_to( - publisher, - ActiveOnly { active_only: active_order_book::new_active_order_book() } - ); - move_to( publisher, Dex { order_book: order_book::new_order_book() } ); } - public entry fun place_active_post_only_order(sender: address, account_order_id: u64, bid_price: u64, volume: u64, is_buy: 
bool) acquires ActiveOnly { - assert!(exists<ActiveOnly>(@publisher_address), error::invalid_argument(EDEX_RESOURCE_NOT_PRESENT)); - let active_only = borrow_global_mut<ActiveOnly>(@publisher_address); - - let order_id = order_book_types::new_order_id_type(sender, account_order_id); - // TODO change from random to monothonically increasing value - let unique_priority_idx = order_book_types::generate_unique_idx_fifo_tiebraker(); - - active_only.active_only.place_maker_order( - order_id, + public entry fun place_order(sender: address, order_id: u64, bid_price: u64, volume: u64, is_bid: bool) acquires Dex { + assert!(exists<Dex>(@publisher_address), error::invalid_argument(EDEX_RESOURCE_NOT_PRESENT)); + let dex = borrow_global_mut<Dex>(@publisher_address); + place_order_and_get_matches( + &mut dex.order_book, + sender, // account + order_book_types::new_order_id_type(order_id as u128), bid_price, - unique_priority_idx, volume, - is_buy + volume, + is_bid, ); } - public entry fun place_order(sender: address, account_order_id: u64, bid_price: u64, volume: u64, is_buy: bool) acquires Dex { + public entry fun cancel_order(order_id: u64) acquires Dex { assert!(exists<Dex>(@publisher_address), error::invalid_argument(EDEX_RESOURCE_NOT_PRESENT)); - let dex = borrow_global_mut<Dex>(@publisher_address); - dex.order_book.place_order_and_get_matches( - order_book::new_order_request( - sender, // account - account_order_id, - option::none(), // unique_priority_idx, - bid_price, - volume, - volume, - is_buy, - option::none(), // trigger_condition - Empty {}, //metadata - ) - ); + let order_book = borrow_global_mut<Dex>(@publisher_address); + order_book.order_book.cancel_order(@publisher_address, order_book_types::new_order_id_type(order_id as u128)); } - public entry fun cancel_order(account_order_id: u64) acquires Dex { - assert!(exists<Dex>(@publisher_address), error::invalid_argument(EDEX_RESOURCE_NOT_PRESENT)); - let order_book = borrow_global_mut<Dex>(@publisher_address); - order_book.order_book.cancel_order(@publisher_address, account_order_id); + // Copied from order_book, as it's test_only and not part of public API there. 
+ public fun place_order_and_get_matches( + order_book: &mut OrderBook, + account: address, + order_id: OrderIdType, + price: u64, + orig_size: u64, + remaining_size: u64, + is_bid: bool, + ): vector> { + let trigger_condition = option::none(); + let match_results = vector::empty(); + while (remaining_size > 0) { + if (!order_book.is_taker_order(option::some(price), is_bid, trigger_condition)) { + order_book.place_maker_order( + order_book::new_order_request( + account, + order_id, + option::none(), + price, + orig_size, + remaining_size, + is_bid, + trigger_condition, // trigger_condition + Empty {}, // metadata + ) + ); + return match_results; + }; + let match_result = + order_book.get_single_match_for_taker( + option::some(price), remaining_size, is_bid + ); + let matched_size = match_result.get_matched_size(); + match_results.push_back(match_result); + remaining_size -= matched_size; + }; + return match_results } } diff --git a/testsuite/replay-verify/main.py b/testsuite/replay-verify/main.py index efd86aefa6342..c13e690beadb4 100644 --- a/testsuite/replay-verify/main.py +++ b/testsuite/replay-verify/main.py @@ -97,13 +97,13 @@ def __init__(self, network: Network) -> None: self.pvc_number = 5 self.min_range_size = 10_000 self.range_size = 5_000_000 - self.timeout_secs = 2000 + self.timeout_secs = 5400 else: - self.concurrent_replayer = 18 - self.pvc_number = 8 + self.concurrent_replayer = 20 + self.pvc_number = 7 self.min_range_size = 10_000 self.range_size = 2_000_000 - self.timeout_secs = 1000 + self.timeout_secs = 3600 class WorkerPod: @@ -234,6 +234,18 @@ def start(self) -> None: "--block-cache-size", "10737418240", ] + # TODO(ibalajiarun): bump memory limit to 180GiB for heavy ranges + if ( + self.network == Network.TESTNET + and self.start_version >= 6700000000 + and self.end_version < 6800000000 + ): + pod_manifest["spec"]["containers"][0]["resources"]["requests"][ + "memory" + ] = "180Gi" + pod_manifest["spec"]["containers"][0]["resources"]["limits"][ + "memory" + ] = "180Gi" if SHARDING_ENABLED: pod_manifest["spec"]["containers"][0]["command"].append( @@ -409,7 +421,7 @@ def sorted_ranges_to_skip(self): current_skip[1] = max(current_skip[1], next_skip[1]) ret.append(current_skip) - return sorted_skips + return ret def create_tasks(self) -> None: current = self.start_version @@ -423,10 +435,6 @@ def create_tasks(self) -> None: (skip_start, skip_end) = ( (INT64_MAX, INT64_MAX) if len(skips) == 0 else skips[0] ) - if skip_start <= current: - skips.pop(0) - current = skip_end + 1 - continue # TODO(ibalajiarun): temporary hack to handle heavy ranges if ( @@ -442,6 +450,18 @@ def create_tasks(self) -> None: current + range_size, self.end_version + 1, skip_start ) + # Only skip if current is within the skip range + if skip_start <= current <= skip_end: + skips.pop(0) + current = skip_end + 1 + continue + elif skip_start <= next_current - 1 <= skip_end: + # If the next current is within the skip range, we need to adjust it + next_current = skip_start + elif next_current > skip_start: + # If the next current is beyond the skip range, we need to adjust it + next_current = skip_start + # avoid having too many small tasks, simply skip the task range = (current, next_current - 1) if next_current - current >= self.config.min_range_size: diff --git a/testsuite/replay-verify/replay-verify-worker-template.yaml b/testsuite/replay-verify/replay-verify-worker-template.yaml index becc2aef7e6ed..b7c8608d80cc7 100644 --- a/testsuite/replay-verify/replay-verify-worker-template.yaml +++ 
b/testsuite/replay-verify/replay-verify-worker-template.yaml @@ -23,10 +23,10 @@ spec: value: "info" resources: requests: - memory: "180Gi" + memory: "90Gi" cpu: "30" limits: - memory: "180Gi" + memory: "90Gi" cpu: "30" volumes: - name: archive diff --git a/testsuite/single_node_performance.py b/testsuite/single_node_performance.py index 82a361c574924..e4fe77618882a 100755 --- a/testsuite/single_node_performance.py +++ b/testsuite/single_node_performance.py @@ -261,10 +261,11 @@ class RunGroupConfig: RunGroupConfig(key=RunGroupKey("vector-trim-append-len3000-size1"), included_in=Flow.CONTINUOUS), RunGroupConfig(key=RunGroupKey("vector-remove-insert-len3000-size1"), included_in=Flow.CONTINUOUS), - RunGroupConfig(key=RunGroupKey("order-book-no-matches"), included_in=Flow.ORDER_BOOK | Flow.CONTINUOUS), - RunGroupConfig(key=RunGroupKey("order-book-balanced-matches25-pct"), included_in=Flow.ORDER_BOOK | Flow.CONTINUOUS), - RunGroupConfig(key=RunGroupKey("order-book-balanced-matches80-pct"), included_in=Flow.ORDER_BOOK | Flow.CONTINUOUS), - RunGroupConfig(key=RunGroupKey("order-book-balanced-size-skewed80-pct"), included_in=Flow.ORDER_BOOK | Flow.CONTINUOUS), + # waived because of missing monotonic counter native. + RunGroupConfig(key=RunGroupKey("order-book-no-matches"), included_in=Flow.ORDER_BOOK | Flow.CONTINUOUS, waived=True), + RunGroupConfig(key=RunGroupKey("order-book-balanced-matches25-pct"), included_in=Flow.ORDER_BOOK | Flow.CONTINUOUS, waived=True), + RunGroupConfig(key=RunGroupKey("order-book-balanced-matches80-pct"), included_in=Flow.ORDER_BOOK | Flow.CONTINUOUS, waived=True), + RunGroupConfig(key=RunGroupKey("order-book-balanced-size-skewed80-pct"), included_in=Flow.ORDER_BOOK | Flow.CONTINUOUS, waived=True), RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_connected_components", executor_type="sharded"), key_extra=RunGroupKeyExtra(sharding_traffic_flags="--connected-tx-grps 5000", transaction_type_override=""), included_in=Flow.REPRESENTATIVE, waived=True), RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_hotspot", executor_type="sharded"), key_extra=RunGroupKeyExtra(sharding_traffic_flags="--hotspot-probability 0.8", transaction_type_override=""), included_in=Flow.REPRESENTATIVE, waived=True), diff --git a/testsuite/single_node_performance_values.tsv b/testsuite/single_node_performance_values.tsv index c17be81ffe17b..800ecf42657af 100644 --- a/testsuite/single_node_performance_values.tsv +++ b/testsuite/single_node_performance_values.tsv @@ -1,54 +1,54 @@ -no-op 1 VM 56 0.887 1.040 32094.5 -no-op 1000 VM 56 0.887 1.035 30762.7 -apt-fa-transfer 1 VM 56 0.908 1.053 22232.7 -apt-fa-transfer 1 NativeVM 56 0.844 1.148 38636.6 -account-generation 1 VM 57 0.922 1.038 18049.1 -account-generation 1 NativeVM 57 0.856 1.132 39215.5 -account-resource32-b 1 VM 57 0.897 1.036 29851.6 -modify-global-resource 1 VM 57 0.893 1.019 1941.0 -modify-global-resource 100 VM 57 0.906 1.027 27100.2 -publish-package 1 VM 56 0.915 1.025 1051.9 -mix_publish_transfer 1 VM 57 0.903 1.035 17332.1 -batch100-transfer 1 VM 57 0.864 1.051 876.4 -batch100-transfer 1 NativeVM 57 0.794 1.278 1585.4 -vector-picture30k 1 VM 57 0.946 1.027 123.7 -vector-picture30k 100 VM 57 0.630 1.077 2103.7 -smart-table-picture30-k-with200-change 1 VM 57 0.955 1.059 18.1 -smart-table-picture30-k-with200-change 100 VM 57 0.941 1.068 251.2 -modify-global-resource-agg-v2 1 VM 57 0.903 1.040 30704.4 -modify-global-flag-agg-v2 1 VM 57 0.938 1.025 3315.4 -modify-global-bounded-agg-v2 1 VM 56 0.901 1.108 
6120.2 -modify-global-milestone-agg-v2 1 VM 57 0.922 1.050 21954.9 -resource-groups-global-write-tag1-kb 1 VM 57 0.913 1.044 7764.2 -resource-groups-global-write-and-read-tag1-kb 1 VM 57 0.919 1.041 4651.7 -resource-groups-sender-write-tag1-kb 1 VM 57 0.821 1.192 20606.2 -resource-groups-sender-multi-change1-kb 1 VM 57 0.770 1.233 17264.6 -token-v1ft-mint-and-transfer 1 VM 57 0.884 1.016 824.4 -token-v1ft-mint-and-transfer 100 VM 57 0.938 1.035 10530.6 -token-v1nft-mint-and-transfer-sequential 1 VM 57 0.855 1.019 574.9 -token-v1nft-mint-and-transfer-sequential 100 VM 57 0.918 1.033 8119.0 -coin-init-and-mint 1 VM 57 0.913 1.046 14953.6 -coin-init-and-mint 100 VM 57 0.914 1.048 12459.9 -fungible-asset-mint 1 VM 57 0.916 1.047 18466.4 -fungible-asset-mint 100 VM 57 0.914 1.044 15768.8 -no-op5-signers 1 VM 57 0.914 1.040 32094.5 -token-v2-ambassador-mint 1 VM 57 0.922 1.036 11645.3 -token-v2-ambassador-mint 100 VM 57 0.925 1.041 11562.6 -liquidity-pool-swap 1 VM 57 0.870 1.022 627.6 -liquidity-pool-swap 100 VM 57 0.923 1.052 7976.7 -liquidity-pool-swap-stable 1 VM 57 0.872 1.027 602.6 -liquidity-pool-swap-stable 100 VM 57 0.917 1.049 7764.2 -deserialize-u256 1 VM 57 0.909 1.036 30530.7 -no-op-fee-payer 1 VM 57 0.908 1.018 1634.2 -no-op-fee-payer 100 VM 57 0.903 1.031 24743.2 -simple-script 1 VM 57 0.836 1.050 32094.5 -vector-trim-append-len3000-size1 1 VM 57 0.824 1.032 740.9 -vector-remove-insert-len3000-size1 1 VM 57 0.897 1.026 807.0 -order-book-no-matches 1 VM 56 0.8 1.2 1450.0 -order-book-balanced-matches25-pct 1 VM 56 0.8 1.2 920.0 -order-book-balanced-matches80-pct 1 VM 56 0.8 1.2 487.0 -order-book-balanced-size-skewed80-pct 1 VM 56 0.8 1.2 1091.0 -no_commit_apt-fa-transfer 1 VM 57 0.952 1.037 23751.6 -no_commit_apt-fa-transfer 1 NativeVM 57 0.878 1.023 49486.4 -no_commit_apt-fa-transfer 1 AptosVMSpeculative 57 0.914 1.012 1014.9 -no_commit_apt-fa-transfer 1 NativeSpeculative 57 0.876 1.018 99502.2 +no-op 1 VM 6 0.986 1.013 36723.0 +no-op 1000 VM 6 0.988 1.017 35206.7 +apt-fa-transfer 1 VM 6 0.994 1.035 25142.4 +apt-fa-transfer 1 NativeVM 6 0.915 1.038 41665.7 +account-generation 1 VM 6 0.993 1.006 21876.0 +account-generation 1 NativeVM 6 0.897 1.053 43830.8 +account-resource32-b 1 VM 6 0.995 1.013 33925.8 +modify-global-resource 1 VM 6 0.985 1.010 2656.6 +modify-global-resource 100 VM 6 0.995 1.011 31355.7 +publish-package 1 VM 6 0.989 1.013 1031.2 +mix_publish_transfer 1 VM 6 0.998 1.012 18853.9 +batch100-transfer 1 VM 6 0.979 1.013 1025.0 +batch100-transfer 1 NativeVM 6 0.887 1.106 1841.6 +vector-picture30k 1 VM 6 0.994 1.009 140.7 +vector-picture30k 100 VM 6 0.974 1.022 1904.6 +smart-table-picture30-k-with200-change 1 VM 6 0.988 1.010 20.9 +smart-table-picture30-k-with200-change 100 VM 6 0.994 1.044 256.8 +modify-global-resource-agg-v2 1 VM 6 0.988 1.014 34867.1 +modify-global-flag-agg-v2 1 VM 6 0.995 1.024 4108.3 +modify-global-bounded-agg-v2 1 VM 6 0.931 1.057 7205.6 +modify-global-milestone-agg-v2 1 VM 6 0.975 1.012 26762.1 +resource-groups-global-write-tag1-kb 1 VM 6 0.965 1.023 8200.2 +resource-groups-global-write-and-read-tag1-kb 1 VM 6 0.998 1.018 5740.2 +resource-groups-sender-write-tag1-kb 1 VM 6 0.972 1.017 20606.2 +resource-groups-sender-multi-change1-kb 1 VM 6 0.938 1.054 16697.0 +token-v1ft-mint-and-transfer 1 VM 6 0.997 1.017 1185.0 +token-v1ft-mint-and-transfer 100 VM 6 0.985 1.007 14624.7 +token-v1nft-mint-and-transfer-sequential 1 VM 6 0.993 1.012 804.1 +token-v1nft-mint-and-transfer-sequential 100 VM 6 0.989 1.021 11378.2 +coin-init-and-mint 1 VM 6 0.976 1.010 
16996.3 +coin-init-and-mint 100 VM 6 0.994 1.027 15009.6 +fungible-asset-mint 1 VM 6 0.996 1.014 21601.8 +fungible-asset-mint 100 VM 6 0.995 1.039 18783.0 +no-op5-signers 1 VM 6 0.988 1.010 36932.7 +token-v2-ambassador-mint 1 VM 6 0.997 1.017 14570.9 +token-v2-ambassador-mint 100 VM 6 0.983 1.028 14570.9 +liquidity-pool-swap 1 VM 6 0.985 1.015 833.3 +liquidity-pool-swap 100 VM 6 0.991 1.028 9876.8 +liquidity-pool-swap-stable 1 VM 6 0.995 1.012 795.5 +liquidity-pool-swap-stable 100 VM 6 0.987 1.025 9767.5 +deserialize-u256 1 VM 6 0.996 1.015 34596.7 +no-op-fee-payer 1 VM 6 0.998 1.005 2269.1 +no-op-fee-payer 100 VM 6 0.992 1.005 27943.9 +simple-script 1 VM 6 0.977 1.015 36723.0 +vector-trim-append-len3000-size1 1 VM 6 0.986 1.018 809.8 +vector-remove-insert-len3000-size1 1 VM 6 0.981 1.012 945.1 +order-book-no-matches 1 VM 6 0.963 1.029 1811.3 +order-book-balanced-matches25-pct 1 VM 6 0.983 1.019 1117.3 +order-book-balanced-matches80-pct 1 VM 6 0.986 1.034 601.4 +order-book-balanced-size-skewed80-pct 1 VM 6 0.963 1.105 1278.5 +no_commit_apt-fa-transfer 1 VM 6 0.994 1.006 27893.4 +no_commit_apt-fa-transfer 1 NativeVM 6 0.995 1.049 48614.4 +no_commit_apt-fa-transfer 1 AptosVMSpeculative 6 0.997 1.002 1136.7 +no_commit_apt-fa-transfer 1 NativeSpeculative 6 0.994 1.015 97401.9 diff --git a/testsuite/smoke-test/src/aptos/mint_transfer.rs b/testsuite/smoke-test/src/aptos/mint_transfer.rs index cb8b02b9186b6..3be1eee701ec5 100644 --- a/testsuite/smoke-test/src/aptos/mint_transfer.rs +++ b/testsuite/smoke-test/src/aptos/mint_transfer.rs @@ -1,15 +1,23 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::smoke_test_environment::new_local_swarm_with_aptos; +use crate::smoke_test_environment::SwarmBuilder; use aptos_cached_packages::aptos_stdlib; use aptos_forge::Swarm; use aptos_move_debugger::aptos_debugger::AptosDebugger; use aptos_types::transaction::{ExecutionStatus, TransactionStatus}; +use std::sync::Arc; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_mint_transfer() { - let swarm = new_local_swarm_with_aptos(1).await; + let swarm = SwarmBuilder::new_local(1) + .with_aptos() + .with_init_config(Arc::new(|_, conf, _| { + conf.indexer_db_config.enable_event = true; + conf.indexer_db_config.enable_transaction = true; + })) + .build() + .await; let mut info = swarm.aptos_public_info(); let account1 = info.random_account(); diff --git a/testsuite/smoke-test/src/aptos_cli/validator.rs b/testsuite/smoke-test/src/aptos_cli/validator.rs index 1bb68a3c9e6f7..c401b49f3e7c3 100644 --- a/testsuite/smoke-test/src/aptos_cli/validator.rs +++ b/testsuite/smoke-test/src/aptos_cli/validator.rs @@ -44,6 +44,9 @@ use std::{ async fn test_analyze_validators() { let (swarm, cli, _faucet) = SwarmBuilder::new_local(1) .with_aptos() + .with_init_config(Arc::new(|_, conf, _| { + conf.indexer_db_config.enable_event = true; + })) .with_init_genesis_stake(Arc::new(|_i, genesis_stake_amount| { *genesis_stake_amount = 100000; })) @@ -623,6 +626,7 @@ async fn test_nodes_rewards() { conf.consensus.round_initial_timeout_ms = 200; conf.consensus.quorum_store_poll_time_ms = 100; conf.api.failpoints_enabled = true; + conf.indexer_db_config.enable_event = true; })) .with_init_genesis_stake(Arc::new(|i, genesis_stake_amount| { // make sure we have quorum diff --git a/testsuite/smoke-test/src/client.rs b/testsuite/smoke-test/src/client.rs index 7aa44679baa7f..53cc626fb56d1 100644 --- a/testsuite/smoke-test/src/client.rs +++ b/testsuite/smoke-test/src/client.rs @@ -3,7 +3,7 @@ 
// SPDX-License-Identifier: Apache-2.0 use crate::{ - smoke_test_environment::new_local_swarm_with_aptos, + smoke_test_environment::{new_local_swarm_with_aptos, SwarmBuilder}, utils::{ assert_balance, check_create_mint_transfer, create_and_fund_account, transfer_coins, MAX_HEALTHY_WAIT_SECS, @@ -11,7 +11,10 @@ use crate::{ }; use aptos_cached_packages::aptos_stdlib; use aptos_forge::{NodeExt, Swarm}; -use std::time::{Duration, Instant}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; #[tokio::test] async fn test_create_mint_transfer_block_metadata() { @@ -110,7 +113,13 @@ async fn test_concurrent_transfers_single_node() { #[tokio::test] async fn test_latest_events_and_transactions() { - let mut swarm = new_local_swarm_with_aptos(1).await; + let mut swarm = SwarmBuilder::new_local(1) + .with_aptos() + .with_init_config(Arc::new(|_, conf, _| { + conf.indexer_db_config.enable_event = true; + })) + .build() + .await; let client = swarm.validators().next().unwrap().rest_client(); let start_events = client .get_new_block_events_bcs(None, Some(2)) diff --git a/testsuite/smoke-test/src/consensus/consensus_fault_tolerance.rs b/testsuite/smoke-test/src/consensus/consensus_fault_tolerance.rs index dc640d98146da..84f4d495ed9f9 100644 --- a/testsuite/smoke-test/src/consensus/consensus_fault_tolerance.rs +++ b/testsuite/smoke-test/src/consensus/consensus_fault_tolerance.rs @@ -49,6 +49,7 @@ pub async fn create_swarm(num_nodes: usize, max_block_txns: u64) -> LocalSwarm { .state_sync .state_sync_driver .max_connection_deadline_secs = 3; + config.indexer_db_config.enable_event = true; })) .build() .await; diff --git a/testsuite/smoke-test/src/consensus/dag/dag_fault_tolerance.rs b/testsuite/smoke-test/src/consensus/dag/dag_fault_tolerance.rs index dff5abd986263..cf6d586e47bc4 100644 --- a/testsuite/smoke-test/src/consensus/dag/dag_fault_tolerance.rs +++ b/testsuite/smoke-test/src/consensus/dag/dag_fault_tolerance.rs @@ -139,6 +139,7 @@ async fn run_dag_fail_point_test( } #[tokio::test] +#[ignore] async fn test_fault_tolerance_of_network_send() { // Randomly increase network failure rate, until network halts, and check that it comes back afterwards. let mut small_rng = SmallRng::from_entropy(); @@ -172,6 +173,7 @@ async fn test_fault_tolerance_of_network_send() { } #[tokio::test] +#[ignore] async fn test_fault_tolerance_of_network_receive() { // Randomly increase network failure rate, until network halts, and check that it comes back afterwards. let mut small_rng = SmallRng::from_entropy(); @@ -205,6 +207,7 @@ async fn test_fault_tolerance_of_network_receive() { } #[tokio::test] +#[ignore] async fn test_changing_working_consensus() { // with 7 nodes, consensus needs 5 to operate. // we rotate in each cycle, which 2 nodes are down. 
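For context on the `#[ignore]` markers added in the hunks above: ignored tests still compile and are listed by the harness, but are skipped in default test runs and only execute on demand. A minimal sketch (the test name and body are placeholders, assuming tokio as a dev-dependency as in these files):

#[tokio::test]
#[ignore] // skipped by `cargo test`; run with `cargo test -- --ignored` (or `--include-ignored`)
async fn example_flaky_network_test() {
    // exercise the fault-injection path here
}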
diff --git a/testsuite/smoke-test/src/execution.rs b/testsuite/smoke-test/src/execution.rs index 954903b93d6bb..cce5f318d1dd6 100644 --- a/testsuite/smoke-test/src/execution.rs +++ b/testsuite/smoke-test/src/execution.rs @@ -135,7 +135,7 @@ async fn block_epilogue_upgrade_test() { assert_eq!( get_last_non_reconfig_block_ending_txn_name(&rest_client).await, - Some("block_epilogue") + Some("state_checkpoint") ); for _ in 0..3 { @@ -200,7 +200,7 @@ async fn block_epilogue_upgrade_test() { assert_eq!( get_last_non_reconfig_block_ending_txn_name(&rest_client).await, - Some("block_epilogue") + Some("state_checkpoint") ); } } diff --git a/testsuite/smoke-test/src/genesis.rs b/testsuite/smoke-test/src/genesis.rs index 0a2b8fbd8364d..950c575301720 100644 --- a/testsuite/smoke-test/src/genesis.rs +++ b/testsuite/smoke-test/src/genesis.rs @@ -373,18 +373,7 @@ async fn delete_db_and_execute_restore( fs::remove_dir_all(&db_dir).unwrap(); // Perform a DB restore on the specified validator - let enable_storage_sharding = validator - .config() - .storage - .rocksdb_configs - .enable_storage_sharding; - db_restore( - backup_path.path(), - db_dir.as_path(), - &[waypoint], - enable_storage_sharding, - None, - ); + db_restore(backup_path.path(), db_dir.as_path(), &[waypoint], None); // Restart the validator and wait for it to become healthy validator.start().unwrap(); diff --git a/testsuite/smoke-test/src/jwks/jwk_consensus_per_issuer.rs b/testsuite/smoke-test/src/jwks/jwk_consensus_per_issuer.rs index 76a07be724ae2..5a959cdd5ec06 100644 --- a/testsuite/smoke-test/src/jwks/jwk_consensus_per_issuer.rs +++ b/testsuite/smoke-test/src/jwks/jwk_consensus_per_issuer.rs @@ -26,6 +26,7 @@ use tokio::time::sleep; /// The validators should do JWK consensus per issuer: /// one problematic issuer should not block valid updates of other issuers. #[tokio::test] +#[ignore] async fn jwk_consensus_per_issuer() { let epoch_duration_secs = 30; diff --git a/testsuite/smoke-test/src/jwks/jwk_consensus_per_key.rs b/testsuite/smoke-test/src/jwks/jwk_consensus_per_key.rs index b374e5b140735..affe6c564a0a2 100644 --- a/testsuite/smoke-test/src/jwks/jwk_consensus_per_key.rs +++ b/testsuite/smoke-test/src/jwks/jwk_consensus_per_key.rs @@ -26,6 +26,7 @@ use tokio::time::sleep; /// Validators should be able to reach consensus on key-level diffs /// even if providers are equivocating on the full key list. #[tokio::test] +#[ignore] async fn jwk_consensus_per_key() { let epoch_duration_secs = 30; diff --git a/testsuite/smoke-test/src/keyless.rs b/testsuite/smoke-test/src/keyless.rs index 9b54d1b96d935..297147cfc020f 100644 --- a/testsuite/smoke-test/src/keyless.rs +++ b/testsuite/smoke-test/src/keyless.rs @@ -650,12 +650,6 @@ async fn sign_transaction_any_keyless_pk( addr.to_hex_literal(), info.get_balance(addr).await ); - // TODO: No idea why, but these calls do not actually reflect the updated sequence number after a successful TXN. 
- info!( - "{} sequence number before TXN: {}", - addr.to_hex_literal(), - info.get_account_sequence_number(addr).await.unwrap() - ); info.sync_root_account_sequence_number().await; let recipient = info .create_and_fund_user_account(20_000_000_000) diff --git a/testsuite/smoke-test/src/randomness/mod.rs b/testsuite/smoke-test/src/randomness/mod.rs index b7ef35055c397..bc58450442211 100644 --- a/testsuite/smoke-test/src/randomness/mod.rs +++ b/testsuite/smoke-test/src/randomness/mod.rs @@ -51,7 +51,7 @@ async fn get_on_chain_resource_at_version( let maybe_response = rest_client .get_account_resource_at_version_bcs::( CORE_CODE_ADDRESS, - T::struct_tag().to_string().as_str(), + T::struct_tag().to_canonical_string().as_str(), version, ) .await; diff --git a/testsuite/smoke-test/src/rest_api.rs b/testsuite/smoke-test/src/rest_api.rs index ac7ac0a6446e6..c44e404534ddc 100644 --- a/testsuite/smoke-test/src/rest_api.rs +++ b/testsuite/smoke-test/src/rest_api.rs @@ -39,7 +39,13 @@ async fn test_get_index() { #[tokio::test] async fn test_basic_client() { - let swarm = new_local_swarm_with_aptos(1).await; + let swarm = SwarmBuilder::new_local(1) + .with_aptos() + .with_init_config(Arc::new(|_, conf, _| { + conf.indexer_db_config.enable_statekeys = true; + })) + .build() + .await; let mut info = swarm.aptos_public_info(); info.client().get_ledger_information().await.unwrap(); @@ -246,7 +252,15 @@ async fn test_gas_estimation_gas_used_limit() { #[tokio::test] async fn test_bcs() { - let swarm = new_local_swarm_with_aptos(1).await; + let swarm = SwarmBuilder::new_local(1) + .with_aptos() + .with_init_config(Arc::new(|_, conf, _| { + conf.indexer_db_config.enable_statekeys = true; + conf.indexer_db_config.enable_transaction = true; + conf.indexer_db_config.enable_event = true; + })) + .build() + .await; let mut info = swarm.aptos_public_info(); // Create accounts diff --git a/testsuite/smoke-test/src/rosetta.rs b/testsuite/smoke-test/src/rosetta.rs index cc6fdcce7bf8e..e471d4c37694b 100644 --- a/testsuite/smoke-test/src/rosetta.rs +++ b/testsuite/smoke-test/src/rosetta.rs @@ -66,7 +66,13 @@ async fn setup_simple_test( JoinHandle>, RosettaClient, ) { - setup_test(num_accounts, Arc::new(|_, _, _| {})).await + setup_test( + num_accounts, + Arc::new(|_, config, _| { + config.indexer_db_config.enable_transaction = true; + }), + ) + .await } async fn setup_test( diff --git a/testsuite/smoke-test/src/state_sync_utils.rs b/testsuite/smoke-test/src/state_sync_utils.rs index b321fb22519b0..742d3d9b8ccaf 100644 --- a/testsuite/smoke-test/src/state_sync_utils.rs +++ b/testsuite/smoke-test/src/state_sync_utils.rs @@ -199,7 +199,7 @@ fn verify_first_ledger_info(node: &mut LocalNode) { node.stop(); // Verify that the ledger info exists at version 0 - let aptos_db = AptosDB::new_for_test(db_path_buf.as_path()); + let aptos_db = AptosDB::new_for_test_with_sharding(db_path_buf.as_path(), 1 << 13); aptos_db.get_epoch_ending_ledger_info(0).unwrap(); // Restart the node diff --git a/testsuite/smoke-test/src/storage.rs b/testsuite/smoke-test/src/storage.rs index 6211e50c7bdaf..70537aedd81d1 100644 --- a/testsuite/smoke-test/src/storage.rs +++ b/testsuite/smoke-test/src/storage.rs @@ -134,13 +134,7 @@ async fn test_db_restore() { info!("---------- 3. 
stopped node 0, gonna restore DB."); // restore db from backup - db_restore( - backup_path.path(), - db_dir.as_path(), - &[], - node0_config.storage.rocksdb_configs.enable_storage_sharding, - None, - ); + db_restore(backup_path.path(), db_dir.as_path(), &[], None); expected_balance_0 -= 3; expected_balance_1 += 3; @@ -420,7 +414,6 @@ pub(crate) fn db_restore( backup_path: &Path, db_path: &Path, trusted_waypoints: &[Waypoint], - enable_storage_sharding: bool, target_verion: Option, /* target version should be same as epoch ending version to start a node */ ) { let now = Instant::now(); @@ -436,10 +429,8 @@ pub(crate) fn db_restore( cmd.arg(&w.to_string()); }); - if enable_storage_sharding { - cmd.arg("--enable_storage_sharding"); - cmd.arg("--enable-state-indices"); - } + cmd.arg("--enable-storage-sharding"); + cmd.arg("--enable-state-indices"); if let Some(version) = target_verion { cmd.arg("--target-version"); cmd.arg(&version.to_string()); diff --git a/testsuite/smoke-test/src/utils.rs b/testsuite/smoke-test/src/utils.rs index 79d5022f8f278..039dacdaec4cd 100644 --- a/testsuite/smoke-test/src/utils.rs +++ b/testsuite/smoke-test/src/utils.rs @@ -279,7 +279,10 @@ pub async fn get_current_version(rest_client: &RestClient) -> u64 { pub async fn get_on_chain_resource(rest_client: &Client) -> T { let maybe_response = rest_client - .get_account_resource_bcs::(CORE_CODE_ADDRESS, T::struct_tag().to_string().as_str()) + .get_account_resource_bcs::( + CORE_CODE_ADDRESS, + T::struct_tag().to_canonical_string().as_str(), + ) .await; let response = maybe_response.unwrap(); response.into_inner() diff --git a/third_party/move/extensions/move-table-extension/src/lib.rs b/third_party/move/extensions/move-table-extension/src/lib.rs index 8db3130da653b..e8af643f87d5f 100644 --- a/third_party/move/extensions/move-table-extension/src/lib.rs +++ b/third_party/move/extensions/move-table-extension/src/lib.rs @@ -70,7 +70,12 @@ impl TableInfo { impl Display for TableInfo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Table<{}, {}>", self.key_type, self.value_type) + writeln!( + f, + "Table<{}, {}>", + self.key_type.to_canonical_string(), + self.value_type.to_canonical_string() + ) } } @@ -703,7 +708,8 @@ fn serialize( layout: &MoveTypeLayout, val: &Value, ) -> PartialVMResult> { - ValueSerDeContext::new() + let max_value_nest_depth = function_value_extension.max_value_nest_depth(); + ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(function_value_extension) .serialize(val, layout)? 
.ok_or_else(|| partial_extension_error("cannot serialize table key or value")) @@ -714,7 +720,8 @@ fn deserialize( bytes: &[u8], layout: &MoveTypeLayout, ) -> PartialVMResult { - ValueSerDeContext::new() + let max_value_nest_depth = function_value_extension.max_value_nest_depth(); + ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(function_value_extension) .deserialize(bytes, layout) .ok_or_else(|| partial_extension_error("cannot deserialize table key or value")) diff --git a/third_party/move/move-binary-format/src/check_bounds.rs b/third_party/move/move-binary-format/src/check_bounds.rs index 6f259d57b722e..85a74a1bff900 100644 --- a/third_party/move/move-binary-format/src/check_bounds.rs +++ b/third_party/move/move-binary-format/src/check_bounds.rs @@ -144,8 +144,8 @@ impl<'a> BoundsChecker<'a> { } fn check_module_handles(&self) -> PartialVMResult<()> { - for script_handle in self.view.module_handles() { - self.check_module_handle(script_handle)? + for module_handle in self.view.module_handles() { + self.check_module_handle(module_handle)? } Ok(()) } @@ -240,7 +240,7 @@ impl<'a> BoundsChecker<'a> { check_bounds_impl(self.view.identifiers(), function_handle.name)?; check_bounds_impl(self.view.signatures(), function_handle.parameters)?; check_bounds_impl(self.view.signatures(), function_handle.return_)?; - // function signature type paramters must be in bounds to the function type parameters + // function signature type parameters must be in bounds to the function type parameters let type_param_count = function_handle.type_parameters.len(); self.check_type_parameters_in_signature(function_handle.parameters, type_param_count)?; self.check_type_parameters_in_signature(function_handle.return_, type_param_count)?; @@ -678,6 +678,9 @@ impl<'a> BoundsChecker<'a> { Ok(()) } + /// Check `ty` for: + /// - struct handle bounds + /// - struct instantiations have the correct number of type parameters fn check_type(&self, ty: &SignatureToken) -> PartialVMResult<()> { use self::SignatureToken::*; diff --git a/third_party/move/move-binary-format/src/file_format.rs b/third_party/move/move-binary-format/src/file_format.rs index 07b272c12bed6..93b7d53afccd2 100644 --- a/third_party/move/move-binary-format/src/file_format.rs +++ b/third_party/move/move-binary-format/src/file_format.rs @@ -2698,7 +2698,7 @@ pub enum Bytecode { #[group = "closure"] #[description = r#" - `CallClosure(|t1..tn|r has a)` evalutes a closure of the given function type, + `CallClosure(|t1..tn|r has a)` evaluates a closure of the given function type, taking the captured arguments and mixing in the provided ones on the stack. 
On top of the stack is the closure being evaluated, underneath the arguments: diff --git a/third_party/move/move-bytecode-verifier/bytecode-verifier-tests/src/unit_tests/large_type_test.rs b/third_party/move/move-bytecode-verifier/bytecode-verifier-tests/src/unit_tests/large_type_test.rs index 1bb45fd6b9c03..4bf8a89ce658e 100644 --- a/third_party/move/move-bytecode-verifier/bytecode-verifier-tests/src/unit_tests/large_type_test.rs +++ b/third_party/move/move-bytecode-verifier/bytecode-verifier-tests/src/unit_tests/large_type_test.rs @@ -160,6 +160,6 @@ fn test_large_types() { ); assert_eq!( result.unwrap_err().major_status(), - StatusCode::CONSTRAINT_NOT_SATISFIED, + StatusCode::TOO_MANY_TYPE_NODES, ); } diff --git a/third_party/move/move-bytecode-verifier/bytecode-verifier-tests/src/unit_tests/vec_pack_tests.rs b/third_party/move/move-bytecode-verifier/bytecode-verifier-tests/src/unit_tests/vec_pack_tests.rs index 4a119ab4b5085..5d215549ed261 100644 --- a/third_party/move/move-bytecode-verifier/bytecode-verifier-tests/src/unit_tests/vec_pack_tests.rs +++ b/third_party/move/move-bytecode-verifier/bytecode-verifier-tests/src/unit_tests/vec_pack_tests.rs @@ -67,5 +67,5 @@ fn test_vec_pack() { &m, ) .unwrap_err(); - assert_eq!(res.major_status(), StatusCode::VALUE_STACK_PUSH_OVERFLOW); + assert_eq!(res.major_status(), StatusCode::TOO_MANY_TYPE_NODES); } diff --git a/third_party/move/move-bytecode-verifier/src/check_duplication.rs b/third_party/move/move-bytecode-verifier/src/check_duplication.rs index a42008a92ca79..4e13e88769524 100644 --- a/third_party/move/move-bytecode-verifier/src/check_duplication.rs +++ b/third_party/move/move-bytecode-verifier/src/check_duplication.rs @@ -271,6 +271,7 @@ impl<'a> DuplicationChecker<'a> { }, StructFieldInformation::DeclaredVariants(variants) => { Self::check_duplicate_variants(variants.iter())?; + // Note: unlike structs, number of fields within a variant can be zero. for variant in variants { Self::check_duplicate_fields(variant.fields.iter())? } diff --git a/third_party/move/move-bytecode-verifier/src/instantiation_loops.rs b/third_party/move/move-bytecode-verifier/src/instantiation_loops.rs index 53896c212ee14..f621d273ac96a 100644 --- a/third_party/move/move-bytecode-verifier/src/instantiation_loops.rs +++ b/third_party/move/move-bytecode-verifier/src/instantiation_loops.rs @@ -217,15 +217,106 @@ impl<'a> InstantiationLoopChecker<'a> { ) { if let Some(code) = &caller_def.code { for instr in &code.code { - if let Bytecode::CallGeneric(callee_inst_idx) = instr { - // Get the id of the definition of the function being called. - // Skip if the function is not defined in the current module, as we do not - // have mutual recursions across module boundaries. - let callee_si = self.module.function_instantiation_at(*callee_inst_idx); - if let Some(callee_idx) = self.func_handle_def_map.get(&callee_si.handle) { - let callee_idx = *callee_idx; - self.build_graph_call(caller_idx, callee_idx, callee_si.type_parameters) - } + match instr { + Bytecode::CallGeneric(callee_inst_idx) + | Bytecode::PackClosureGeneric(callee_inst_idx, _) => { + // Get the id of the definition of the function being called/packed into a closure. + // Skip if the function is not defined in the current module, as we do not + // have mutual recursions across module boundaries. 
+ let callee_si = self.module.function_instantiation_at(*callee_inst_idx); + if let Some(callee_idx) = self.func_handle_def_map.get(&callee_si.handle) { + let callee_idx = *callee_idx; + self.build_graph_call(caller_idx, callee_idx, callee_si.type_parameters) + } + }, + Bytecode::Pop + | Bytecode::Ret + | Bytecode::BrTrue(_) + | Bytecode::BrFalse(_) + | Bytecode::Branch(_) + | Bytecode::LdU8(_) + | Bytecode::LdU16(_) + | Bytecode::LdU32(_) + | Bytecode::LdU64(_) + | Bytecode::LdU128(_) + | Bytecode::LdU256(_) + | Bytecode::LdConst(_) + | Bytecode::LdTrue + | Bytecode::LdFalse + | Bytecode::CopyLoc(_) + | Bytecode::MoveLoc(_) + | Bytecode::StLoc(_) + | Bytecode::FreezeRef + | Bytecode::MutBorrowLoc(_) + | Bytecode::ImmBorrowLoc(_) + | Bytecode::MutBorrowField(_) + | Bytecode::ImmBorrowField(_) + | Bytecode::MutBorrowFieldGeneric(_) + | Bytecode::ImmBorrowFieldGeneric(_) + | Bytecode::Call(_) + | Bytecode::Pack(_) + | Bytecode::Unpack(_) + | Bytecode::ReadRef + | Bytecode::WriteRef + | Bytecode::CastU8 + | Bytecode::CastU16 + | Bytecode::CastU32 + | Bytecode::CastU64 + | Bytecode::CastU128 + | Bytecode::CastU256 + | Bytecode::Add + | Bytecode::Sub + | Bytecode::Mul + | Bytecode::Mod + | Bytecode::Div + | Bytecode::BitOr + | Bytecode::BitAnd + | Bytecode::Xor + | Bytecode::Shl + | Bytecode::Shr + | Bytecode::Or + | Bytecode::And + | Bytecode::Not + | Bytecode::Eq + | Bytecode::Neq + | Bytecode::Lt + | Bytecode::Gt + | Bytecode::Le + | Bytecode::Ge + | Bytecode::Abort + | Bytecode::Nop + | Bytecode::Exists(_) + | Bytecode::ExistsGeneric(_) + | Bytecode::MoveFrom(_) + | Bytecode::MoveFromGeneric(_) + | Bytecode::MoveTo(_) + | Bytecode::MoveToGeneric(_) + | Bytecode::VecPack(_, _) + | Bytecode::VecLen(_) + | Bytecode::VecImmBorrow(_) + | Bytecode::VecMutBorrow(_) + | Bytecode::VecPushBack(_) + | Bytecode::VecPopBack(_) + | Bytecode::VecUnpack(_, _) + | Bytecode::VecSwap(_) + | Bytecode::UnpackGeneric(_) + | Bytecode::PackGeneric(_) + | Bytecode::PackVariant(_) + | Bytecode::UnpackVariant(_) + | Bytecode::PackVariantGeneric(_) + | Bytecode::UnpackVariantGeneric(_) + | Bytecode::TestVariant(_) + | Bytecode::TestVariantGeneric(_) + | Bytecode::MutBorrowVariantField(_) + | Bytecode::MutBorrowVariantFieldGeneric(_) + | Bytecode::ImmBorrowVariantField(_) + | Bytecode::ImmBorrowVariantFieldGeneric(_) + | Bytecode::MutBorrowGlobal(_) + | Bytecode::ImmBorrowGlobal(_) + | Bytecode::MutBorrowGlobalGeneric(_) + | Bytecode::ImmBorrowGlobalGeneric(_) + | Bytecode::PackClosure(_, _) + | Bytecode::CallClosure(_) => {}, } } } diff --git a/third_party/move/move-bytecode-verifier/src/limits.rs b/third_party/move/move-bytecode-verifier/src/limits.rs index 1fcb2436be6f2..54542e16b1833 100644 --- a/third_party/move/move-bytecode-verifier/src/limits.rs +++ b/third_party/move/move-bytecode-verifier/src/limits.rs @@ -81,7 +81,14 @@ impl<'a> LimitsVerifier<'a> { return Err(PartialVMError::new(StatusCode::TOO_MANY_PARAMETERS) .at_index(IndexKind::FunctionHandle, idx as u16)); } + } + if let Some(limit) = config.max_function_return_values { + if self.resolver.signature_at(function_handle.return_).0.len() > limit { + return Err(PartialVMError::new(StatusCode::TOO_MANY_PARAMETERS) + .at_index(IndexKind::FunctionHandle, idx as u16)); + } }; + // Note: the size of `attributes` is limited by the deserializer. 
} Ok(()) } @@ -122,24 +129,58 @@ impl<'a> LimitsVerifier<'a> { config: &VerifierConfig, ty: &SignatureToken, ) -> PartialVMResult<()> { - if let Some(max) = &config.max_type_nodes { - // Structs and Parameters can expand to an unknown number of nodes, therefore - // we give them a higher size weight here. - const STRUCT_SIZE_WEIGHT: usize = 4; - const PARAM_SIZE_WEIGHT: usize = 4; - let mut size = 0; - for t in ty.preorder_traversal() { - // Notice that the preorder traversal will iterate all type instantiations, so we - // why we can ignore them below. - match t { - SignatureToken::Struct(..) | SignatureToken::StructInstantiation(..) => { - size += STRUCT_SIZE_WEIGHT - }, - SignatureToken::TypeParameter(..) => size += PARAM_SIZE_WEIGHT, - _ => size += 1, + if config.max_type_nodes.is_none() + && config.max_function_parameters.is_none() + && config.max_function_return_values.is_none() + && config.max_type_depth.is_none() + { + // If no type-related limits are set, we do not need to verify the type nodes. + return Ok(()); + } + // Structs and Parameters can expand to an unknown number of nodes, therefore + // we give them a higher size weight here. + const STRUCT_SIZE_WEIGHT: usize = 4; + const PARAM_SIZE_WEIGHT: usize = 4; + let mut type_size = 0; + for (token, depth) in ty.preorder_traversal_with_depth() { + if let Some(limit) = config.max_type_depth { + if depth > limit { + return Err(PartialVMError::new(StatusCode::TOO_MANY_TYPE_NODES)); } } - if size > *max { + match token { + SignatureToken::Struct(..) | SignatureToken::StructInstantiation(..) => { + type_size += STRUCT_SIZE_WEIGHT + }, + SignatureToken::TypeParameter(..) => type_size += PARAM_SIZE_WEIGHT, + SignatureToken::Function(params, ret, _) => { + if let Some(limit) = config.max_function_parameters { + if params.len() > limit { + return Err(PartialVMError::new(StatusCode::TOO_MANY_PARAMETERS)); + } + } + if let Some(limit) = config.max_function_return_values { + if ret.len() > limit { + return Err(PartialVMError::new(StatusCode::TOO_MANY_PARAMETERS)); + } + } + }, + SignatureToken::Bool + | SignatureToken::U8 + | SignatureToken::U16 + | SignatureToken::U32 + | SignatureToken::U64 + | SignatureToken::U128 + | SignatureToken::U256 + | SignatureToken::Address + | SignatureToken::Signer + | SignatureToken::Vector(_) + | SignatureToken::Reference(_) + | SignatureToken::MutableReference(_) => type_size += 1, + } + } + if let Some(limit) = config.max_type_nodes { + if type_size > limit { return Err(PartialVMError::new(StatusCode::TOO_MANY_TYPE_NODES)); } } diff --git a/third_party/move/move-bytecode-verifier/src/stack_usage_verifier.rs b/third_party/move/move-bytecode-verifier/src/stack_usage_verifier.rs index 4a40ecb3e524c..c2735f91bc66e 100644 --- a/third_party/move/move-bytecode-verifier/src/stack_usage_verifier.rs +++ b/third_party/move/move-bytecode-verifier/src/stack_usage_verifier.rs @@ -228,42 +228,27 @@ impl<'a> StackUsageVerifier<'a> { (arg_count, return_count) }, - // ClosEval pops the number of arguments and pushes the results of the given function - // type + // `CallClosure` pops the closure and then the number of arguments and + // pushes the results of the given function type Bytecode::CallClosure(idx) => { if let Some(SignatureToken::Function(args, result, _)) = self.resolver.signature_at(*idx).0.first() { ((1 + args.len()) as u64, result.len() as u64) } else { - // We don't know what it will pop/push, but the signature checker + // We don't know what it will pop/push, but the signature checker v2 // ensures we never 
reach this (0, 0) } }, - // ClosPack pops the captured arguments and returns 1 value - Bytecode::PackClosure(idx, mask) => { - let function_handle = self.resolver.function_handle_at(*idx); - // TODO(#15664): use `captured_count` for efficiency - let arg_count = mask - .extract( - &self.resolver.signature_at(function_handle.parameters).0, - true, - ) - .len() as u64; + // `PackClosure` pops the captured arguments and returns 1 value + Bytecode::PackClosure(_, mask) => { + let arg_count = mask.captured_count() as u64; (arg_count, 1) }, - Bytecode::PackClosureGeneric(idx, mask) => { - let func_inst = self.resolver.function_instantiation_at(*idx); - let function_handle = self.resolver.function_handle_at(func_inst.handle); - // TODO(#15664): use `captured_count` for efficiency - let arg_count = mask - .extract( - &self.resolver.signature_at(function_handle.parameters).0, - true, - ) - .len() as u64; + Bytecode::PackClosureGeneric(_, mask) => { + let arg_count = mask.captured_count() as u64; (arg_count, 1) }, diff --git a/third_party/move/move-bytecode-verifier/src/type_safety.rs b/third_party/move/move-bytecode-verifier/src/type_safety.rs index 60beb13850d7a..bef08fed0089b 100644 --- a/third_party/move/move-bytecode-verifier/src/type_safety.rs +++ b/third_party/move/move-bytecode-verifier/src/type_safety.rs @@ -361,6 +361,8 @@ fn clos_pack( }; // Check the captured arguments on the stack let param_sgn = verifier.resolver.signature_at(func_handle.parameters); + // Instruction consistency check has verified that the number of captured arguments + // is less than or equal to the number of parameters of the function. let captured_param_tys = mask.extract(&param_sgn.0, true); for ty in captured_param_tys.into_iter().rev() { let arg = safe_unwrap!(verifier.stack.pop()); diff --git a/third_party/move/move-bytecode-verifier/src/verifier.rs b/third_party/move/move-bytecode-verifier/src/verifier.rs index d358a73537a31..10d64d4659a98 100644 --- a/third_party/move/move-bytecode-verifier/src/verifier.rs +++ b/third_party/move/move-bytecode-verifier/src/verifier.rs @@ -43,6 +43,10 @@ pub struct VerifierConfig { pub enable_enum_types: bool, pub enable_resource_access_control: bool, pub enable_function_values: bool, + /// Maximum number of function return values. + pub max_function_return_values: Option<usize>, + /// Maximum depth of a type node. + pub max_type_depth: Option<usize>, } /// Helper for a "canonical" verification of a module.
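Taken together with `max_type_nodes`, these limits bound both the size and the nesting of a single type. A standalone sketch of the weighted, depth-limited count (a simplified stand-in for the `SignatureToken` walk in the hunk above, not the verifier's real API; the weights mirror `STRUCT_SIZE_WEIGHT` and `PARAM_SIZE_WEIGHT`):

    // Simplified stand-in for a signature token tree.
    enum Ty {
        Prim,            // leaf primitives: weight 1 (vectors/references also count 1 and recurse in the real check)
        Param,           // type parameters: weight 4, since they can expand to unknown size
        Struct(Vec<Ty>), // structs/instantiations: weight 4 plus their type arguments
    }

    const STRUCT_SIZE_WEIGHT: usize = 4;
    const PARAM_SIZE_WEIGHT: usize = 4;

    // Returns None if any node sits deeper than `max_depth`,
    // otherwise the weighted node count of the whole type.
    fn weighted_size(ty: &Ty, depth: usize, max_depth: usize) -> Option<usize> {
        if depth > max_depth {
            return None;
        }
        match ty {
            Ty::Prim => Some(1),
            Ty::Param => Some(PARAM_SIZE_WEIGHT),
            Ty::Struct(args) => {
                let mut size = STRUCT_SIZE_WEIGHT;
                for arg in args {
                    size += weighted_size(arg, depth + 1, max_depth)?;
                }
                Some(size)
            },
        }
    }

A `None` here corresponds to the `TOO_MANY_TYPE_NODES` error raised by the depth check above; the returned size is then compared against `max_type_nodes`.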
@@ -242,6 +246,9 @@ impl Default for VerifierConfig { enable_enum_types: true, enable_resource_access_control: true, enable_function_values: true, + + max_function_return_values: None, + max_type_depth: None, } } } @@ -265,7 +272,7 @@ impl VerifierConfig { max_basic_blocks: Some(1024), max_basic_blocks_in_script: Some(1024), max_value_stack_size: 1024, - max_type_nodes: Some(256), + max_type_nodes: Some(128), max_push_size: Some(10000), max_struct_definitions: Some(200), max_fields_in_struct: Some(30), @@ -287,6 +294,9 @@ impl VerifierConfig { enable_enum_types: true, enable_resource_access_control: true, enable_function_values: true, + + max_function_return_values: Some(128), + max_type_depth: Some(20), } } } diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/dependency_ordering.rs b/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/dependency_ordering.rs index 2b65661467ec8..5b0f9d72cc8a2 100644 --- a/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/dependency_ordering.rs +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/dependency_ordering.rs @@ -37,7 +37,8 @@ pub fn verify( .map(|(mident, _)| mident) .collect::<BTreeSet<_>>(); let graph = dependency_graph(&module_neighbors, &imm_module_idents); - let graph = add_implicit_vector_dependencies(graph); + let graph = add_implicit_module_dependencies(graph, AccountAddress::ONE, "vector"); + let graph = add_implicit_module_dependencies(graph, AccountAddress::ONE, "cmp"); match petgraph_toposort(&graph, None) { Err(cycle_node) => { let cycle_ident = *cycle_node.node_id(); @@ -180,29 +181,31 @@ impl<'a> Context<'a> { } } -/// If the `vector` module is present in `graph`, then add dependency edges -/// from every module (not in the `vector` module's dependency closure) to the -/// `vector` module. This is because modules can have implicit dependencies -/// on the `vector` module. -fn add_implicit_vector_dependencies( - mut graph: DiGraphMap<&ModuleIdent, ()>, -) -> DiGraphMap<&ModuleIdent, ()> { - let vector_module = graph.nodes().find(|m| { - m.value.address.into_addr_bytes().into_inner() == AccountAddress::ONE - && m.value.module.0.value.as_str() == "vector" +/// If the target module (`module_address::module_name`) is present in `graph`, +/// then add dependency edges from every module (not in the target module's dependency closure) +/// to the target module. This is used to maintain implicit dependencies introduced +/// by the compiler between user modules and modules like `vector` or `cmp`. +fn add_implicit_module_dependencies<'a>( + mut graph: DiGraphMap<&'a ModuleIdent, ()>, + module_address: AccountAddress, + module_name: &str, +) -> DiGraphMap<&'a ModuleIdent, ()> { + let target_module = graph.nodes().find(|m| { + m.value.address.into_addr_bytes().into_inner() == module_address + && m.value.module.0.value.as_str() == module_name }); - if let Some(vector_module) = vector_module { - let mut dfs = Dfs::new(&graph, vector_module); + if let Some(target_module) = target_module { + let mut dfs = Dfs::new(&graph, target_module); - // Get the transitive closure of the `vector` module and its dependencies. + // Get the transitive closure of the target module and its dependencies. - let mut vector_dep_closure = BTreeSet::new(); + let mut target_dep_closure = BTreeSet::new(); while let Some(node) = dfs.next(&graph) { - vector_dep_closure.insert(node); + target_dep_closure.insert(node); } - // For every module that is not in `vector_dep_closure`, add an edge to `vector_module`. + // For every module that is not in `target_dep_closure`, add an edge to `target_module`. let all_modules = graph.nodes().collect::<Vec<_>>(); for module in all_modules { - if !vector_dep_closure.contains(module) { - graph.add_edge(module, vector_module, ()); + if !target_dep_closure.contains(module) { + graph.add_edge(module, target_module, ()); } } }
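The generalized helper applies the same closure-based edge insertion to any well-known module; `vector` and `cmp` are just its two call sites. A self-contained sketch of the scheme (petgraph, with plain string names standing in for `ModuleIdent`s):

    use petgraph::{graphmap::DiGraphMap, visit::Dfs};
    use std::collections::BTreeSet;

    // Add an edge from every module outside the target's dependency closure
    // to the target, so a toposort keeps the target ahead of its implicit users.
    fn add_implicit_edges<'a>(
        mut graph: DiGraphMap<&'a str, ()>,
        target: &'a str,
    ) -> DiGraphMap<&'a str, ()> {
        if graph.contains_node(target) {
            // The closure includes the target itself, so no self-edge is added.
            let mut closure = BTreeSet::new();
            let mut dfs = Dfs::new(&graph, target);
            while let Some(n) = dfs.next(&graph) {
                closure.insert(n);
            }
            for module in graph.nodes().collect::<Vec<_>>() {
                if !closure.contains(module) {
                    graph.add_edge(module, target, ());
                }
            }
        }
        graph
    }

Restricting the new edges to modules outside the closure is what keeps the graph acyclic: an edge from a module the target already (transitively) depends on would immediately create a cycle and break the toposort.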
diff --git a/third_party/move/move-compiler-v2/src/bytecode_generator.rs b/third_party/move/move-compiler-v2/src/bytecode_generator.rs index 3e3982fdfbde3..f1379c3403827 100644 --- a/third_party/move/move-compiler-v2/src/bytecode_generator.rs +++ b/third_party/move/move-compiler-v2/src/bytecode_generator.rs @@ -2,7 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::Options; +use crate::{Options, COMPILER_BUG_REPORT_MSG}; use codespan_reporting::diagnostic::Severity; use ethnum::U256; use itertools::Itertools; @@ -317,7 +317,14 @@ impl<'env> Generator<'env> { /// Report an (internal) error at the location associated with the node. fn internal_error(&self, id: NodeId, msg: impl AsRef<str>) { - self.diag(id, Severity::Bug, msg) + let env = self.env(); + let loc = env.get_node_loc(id); + env.diag_with_notes( + Severity::Bug, + loc.as_ref(), + &format!("compiler internal error: {}", msg.as_ref()), + vec![COMPILER_BUG_REPORT_MSG.to_string()], + ); } fn diag(&self, id: NodeId, severity: Severity, msg: impl AsRef<str>) { diff --git a/third_party/move/move-compiler-v2/src/env_pipeline/cmp_rewriter.rs b/third_party/move/move-compiler-v2/src/env_pipeline/cmp_rewriter.rs new file mode 100644 index 0000000000000..25d883a90e3f6 --- /dev/null +++ b/third_party/move/move-compiler-v2/src/env_pipeline/cmp_rewriter.rs @@ -0,0 +1,368 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! Comparison operation rewriter +//! - The in-house Lt/Le/Gt/Ge operations only allow integer operands. +//! - We visit Lt/Le/Gt/Ge operations +//! - If their operands are not references or integer primitive types, +//! - we rewrite the operation to use the `std::cmp::compare` function, +//! - and then interpret the result using `std::cmp::is_lt`, `std::cmp::is_le`, etc. +//! +//! Example: +//! x <= y +//! =======> +//! std::cmp::compare(&x, &y).std::cmp::is_le() +//! +//! Key impls +//! - `fn rewrite_cmp_operation`: +//! - Given an ast exp representing a comparison operation, e.g., `Call(Operation::Lt, [arg1, arg2])`, +//! - the function takes five steps: +//! 1. check if `arg1` and `arg2` can be transformed based on their types +//! 2. transform `arg1` and `arg2` into `&arg1` and `&arg2` +//! 3. create a call to `std::cmp::compare`: `exp1 = Call(MoveFunction(std::cmp::compare), [&arg1, &arg2])` +//! 4. create an immutable reference to the result of `std::cmp::compare`: `exp2 = Call(Borrow(Immutable), [exp1])` +//! 5. generate a final call of `is_lt / is_le / is_gt / is_ge` to interpret the result of `std::cmp::compare`: `exp3 = Call(MoveFunction(std::cmp::is_le), [exp2])` +//! - [TODO] An optimization to consider +//! - Once we have public enums, we can directly apply `==` to the result of `std::cmp::compare` (of the `Enum Ordering` type), +//! - which can avoid creating a reference to the result of `std::cmp::compare` and calling `is_lt / is_le / is_gt / is_ge`. +//! +//! +//! Important notes about linking `std::cmp` module: +//! - To ensure the `std::cmp` module is preserved in GlobalEnv during compilation, +//! - code is added in `third_party/move/move-model/src/lib.rs` to keep `std::cmp` and its dependencies during the ast expansion phase +//!
- To ensure implicit dependencies introduced by comparison rewriting are maintained +//! - code is added in `third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/dependency_ordering.rs` to add a dependency between every user module and `std::cmp`. +//! - If the user does not include `move-stdlib` for compilation, +//! - a compilation error "cannot find `std::cmp` module" will be raised. +//! +//! Important notes about specs +//! - No extensions are made to comparison operations in specs. +//! - Only primitive integer types are supported as before +//! + +use crate::env_pipeline::rewrite_target::{ + RewriteState, RewriteTarget, RewriteTargets, RewritingScope, +}; +use move_core_types::account_address::AccountAddress; +use move_model::{ + ast::{Address, Exp, ExpData, ModuleName, Operation}, + exp_rewriter::ExpRewriterFunctions, + model::{FunId, FunctionEnv, GlobalEnv, ModuleEnv, NodeId, QualifiedId}, + ty::*, + well_known, +}; +use std::{ + collections::{BTreeMap, BTreeSet}, + iter::Iterator, +}; + +/// Main interface for the comparison operation rewriter +pub fn rewrite(env: &mut GlobalEnv) { + // Create a shared CmpRewriter instance for processing all target functions + let mut rewriter = CmpRewriter::new(env); + let mut targets = RewriteTargets::create(env, RewritingScope::CompilationTarget); + let todo: BTreeSet<_> = targets.keys().collect(); + for target in todo { + if let RewriteTarget::MoveFun(func_id) = target { + let new_def = rewrite_target(&mut rewriter, func_id); + if let Some(def) = new_def { + *targets.state_mut(&target) = RewriteState::Def(def); + } + } + } + targets.write_to_env(env); +} + +/// Rewrite each target function +/// - The rewriting will go through the ExpRewriterFunctions trait +/// - On a `Call` operation, the transformation will be redirected to `rewrite_call` defined later +fn rewrite_target(rewriter: &mut CmpRewriter, func_id: QualifiedId<FunId>) -> Option<Exp> { + let func_env = rewriter.env.get_function(func_id); + let def_opt = func_env.get_def(); + if let Some(def) = def_opt { + let rewritten_def = rewriter.rewrite_exp(def.clone()); + if !ExpData::ptr_eq(&rewritten_def, def) { + return Some(rewritten_def); + } + } + None +} + +struct CmpRewriter<'env> { + env: &'env GlobalEnv, + cmp_module: Option<ModuleEnv<'env>>, + cmp_functions: BTreeMap<&'static str, FunctionEnv<'env>>, +} + +/// Override `rewrite_call` from `ExpRewriterFunctions` trait +impl ExpRewriterFunctions for CmpRewriter<'_> { + fn rewrite_call(&mut self, call_id: NodeId, oper: &Operation, args: &[Exp]) -> Option<Exp> { + if matches!( + oper, + Operation::Lt | Operation::Le | Operation::Gt | Operation::Ge + ) { + self.rewrite_cmp_operation(call_id, oper, args) + } else { + None + } + } +} + +impl<'env> CmpRewriter<'env> { + /// Constants for the `std::cmp` module and its functions + const COMPARE: &'static str = "compare"; + const IS_GE: &'static str = "is_ge"; + const IS_GT: &'static str = "is_gt"; + const IS_LE: &'static str = "is_le"; + const IS_LT: &'static str = "is_lt"; + + fn new(env: &'env GlobalEnv) -> Self { + let cmp_module = Self::find_cmp_module(env); + let mut cmp_functions = BTreeMap::new(); + + if let Some(module) = &cmp_module { + for name in [ + Self::COMPARE, + Self::IS_LT, + Self::IS_LE, + Self::IS_GT, + Self::IS_GE, + ] { + if let Some(func) = Self::find_cmp_function(module, name) { + cmp_functions.insert(name, func); + } + } + } + + Self { + env, + cmp_module, + cmp_functions, + } + }
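Concretely, the five steps turn `x <= y` over a non-integer type into `std::cmp::is_le(&std::cmp::compare(&x, &y))`. A toy model of the transformation (hypothetical expression type, not the compiler's `ExpData`):

    // Toy AST, for illustration only.
    #[derive(Clone, Debug)]
    enum Expr {
        Var(&'static str),
        Le(Box<Expr>, Box<Expr>),      // x <= y
        Borrow(Box<Expr>),             // &e (immutable borrow)
        Call(&'static str, Vec<Expr>), // call to a named function
    }

    // Mirror of steps 2-5: borrow both operands, call compare,
    // borrow the result, and interpret it with is_le.
    fn rewrite_le(e: Expr) -> Expr {
        match e {
            Expr::Le(x, y) => {
                let cmp = Expr::Call(
                    "std::cmp::compare",
                    vec![Expr::Borrow(x), Expr::Borrow(y)],
                );
                Expr::Call("std::cmp::is_le", vec![Expr::Borrow(Box::new(cmp))])
            },
            other => other,
        }
    }

    fn main() {
        // `x <= y` becomes `is_le(&compare(&x, &y))`.
        let before = Expr::Le(Box::new(Expr::Var("x")), Box::new(Expr::Var("y")));
        println!("{:?}", rewrite_le(before));
    }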
+ + /// Rewrite comparison operations + /// - Design detailed at the beginning of this file + fn rewrite_cmp_operation( + &mut self, + call_id: NodeId, + cmp_op: &Operation, + args: &[Exp], + ) -> Option<Exp> { + // Step 1: Check argument types + if args.iter().any(|arg| self.arg_cannot_transform(arg)) { + return None; + } + + // Step 2: Transform `arg1` and `arg2` into `&arg1` and `&arg2` + let transformed_args: Vec<Exp> = args.iter().map(|arg| self.rewrite_cmp_arg(arg)).collect(); + + // Step 3: Create an inner call to `std::cmp::compare(&arg1, &arg2)` + let expected_arg_ty = self.env.get_node_type(args[0].as_ref().node_id()); + let call_cmp = self.generate_call_to_compare(call_id, transformed_args, expected_arg_ty)?; + + // Step 4: Create an immutable reference to the result of `std::cmp::compare(&arg1, &arg2)` + let immref_cmp_res = self.immborrow_compare_res(call_cmp); + + // Step 5: Generate a final call of `is_lt / is_le / is_gt / is_ge` to interpret the result of `std::cmp::compare` + self.generate_call_to_final_res(call_id, cmp_op, vec![immref_cmp_res]) + } + + /// Generate a call to `std::cmp::compare(&arg1, &arg2)` + fn generate_call_to_compare( + &self, + cmp_op_id: NodeId, + args: Vec<Exp>, + expected_arg_ty: Type, + ) -> Option<Exp> { + // Reuse the loc info of the original comparison operation + let cmp_loc = self.env.get_node_loc(cmp_op_id); + + // Check the `std::cmp` module + let cmp_module = match self.cmp_module.as_ref() { + Some(module) => module, + None => { + self.env.error( + &cmp_loc, + "cannot find `std::cmp` module. Include `move-stdlib` for compilation.", + ); + let invalid_id = self.env.new_node(self.env.internal_loc(), Type::Error); + return Some(ExpData::Invalid(invalid_id).into_exp()); + }, + }; + let cmp_module_id = cmp_module.get_id(); + + // Check the `std::cmp::compare` function + let compare_function = match self.cmp_functions.get(Self::COMPARE) { + Some(func) => func, + None => { + self.env.error(&cmp_loc, "cannot find `std::cmp::compare` function.
Include `move-stdlib` for compilation."); + let invalid_id = self.env.new_node(self.env.internal_loc(), Type::Error); + return Some(ExpData::Invalid(invalid_id).into_exp()); + }, + }; + + // Create a new node sharing return type with `std::cmp::compare` + let cmp_ty = compare_function.get_result_type(); + let new_cmp_node = self.env.new_node(cmp_loc, cmp_ty.clone()); + + // `std::cmp::compare` takes a type parameter, + // which should be instantiated with the type of the actual arguments + self.env + .set_node_instantiation(new_cmp_node, vec![expected_arg_ty]); + + Some( + ExpData::Call( + new_cmp_node, + Operation::MoveFunction(cmp_module_id, compare_function.get_id()), + args, + ) + .into_exp(), + ) + } + + /// Create a new immutable reference for the result of `std::cmp::compare` + fn immborrow_compare_res(&self, call_cmp: Exp) -> Exp { + // Create a new immutable reference for the return value of `std::cmp::compare` + let call_cmp_id = call_cmp.node_id(); + let cmp_loc = self.env.get_node_loc(call_cmp_id); + let cmp_ty = self.env.get_node_type(call_cmp_id); + let new_ref_type = Type::Reference(ReferenceKind::Immutable, Box::new(cmp_ty)); + let new_ref_id = self.env.new_node(cmp_loc, new_ref_type); + + ExpData::Call( + new_ref_id, + Operation::Borrow(ReferenceKind::Immutable), + vec![call_cmp], + ) + .into_exp() + } + + /// Generate a final call to interpret the result of `std::cmp::compare` + fn generate_call_to_final_res( + &self, + ori_op_id: NodeId, + cmp_op: &Operation, + args: Vec<Exp>, + ) -> Option<Exp> { + // Reuse the loc info of the original comparison operation + let final_res_loc = self.env.get_node_loc(ori_op_id); + + // Check the `std::cmp` module + let cmp_module = match self.cmp_module.as_ref() { + Some(module) => module, + None => { + self.env + .error(&final_res_loc, "cannot find `std::cmp` module"); + let invalid_id = self.env.new_node(self.env.internal_loc(), Type::Error); + return Some(ExpData::Invalid(invalid_id).into_exp()); + }, + }; + let cmp_module_id = cmp_module.get_id(); + + let sym = match cmp_op { + Operation::Lt => Self::IS_LT, + Operation::Le => Self::IS_LE, + Operation::Gt => Self::IS_GT, + Operation::Ge => Self::IS_GE, + _ => return None, + }; + + // Check the `std::cmp::is_lt/is_le/is_gt/is_ge` function + let final_res_function = match self.cmp_functions.get(sym) { + Some(func) => func, + None => { + self.env.error( + &final_res_loc, + format!("cannot find `std::cmp::{}` function", sym).as_str(), + ); + let invalid_id = self.env.new_node(self.env.internal_loc(), Type::Error); + return Some(ExpData::Invalid(invalid_id).into_exp()); + }, + }; + + let final_res_ty = Type::Primitive(PrimitiveType::Bool); + let final_res_id = self.env.new_node(final_res_loc, final_res_ty.clone()); + Some( + ExpData::Call( + final_res_id, + Operation::MoveFunction(cmp_module_id, final_res_function.get_id()), + args, + ) + .into_exp(), + ) + } + + /// We cannot rewrite references or integer primitive types + /// - References are not allowed in Lt/Le/Gt/Ge operations + /// - Integer primitive types are supported by the VM natively + fn arg_cannot_transform(&mut self, arg: &Exp) -> bool { + let arg_ty = self.env.get_node_type(arg.as_ref().node_id()); + matches!( + arg_ty, + Type::Reference(_, _) + | Type::Primitive( + PrimitiveType::U8 + | PrimitiveType::U16 + | PrimitiveType::U32 + | PrimitiveType::U64 + | PrimitiveType::U128 + | PrimitiveType::U256 + | PrimitiveType::Num + ) + ) + } + + /// Insert a new immutable reference before the argument + fn rewrite_cmp_arg(&mut self, arg:
&Exp) -> Exp { + if let Some(arg_ref) = self.remove_deref_from_arg(arg) { + return arg_ref; + } + // Insert a new immutable reference before the argument + let arg_loc = self.env.get_node_loc(arg.as_ref().node_id()); + let arg_ty = self.env.get_node_type(arg.as_ref().node_id()); + let new_ref_type = Type::Reference(ReferenceKind::Immutable, Box::new(arg_ty.clone())); + let new_ref_id = self.env.new_node(arg_loc, new_ref_type); + ExpData::Call( + new_ref_id, + Operation::Borrow(ReferenceKind::Immutable), + vec![arg.clone()], + ) + .into_exp() + } + + /// Optimization: if the argument is a dereference operation, we get the inner reference directly + fn remove_deref_from_arg(&mut self, arg: &Exp) -> Option<Exp> { + if let ExpData::Call(_, Operation::Deref, deref_args) = arg.as_ref() { + debug_assert!( + deref_args.len() == 1, + "there should be exactly one argument for dereference" + ); + let deref_arg_ty = self.env.get_node_type(deref_args[0].as_ref().node_id()); + if let Type::Reference(ReferenceKind::Immutable, _) = deref_arg_ty { + // If the deref argument is an immutable reference, we can return it directly + return Some(deref_args[0].clone()); + } + } + None + } + + /// Find the `std::cmp` module + fn find_cmp_module(env: &'env GlobalEnv) -> Option<ModuleEnv<'env>> { + // Find the `std::cmp` module + let cmp_module_name = ModuleName::new( + Address::Numerical(AccountAddress::ONE), + env.symbol_pool().make(well_known::CMP_MODULE), + ); + env.find_module(&cmp_module_name) + } + + /// Find function from the `std::cmp` module + fn find_cmp_function( + cmp_module: &ModuleEnv<'env>, + func_name: &str, + ) -> Option<FunctionEnv<'env>> { + let compare_sym = cmp_module.symbol_pool().make(func_name); + cmp_module.find_function(compare_sym) + } +} diff --git a/third_party/move/move-compiler-v2/src/env_pipeline/function_checker.rs b/third_party/move/move-compiler-v2/src/env_pipeline/function_checker.rs index 66655238318b5..9f29a8e6b2616 100644 --- a/third_party/move/move-compiler-v2/src/env_pipeline/function_checker.rs +++ b/third_party/move/move-compiler-v2/src/env_pipeline/function_checker.rs @@ -3,7 +3,7 @@ //! Do a few checks of functions and function calls. -use crate::Options; +use crate::{experiments::Experiment, Options}; use codespan_reporting::diagnostic::Severity; use move_binary_format::file_format::Visibility; use move_model::{ @@ -482,6 +482,12 @@ pub fn check_access_and_use(env: &mut GlobalEnv, before_inlining: bool) { let callees_with_sites = def.used_funs_with_uses(); for (callee, sites) in &callees_with_sites { let callee_func = env.get_function(*callee); + + // Script functions cannot be called. + if callee_func.module_env.is_script_module() { + calling_script_function_error(env, sites, &callee_func); + } + // Check visibility. // Same module is always visible @@ -527,13 +533,25 @@ caller_func.module_env.get_full_name_str() ); } else { - call_package_fun_from_diff_package_error( - env, - sites, - &caller_func, - &callee_func, - ); - false + // With "unsafe package visibility" experiment on, all package functions are made + // visible in all modules with the same address. The prover uses this in filter mode + // to get around the lack of package-based target filtering functionality.
+ let options = env + .get_extension::<Options>() + .expect("Options is available"); + if options.experiment_on( + Experiment::UNSAFE_PACKAGE_VISIBILITY, + ) { + true + } else { + call_package_fun_from_diff_package_error( + env, + sites, + &caller_func, + &callee_func, + ); + false + } } } else { call_package_fun_from_diff_addr_error( @@ -837,3 +855,16 @@ let why = "they are from different addresses"; cannot_call_error(env, why, sites, caller, callee); } + +fn calling_script_function_error(env: &GlobalEnv, sites: &BTreeSet<NodeId>, callee: &FunctionEnv) { + let call_details: Vec<_> = sites + .iter() + .map(|node_id| (env.get_node_loc(*node_id), "used here".to_owned())) + .collect(); + let callee_name = callee.get_name_str(); + let msg = format!( + "script function `{}` cannot be used in Move code", + callee_name + ); + env.diag_with_labels(Severity::Error, &callee.get_id_loc(), &msg, call_details); +} diff --git a/third_party/move/move-compiler-v2/src/env_pipeline/lambda_lifter.rs b/third_party/move/move-compiler-v2/src/env_pipeline/lambda_lifter.rs index 58333b037ac2f..d55c782ce96b4 100644 --- a/third_party/move/move-compiler-v2/src/env_pipeline/lambda_lifter.rs +++ b/third_party/move/move-compiler-v2/src/env_pipeline/lambda_lifter.rs @@ -174,7 +174,7 @@ pub struct LambdaLifter<'a> { } struct VarInfo { - /// The node were this variable was found. + /// The node where this variable was found. node_id: NodeId, /// Whether the variable is modified modified: bool, @@ -462,6 +462,53 @@ impl<'a> LambdaLifter<'a> { | Loop(..) | LoopCont(..) | Assign(..) | Mutate(..) | SpecBlock(..) => false, } } + + fn is_symbol_in_scope(&self, sym: &Symbol) -> bool { + self.scopes.iter().any(|scope| scope.contains(sym)) + } + + /// Insert `sym` as a free local variable, if it is not already in scope. + fn try_insert_free_local(&mut self, sym: Symbol, var_info: VarInfo) { + if !self.is_symbol_in_scope(&sym) { + if var_info.modified { + // Make sure we mark the variable as modified. + self.free_locals.insert(sym, var_info); + } else { + self.free_locals.entry(sym).or_insert(var_info); + } + } + } + + /// Perform a rewrite action in an isolated context. + /// To do so, we save the current context (free parameters, free locals, and scopes), + /// perform the rewrite action, and then restore the context. + fn rewrite_with_isolated_context<F>(&mut self, rewrite: F, exp: Exp) -> Exp + where + F: FnOnce(&mut Self, Exp) -> Exp, + { + // Save the current context. + let mut curr_free_params = mem::take(&mut self.free_params); + let mut curr_free_locals = mem::take(&mut self.free_locals); + let curr_scopes = mem::take(&mut self.scopes); + // Perform the rewrite action. + let result = rewrite(self, exp); + // Restore the context. + self.scopes = curr_scopes; + // Remove free vars present in the re-instated scope. + let to_remove = self + .free_locals + .keys() + .filter(|sym| self.is_symbol_in_scope(sym)) + .copied() + .collect::<Vec<_>>(); + for sym in to_remove { + self.free_locals.remove(&sym); + } + self.free_locals.append(&mut curr_free_locals); + self.free_params.append(&mut curr_free_params); + // Return the result of the rewrite. + result + } } impl ExpRewriterFunctions for LambdaLifter<'_> {
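The helper generalizes the save/rewrite/restore dance that `rewrite_exp` previously inlined for lambdas. The core pattern, sketched standalone (hypothetical `Collector` type; the real code additionally restores scopes and prunes locals shadowed by the re-instated scope):

    use std::mem;

    struct Collector {
        free_vars: Vec<String>,
    }

    impl Collector {
        fn with_isolated_context<F, R>(&mut self, action: F) -> R
        where
            F: FnOnce(&mut Self) -> R,
        {
            // Save: leave an empty state for the nested action.
            let saved = mem::take(&mut self.free_vars);
            let result = action(self);
            // Restore: merge what the nested action collected into the saved state.
            let mut collected = mem::replace(&mut self.free_vars, saved);
            self.free_vars.append(&mut collected);
            result
        }
    }

    fn main() {
        let mut c = Collector { free_vars: vec!["outer".to_string()] };
        c.with_isolated_context(|c| c.free_vars.push("inner".to_string()));
        assert_eq!(c.free_vars, vec!["outer".to_string(), "inner".to_string()]);
    }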
@@ -483,12 +530,7 @@ impl ExpRewriterFunctions for LambdaLifter<'_> { // Also if this is a lambda, before descent, clear any usages from siblings in the // context, so we get the isolated usage information for the lambda's body. if matches!(exp.as_ref(), ExpData::Lambda(..)) { - let mut curr_free_params = mem::take(&mut self.free_params); - let mut curr_free_locals = mem::take(&mut self.free_locals); - let result = self.rewrite_exp_descent(exp); - self.free_params.append(&mut curr_free_params); - self.free_locals.append(&mut curr_free_locals); - result + self.rewrite_with_isolated_context(ExpRewriterFunctions::rewrite_exp_descent, exp) } else { self.rewrite_exp_descent(exp) } @@ -504,14 +546,12 @@ } fn rewrite_exit_scope(&mut self, _id: NodeId) { - let exiting = self.scopes.pop().expect("stack balanced"); - // Remove all locals which are bound in the scope we are exiting. - self.free_locals.retain(|name, _| !exiting.contains(name)); + self.scopes.pop().expect("stack balanced"); } fn rewrite_local_var(&mut self, node_id: NodeId, sym: Symbol) -> Option<Exp> { // duplicates are OK -- they are all the same local at different locations - self.free_locals.entry(sym).or_insert(VarInfo { + self.try_insert_free_local(sym, VarInfo { node_id, modified: false, }); @@ -529,7 +569,7 @@ fn rewrite_assign(&mut self, _node_id: NodeId, lhs: &Pattern, _rhs: &Exp) -> Option<Exp> { for (node_id, name) in lhs.vars() { - self.free_locals.insert(name, VarInfo { + self.try_insert_free_local(name, VarInfo { node_id, modified: true, }); @@ -541,7 +581,7 @@ if matches!(oper, Operation::Borrow(ReferenceKind::Mutable)) { match args[0].as_ref() { ExpData::LocalVar(node_id, name) => { - self.free_locals.insert(*name, VarInfo { + self.try_insert_free_local(*name, VarInfo { node_id: *node_id, modified: true, }); diff --git a/third_party/move/move-compiler-v2/src/env_pipeline/mod.rs b/third_party/move/move-compiler-v2/src/env_pipeline/mod.rs index fb2161ae5ee34..029a395a0f616 100644 --- a/third_party/move/move-compiler-v2/src/env_pipeline/mod.rs +++ b/third_party/move/move-compiler-v2/src/env_pipeline/mod.rs @@ -11,6 +11,7 @@ use std::io::Write; pub mod acquires_checker; pub mod ast_simplifier; pub mod closure_checker; +pub mod cmp_rewriter; pub mod cyclic_instantiation_checker; pub mod flow_insensitive_checkers; pub mod function_checker; diff --git a/third_party/move/move-compiler-v2/src/experiments.rs b/third_party/move/move-compiler-v2/src/experiments.rs index 1a94e655181cd..4379809ece01c 100644 --- a/third_party/move/move-compiler-v2/src/experiments.rs +++ b/third_party/move/move-compiler-v2/src/experiments.rs @@ -96,6 +96,11 @@ pub static EXPERIMENTS: Lazy<BTreeMap<String, Experiment>> = Lazy::new(|| { .to_string(), default: Inherited(Experiment::CHECKS.to_string()), }, + Experiment { + name: Experiment::CMP_REWRITE.to_string(), + description: "Rewrite comparison operations".to_string(), + default: Given(true), + }, Experiment { name: Experiment::INLINING.to_string(), description: "Turns on or off inlining".to_string(), @@ -265,6 +270,12 @@ .to_string(), default: Given(false), }, + Experiment { + name: Experiment::UNSAFE_PACKAGE_VISIBILITY.to_string(), + description: "Treat all package functions with same address as visible (currently necessary for prover in filter mode)" + .to_string(), + default: Given(false), + }, ]; experiments .into_iter() @@ -282,6 +293,7 @@ impl Experiment { pub const ATTACH_COMPILED_MODULE: &'static str = "attach-compiled-module"; pub const CFG_SIMPLIFICATION: &'static str = "cfg-simplification"; pub const CHECKS: &'static str = "checks"; + pub const CMP_REWRITE:
&'static str = "cmp-rewrite"; pub const DEAD_CODE_ELIMINATION: &'static str = "dead-code-elimination"; pub const DUPLICATE_STRUCT_PARAMS_CHECK: &'static str = "duplicate-struct-params-check"; pub const FAIL_ON_WARNING: &'static str = "fail-on-warning"; @@ -312,6 +324,7 @@ pub const STOP_BEFORE_FILE_FORMAT: &'static str = "stop-before-file-format"; pub const STOP_BEFORE_STACKLESS_BYTECODE: &'static str = "stop-before-stackless-bytecode"; pub const UNINITIALIZED_CHECK: &'static str = "uninitialized-check"; + pub const UNSAFE_PACKAGE_VISIBILITY: &'static str = "unsafe-package-visibility"; pub const UNUSED_ASSIGNMENT_CHECK: &'static str = "unused-assignment-check"; pub const UNUSED_STRUCT_PARAMS_CHECK: &'static str = "unused-struct-params-check"; pub const USAGE_CHECK: &'static str = "usage-check"; diff --git a/third_party/move/move-compiler-v2/src/file_format_generator/module_generator.rs b/third_party/move/move-compiler-v2/src/file_format_generator/module_generator.rs index ab2b8fb67330b..fa3642439bb72 100644 --- a/third_party/move/move-compiler-v2/src/file_format_generator/module_generator.rs +++ b/third_party/move/move-compiler-v2/src/file_format_generator/module_generator.rs @@ -8,7 +8,7 @@ use crate::{ MAX_MODULE_COUNT, MAX_SIGNATURE_COUNT, MAX_STRUCT_COUNT, MAX_STRUCT_DEF_COUNT, MAX_STRUCT_DEF_INST_COUNT, MAX_STRUCT_VARIANT_COUNT, MAX_STRUCT_VARIANT_INST_COUNT, }, - Options, + Options, COMPILER_BUG_REPORT_MSG, }; use codespan_reporting::diagnostic::Severity; use itertools::Itertools; @@ -995,8 +995,13 @@ } /// Emits an internal error at the location. - pub fn internal_error(&self, loc: impl AsRef<Loc>, msg: impl AsRef<str>) { - self.env.diag(Severity::Bug, loc.as_ref(), msg.as_ref()) + pub fn internal_error(&self, loc: impl AsRef<Loc>, msg: impl AsRef<str> + ToString) { + self.env.diag_with_notes( + Severity::Bug, + loc.as_ref(), + format!("compiler internal error: {}", msg.to_string()).as_str(), + vec![COMPILER_BUG_REPORT_MSG.to_string()], + ) } /// Check for a bound table index and report an error if its out of bound. All bounds
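With this change, both AST bytecode generation (above) and file-format generation emit internal errors in one shape: a `compiler internal error: ...` message plus a bug-report note. A sketch of the resulting diagnostic layout, using the constant that `lib.rs` introduces below (the formatter itself is hypothetical):

    // The note travels with every internal error; message text from this PR.
    const COMPILER_BUG_REPORT_MSG: &str = "please consider reporting this issue (see https://aptos.dev/en/build/smart-contracts/compiler_v2#reporting-an-issue)";

    // Hypothetical renderer showing how the pieces compose in the output.
    fn render_internal_error(msg: &str) -> String {
        format!("bug: compiler internal error: {}\n = {}", msg, COMPILER_BUG_REPORT_MSG)
    }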
diff --git a/third_party/move/move-compiler-v2/src/lib.rs b/third_party/move/move-compiler-v2/src/lib.rs index 223287b9a2f63..bcc18618334a1 100644 --- a/third_party/move/move-compiler-v2/src/lib.rs +++ b/third_party/move/move-compiler-v2/src/lib.rs @@ -17,11 +17,11 @@ pub mod plan_builder; use crate::{ diagnostics::Emitter, env_pipeline::{ - acquires_checker, ast_simplifier, closure_checker, cyclic_instantiation_checker, - flow_insensitive_checkers, function_checker, inliner, lambda_lifter, - lambda_lifter::LambdaLiftingOptions, model_ast_lints, recursive_struct_checker, - rewrite_target::RewritingScope, seqs_in_binop_checker, spec_checker, spec_rewriter, - unused_params_checker, EnvProcessorPipeline, + acquires_checker, ast_simplifier, closure_checker, cmp_rewriter, + cyclic_instantiation_checker, flow_insensitive_checkers, function_checker, inliner, + lambda_lifter, lambda_lifter::LambdaLiftingOptions, model_ast_lints, + recursive_struct_checker, rewrite_target::RewritingScope, seqs_in_binop_checker, + spec_checker, spec_rewriter, unused_params_checker, EnvProcessorPipeline, }, pipeline::{ ability_processor::AbilityProcessor, @@ -72,6 +72,8 @@ pub use options::Options; use std::{collections::BTreeSet, path::Path}; const DEBUG: bool = false; +const COMPILER_BUG_REPORT_MSG: &str = + "please consider reporting this issue (see https://aptos.dev/en/build/smart-contracts/compiler_v2#reporting-an-issue)"; /// Run Move compiler and print errors to stderr. pub fn run_move_compiler_to_stderr( @@ -377,6 +379,20 @@ pub fn env_check_and_transform_pipeline<'a, 'b>(options: &'a Options) -> EnvProc env_pipeline.add("model AST lints", model_ast_lints::checker); } + // The comparison rewriter is a new feature in Aptos Move 2.2 and onwards + let rewrite_cmp = options + .language_version + .unwrap_or_default() + .is_at_least(LanguageVersion::V2_2) + && options.experiment_on(Experiment::CMP_REWRITE); + + if rewrite_cmp { + env_pipeline.add("rewrite comparison operations", |env| { + // This rewrite should run before inlining to avoid repeated rewriting + cmp_rewriter::rewrite(env); + }); + } + if options.experiment_on(Experiment::INLINING) { let rewriting_scope = if options.whole_program { RewritingScope::Everything @@ -641,13 +657,14 @@ fn report_bytecode_verification_error( env.to_loc(module_ir_loc) }); if e.status_type() != StatusType::Verification { - env.diag( + env.diag_with_notes( Severity::Bug, loc, &format!( - "unexpected error returned from bytecode verification. This is a compiler bug, consider reporting it.\n{:#?}", + "unexpected error returned from bytecode verification:\n{:#?}", e ), + vec![COMPILER_BUG_REPORT_MSG.to_string()], ) } else { let debug_info = if command_line::get_move_compiler_backtrace_from_env() { @@ -658,15 +675,15 @@ e.message().cloned().unwrap_or_else(|| "none".to_string()) ) }; - env.diag( + env.diag_with_notes( Severity::Bug, loc, &format!( - "bytecode verification failed with \ - unexpected status code `{:?}`.
This is a compiler bug, consider reporting it.{}", + "bytecode verification failed with unexpected status code `{:?}`:{}", e.major_status(), debug_info ), + vec![COMPILER_BUG_REPORT_MSG.to_string()], ) } } diff --git a/third_party/move/move-compiler-v2/tests/ability-check/ability_violation.exp b/third_party/move/move-compiler-v2/tests/ability-check/ability_violation.exp index 449755574b05a..bb5deed1f948b 100644 --- a/third_party/move/move-compiler-v2/tests/ability-check/ability_violation.exp +++ b/third_party/move/move-compiler-v2/tests/ability-check/ability_violation.exp @@ -1,18 +1,18 @@ Diagnostics: -error: local `x` of type `Impotent` does not have the `copy` ability +error: value of type `Impotent` does not have the `drop` ability ┌─ tests/ability-check/ability_violation.move:7:10 │ 7 │ (x, x); - │ ^ - used here - │ │ - │ copy needed here because value is still in use + │ ^ implicitly dropped here since it is no longer used -error: value of type `Impotent` does not have the `drop` ability +error: local `x` of type `Impotent` does not have the `copy` ability ┌─ tests/ability-check/ability_violation.move:7:10 │ 7 │ (x, x); - │ ^ implicitly dropped here since it is no longer used + │ ^ - used here + │ │ + │ copy needed here because value is still in use error: value of type `Impotent` does not have the `drop` ability ┌─ tests/ability-check/ability_violation.move:7:13 diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case.exp b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case.exp new file mode 100644 index 0000000000000..90b32906711cc --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case.exp @@ -0,0 +1,2 @@ + +============ bytecode verification succeeded ======== diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case.move b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case.move new file mode 100644 index 0000000000000..f2821c6d7297d --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case.move @@ -0,0 +1,24 @@ +module 0x99::basic_struct { + struct Wrapper<T> has key { + fv: T + } + + #[persistent] + fun test(f: &||u64): u64 { + if (f == f) + 1 + else + 2 + } + + // all abilities satisfied + fun add_resource_with_struct(acc: &signer, f: | &||u64 |u64 has copy+store+drop) { + move_to<Wrapper<| &||u64 |u64 has copy+store+drop>>(acc, Wrapper { fv: f}); + } + + public fun test_driver(acc: &signer){ + // ok case + let f: | &||u64 |u64 has copy+store+drop = test; + add_resource_with_struct(acc, f); + } +} diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case_enum.exp b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case_enum.exp new file mode 100644 index 0000000000000..90b32906711cc --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case_enum.exp @@ -0,0 +1,2 @@ + +============ bytecode verification succeeded ======== diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case_enum.move b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case_enum.move new file mode 100644 index 0000000000000..4509789116acb --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/compiler_ok_case_enum.move @@ -0,0 +1,15 @@ +module 0x99::basic_enum { + #[persistent] + fun increment_by_one(x: &mut u64): u64 { *x = *x + 1;
*x } + + enum FV<T> has key { + V1 { v1: |&mut T|T has copy+store}, + } + + fun test_fun_vec(s: &signer) { + // ok case + let f1: |&mut u64|u64 has copy+store = increment_by_one; + let v1 = FV::V1{v1: f1}; + move_to(s, v1); + } +} diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store.exp b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store.exp new file mode 100644 index 0000000000000..6877d4385cfb6 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store.exp @@ -0,0 +1,7 @@ + +Diagnostics: +error: type `|&||u64|u64 has copy + drop` is missing required ability `store` + ┌─ tests/ability-check/fv_as_keys/missing_store.move:19:35 + │ +19 │ add_resource_with_struct(acc, f); + │ ^ diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store.move b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store.move new file mode 100644 index 0000000000000..affa0fc0a35ca --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store.move @@ -0,0 +1,21 @@ +module 0x99::basic_struct { + struct Wrapper<T> has key { + fv: T + } + + #[persistent] + fun test(f: &||u64): u64 { + 1 + } + + // all abilities satisfied + fun add_resource_with_struct(acc: &signer, f: | &||u64 |u64 has copy+store+drop) { + move_to<Wrapper<| &||u64 |u64 has copy+store+drop>>(acc, Wrapper { fv: f}); + } + + public fun test_driver(acc: &signer){ + // not ok case: cannot store functions without `store` + let f: | &||u64 |u64 has copy+drop = test; + add_resource_with_struct(acc, f); + } +} diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store_enum.exp b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store_enum.exp new file mode 100644 index 0000000000000..7e6ffea6dc341 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store_enum.exp @@ -0,0 +1,7 @@ + +Diagnostics: +error: type `|&mut u64|u64 has copy` is missing required ability `store` + ┌─ tests/ability-check/fv_as_keys/missing_store_enum.move:12:29 + │ +12 │ let v1 = FV::V1{v1: f1}; + │ ^^ diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store_enum.move b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store_enum.move new file mode 100644 index 0000000000000..dfee499887940 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/missing_store_enum.move @@ -0,0 +1,15 @@ +module 0x99::basic_enum { + #[persistent] + fun increment_by_one(x: &mut u64): u64 { *x = *x + 1; *x } + + enum FV<T> has key { + V1 { v1: |&mut T|T has copy+store}, + } + + fun test_fun_vec(s: &signer) { + // not ok case: cannot store functions without `store` + let f1: |&mut u64|u64 has copy = increment_by_one; + let v1 = FV::V1{v1: f1}; + move_to(s, v1); + } +} diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_fv.exp b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_fv.exp new file mode 100644 index 0000000000000..d425fc109f8fe --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_fv.exp @@ -0,0 +1,9 @@ + +Diagnostics: +error: Expected a struct type. Global storage operations are restricted to struct types declared in the current module.
Found: '|&||u64|u64 has drop + store + key' + ┌─ tests/ability-check/fv_as_keys/store_fv.move:13:5 + │ +13 │ move_to<| &||u64 |u64 has store+drop+key>(acc, f); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + │ │ + │ Invalid call to MoveTo<|&||u64|u64 has drop + store + key>. diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_fv.move b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_fv.move new file mode 100644 index 0000000000000..994c93d8420b2 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_fv.move @@ -0,0 +1,21 @@ +module 0x99::basic_struct { + struct Wrapper<T> has key { + fv: T + } + + #[persistent] + fun test(f: &||u64): u64 { + 1 + } + + // not ok: storage mandates struct type + fun add_resource_with_fv(acc: &signer, f: | &||u64 |u64 has store+drop+key) { + move_to<| &||u64 |u64 has store+drop+key>(acc, f); + } + + public fun test_driver(acc: &signer){ + // not ok case: cannot put function values in storage directly + let f: | &||u64 |u64 has store+drop+key = test; + add_resource_with_fv(acc, f); + } +} diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda.exp b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda.exp new file mode 100644 index 0000000000000..b3241dfc2bc11 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda.exp @@ -0,0 +1,10 @@ + +Diagnostics: +error: function resulting from lambda lifting is missing the `store` ability + ┌─ tests/ability-check/fv_as_keys/store_lambda.move:13:48 + │ +13 │ let f: | &||u64 |u64 has copy+store+drop = |x| (*x)(); + │ ^^^^^^^^^^ + │ + = lambda cannot be reduced to partial application of existing function + = expected function type: `|&||u64|u64 has copy + drop + store` diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda.move b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda.move new file mode 100644 index 0000000000000..801f58c6515f3 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda.move @@ -0,0 +1,16 @@ +module 0x99::basic_struct { + struct Wrapper<T> has key { + fv: T + } + + // all abilities satisfied + fun add_resource_with_struct(acc: &signer, f: | &||u64 |u64 has copy+store+drop) { + move_to<Wrapper<| &||u64 |u64 has copy+store+drop>>(acc, Wrapper { fv: f}); + } + + public fun test_driver(acc: &signer){ + // not ok case: lambda functions have no `store` + let f: | &||u64 |u64 has copy+store+drop = |x| (*x)(); + add_resource_with_struct(acc, f); + } +} diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda_enum.exp b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda_enum.exp new file mode 100644 index 0000000000000..011ebe0036071 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda_enum.exp @@ -0,0 +1,10 @@ + +Diagnostics: +error: function resulting from lambda lifting is missing the `store` ability + ┌─ tests/ability-check/fv_as_keys/store_lambda_enum.move:8:48 + │ +8 │ let f1: |&mut u64|u64 has copy+store = |x| *x+1; + │ ^^^^^^^^ + │ + = lambda cannot be reduced to partial application of existing function + = expected function type: `|&mut u64|u64 has copy + store` diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda_enum.move
b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda_enum.move new file mode 100644 index 0000000000000..26483bf05ddcc --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_as_keys/store_lambda_enum.move @@ -0,0 +1,12 @@ +module 0x99::basic_enum { + enum FV<T> has key { + V1 { v1: |&mut T|T has copy+store}, + } + + fun test_fun_vec(s: &signer) { + // not ok case: cannot put function values in storage directly + let f1: |&mut u64|u64 has copy+store = |x| *x+1; + let v1 = FV::V1{v1: f1}; + move_to(s, v1); + } +} diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_enum_err.exp b/third_party/move/move-compiler-v2/tests/ability-check/fv_enum_err.exp new file mode 100644 index 0000000000000..b4db6366a2ab0 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_enum_err.exp @@ -0,0 +1,13 @@ + +Diagnostics: +error: value of type `|&mut u64|u64 has copy + store` does not have the `drop` ability + ┌─ tests/ability-check/fv_enum_err.move:21:17 + │ +21 │ vector::pop_back(&mut v1); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^ implicitly dropped here since it is no longer used + +error: value of type `|&mut u64|u64 has copy + store` does not have the `drop` ability + ┌─ tests/ability-check/fv_enum_err.move:22:17 + │ +22 │ vector::pop_back(&mut v1); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^ implicitly dropped here since it is no longer used diff --git a/third_party/move/move-compiler-v2/tests/ability-check/fv_enum_err.move b/third_party/move/move-compiler-v2/tests/ability-check/fv_enum_err.move new file mode 100644 index 0000000000000..5c03093593434 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/ability-check/fv_enum_err.move @@ -0,0 +1,32 @@ +module 0x66::fv_enum_basic { + use std::signer; + + #[persistent] + fun add_k_persistent_ref(x: &mut u64, k: u64): u64 { *x = *x + 1; *x + k } + + enum FunVec<T> has key { + V1 { v1: vector<|&mut T|T has copy + store> }, + V2 { v0: u64, v1: vector<|&mut T|T has copy + store> }, + } + + fun test_fun_vec(s: &signer) { + use std::vector; + let k = 3; + let add_k: |&mut u64|u64 has copy + store + drop = |x: &mut u64| add_k_persistent_ref(x, k); + let v1 = FunVec::V1 { v1: vector[add_k, add_k] }; + move_to(s, v1); + let m = move_from<FunVec<u64>>(signer::address_of(s)); + match (m) { + FunVec::V1 { v1 } => { + vector::pop_back(&mut v1); + vector::pop_back(&mut v1); + vector::destroy_empty(v1); + } + FunVec::V2 { v0: _, v1 } => { + vector::destroy_empty(v1); + assert!(false, 99); + } + }; + } + +} diff --git a/third_party/move/move-compiler-v2/tests/bytecode-generator/fv_enum_wrapper_err.exp b/third_party/move/move-compiler-v2/tests/bytecode-generator/fv_enum_wrapper_err.exp new file mode 100644 index 0000000000000..a53260ecfe8e0 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/bytecode-generator/fv_enum_wrapper_err.exp @@ -0,0 +1,9 @@ + +Diagnostics: +error: cannot select field `0` since it has different types in variants of enum `Wrapper` + ┌─ tests/bytecode-generator/fv_enum_wrapper_err.move:10:10 + │ +10 │ (f.0)() + │ ^ + │ + = field `0` has type `||u64 has copy + drop` in variant `B` and type `||u64 has copy` in variant `A` diff --git a/third_party/move/move-compiler-v2/tests/bytecode-generator/fv_enum_wrapper_err.move b/third_party/move/move-compiler-v2/tests/bytecode-generator/fv_enum_wrapper_err.move new file mode 100644 index 0000000000000..ddfb84f336966 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/bytecode-generator/fv_enum_wrapper_err.move @@ -0,0 +1,19 @@ +module
0x66::fv_enum_wrapper { + + enum Wrapper { + A(||u64 has copy), + B(||u64 has copy + drop), + } + + fun call(f: Wrapper): u64 + { + (f.0)() + } + + public fun test(): u64 + { + let a = Wrapper::A(|| 42); + call(a) + } + +} diff --git a/third_party/move/move-compiler-v2/tests/bytecode-verify-failure/equality.exp b/third_party/move/move-compiler-v2/tests/bytecode-verify-failure/equality.exp index 601110318e585..cdeb24e656b47 100644 --- a/third_party/move/move-compiler-v2/tests/bytecode-verify-failure/equality.exp +++ b/third_party/move/move-compiler-v2/tests/bytecode-verify-failure/equality.exp @@ -1,7 +1,9 @@ Diagnostics: -bug: file format generator: Inferred and Store AssignKind should be not appear here. +bug: compiler internal error: file format generator: Inferred and Store AssignKind should be not appear here. ┌─ tests/bytecode-verify-failure/equality.move:2:7 │ 2 │ fun equality<T>(x: T, y: T): bool { │ ^^^^^^^^ + │ + = please consider reporting this issue (see https://aptos.dev/en/build/smart-contracts/compiler_v2#reporting-an-issue) diff --git a/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/script_uses_itself.exp b/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/script_uses_itself.exp new file mode 100644 index 0000000000000..67a2a4ef1fe65 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/script_uses_itself.exp @@ -0,0 +1,15 @@ + +Diagnostics: +error: script function `main` cannot be used in Move code + ┌─ tests/checking-lang-v2.2/lambda/script_uses_itself.move:2:9 + │ +2 │ fun main() { + │ ^^^^ +3 │ let _f: || has drop = main; + │ ---- used here +4 │ let _g: || has drop = || main(); + │ ------ used here +5 │ (main)(); + │ ---- used here +6 │ main(); + │ ------ used here diff --git a/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/script_uses_itself.move b/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/script_uses_itself.move new file mode 100644 index 0000000000000..4b1ae54904bf7 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/script_uses_itself.move @@ -0,0 +1,8 @@ +script { + fun main() { + let _f: || has drop = main; + let _g: || has drop = || main(); + (main)(); + main(); + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/vector_assignability.exp b/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/vector_assignability.exp new file mode 100644 index 0000000000000..7aa9b6bd10217 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/vector_assignability.exp @@ -0,0 +1,31 @@ + +Diagnostics: +error: type `||u64 has drop` is missing required ability `copy` + ┌─ tests/checking-lang-v2.2/lambda/vector_assignability.move:12:55 + │ +12 │ let b: vector<||u64 has drop + copy> = vector[a]; + │ ^ + +error: expected `vector<||u64 has drop>` but found a value of type `vector<||u64 has copy + drop>` + ┌─ tests/checking-lang-v2.2/lambda/vector_assignability.move:33:9 + │ +33 │ v[0] = a; + │ ^ + +error: type `||u64 has drop` is missing required ability `copy` + ┌─ tests/checking-lang-v2.2/lambda/vector_assignability.move:55:28 + │ +55 │ replace(&mut v[0], a); + │ ^ + +error: expected `vector<||u64 has copy + drop>` but found a value of type `vector<||u64 has drop>` + ┌─ tests/checking-lang-v2.2/lambda/vector_assignability.move:66:9 + │ +66 │ v[0] = a; + │ ^ + +error: cannot pass `&mut ||u64 has drop` to a function which expects argument of type `&mut ||u64 has copy + drop`
+ ┌─ tests/checking-lang-v2.2/lambda/vector_assignability.move:80:22 + │ +80 │ swap(&mut a, &mut b); + │ ^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/vector_assignability.move b/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/vector_assignability.move new file mode 100644 index 0000000000000..424c6c2baf12b --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking-lang-v2.2/lambda/vector_assignability.move @@ -0,0 +1,82 @@ +module 0xc0ffee::m { + struct NoCopy has drop; + + public fun foo() { + let x = NoCopy; + + let a: ||u64 has drop = ||{ + let NoCopy = x; + 1 + }; + + let b: vector<||u64 has drop + copy> = vector[a]; + (b[0])(); + (b[0])(); + } +} + +module 0xc0ffee::n { + struct NoCopy has drop; + + public fun foo() { + let x = NoCopy; + + let a: ||u64 has drop = ||{ + let NoCopy = x; + 1 + }; + + let b: ||u64 has copy + drop = || 42; + + let v = vector[b]; + (v[0])(); + v[0] = a; + } +} + +module 0xc0ffee::o { + struct NoCopy has drop; + + fun replace<T>(ref: &mut T, new: T): T { + abort 0 + } + + public fun foo() { + let x = NoCopy; + + let a: ||u64 has drop = ||{ + let NoCopy = x; + 1 + }; + + let b: ||u64 has copy + drop = || 42; + + let v = vector[b]; + replace(&mut v[0], a); + } +} + +module 0xc0ffee::p { + public fun foo() { + let a: ||u64 has copy + drop = || 1; + + let b: ||u64 has drop = || 42; + + let v = vector[b]; + v[0] = a; + v[0](); + } +} + +module 0xc0ffee::q { + fun swap<T>(left: &mut T, right: &mut T) { + abort 0 + } + + public fun foo() { + let a: ||u64 has copy + drop = || 1; + let b: ||u64 has drop = || 42; + + swap(&mut a, &mut b); + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/constant_non_base_type.exp b/third_party/move/move-compiler-v2/tests/checking/typing/constant_non_base_type.exp index d1e07c3fbf0f8..2847d147f783f 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/constant_non_base_type.exp +++ b/third_party/move/move-compiler-v2/tests/checking/typing/constant_non_base_type.exp @@ -16,6 +16,12 @@ error: Invalid type for constant │ │ │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. +error: expected `&mut u64` but found a value of type `&u64` (mutability mismatch) + ┌─ tests/checking/typing/constant_non_base_type.move:4:26 + │ +4 │ const C2: &mut u64 = &0; + │ ^^ + error: Not a valid constant expression. ┌─ tests/checking/typing/constant_non_base_type.move:4:26 │ @@ -32,12 +38,6 @@ error: Invalid type for constant │ │ │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. -error: expected `&mut u64` but found a value of type `&u64` (mutability mismatch) - ┌─ tests/checking/typing/constant_non_base_type.move:4:26 - │ -4 │ const C2: &mut u64 = &0; - │ ^^ - error: Invalid type for constant ┌─ tests/checking/typing/constant_non_base_type.move:5:20 │ diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/constants/large_vectors.no-optimize.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/constants/large_vectors.no-optimize.exp index ab002de457e8b..bd428897d631c 100644 --- a/third_party/move/move-compiler-v2/transactional-tests/tests/constants/large_vectors.no-optimize.exp +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/constants/large_vectors.no-optimize.exp @@ -2,12 +2,14 @@ processed 3 tasks task 0 'publish'.
lines 1-15: Error: compilation errors: - bug: bytecode verification failed with unexpected status code `VALUE_STACK_OVERFLOW`. This is a compiler bug, consider reporting it. + bug: bytecode verification failed with unexpected status code `VALUE_STACK_OVERFLOW`: Error message: none ┌─ TEMPFILE:5:24 │ 5 │ let v = vector[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 
669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026]; │ ^ + │ + = please consider reporting this issue (see https://aptos.dev/en/build/smart-contracts/compiler_v2#reporting-an-issue) diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/misc/too_many_returns.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/misc/too_many_returns.exp new file mode 100644 index 0000000000000..b8d226a84aa22 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/misc/too_many_returns.exp @@ -0,0 +1,10 @@ +processed 1 task + +task 0 'publish'. lines 1-6: +Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000c0ffee::m'. 
Got VMError: { + major_status: TOO_MANY_PARAMETERS, + sub_status: None, + location: 0xc0ffee::m, + indices: [(FunctionHandle, 0)], + offsets: [], +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/misc/too_many_returns.move b/third_party/move/move-compiler-v2/transactional-tests/tests/misc/too_many_returns.move new file mode 100644 index 0000000000000..d9e9d880213b5 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/misc/too_many_returns.move @@ -0,0 +1,6 @@ +//# publish +module 0xc0ffee::m { + public fun test(): (u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64, u64) { + (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130) + } +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/misc/vector_depth_check.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/misc/vector_depth_check.exp new file mode 100644 index 0000000000000..f5e54cfe92526 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/misc/vector_depth_check.exp @@ -0,0 +1,13 @@ +processed 3 tasks + +task 1 'run'. lines 9-9: +return values: 0 + +task 2 'publish'. lines 11-17: +Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000c0ffee::n'. 
Got VMError: { + major_status: TOO_MANY_TYPE_NODES, + sub_status: None, + location: 0xc0ffee::n, + indices: [], + offsets: [], +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/misc/vector_depth_check.move b/third_party/move/move-compiler-v2/transactional-tests/tests/misc/vector_depth_check.move new file mode 100644 index 0000000000000..f6d36c77246c2 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/misc/vector_depth_check.move @@ -0,0 +1,17 @@ +//# publish +module 0xc0ffee::m { + public fun test(): u64 { + let v: vector>>>> = vector[]; + std::vector::length(&v) + } +} + +//# run 0xc0ffee::m::test + +//# publish +module 0xc0ffee::n { + public fun test(): u64 { + let v: vector>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> = vector[]; + std::vector::length(&v) + } +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-access-check/pack_private_function.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-access-check/pack_private_function.exp new file mode 100644 index 0000000000000..b32f70140950b --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-access-check/pack_private_function.exp @@ -0,0 +1,10 @@ +processed 2 tasks + +task 1 'publish'. lines 8-14: +Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000c0ffee::n'. Got VMError: { + major_status: LOOKUP_FAILED, + sub_status: None, + location: 0xc0ffee::n, + indices: [(FunctionHandle, 1)], + offsets: [], +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-access-check/pack_private_function.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-access-check/pack_private_function.move new file mode 100644 index 0000000000000..f6289e48b14f3 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-access-check/pack_private_function.move @@ -0,0 +1,14 @@ +//# publish +module 0xc0ffee::m { + struct S(u64); + + fun inaccessible() {} +} + +//# publish +module 0xc0ffee::n { + public fun test() { + let f = || 0xc0ffee::m::inaccessible(); + f(); + } +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-check/no_recursive_check.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-check/no_recursive_check.exp index d19e007de6f90..bdf9d66b02cee 100644 --- a/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-check/no_recursive_check.exp +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-check/no_recursive_check.exp @@ -2,7 +2,7 @@ processed 2 tasks task 0 'publish'. lines 1-27: Error: compilation errors: - bug: bytecode verification failed with unexpected status code `RECURSIVE_STRUCT_DEFINITION`. This is a compiler bug, consider reporting it. 
+ bug: bytecode verification failed with unexpected status code `RECURSIVE_STRUCT_DEFINITION`: Error message: none ┌─ TEMPFILE:2:1 │ @@ -14,6 +14,8 @@ Error message: none 22 │ │ } 23 │ │ } │ ╰─^ + │ + = please consider reporting this issue (see https://aptos.dev/en/build/smart-contracts/compiler_v2#reporting-an-issue) diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_a.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_a.exp new file mode 100644 index 0000000000000..89cbfba31ef57 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_a.exp @@ -0,0 +1,20 @@ +processed 1 task + +task 0 'publish'. lines 1-9: +Error: compilation errors: + bug: bytecode verification failed with unexpected status code `LOOP_IN_INSTANTIATION_GRAPH`: +Error message: edges with constructors: [f0#0 --StructInstantiation(StructHandleIndex(0), [TypeParameter(0)])--> f0#0], nodes: [f0#0] + ┌─ TEMPFILE:2:1 + │ +2 │ ╭ module 0x8675309::M { +3 │ │ struct S { f: T } +4 │ │ fun f() { +5 │ │ let ff = || f>(); +6 │ │ ff(); +7 │ │ } +8 │ │ } + │ ╰─^ + │ + = please consider reporting this issue (see https://aptos.dev/en/build/smart-contracts/compiler_v2#reporting-an-issue) + + diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_a.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_a.move new file mode 100644 index 0000000000000..80ffd3c5d2ab3 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_a.move @@ -0,0 +1,9 @@ +//# publish +module 0x8675309::M { + struct S { f: T } + + fun f() { + let ff = || f>(); + ff(); + } +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_b.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_b.exp new file mode 100644 index 0000000000000..240e7cb6d87ac --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_b.exp @@ -0,0 +1,20 @@ +processed 1 task + +task 0 'publish'. 
lines 1-15: +Error: compilation errors: + bug: bytecode verification failed with unexpected status code `LOOP_IN_INSTANTIATION_GRAPH`: +Error message: edges with constructors: [f1#0 --Function([Reference(TypeParameter(0))], [], )--> f0#0], nodes: [f1#0, f0#0] + ┌─ TEMPFILE:2:1 + │ + 2 │ ╭ module 0xc0ffee::m { + 3 │ │ public fun foo(): || { + 4 │ │ bar<|&T|> + 5 │ │ } + · │ +12 │ │ } +13 │ │ } + │ ╰─^ + │ + = please consider reporting this issue (see https://aptos.dev/en/build/smart-contracts/compiler_v2#reporting-an-issue) + + diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_b.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_b.move new file mode 100644 index 0000000000000..923ee795fcc9d --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-recursive-type-check/bug_16939_b.move @@ -0,0 +1,15 @@ +//# publish +module 0xc0ffee::m { + public fun foo(): || { + bar<|&T|> + } + + fun bar() { + (foo())(); + } + + fun test() { + let f = foo<||>(); + f(); + } +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16492.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16492.exp new file mode 100644 index 0000000000000..8020ce7cb9cad --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16492.exp @@ -0,0 +1,10 @@ +processed 1 task + +task 0 'publish'. lines 1-8: +Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000066::work'. Got VMError: { + major_status: TOO_MANY_PARAMETERS, + sub_status: None, + location: 0x66::work, + indices: [], + offsets: [], +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16492.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16492.move new file mode 100644 index 0000000000000..0817077e4e8a7 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16492.move @@ -0,0 +1,8 @@ +//# publish +module 0x66::work { + use 0x1::vector; + + public entry fun foo() { + let _: vector<|u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64,u64| has drop> = vector::empty(); + } +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16919.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16919.exp new file mode 100644 index 0000000000000..2581c43420ad8 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16919.exp @@ -0,0 +1,545 @@ +processed 1 task + +task 0 'publish'. lines 1-179: +Error: compilation errors: + warning: Unused value of parameter `var0`. 
Consider removing the parameter, or prefixing with an underscore (e.g., `_var0`), or binding to `_` + ┌─ TEMPFILE:30:27 + │ +30 │ public fun function1( var0: &mut Enum1, var1: &mut Enum1, var2: &mut Enum1, var3: &mut Enum1) { /* _block0 */ + │ ^^^^ + +warning: Unused value of parameter `var1`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var1`), or binding to `_` + ┌─ TEMPFILE:30:45 + │ +30 │ public fun function1( var0: &mut Enum1, var1: &mut Enum1, var2: &mut Enum1, var3: &mut Enum1) { /* _block0 */ + │ ^^^^ + +warning: Unused value of parameter `var2`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var2`), or binding to `_` + ┌─ TEMPFILE:30:63 + │ +30 │ public fun function1( var0: &mut Enum1, var1: &mut Enum1, var2: &mut Enum1, var3: &mut Enum1) { /* _block0 */ + │ ^^^^ + +warning: Unused value of parameter `var3`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var3`), or binding to `_` + ┌─ TEMPFILE:30:81 + │ +30 │ public fun function1( var0: &mut Enum1, var1: &mut Enum1, var2: &mut Enum1, var3: &mut Enum1) { /* _block0 */ + │ ^^^^ + +warning: Unused value of parameter `var15`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var15`), or binding to `_` + ┌─ TEMPFILE:32:72 + │ +32 │ public fun function2( var12: &Enum0, var13: Struct0, var14: Enum1, var15: Struct0, var16: | u32 | has copy+drop): u8 { /* _block1 */ + │ ^^^^^ + +warning: Unused value of parameter `var16`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var16`), or binding to `_` + ┌─ TEMPFILE:32:88 + │ +32 │ public fun function2( var12: &Enum0, var13: Struct0, var14: Enum1, var15: Struct0, var16: | u32 | has copy+drop): u8 { /* _block1 */ + │ ^^^^^ + +warning: Unused value of parameter `var17`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var17`), or binding to `_` + ┌─ TEMPFILE:33:43 + │ +33 │ *( &( Enum0::Variant1 { field5: | var17: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var18: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block2 */ + │ ^^^^^ + +warning: Unused value of parameter `var18`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var18`), or binding to `_` + ┌─ TEMPFILE:33:152 + │ +33 │ *( &( Enum0::Variant1 { field5: | var17: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var18: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block2 */ + │ ^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var19` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var19`), or renaming to `_` + ┌─ TEMPFILE:39:25 + │ +39 │ let var19 = *( &( false)); + │ ^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var24` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var24`), or renaming to `_` + ┌─ TEMPFILE:42:17 + │ +42 │ Enum1::Variant4 {field10: var24, ..} => { /* _block6 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var20` is unused. 
Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var20`), or renaming to `_` + ┌─ TEMPFILE:45:17 + │ +45 │ Enum1::Variant2 {field6: var20, field7: var21,} => { /* _block13 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var21` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var21`), or renaming to `_` + ┌─ TEMPFILE:45:17 + │ +45 │ Enum1::Variant2 {field6: var20, field7: var21,} => { /* _block13 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var38` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var38`), or renaming to `_` + ┌─ TEMPFILE:47:29 + │ +47 │ Enum1::Variant3 {field8: var38, field9: var39,} => | var42: bool, var43: Struct1, var44: Enum0 | { /* _block18 */ ( 18858u16 ^ 18170u16)}, + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var39` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var39`), or renaming to `_` + ┌─ TEMPFILE:47:29 + │ +47 │ Enum1::Variant3 {field8: var38, field9: var39,} => | var42: bool, var43: Struct1, var44: Enum0 | { /* _block18 */ ( 18858u16 ^ 18170u16)}, + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: Unused value of parameter `var42`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var42`), or binding to `_` + ┌─ TEMPFILE:47:82 + │ +47 │ Enum1::Variant3 {field8: var38, field9: var39,} => | var42: bool, var43: Struct1, var44: Enum0 | { /* _block18 */ ( 18858u16 ^ 18170u16)}, + │ ^^^^^ + +warning: Unused value of parameter `var43`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var43`), or binding to `_` + ┌─ TEMPFILE:47:95 + │ +47 │ Enum1::Variant3 {field8: var38, field9: var39,} => | var42: bool, var43: Struct1, var44: Enum0 | { /* _block18 */ ( 18858u16 ^ 18170u16)}, + │ ^^^^^ + +warning: Unused value of parameter `var44`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var44`), or binding to `_` + ┌─ TEMPFILE:47:111 + │ +47 │ Enum1::Variant3 {field8: var38, field9: var39,} => | var42: bool, var43: Struct1, var44: Enum0 | { /* _block18 */ ( 18858u16 ^ 18170u16)}, + │ ^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var35` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var35`), or renaming to `_` + ┌─ TEMPFILE:47:80 + │ +47 │ Enum1::Variant3 {field8: var38, field9: var39,} => | var42: bool, var43: Struct1, var44: Enum0 | { /* _block18 */ ( 18858u16 ^ 18170u16)}, + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var36` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var36`), or renaming to `_` + ┌─ TEMPFILE:48:29 + │ +48 │ Enum1::Variant2 {field6: var36, ..} => { /* _block19 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: Unused value of parameter `var48`. 
Consider removing the parameter, or prefixing with an underscore (e.g., `_var48`), or binding to `_` + ┌─ TEMPFILE:49:39 + │ +49 │ | var48: bool, var49: Struct1, var50: Enum0 | { /* _block20 */ 39966u16} + │ ^^^^^ + +warning: Unused value of parameter `var49`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var49`), or binding to `_` + ┌─ TEMPFILE:49:52 + │ +49 │ | var48: bool, var49: Struct1, var50: Enum0 | { /* _block20 */ 39966u16} + │ ^^^^^ + +warning: Unused value of parameter `var50`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var50`), or binding to `_` + ┌─ TEMPFILE:49:68 + │ +49 │ | var48: bool, var49: Struct1, var50: Enum0 | { /* _block20 */ 39966u16} + │ ^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var35` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var35`), or renaming to `_` + ┌─ TEMPFILE:49:37 + │ +49 │ | var48: bool, var49: Struct1, var50: Enum0 | { /* _block20 */ 39966u16} + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: Unused value of parameter `var59`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var59`), or binding to `_` + ┌─ TEMPFILE:52:39 + │ +52 │ | var59: bool, var60: Struct1, var61: Enum0 | { /* _block24 */ 6707u16} + │ ^^^^^ + +warning: Unused value of parameter `var60`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var60`), or binding to `_` + ┌─ TEMPFILE:52:52 + │ +52 │ | var59: bool, var60: Struct1, var61: Enum0 | { /* _block24 */ 6707u16} + │ ^^^^^ + +warning: Unused value of parameter `var61`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var61`), or binding to `_` + ┌─ TEMPFILE:52:68 + │ +52 │ | var59: bool, var60: Struct1, var61: Enum0 | { /* _block24 */ 6707u16} + │ ^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var35` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var35`), or renaming to `_` + ┌─ TEMPFILE:52:37 + │ +52 │ | var59: bool, var60: Struct1, var61: Enum0 | { /* _block24 */ 6707u16} + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var67` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var67`), or renaming to `_` + ┌─ TEMPFILE:56:29 + │ +56 │ Enum1::Variant4 {field11: var67, ..} => { /* _block29 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var22` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var22`), or renaming to `_` + ┌─ TEMPFILE:63:17 + │ +63 │ Enum1::Variant3 {field8: var22, field9: var23,} => match ( *( &( Enum1::Variant3 { field8: 69980731u32, field9: 65010u32}))) { + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var23` is unused. 
Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var23`), or renaming to `_` + ┌─ TEMPFILE:63:17 + │ +63 │ Enum1::Variant3 {field8: var22, field9: var23,} => match ( *( &( Enum1::Variant3 { field8: 69980731u32, field9: 65010u32}))) { + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var70` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var70`), or renaming to `_` + ┌─ TEMPFILE:64:25 + │ +64 │ Enum1::Variant3 {field8: var70, ..} => &mut ( var14), + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var69` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var69`), or renaming to `_` + ┌─ TEMPFILE:65:25 + │ +65 │ Enum1::Variant2 {field7: var69, ..} => &mut ( var14), + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: cannot transfer mutable value since it is borrowed + ┌─ TEMPFILE:67:42 + │ + 40 │ let () = ( function1) ( &mut ( var14), + │ ------------- previously mutably borrowed here + │ ╭──────────────────' + 41 │ │ match ( Enum1::Variant4 { field10: ( ( 3226759902u32 + 19457u32) | 653762092u32), field11: *( &( 1822826693u32))}) { + 42 │ │ Enum1::Variant4 {field10: var24, ..} => { /* _block6 */ + 43 │ │ &mut ( Enum1::Variant2 { field6: 710633453u32, field7: 1761791860u32}) + · │ + 67 │ │ let () = ( function1) ( &mut ( var14), &mut ( Enum1::Variant2 { field6: 2733676076u32, field7: 662338862u32}), &mut ( var14), &mut ( var14)); + │ │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ transfer attempted here + · │ +174 │ │ &mut ( var14) +175 │ │ ); + │ ╰─────────' conflicting reference used here + +error: cannot transfer mutable value since it is borrowed + ┌─ TEMPFILE:67:42 + │ +67 │ let () = ( function1) ( &mut ( var14), &mut ( Enum1::Variant2 { field6: 2733676076u32, field7: 662338862u32}), &mut ( var14), &mut ( var14)); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + │ │ │ + │ │ previously mutably borrowed here + │ transfer attempted here + +error: cannot transfer mutable value since it is borrowed + ┌─ TEMPFILE:67:42 + │ +67 │ let () = ( function1) ( &mut ( var14), &mut ( Enum1::Variant2 { field6: 2733676076u32, field7: 662338862u32}), &mut ( var14), &mut ( var14)); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + │ │ │ + │ │ previously mutably borrowed here + │ transfer attempted here + +warning: This assignment/binding to the left-hand-side variable `var77` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var77`), or renaming to `_` + ┌─ TEMPFILE:68:45 + │ +68 │ let var77 = Enum1::Variant2 { field6: ( 1027423549u32 - 15677u32), field7: ( 1027423549u32 - 15677u32)}; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var82` is unused. 
Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var82`), or renaming to `_` + ┌─ TEMPFILE:74:17 + │ +74 │ Enum1::Variant4 {field10: var82, field11: var83,} => match ( Enum1::Variant4 { field10: ( 2560137368u32 * 6296u32), field11: ( 2560137368u32 * 6296u32)}) { + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var83` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var83`), or renaming to `_` + ┌─ TEMPFILE:74:17 + │ +74 │ Enum1::Variant4 {field10: var82, field11: var83,} => match ( Enum1::Variant4 { field10: ( 2560137368u32 * 6296u32), field11: ( 2560137368u32 * 6296u32)}) { + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var88` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var88`), or renaming to `_` + ┌─ TEMPFILE:75:25 + │ +75 │ Enum1::Variant4 {field10: var88, field11: var89,} => { /* _block42 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var89` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var89`), or renaming to `_` + ┌─ TEMPFILE:75:25 + │ +75 │ Enum1::Variant4 {field10: var88, field11: var89,} => { /* _block42 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var84` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var84`), or renaming to `_` + ┌─ TEMPFILE:78:25 + │ +78 │ Enum1::Variant2 {field6: var84, field7: var85,} => &mut ( Enum1::Variant2 { field6: 537516388u32, field7: 3026101499u32}), + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var85` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var85`), or renaming to `_` + ┌─ TEMPFILE:78:25 + │ +78 │ Enum1::Variant2 {field6: var84, field7: var85,} => &mut ( Enum1::Variant2 { field6: 537516388u32, field7: 3026101499u32}), + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var86` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var86`), or renaming to `_` + ┌─ TEMPFILE:79:25 + │ +79 │ Enum1::Variant3 {field8: var86, field9: var87,} => &mut ( Enum1::Variant2 { field6: 791621423u32, field7: 791621423u32}), + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var87` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var87`), or renaming to `_` + ┌─ TEMPFILE:79:25 + │ +79 │ Enum1::Variant3 {field8: var86, field9: var87,} => &mut ( Enum1::Variant2 { field6: 791621423u32, field7: 791621423u32}), + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var78` is unused. 
Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var78`), or renaming to `_` + ┌─ TEMPFILE:81:17 + │ +81 │ Enum1::Variant2 {field6: var78, field7: var79,} => { /* _block44 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var79` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var79`), or renaming to `_` + ┌─ TEMPFILE:81:17 + │ +81 │ Enum1::Variant2 {field6: var78, field7: var79,} => { /* _block44 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var117` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var117`), or renaming to `_` + ┌─ TEMPFILE:82:29 + │ +82 │ let Struct0(var117, ..) = match ( Enum0::Variant1 { field5: | var121: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var122: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block47 */ + │ ^^^^^^^^^^^^^^^^^^^ + +warning: Unused value of parameter `var121`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var121`), or binding to `_` + ┌─ TEMPFILE:82:87 + │ +82 │ let Struct0(var117, ..) = match ( Enum0::Variant1 { field5: | var121: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var122: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block47 */ + │ ^^^^^^ + +warning: Unused value of parameter `var122`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var122`), or binding to `_` + ┌─ TEMPFILE:82:197 + │ +82 │ let Struct0(var117, ..) = match ( Enum0::Variant1 { field5: | var121: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var122: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block47 */ + │ ^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var141` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var141`), or renaming to `_` + ┌─ TEMPFILE:98:38 + │ + 98 │ let var141 = ( *( &( Struct1 {field2: 157u8, field3: Struct0 (true,true,),})) != + │ ╭──────────────────────────────────────^ + 99 │ │ Struct1 { +100 │ │ field2: ( 100u8 & 47u8), +101 │ │ field3: Struct0 ( + · │ +104 │ │ } +105 │ │ ); + │ ╰─────────────────────────^ + +warning: This assignment/binding to the left-hand-side variable `var143` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var143`), or renaming to `_` + ┌─ TEMPFILE:106:29 + │ +106 │ let Struct0(var143, ..) = match ( Enum0::Variant1 { field5: | var147: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var148: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block52 */ + │ ^^^^^^^^^^^^^^^^^^^ + +warning: Unused value of parameter `var147`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var147`), or binding to `_` + ┌─ TEMPFILE:106:87 + │ +106 │ let Struct0(var143, ..) 
= match ( Enum0::Variant1 { field5: | var147: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var148: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block52 */ + │ ^^^^^^ + +warning: Unused value of parameter `var148`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var148`), or binding to `_` + ┌─ TEMPFILE:106:197 + │ +106 │ let Struct0(var143, ..) = match ( Enum0::Variant1 { field5: | var147: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var148: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block52 */ + │ ^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var150` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var150`), or renaming to `_` + ┌─ TEMPFILE:112:41 + │ +112 │ let Struct0(var150, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var153` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var153`), or renaming to `_` + ┌─ TEMPFILE:115:41 + │ +115 │ let Struct0(var153, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var156` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var156`), or renaming to `_` + ┌─ TEMPFILE:118:41 + │ +118 │ let Struct0(var156, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var159` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var159`), or renaming to `_` + ┌─ TEMPFILE:126:41 + │ +126 │ let Struct0(var159, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var162` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var162`), or renaming to `_` + ┌─ TEMPFILE:129:41 + │ +129 │ let Struct0(var162, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var165` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var165`), or renaming to `_` + ┌─ TEMPFILE:132:41 + │ +132 │ let Struct0(var165, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: Unused value of parameter `var169`. Consider removing the parameter, or prefixing with an underscore (e.g., `_var169`), or binding to `_` + ┌─ TEMPFILE:140:61 + │ +140 │ match ( Enum0::Variant1 { field5: | var169: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var170: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block57 */ + │ ^^^^^^ + +warning: Unused value of parameter `var170`. 
Consider removing the parameter, or prefixing with an underscore (e.g., `_var170`), or binding to `_` + ┌─ TEMPFILE:140:171 + │ +140 │ match ( Enum0::Variant1 { field5: | var169: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var170: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block57 */ + │ ^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var172` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var172`), or renaming to `_` + ┌─ TEMPFILE:146:41 + │ +146 │ let Struct0(var172, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var175` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var175`), or renaming to `_` + ┌─ TEMPFILE:149:41 + │ +149 │ let Struct0(var175, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var178` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var178`), or renaming to `_` + ┌─ TEMPFILE:152:41 + │ +152 │ let Struct0(var178, ..) = Struct0 ( + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var80` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var80`), or renaming to `_` + ┌─ TEMPFILE:160:17 + │ +160 │ Enum1::Variant3 {field8: var80, field9: var81,} => { /* _block59 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var81` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var81`), or renaming to `_` + ┌─ TEMPFILE:160:17 + │ +160 │ Enum1::Variant3 {field8: var80, field9: var81,} => { /* _block59 */ + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var181` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var181`), or renaming to `_` + ┌─ TEMPFILE:161:29 + │ +161 │ let Struct0(var181, var182) = var13; + │ ^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var182` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var182`), or renaming to `_` + ┌─ TEMPFILE:161:29 + │ +161 │ let Struct0(var181, var182) = var13; + │ ^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var183` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var183`), or renaming to `_` + ┌─ TEMPFILE:162:42 + │ +162 │ let var183 = *( &( Enum0::Variant0 { field4: false})); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var184` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var184`), or renaming to `_` + ┌─ TEMPFILE:163:38 + │ +163 │ let var184 = 1867244186u32; + │ ^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var187` is unused. 
Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var187`), or renaming to `_` + ┌─ TEMPFILE:164:29 + │ +164 │ let Struct0(.., var187) = var13; + │ ^^^^^^^^^^^^^^^^^^^ + +warning: This assignment/binding to the left-hand-side variable `var190` is unused. Consider removing this assignment/binding, or prefixing the left-hand-side variable with an underscore (e.g., `_var190`), or renaming to `_` + ┌─ TEMPFILE:167:50 + │ +167 │ let var190 = Enum1::Variant2 { field6: ( 2084683531u32 >> 19u8), field7: 2881938354u32}; + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: cannot transfer mutable value since it is borrowed + ┌─ TEMPFILE:40:18 + │ + 40 │ let () = ( function1) ( &mut ( var14), + │ ------------- previously mutably borrowed here + │ ╭──────────────────^ + 41 │ │ match ( Enum1::Variant4 { field10: ( ( 3226759902u32 + 19457u32) | 653762092u32), field11: *( &( 1822826693u32))}) { + 42 │ │ Enum1::Variant4 {field10: var24, ..} => { /* _block6 */ + 43 │ │ &mut ( Enum1::Variant2 { field6: 710633453u32, field7: 1761791860u32}) + · │ +174 │ │ &mut ( var14) +175 │ │ ); + │ ╰─────────^ transfer attempted here + +error: cannot transfer mutable value since it is borrowed + ┌─ TEMPFILE:40:18 + │ + 40 │ let () = ( function1) ( &mut ( var14), + │ ------------- previously mutably borrowed here + │ ╭──────────────────^ + 41 │ │ match ( Enum1::Variant4 { field10: ( ( 3226759902u32 + 19457u32) | 653762092u32), field11: *( &( 1822826693u32))}) { + 42 │ │ Enum1::Variant4 {field10: var24, ..} => { /* _block6 */ + 43 │ │ &mut ( Enum1::Variant2 { field6: 710633453u32, field7: 1761791860u32}) + · │ + 64 │ │ Enum1::Variant3 {field8: var70, ..} => &mut ( var14), + │ │ ------------- previously mutably borrowed here + · │ +174 │ │ &mut ( var14) +175 │ │ ); + │ ╰─────────^ transfer attempted here + +error: cannot transfer mutable value since it is borrowed + ┌─ TEMPFILE:40:18 + │ + 40 │ let () = ( function1) ( &mut ( var14), + │ ------------- previously mutably borrowed here + │ ╭──────────────────^ + 41 │ │ match ( Enum1::Variant4 { field10: ( ( 3226759902u32 + 19457u32) | 653762092u32), field11: *( &( 1822826693u32))}) { + 42 │ │ Enum1::Variant4 {field10: var24, ..} => { /* _block6 */ + 43 │ │ &mut ( Enum1::Variant2 { field6: 710633453u32, field7: 1761791860u32}) + · │ + 64 │ │ Enum1::Variant3 {field8: var70, ..} => &mut ( var14), + │ │ ------------- previously mutably borrowed here + · │ + 76 │ │ &mut ( var14) + │ │ ------------- previously mutably borrowed here + · │ +174 │ │ &mut ( var14) +175 │ │ ); + │ ╰─────────^ transfer attempted here + +error: cannot transfer mutable value since it is borrowed + ┌─ TEMPFILE:40:18 + │ + 40 │ let () = ( function1) ( &mut ( var14), + │ ------------- previously mutably borrowed here + │ ╭──────────────────^ + 41 │ │ match ( Enum1::Variant4 { field10: ( ( 3226759902u32 + 19457u32) | 653762092u32), field11: *( &( 1822826693u32))}) { + 42 │ │ Enum1::Variant4 {field10: var24, ..} => { /* _block6 */ + 43 │ │ &mut ( Enum1::Variant2 { field6: 710633453u32, field7: 1761791860u32}) + · │ + 76 │ │ &mut ( var14) + │ │ ------------- previously mutably borrowed here + · │ +174 │ │ &mut ( var14) +175 │ │ ); + │ ╰─────────^ transfer attempted here + +error: cannot transfer mutable value since it is borrowed + ┌─ TEMPFILE:40:18 + │ + 40 │ let () = ( function1) ( &mut ( var14), + │ ╭──────────────────^ + 41 │ │ match ( Enum1::Variant4 { field10: ( ( 3226759902u32 + 19457u32) | 653762092u32), 
field11: *( &( 1822826693u32))}) { + 42 │ │ Enum1::Variant4 {field10: var24, ..} => { /* _block6 */ + 43 │ │ &mut ( Enum1::Variant2 { field6: 710633453u32, field7: 1761791860u32}) + · │ + 76 │ │ &mut ( var14) + │ │ ------------- previously mutably borrowed here + · │ +174 │ │ &mut ( var14) +175 │ │ ); + │ ╰─────────^ transfer attempted here + + diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16919.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16919.move new file mode 100644 index 0000000000000..b151c8c5cfa7b --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16919.move @@ -0,0 +1,179 @@ +//# publish +module 0x9A2D1C77326B441F3C4D46BA03C92B5DF09D75609AFDF1C491CD4529080A0385::Module0 { + struct Struct0(bool, bool) has copy, drop ; + struct Struct1 has copy, drop { + field2: u8, + field3: Struct0, + } + enum Enum0 has copy, drop { + Variant0 { + field4: bool, + }, + Variant1 { + field5: | (| (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop), (| (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop) | ((u16, bool) ) has copy+drop, + }, + } + enum Enum1 has copy, drop { + Variant2 { + field6: u32, + field7: u32, + }, + Variant3 { + field8: u32, + field9: u32, + }, + Variant4 { + field10: u32, + field11: u32, + }, + } + public fun function1( var0: &mut Enum1, var1: &mut Enum1, var2: &mut Enum1, var3: &mut Enum1) { /* _block0 */ + } + public fun function2( var12: &Enum0, var13: Struct0, var14: Enum1, var15: Struct0, var16: | u32 | has copy+drop): u8 { /* _block1 */ + *( &( Enum0::Variant1 { field5: | var17: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var18: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block2 */ + ( 41988u16, false,) + } + } + ) + ); + let var19 = *( &( false)); + let () = ( function1) ( &mut ( var14), + match ( Enum1::Variant4 { field10: ( ( 3226759902u32 + 19457u32) | 653762092u32), field11: *( &( 1822826693u32))}) { + Enum1::Variant4 {field10: var24, ..} => { /* _block6 */ + &mut ( Enum1::Variant2 { field6: 710633453u32, field7: 1761791860u32}) + }, + Enum1::Variant2 {field6: var20, field7: var21,} => { /* _block13 */ + let var35: | bool, Struct1, Enum0 | (u16 ) has copy+drop = match ( Enum1::Variant4 { field10: ( 1560922432u32 | 271770460u32), field11: 1499701105u32}) { + Enum1::Variant3 {field8: var38, field9: var39,} => | var42: bool, var43: Struct1, var44: Enum0 | { /* _block18 */ ( 18858u16 ^ 18170u16)}, + Enum1::Variant2 {field6: var36, ..} => { /* _block19 */ + | var48: bool, var49: Struct1, var50: Enum0 | { /* _block20 */ 39966u16} + }, + _ => { + | var59: bool, var60: Struct1, var61: Enum0 | { /* _block24 */ 6707u16} + } + }; + + match ( Enum1::Variant4 { field10: 1511514864u32, field11: ( 3357996168u32 << 10u8)}) { + Enum1::Variant4 {field11: var67, ..} => { /* _block29 */ + &mut ( Enum1::Variant2 { field6: 1159191745u32, field7: 2226907110u32}) + }, + _ => &mut ( var14) + }; + &mut ( var14) + }, + Enum1::Variant3 {field8: var22, field9: var23,} => match ( *( &( Enum1::Variant3 { field8: 69980731u32, field9: 65010u32}))) { + Enum1::Variant3 {field8: var70, ..} => &mut ( var14), + Enum1::Variant2 {field7: var69, ..} => &mut ( var14), + _ => { /* _block35 */ + let () = ( function1) ( &mut ( 
var14), &mut ( Enum1::Variant2 { field6: 2733676076u32, field7: 662338862u32}), &mut ( var14), &mut ( var14)); + let var77 = Enum1::Variant2 { field6: ( 1027423549u32 - 15677u32), field7: ( 1027423549u32 - 15677u32)}; + &mut ( Enum1::Variant2 { field6: 2560137368u32, field7: 2560137368u32}) + } + } + }, + match ( Enum1::Variant4 { field10: ( ( 2560137368u32 * 6296u32) * 6296u32), field11: ( ( 2560137368u32 * 6296u32) * 6296u32)}) { + Enum1::Variant4 {field10: var82, field11: var83,} => match ( Enum1::Variant4 { field10: ( 2560137368u32 * 6296u32), field11: ( 2560137368u32 * 6296u32)}) { + Enum1::Variant4 {field10: var88, field11: var89,} => { /* _block42 */ + &mut ( var14) + }, + Enum1::Variant2 {field6: var84, field7: var85,} => &mut ( Enum1::Variant2 { field6: 537516388u32, field7: 3026101499u32}), + Enum1::Variant3 {field8: var86, field9: var87,} => &mut ( Enum1::Variant2 { field6: 791621423u32, field7: 791621423u32}), + }, + Enum1::Variant2 {field6: var78, field7: var79,} => { /* _block44 */ + let Struct0(var117, ..) = match ( Enum0::Variant1 { field5: | var121: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var122: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block47 */ + ( 12079u16, true,) + } + } + ) { + Enum0::Variant1 {..} => { /* _block48 */ + Struct0 ( + true,true, + ) + }, + Enum0::Variant0 {..} => { /* _block49 */ + Struct0 ( + true,true, + ) + } + }; + let var141 = ( *( &( Struct1 {field2: 157u8, field3: Struct0 (true,true,),})) != + Struct1 { + field2: ( 100u8 & 47u8), + field3: Struct0 ( + true,true, + ), + } + ); + let Struct0(var143, ..) = match ( Enum0::Variant1 { field5: | var147: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var148: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block52 */ + ( 12079u16, true,) + } + } + ) { + Enum0::Variant1 {..} => { /* _block53 */ + let Struct0(var150, ..) = Struct0 ( + true,true, + ); + let Struct0(var153, ..) = Struct0 ( + true,true, + ); + let Struct0(var156, ..) = Struct0 ( + true,true, + ); + Struct0 ( + true,true, + ) + }, + Enum0::Variant0 {..} => { /* _block54 */ + let Struct0(var159, ..) = Struct0 ( + true,true, + ); + let Struct0(var162, ..) = Struct0 ( + true,true, + ); + let Struct0(var165, ..) = Struct0 ( + true,true, + ); + Struct0 ( + true,true, + ) + } + }; + match ( Enum0::Variant1 { field5: | var169: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop, var170: | (| | has copy+drop), (| | has copy+drop), (| | has copy+drop), (| | has copy+drop) | has copy+drop | { /* _block57 */ + ( 12079u16, true,) + } + } + ) { + Enum0::Variant1 {..} => { /* _block58 */ + let Struct0(var172, ..) = Struct0 ( + true,true, + ); + let Struct0(var175, ..) = Struct0 ( + true,true, + ); + let Struct0(var178, ..) 
= Struct0 ( + true,true, + ); + &mut ( var14) + }, + Enum0::Variant0 {..} => &mut ( var14) + } + }, + Enum1::Variant3 {field8: var80, field9: var81,} => { /* _block59 */ + let Struct0(var181, var182) = var13; + let var183 = *( &( Enum0::Variant0 { field4: false})); + let var184 = 1867244186u32; + let Struct0(.., var187) = var13; + match ( *( var12)) { + Enum0::Variant1 {..} => { /* _block62 */ + let var190 = Enum1::Variant2 { field6: ( 2084683531u32 >> 19u8), field7: 2881938354u32}; + &mut ( Enum1::Variant2 { field6: 582746179u32, field7: 2065999912u32}) + }, + Enum0::Variant0 {..} => &mut ( var14) + } + } + }, + &mut ( var14) + ); + 0 + } +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16954.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16954.exp new file mode 100644 index 0000000000000..5e4f6bdcd04ce --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16954.exp @@ -0,0 +1,34 @@ +processed 12 tasks + +task 1 'run'. lines 115-115: +return values: 44 + +task 2 'run'. lines 117-117: +return values: 1 + +task 3 'run'. lines 119-119: +return values: 1 + +task 4 'run'. lines 121-121: +return values: 43 + +task 5 'run'. lines 123-123: +return values: 10 + +task 6 'run'. lines 125-125: +return values: 17 + +task 7 'run'. lines 127-127: +return values: 9 + +task 8 'run'. lines 129-129: +return values: 6 + +task 9 'run'. lines 131-131: +return values: 4 + +task 10 'run'. lines 133-133: +return values: 5 + +task 11 'run'. lines 135-135: +return values: 4 diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16954.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16954.move new file mode 100644 index 0000000000000..33e281801dc2b --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/bug_16954.move @@ -0,0 +1,135 @@ +//# publish +module 0xc0ffee::m { + public fun test1(): u8 { + let x = 1; + let f = |y: u8| { + let x = x + y; // x: 8 + let f = |y: u8| { + let x = y + x; // x: 11 + x + }; + f(3) * { + let f = |y: u8| { + let x = x - y; // x: 4 + x + }; + f(4) + } + }; + f(7) + } + + public fun test2(): u64 { + let x = 1; + let f = || || x; + f()() + } + + public fun test3(): u64 { + let x = 1; + let f = |x| { + (|x| x - 1)(x + 1) + }; + f(x) + } + + public fun test4(x: u64): u64 { + let f = || || { + let x = x + 1; + x + }; + f()() + } + + public fun test5(): u8 { + let x: u8 = 2; + let f = |x: u8| { // shadows outer `x` + let g = || { x + 1 }; // captures the *parameter* `x` + g() + }; + f(7) + x + } + + public fun test6(): u64 { + let x: u64 = 5; + let f = |y: u64| { + let x = y + 1; + let g = || { + let y = x + 1; + y + }; + g() + }; + f(10) + x + } + + public fun test7(): u8 { + let x: u8 = 3; + let f = |x: u8| { // shadows outer `x` + let x = x + 2; // shadows parameter `x` + let g = |y: u8| x + y; // captures second‑shadow `x` + g(4) // (3 + 2) + 4 + }; + f(x) + } + + public fun test8(): u64 { + let x: u64 = 1; + let f = || { + let x = 2; // shadows outermost + let g = || { + let x = 3; // shadows again + x // 3 + }; + g() + x // 3 + 2 + }; + f() + x // 5 + 1 + } + + public fun test9(): u8 { + let x: u8 = 10; + let f = |x: u8| { + let h = |x: u8| x; // yet another shadow + h(x) // returns the parameter `x` + }; + f(4) + (x - 10) // (4) + 0 + } + + public fun test10(): u8 { + let x: u8 = 2; 
+ let f = |y: u8| y + x; // captures the first `x` + let x: u8 = 5; // new shadow of `x` + f(3) + (x - 5) // (3 + 2) + 0 + } + + fun call(f: ||u64): u64 { + f() + } + + public fun test11(): u64 { + let x = 1; + x + call(|| x + 2) + call(|| x - 1) + } +} + +//# run 0xc0ffee::m::test1 + +//# run 0xc0ffee::m::test2 + +//# run 0xc0ffee::m::test3 + +//# run 0xc0ffee::m::test4 --args 42 + +//# run 0xc0ffee::m::test5 + +//# run 0xc0ffee::m::test6 + +//# run 0xc0ffee::m::test7 + +//# run 0xc0ffee::m::test8 + +//# run 0xc0ffee::m::test9 + +//# run 0xc0ffee::m::test10 + +//# run 0xc0ffee::m::test11 diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/cross_module_unwrap.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/cross_module_unwrap.exp new file mode 100644 index 0000000000000..f4150d6cf7a34 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/cross_module_unwrap.exp @@ -0,0 +1,4 @@ +processed 3 tasks + +task 2 'run'. lines 25-25: +return values: 42 diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/cross_module_unwrap.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/cross_module_unwrap.move new file mode 100644 index 0000000000000..68333b9110577 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/cross_module_unwrap.move @@ -0,0 +1,25 @@ +//# publish +module 0xc0ffee::m { + struct Wrapper(u64); + + public fun apply(f: |Wrapper|u64): u64 { + let w = Wrapper(42); + f(w) + } + + public fun unwrap_maker(): |Wrapper|u64 { + |Wrapper(x)| x + } +} + +//# publish +module 0xc0ffee::n { + use 0xc0ffee::m::apply; + + public fun test(): u64 { + let unwrap = 0xc0ffee::m::unwrap_maker(); + apply(unwrap) + } +} + +//# run 0xc0ffee::n::test diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/funs_as_storage_key.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/funs_as_storage_key.exp index 4cc272d947b7d..9656b72fafa14 100644 --- a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/funs_as_storage_key.exp +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/funs_as_storage_key.exp @@ -1,7 +1,10 @@ -processed 4 tasks +processed 13 tasks -task 2 'run'. lines 72-72: +task 5 'run'. lines 162-162: return values: true -task 3 'run'. lines 74-74: +task 6 'run'. lines 164-164: return values: true + +task 12 'run'. 
lines 176-176: +return values: 6 diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/funs_as_storage_key.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/funs_as_storage_key.move index b44d4f20e4cd4..2cb93dc37e892 100644 --- a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/funs_as_storage_key.move +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/funs_as_storage_key.move @@ -69,6 +69,108 @@ module 0x42::mod3 { } } +//# publish +module 0x42::mod4 { + struct Wrapper has key { + fv: T + } + + #[persistent] + fun test(ref: &u64, _mut_ref: &mut u8): &u64 { + ref + } + + fun initialize(acc: &signer) { + move_to>(acc, Wrapper { fv: 0x42::mod4::test}); + } + + fun check_exists(_acc: &signer) { + let exists = exists>(@0x42); + assert!(exists, 404); + } +} + +//# publish +module 0x42::mod5 { + struct VecWrapper has key { + fvs: vector + } + + #[persistent] + fun test(ref: &u64, _mut_ref: &mut u8): &u64 { + ref + } + + fun initialize(acc: &signer) { + move_to>(acc, VecWrapper { fvs: vector[0x42::mod5::test]}); + } + + fun check_exists(_acc: &signer) { + let exists = exists>(@0x42); + assert!(exists, 404); + } +} + +//# publish +module 0x42::mod6 { + struct VecWrapper has key { + fvs: vector + } + + #[persistent] + fun test1(x: &mut u8) { + *x = *x + 1; + } + + #[persistent] + fun test2(x: &mut u8) { + *x = *x + 2; + } + + #[persistent] + fun test3(x: &mut u8) { + *x = *x + 3; + } + + fun initialize(acc: &signer) { + let fvs = vector[ + 0x42::mod6::test1, + 0x42::mod6::test2, + 0x42::mod6::test3, + ]; + move_to>(acc, VecWrapper { fvs }); + } + + fun compute(_acc: &signer): u8 { + let do_not_exist = !exists>(@0x42) + && !exists>(@0x42); + assert!(do_not_exist, 404); + + let wrapper = &borrow_global>(@0x42).fvs; + let x = 0; + + let i = 0; + while (i < 3) { + let f = std::vector::borrow(wrapper, i); + (*f)(&mut x); + i = i + 1; + }; + x + } +} + //# run 0x42::mod3::test_items --signers 0x42 --args true //# run 0x42::mod3::test_items --signers 0x42 --args false + +//# run 0x42::mod4::initialize --signers 0x42 + +//# run 0x42::mod4::check_exists --signers 0x42 + +//# run 0x42::mod5::initialize --signers 0x42 + +//# run 0x42::mod5::check_exists --signers 0x42 + +//# run 0x42::mod6::initialize --signers 0x42 + +//# run 0x42::mod6::compute --signers 0x42 diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/fv_enum.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/fv_enum.exp new file mode 100644 index 0000000000000..457ace9c4acb6 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/fv_enum.exp @@ -0,0 +1 @@ +processed 4 tasks diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/fv_enum.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/fv_enum.move new file mode 100644 index 0000000000000..c235583b528fc --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/fv_enum.move @@ -0,0 +1,84 @@ +//# publish +module 0x66::fv_enum_basic { + use std::signer; + enum Action has drop { + Noop, + Call(|u64|u64), + } + + fun square(x: u64): u64 { x * x } + + fun call_square(x: u64) { + let act = Action::Call(square); + let v = match (act) { + Action::Call(f) => f(x), + _ => 0 + }; 
+        assert!(v == 49);
+    }
+
+    enum Mapper<T, R> has key {
+        Id(|T|R has copy + store),
+        Twice(Version<T, R>),
+    }
+
+    #[persistent]
+    fun add_k_persistent(x: u64, k: u64): u64 { x + k }
+
+    enum Version<T, R> has copy, store {
+        V1 { v1: |T|R has copy + store },
+    }
+
+    fun test_enum_in_another_enum(s: &signer) {
+        let k = 3;
+        let add_k: |u64|u64 has copy + store = |x: u64| add_k_persistent(x, k);
+        let v1 = Version::V1 { v1: add_k };
+        move_to(s, Mapper::Twice(v1));
+        let m = borrow_global<Mapper<u64, u64>>(signer::address_of(s));
+        let v = match (m) {
+            Mapper::Twice(v1) => (v1.v1)((v1.v1)(10)),
+            Mapper::Id(f) => (*f)(10),
+        };
+        assert!(v == 16, 99);
+    }
+
+    #[persistent]
+    fun add_k_persistent_ref(x: &mut u64, k: u64): u64 { *x = *x + 1; *x + k }
+
+    enum FunVec<T> has key {
+        V1 { v1: vector<|&mut T|T has copy + store> },
+        V2 { v0: u64, v1: vector<|&mut T|T has copy + store> },
+    }
+
+    fun test_fun_vec(s: &signer) {
+        use std::vector;
+        let k = 3;
+        let add_k: |&mut u64|u64 has copy + store + drop = |x: &mut u64| add_k_persistent_ref(x, k);
+        let v1 = FunVec::V1 { v1: vector[add_k, add_k] };
+        move_to(s, v1);
+        let m = move_from<FunVec<u64>>(signer::address_of(s));
+        match (m) {
+            FunVec::V1 { v1 } => {
+                let add = vector::pop_back(&mut v1);
+                let v = 3;
+                let x = add(&mut v);
+                assert!(v == 4, 0);
+                assert!(x == 7, 1);
+                vector::push_back(&mut v1, add);
+                let m = FunVec::V2 { v0: 10, v1 };
+                move_to(s, m);
+            }
+            FunVec::V2 { v0: _, v1 } => {
+                vector::destroy_empty(v1);
+                assert!(false, 2);
+            }
+        };
+    }
+
+}
+
+//# run 0x66::fv_enum_basic::call_square --args 7
+
+//# run 0x66::fv_enum_basic::test_enum_in_another_enum --signers 0x66
+
+//# run 0x66::fv_enum_basic::test_fun_vec --signers 0x66
diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/misc_1.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/misc_1.exp
index 1bdb2c78fefb3..ef598c601836d 100644
--- a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/misc_1.exp
+++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/misc_1.exp
@@ -1,7 +1,22 @@
-processed 4 tasks
+processed 7 tasks
 
-task 1 'run'. lines 14-14:
-return values: false
+task 1 'run'. lines 31-31:
+Error: Function execution failed with VMError: {
+    major_status: ABORTED,
+    sub_status: Some(453),
+    location: 0x1::bcs,
+    indices: [],
+    offsets: [(FunctionDefinitionIndex(0), 0)],
+}
 
-task 3 'run'. lines 27-27:
+task 5 'run'. lines 48-48:
 return values: 42
+
+task 6 'publish'. lines 50-59:
+Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000c0ffee::o'. Got VMError: {
+    major_status: TOO_MANY_TYPE_NODES,
+    sub_status: None,
+    location: 0xc0ffee::o,
+    indices: [],
+    offsets: [],
+}
diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/misc_1.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/misc_1.move
index cc7985166b1d8..97e0b9219a8e3 100644
--- a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/misc_1.move
+++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/closures/misc_1.move
@@ -2,22 +2,43 @@
 module 0xc0ffee::m {
     use std::bcs;
 
-    public fun test(): bool {
+    public fun test1(): bool {
         let x = 1;
         let y = 2;
         let f: || u64 has drop = || {x + y + 1};
-        let g: || u64 has drop = || {x + y + 1};
-        bcs::to_bytes(&f) == bcs::to_bytes(&g)
+        // Serialization fails!
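+        // (The closure captures `x` and `y`; per the updated misc_1.exp above, this
+        // serialization attempt aborts in 0x1::bcs with sub_status 453.)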
+ let _ = bcs::to_bytes(&f); + true + } + + public fun foo() {} + + #[persistent] + fun bar() {} + + public fun test2() { + let f: || has drop = foo; + let b: || has drop = bar; + assert!(bcs::to_bytes(&f) != bcs::to_bytes(&b), 1); + } + + public fun test3() { + let f: || has drop = foo; + assert!(bcs::to_bytes(&f) == bcs::to_bytes(&f), 2); } } -//# run 0xc0ffee::m::test +//# run 0xc0ffee::m::test1 + +//# run 0xc0ffee::m::test2 + +//# run 0xc0ffee::m::test3 //# publish module 0xc0ffee::n { public fun test(x: u64): u64 { - let f = || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || |x| x; - let f1 = |x| f()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()(x); + let f = || || || || || || || || || || |x| x; + let f1 = |x| f()()()()()()()()()()(x); let f2 = |f| f(x); let f3 = |f1, f2| f1(f2); f3(f2, f1) @@ -25,3 +46,14 @@ module 0xc0ffee::n { } //# run 0xc0ffee::n::test --args 42 + +//# publish +module 0xc0ffee::o { + public fun test(x: u64): u64 { + let f = || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || || |x| x; + let f1 = |x| f()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()(x); + let f2 = |f| f(x); + let f3 = |f1, f2| f1(f2); + f3(f2, f1) + } +} diff --git 
a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.baseline.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.baseline.exp new file mode 100644 index 0000000000000..73d90c2d1d0ed --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.baseline.exp @@ -0,0 +1,78 @@ +processed 23 tasks + +task 6 'run'. lines 102-102: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(4), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 8 'run'. lines 107-107: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(5), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 10 'run'. lines 112-115: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(6), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 13 'run'. lines 230-230: +Error: Function execution failed with VMError: { + message: Failed to move resource into 0000000000000000000000000000000000000000000000000000000000000001, + major_status: RESOURCE_ALREADY_EXISTS, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(1), 5)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 18 'run'. lines 241-241: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(5), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 20 'run'. lines 246-246: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(6), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 22 'run'. 
lines 251-251: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(9), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.exp new file mode 100644 index 0000000000000..73d90c2d1d0ed --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.exp @@ -0,0 +1,78 @@ +processed 23 tasks + +task 6 'run'. lines 102-102: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(4), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 8 'run'. lines 107-107: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(5), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 10 'run'. lines 112-115: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(6), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 13 'run'. lines 230-230: +Error: Function execution failed with VMError: { + message: Failed to move resource into 0000000000000000000000000000000000000000000000000000000000000001, + major_status: RESOURCE_ALREADY_EXISTS, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(1), 5)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 18 'run'. lines 241-241: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(5), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 20 'run'. lines 246-246: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(6), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 22 'run'. 
lines 251-251: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(9), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.move new file mode 100644 index 0000000000000..e35e75f6e5768 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.move @@ -0,0 +1,251 @@ +/// Test cases for function values wrapped in structs as keys +//# publish +module 0x99::test_struct { + use std::signer; + + struct Wrapper has key, drop { + fv: |T|u64 has copy+store+drop + } + + #[persistent] + fun test(f: ||u64 has store+drop): u64 { + if (f() == 1) + 1 + else + 2 + } + + #[persistent] + fun test1(): u64 { + 1 + } + + #[persistent] + fun test2(): u64 { + 2 + } + + // store a function value of type `|T|u64` with `T = ||u64 has store+drop` + public fun init(acc: &signer){ + let f: | ||u64 has store+drop |u64 has copy+store+drop = |x| test(x); + move_to(acc, Wrapper { fv: f}); + } + + // check existence of a function value of type `|T|u64` with `T = ||u64 has store+drop` + // should return true + public fun test_exist(acc: &signer){ + let exist_res = exists>(signer::address_of(acc)); + assert!(exist_res); + } + + // check existence of a function value of type `|T|u64` with `T = u64` + // should return false due to incompatible T type + public fun test_not_exist_1(acc: &signer){ + let exist_res = exists>(signer::address_of(acc)); + assert!(!exist_res); + } + + // check existence of a function value of type `|T|u64` with `T = ||u64 has store` + // should return false due to T missing `drop` + public fun test_not_exist_2(acc: &signer){ + let exist_res = exists>(signer::address_of(acc)); + assert!(!exist_res); + } + + // check existence of a function value of type `|T|u64` with `T = ||u64 has store` + // should return false due to T additionally having `copy` + public fun test_not_exist_3(acc: &signer){ + let exist_res = exists>(signer::address_of(acc)); + assert!(!exist_res); + } + + // borrow function value of type `|T|u64` with `T = ||u64 has store+copy+drop` + // should fail due to T additionally having `copy` + public fun test_bad_borrow_from(acc: &signer){ + let f = borrow_global>(signer::address_of(acc)); + assert!((f.fv)(test1) == 1); + assert!((f.fv)(test2) == 2); + } + + // borrow function value of type `|T|u64` with `T = ||u64 has store+drop` + // should succeed + public fun test_borrow_from(acc: &signer){ + let f = borrow_global>(signer::address_of(acc)); + assert!((f.fv)(test1) == 1); + assert!((f.fv)(test2) == 2); + } + + // move function value of type `|T|u64` with `T = ||u64 has store+copy+drop` + // should fail due to T additionally having `copy` + public fun test_bad_move_from(acc: &signer){ + move_from>(signer::address_of(acc)); + } + + // move function value of type `|T|u64` with `T = ||u64 has store+drop` + // should succeed + public fun test_move_from(acc: &signer){ + move_from>(signer::address_of(acc)); + } +} + +//# run --verbose --signers 0x1 -- 0x99::test_struct::init + +//# run --verbose --signers 0x1 -- 0x99::test_struct::test_exist + +//# run --verbose --signers 0x1 -- 
0x99::test_struct::test_not_exist_1
+
+//# run --verbose --signers 0x1 -- 0x99::test_struct::test_not_exist_2
+
+//# run --verbose --signers 0x1 -- 0x99::test_struct::test_not_exist_3
+
+//expect to fail
+//# run --verbose --signers 0x1 -- 0x99::test_struct::test_bad_borrow_from
+
+//# run --verbose --signers 0x1 -- 0x99::test_struct::test_borrow_from
+
+//expect to fail
+//# run --verbose --signers 0x1 -- 0x99::test_struct::test_bad_move_from
+
+//# run --verbose --signers 0x1 -- 0x99::test_struct::test_move_from
+
+// expected to fail as the function value has been removed above
+//# run --verbose --signers 0x1 -- 0x99::test_struct::test_borrow_from
+
+
+/// Test cases for function values wrapped in enums as keys
+//# publish
+module 0x99::test_enum {
+    use std::signer;
+
+    enum Wrapper has key {
+        V1 {fv1: |T|u64 has copy + store},
+        V2 {fv1: |T|u64 has copy + drop + store}
+    }
+
+
+    #[persistent]
+    fun test(f: ||u64 has store+drop): u64 {
+        if (f() == 1)
+            1
+        else
+            2
+    }
+
+    #[persistent]
+    fun test1(): u64 {
+        1
+    }
+
+    #[persistent]
+    fun test2(): u64 {
+        2
+    }
+
+    // store a function value of type `|T|u64` with `T = ||u64 has store+drop`
+    public fun init(acc: &signer){
+        let f1: | ||u64 has store+drop |u64 has copy+store = |x| test(x);
+        let v1 = Wrapper::V1{ fv1: f1 };
+        move_to(acc, v1);
+    }
+
+    // the store should fail because a resource of this type already exists
+    public fun bad_init(acc: &signer){
+        let f1: | ||u64 has store+drop |u64 has copy+drop+store = |x| test(x);
+        let v2 = Wrapper::V2{ fv1: f1 };
+        move_to(acc, v2);
+    }
+
+    // check existence of a function value of type `|T|u64` with `T = ||u64 has store+drop`
+    // should return true
+    public fun test_exist(acc: &signer){
+        let exist_res = exists>(signer::address_of(acc));
+        assert!(exist_res);
+    }
+
+    // check existence of a function value of type `|T|u64` with `T = u64`
+    // should return false due to incompatible T type
+    public fun test_not_exist_1(acc: &signer){
+        let exist_res = exists>(signer::address_of(acc));
+        assert!(!exist_res);
+    }
+
+    // check existence of a function value of type `|T|u64` with `T = ||u64 has store`
+    // should return false due to T missing `drop`
+    public fun test_not_exist_2(acc: &signer){
+        let exist_res = exists>(signer::address_of(acc));
+        assert!(!exist_res);
+    }
+
+    // check existence of a function value of type `|T|u64` with `T = ||u64 has store`
+    // should return false due to T additionally having `copy`
+    public fun test_not_exist_3(acc: &signer){
+        let exist_res = exists>(signer::address_of(acc));
+        assert!(!exist_res);
+    }
+
+    // borrow function value of type `|T|u64` with `T = ||u64 has store+copy+drop`
+    // should fail due to T additionally having `copy`
+    public fun test_bad_borrow_from(acc: &signer){
+        borrow_global>(signer::address_of(acc));
+    }
+
+    // borrow function value of type `|T|u64` with `T = ||u64 has store+drop`
+    // should succeed
+    public fun test_borrow_from(acc: &signer){
+        let f = borrow_global>(signer::address_of(acc));
+        let res = match (f) {
+            V1{fv1} => (*fv1)(test1),
+            V2{fv1} => (*fv1)(test2)
+        };
+        assert!(res == 1, 0);
+    }
+
+    // move function value of type `|T|u64` with `T = ||u64 has store+copy+drop`
+    // should fail due to T additionally having `copy`
+    public fun test_bad_move_from(acc: &signer){
+        let f = move_from>(signer::address_of(acc));
+        let res = match (f) {
+            V1{fv1} => fv1(test1),
+            V2{fv1} => fv1(test2)
+        };
+        assert!(res == 1, 0);
+    }
+
+    // move function value of type `|T|u64` with `T = ||u64 has store+drop`
+    // should succeed
+    public fun test_move_from(acc: &signer){
+        let f = move_from>(signer::address_of(acc));
+        let res = match (f) {
+            V1{fv1} => fv1(test1),
+            V2{fv1} => fv1(test2)
+        };
+        assert!(res == 1, 0);
+    }
+
+}
+
+//# run --verbose --signers 0x1 -- 0x99::test_enum::init
+
+//expect to fail
+//# run --verbose --signers 0x1 -- 0x99::test_enum::bad_init
+
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_exist
+
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_not_exist_1
+
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_not_exist_2
+
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_not_exist_3
+
+//expect to fail
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_bad_borrow_from
+
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_borrow_from
+
+//expect to fail
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_bad_move_from
+
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_move_from
+
+// expected to fail as the function value has been removed above
+//# run --verbose --signers 0x1 -- 0x99::test_enum::test_move_from
diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.no-optimize.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.no-optimize.exp
new file mode 100644
index 0000000000000..0e9098739abdb
--- /dev/null
+++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.no-optimize.exp
@@ -0,0 +1,78 @@
+processed 23 tasks
+
+task 6 'run'. lines 102-102:
+Error: Function execution failed with VMError: {
+    message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001,
+    major_status: MISSING_DATA,
+    sub_status: None,
+    location: 0x99::test_struct,
+    indices: [],
+    offsets: [(FunctionDefinitionIndex(4), 2)],
+    exec_state: Some(ExecutionState { stack_trace: [] }),
+}
+
+task 8 'run'. lines 107-107:
+Error: Function execution failed with VMError: {
+    message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001,
+    major_status: MISSING_DATA,
+    sub_status: None,
+    location: 0x99::test_struct,
+    indices: [],
+    offsets: [(FunctionDefinitionIndex(5), 2)],
+    exec_state: Some(ExecutionState { stack_trace: [] }),
+}
+
+task 10 'run'. lines 112-115:
+Error: Function execution failed with VMError: {
+    message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001,
+    major_status: MISSING_DATA,
+    sub_status: None,
+    location: 0x99::test_struct,
+    indices: [],
+    offsets: [(FunctionDefinitionIndex(6), 2)],
+    exec_state: Some(ExecutionState { stack_trace: [] }),
+}
+
+task 13 'run'. lines 230-230:
+Error: Function execution failed with VMError: {
+    message: Failed to move resource into 0000000000000000000000000000000000000000000000000000000000000001,
+    major_status: RESOURCE_ALREADY_EXISTS,
+    sub_status: None,
+    location: 0x99::test_enum,
+    indices: [],
+    offsets: [(FunctionDefinitionIndex(1), 7)],
+    exec_state: Some(ExecutionState { stack_trace: [] }),
+}
+
+task 18 'run'. lines 241-241:
+Error: Function execution failed with VMError: {
+    message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001,
+    major_status: MISSING_DATA,
+    sub_status: None,
+    location: 0x99::test_enum,
+    indices: [],
+    offsets: [(FunctionDefinitionIndex(5), 2)],
+    exec_state: Some(ExecutionState { stack_trace: [] }),
+}
+
+task 20 'run'.
lines 246-246: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(6), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 22 'run'. lines 251-251: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(9), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.optimize.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.optimize.exp new file mode 100644 index 0000000000000..73d90c2d1d0ed --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/fv_as_keys.optimize.exp @@ -0,0 +1,78 @@ +processed 23 tasks + +task 6 'run'. lines 102-102: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(4), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 8 'run'. lines 107-107: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(5), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 10 'run'. lines 112-115: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_struct, + indices: [], + offsets: [(FunctionDefinitionIndex(6), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 13 'run'. lines 230-230: +Error: Function execution failed with VMError: { + message: Failed to move resource into 0000000000000000000000000000000000000000000000000000000000000001, + major_status: RESOURCE_ALREADY_EXISTS, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(1), 5)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 18 'run'. lines 241-241: +Error: Function execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(5), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 20 'run'. 
lines 246-246: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(6), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} + +task 22 'run'. lines 251-251: +Error: Function execution failed with VMError: { + message: Failed to move resource from 0000000000000000000000000000000000000000000000000000000000000001, + major_status: MISSING_DATA, + sub_status: None, + location: 0x99::test_enum, + indices: [], + offsets: [(FunctionDefinitionIndex(9), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/tests.rs b/third_party/move/move-compiler-v2/transactional-tests/tests/tests.rs index 7916c1501dbcf..d0d05c65d1aac 100644 --- a/third_party/move/move-compiler-v2/transactional-tests/tests/tests.rs +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/tests.rs @@ -29,6 +29,14 @@ struct TestConfig { exclude: &'static [&'static str], } +/// Set of exclusions that apply when using `include: &[]` in TestConfig. +const COMMON_EXCLUSIONS: &[&str] = &[ + "/operator_eval/", + "/no-recursive-check/", + "/no-access-check/", + "/no-recursive-type-check/", +]; + /// Note that any config which has different output for a test directory /// *must* be added to the `SEPARATE_BASELINE` array below, so that a /// special output file `test.foo.exp` will be generated for the output @@ -40,8 +48,8 @@ const TEST_CONFIGS: &[TestConfig] = &[ runner: |p| run(p, get_config_by_name("baseline")), experiments: &[], language_version: LanguageVersion::latest(), - include: &[], - exclude: &["/operator_eval/", "/no-recursive-check/"], + include: &[], // all tests except those excluded below + exclude: COMMON_EXCLUSIONS, }, // Test optimize/no-optimize/etc., except for `/access_control/` TestConfig { @@ -53,7 +61,7 @@ const TEST_CONFIGS: &[TestConfig] = &[ ], language_version: LanguageVersion::latest(), include: &[], // all tests except those excluded below - exclude: &["/operator_eval/", "/no-recursive-check/"], + exclude: COMMON_EXCLUSIONS, }, TestConfig { name: "no-optimize", @@ -61,7 +69,7 @@ const TEST_CONFIGS: &[TestConfig] = &[ experiments: &[(Experiment::OPTIMIZE, false)], language_version: LanguageVersion::latest(), include: &[], // all tests except those excluded below - exclude: &["/operator_eval/", "/no-recursive-check/"], + exclude: COMMON_EXCLUSIONS, }, // Test `/operator_eval/` with language version 1 and 2 TestConfig { @@ -88,6 +96,22 @@ const TEST_CONFIGS: &[TestConfig] = &[ include: &["/no-recursive-check/"], exclude: &[], }, + TestConfig { + name: "no-access-check", + runner: |p| run(p, get_config_by_name("no-access-check")), + experiments: &[(Experiment::ACCESS_CHECK, false)], + language_version: LanguageVersion::latest(), + include: &["/no-access-check/"], + exclude: &[], + }, + TestConfig { + name: "no-recursive-type-check", + runner: |p| run(p, get_config_by_name("no-recursive-type-check")), + experiments: &[(Experiment::RECURSIVE_TYPE_CHECK, false)], + language_version: LanguageVersion::latest(), + include: &["/no-recursive-type-check/"], + exclude: &[], + }, ]; /// Test files which must use separate baselines because their result @@ -121,6 +145,8 @@ const SEPARATE_BASELINE: &[&str] = &[ "optimization/bug_14223_unused_non_droppable.move", // Flaky redundant unused assignment 
error "no-v1-comparison/enum/enum_scoping.move", + // Different error messages depending on optimizations or not + "no-v1-comparison/fv_as_keys.move", ]; fn get_config_by_name(name: &str) -> TestConfig { diff --git a/third_party/move/move-core/types/src/effects.rs b/third_party/move/move-core/types/src/effects.rs index b3eeb98dc9cb1..8a843b10e42bd 100644 --- a/third_party/move/move-core/types/src/effects.rs +++ b/third_party/move/move-core/types/src/effects.rs @@ -138,7 +138,10 @@ impl AccountChanges { use btree_map::Entry::*; match self.resources.entry(struct_tag) { - Occupied(entry) => bail!("Resource {} already exists", entry.key()), + Occupied(entry) => bail!( + "Resource {} already exists", + entry.key().to_canonical_string() + ), Vacant(entry) => { entry.insert(op); }, diff --git a/third_party/move/move-core/types/src/function.rs b/third_party/move/move-core/types/src/function.rs index 80c4e1c9536ef..2cbd8c096e06d 100644 --- a/third_party/move/move-core/types/src/function.rs +++ b/third_party/move/move-core/types/src/function.rs @@ -382,38 +382,3 @@ mod serialization_tests { ); } } - -//=========================================================================================== - -impl fmt::Display for MoveClosure { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let MoveClosure { - module_id, - fun_id, - ty_args, - mask, - captured, - } = self; - let captured_str = mask - .format_arguments(captured.iter().map(|v| v.1.to_string()).collect()) - .join(", "); - let inst_str = if ty_args.is_empty() { - "".to_string() - } else { - format!( - "<{}>", - ty_args - .iter() - .map(|t| t.to_string()) - .collect::>() - .join(",") - ) - }; - write!( - f, - // this will print `a::m::f(a1,_,a2,_)` - "{}::{}{}({})", - module_id, fun_id, inst_str, captured_str - ) - } -} diff --git a/third_party/move/move-core/types/src/language_storage.rs b/third_party/move/move-core/types/src/language_storage.rs index 64e076ce0a86d..497bb398c2807 100644 --- a/third_party/move/move-core/types/src/language_storage.rs +++ b/third_party/move/move-core/types/src/language_storage.rs @@ -6,6 +6,7 @@ use crate::{ ability::AbilitySet, account_address::AccountAddress, identifier::{IdentStr, Identifier}, + language_storage::FunctionParamOrReturnTag::{MutableReference, Reference, Value}, parser::{parse_module_id, parse_struct_tag, parse_type_tag}, safe_serialize, }; @@ -26,6 +27,7 @@ pub const RESOURCE_TAG: u8 = 1; pub const CORE_CODE_ADDRESS: AccountAddress = AccountAddress::ONE; pub const TOKEN_ADDRESS: AccountAddress = AccountAddress::THREE; pub const TOKEN_OBJECTS_ADDRESS: AccountAddress = AccountAddress::FOUR; +pub const EXPERIMENTAL_CODE_ADDRESS: AccountAddress = AccountAddress::SEVEN; #[derive(Serialize, Deserialize, Debug, PartialEq, Hash, Eq, Clone, PartialOrd, Ord)] #[cfg_attr( @@ -82,16 +84,9 @@ pub enum TypeTag { } impl TypeTag { - /// Return a canonical string representation of the type. All types are represented - /// using their source syntax: - /// "u8", "u64", "u128", "bool", "address", "vector", "signer" for ground types. - /// Struct types are represented as fully qualified type names; e.g. 
- /// `00000000000000000000000000000001::string::String` or - /// `0000000000000000000000000000000a::module_name1::type_name1<0000000000000000000000000000000a::module_name2::type_name2>` - /// Addresses are hex-encoded lowercase values of length ADDRESS_LENGTH (16, 20, or 32 depending on the Move platform) - /// Note: this function is guaranteed to be stable, and this is suitable for use inside - /// Move native functions or the VM. By contrast, the `Display` implementation is subject - /// to change and should not be used inside stable code. + /// Returns a canonical string representation of the type tag. + /// + /// INVARIANT: If two type tags are different, they must have different canonical strings. pub fn to_canonical_string(&self) -> String { use TypeTag::*; @@ -107,25 +102,7 @@ impl TypeTag { Signer => "signer".to_owned(), Vector(t) => format!("vector<{}>", t.to_canonical_string()), Struct(s) => s.to_canonical_string(), - Function(f) => { - let fmt_list = |l: &[TypeTag]| -> String { - l.iter() - .map(|t| t.to_canonical_string()) - .collect::>() - .join(",") - }; - let FunctionTag { - args, - results, - abilities, - } = f.as_ref(); - format!( - "|{}|{}{}", - fmt_list(args), - fmt_list(results), - abilities.display_postfix() - ) - }, + Function(f) => f.to_canonical_string(), } } @@ -161,8 +138,13 @@ impl<'a> Iterator for TypeTagPreorderTraversalIter<'a> { Struct(struct_tag) => self.stack.extend(struct_tag.type_args.iter().rev()), Function(fun_tag) => { let FunctionTag { args, results, .. } = fun_tag.as_ref(); - self.stack - .extend(results.iter().rev().chain(args.iter().rev())) + self.stack.extend( + results + .iter() + .map(|t| t.inner_tag()) + .rev() + .chain(args.iter().map(|t| t.inner_tag()).rev()), + ) }, } Some(ty) @@ -231,27 +213,33 @@ impl StructTag { ModuleId::new(self.address, self.module.to_owned()) } - /// Return a canonical string representation of the struct. - /// Struct types are represented as fully qualified type names; e.g. - /// `00000000000000000000000000000001::string::String` or - /// `0000000000000000000000000000000a::module_name1::type_name1<0000000000000000000000000000000a::module_name2::type_name2>` - /// Addresses are hex-encoded lowercase values of length ADDRESS_LENGTH (16, 20, or 32 depending on the Move platform) - /// Note: this function is guaranteed to be stable, and this is suitable for use inside - /// Move native functions or the VM. By contrast, the `Display` implementation is subject - /// to change and should not be used inside stable code. + /// Returns a canonical string representation of the struct tag. + /// + /// Struct tags are represented as fully qualified type names; e.g., `0x1::string::String` or + /// `0x234::foo::Bar<0x123::bar::Foo>`. Addresses are hex-encoded lowercase values with + /// leading zeroes trimmed and prefixed with `0x`. + /// + /// INVARIANT: If two struct tags are different, they must have different canonical strings. 
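+    // For example, `AccountAddress::ONE` renders as `0x1`, so the canonical form is
+    // `0x1::string::String` rather than the full 64-hex-digit address form.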
 pub fn to_canonical_string(&self) -> String {
-        let mut generics = String::new();
-        if let Some(first_ty) = self.type_args.first() {
-            generics.push('<');
-            generics.push_str(&first_ty.to_canonical_string());
-            for ty in self.type_args.iter().skip(1) {
-                generics.push_str(&ty.to_canonical_string())
-            }
-            generics.push('>');
-        }
+        let generics = if self.type_args.is_empty() {
+            "".to_string()
+        } else {
+            format!(
+                "<{}>",
+                self.type_args
+                    .iter()
+                    .map(|t| t.to_canonical_string())
+                    .collect::<Vec<_>>()
+                    .join(", ")
+            )
+        };
         format!(
-            "{}::{}::{}{}",
-            self.address.to_canonical_string(),
+            // Note:
+            // For historical reasons, we convert addresses as strings using 0x... and trimming
+            // leading zeroes. This cannot be changed easily because 0x1::any::Any relies on that
+            // and may store bytes of these strings on-chain.
+            "0x{}::{}::{}{}",
+            self.address.short_str_lossless(),
             self.module,
             self.name,
             generics
@@ -275,32 +263,72 @@
 #[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))]
 #[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))]
 pub struct FunctionTag {
-    pub args: Vec<TypeTag>,
-    pub results: Vec<TypeTag>,
+    pub args: Vec<FunctionParamOrReturnTag>,
+    pub results: Vec<FunctionParamOrReturnTag>,
     pub abilities: AbilitySet,
 }
 
-/// Represents the initial key into global storage where we first index by the address, and then
-/// the struct tag
-#[derive(Serialize, Deserialize, Debug, PartialEq, Hash, Eq, Clone, PartialOrd, Ord)]
-pub struct ResourceKey {
-    pub address: AccountAddress,
-    pub type_: StructTag,
+impl FunctionTag {
+    /// Returns a canonical string representation of the function tag.
+    ///
+    /// INVARIANT: If two function tags are different, they must have different canonical strings.
+    pub fn to_canonical_string(&self) -> String {
+        let fmt_list = |l: &[FunctionParamOrReturnTag]| -> String {
+            l.iter()
+                .map(|t| t.to_canonical_string())
+                .collect::<Vec<_>>()
+                .join(", ")
+        };
+        // Note that we put returns in parentheses. This ensures that when functions are used as
+        // type arguments, there is no ambiguity in the presence of multiple returns, e.g.,
+        //
+        //   0x1::a::A<||||>
+        //
+        // is ambiguous: is it a function that has zero arguments and returns a function ||, or is
+        // it a function that takes a || argument and returns nothing? In order to disambiguate, we
+        // always add parentheses for returns.
+        format!(
+            "|{}|({}){}",
+            fmt_list(&self.args),
+            fmt_list(&self.results),
+            self.abilities.display_postfix()
+        )
+    }
 }
 
-impl ResourceKey {
-    pub fn address(&self) -> AccountAddress {
-        self.address
-    }
+/// Represents an argument or return tag for [FunctionTag]. This is needed because function tags
+/// carry information about argument and return types, which can be references. So a direct
+/// argument or return tag can be a reference, but the inner tags cannot.
+#[derive(Serialize, Deserialize, Debug, PartialEq, Hash, Eq, Clone, PartialOrd, Ord)]
+#[cfg_attr(
+    any(test, feature = "fuzzing"),
+    derive(arbitrary::Arbitrary, dearbitrary::Dearbitrary)
+)]
+#[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))]
+#[cfg_attr(any(test, feature = "fuzzing"), proptest(no_params))]
+pub enum FunctionParamOrReturnTag {
+    Reference(TypeTag),
+    MutableReference(TypeTag),
+    Value(TypeTag),
+}
 
-    pub fn type_(&self) -> &StructTag {
-        &self.type_
+impl FunctionParamOrReturnTag {
+    /// Returns a canonical string representation of a function tag's argument or return tag. If
+    /// any two tags are different, their canonical representations must also be different.
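+    // For instance, without parentheses around returns, the distinct tags
+    //   `args: [], results: [||()]` and `args: [||()], results: []`
+    // would both render as `||||`; with parentheses they render as `||(||())`
+    // and `|||()|()` respectively.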
+    pub fn to_canonical_string(&self) -> String {
+        use FunctionParamOrReturnTag::*;
+        match self {
+            Reference(tag) => format!("&{}", tag.to_canonical_string()),
+            MutableReference(tag) => format!("&mut {}", tag.to_canonical_string()),
+            Value(tag) => tag.to_canonical_string(),
+        }
     }
-}
 
-impl ResourceKey {
-    pub fn new(address: AccountAddress, type_: StructTag) -> Self {
-        ResourceKey { address, type_ }
+    /// Returns the inner tag for this argument or return tag.
+    pub fn inner_tag(&self) -> &TypeTag {
+        match self {
+            Reference(tag) | MutableReference(tag) | Value(tag) => tag,
+        }
     }
 }
 
@@ -397,58 +425,6 @@ impl ModuleId {
     }
 }
 
-impl Display for StructTag {
-    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
-        write!(
-            f,
-            "0x{}::{}::{}",
-            self.address.short_str_lossless(),
-            self.module,
-            self.name
-        )?;
-        if let Some(first_ty) = self.type_args.first() {
-            write!(f, "<")?;
-            write!(f, "{}", first_ty)?;
-            for ty in self.type_args.iter().skip(1) {
-                write!(f, ", {}", ty)?;
-            }
-            write!(f, ">")?;
-        }
-        Ok(())
-    }
-}
-
-impl Display for TypeTag {
-    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
-        match self {
-            TypeTag::Struct(s) => write!(f, "{}", s),
-            TypeTag::Function(_) => write!(f, "{}", self.to_canonical_string()),
-            TypeTag::Vector(ty) => write!(f, "vector<{}>", ty),
-            TypeTag::U8 => write!(f, "u8"),
-            TypeTag::U16 => write!(f, "u16"),
-            TypeTag::U32 => write!(f, "u32"),
-            TypeTag::U64 => write!(f, "u64"),
-            TypeTag::U128 => write!(f, "u128"),
-            TypeTag::U256 => write!(f, "u256"),
-            TypeTag::Address => write!(f, "address"),
-            TypeTag::Signer => write!(f, "signer"),
-            TypeTag::Bool => write!(f, "bool"),
-        }
-    }
-}
-
-impl Display for FunctionTag {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        TypeTag::Function(Box::new(self.clone())).fmt(f)
-    }
-}
-
-impl Display for ResourceKey {
-    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
-        write!(f, "0x{}/{}", self.address.short_str_lossless(), self.type_)
-    }
-}
-
 impl From<StructTag> for TypeTag {
     fn from(t: StructTag) -> TypeTag {
         TypeTag::Struct(Box::new(t))
@@ -457,22 +433,140 @@
 #[cfg(test)]
 mod tests {
-    use super::TypeTag;
+    use super::*;
     use crate::{
+        ability::{Ability, AbilitySet},
         account_address::AccountAddress,
         identifier::Identifier,
         language_storage::{ModuleId, StructTag},
         safe_serialize::MAX_TYPE_TAG_NESTING,
     };
     use hashbrown::Equivalent;
-    use proptest::prelude::*;
+    use proptest::{collection::vec, prelude::*};
     use std::{
-        collections::hash_map::DefaultHasher,
+        collections::{hash_map::DefaultHasher, HashMap},
         hash::{Hash, Hasher},
         mem,
        str::FromStr,
    };
 
+    fn make_struct_tag(
+        address: AccountAddress,
+        module_name: &str,
+        name: &str,
+        type_args: Vec<TypeTag>,
+    ) -> TypeTag {
+        TypeTag::Struct(Box::new(StructTag {
+            address,
+            module: Identifier::new(module_name).unwrap(),
+            name: Identifier::new(name).unwrap(),
+            type_args,
+        }))
+    }
+
+    fn make_function_tag(
+        args: Vec<FunctionParamOrReturnTag>,
+        results: Vec<FunctionParamOrReturnTag>,
+        abilities: AbilitySet,
+    ) -> TypeTag {
+        TypeTag::Function(Box::new(FunctionTag {
+            args,
+            results,
+            abilities,
+        }))
+    }
+
+    #[test]
+    fn test_to_canonical_string() {
+        use FunctionParamOrReturnTag::*;
+        use TypeTag::*;
+
+        let data = [
+            (U8, "u8"),
+            (U16, "u16"),
+            (U32, "u32"),
+            (U64, "u64"),
+            (U128, "u128"),
+            (U256, "u256"),
+            (Bool, "bool"),
+            (Address, "address"),
+            (Signer, "signer"),
+            (Vector(Box::new(Vector(Box::new(U8)))), "vector<vector<u8>>"),
+            (
+                make_struct_tag(AccountAddress::ONE, "a", "A", vec![]),
+                "0x1::a::A",
+            ),
+            (
+                make_struct_tag(AccountAddress::ONE, "a", "A", vec![
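+                    // Nested type arguments: `B` is instantiated with `<bool, vector<u8>>`
+                    // and `C` with `<u8>`; cf. the expected canonical string below.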
                    make_struct_tag(AccountAddress::from_str("0x123").unwrap(), "b", "B", vec![
+                        Bool,
+                        Vector(Box::new(U8)),
+                    ]),
+                    make_struct_tag(AccountAddress::from_str("0xFF").unwrap(), "c", "C", vec![
+                        U8,
+                    ]),
+                ]),
+                "0x1::a::A<0x123::b::B<bool, vector<u8>>, 0xff::c::C<u8>>",
+            ),
+            (make_function_tag(vec![], vec![], AbilitySet::EMPTY), "||()"),
+            (
+                make_function_tag(
+                    vec![],
+                    vec![MutableReference(U8), Value(U64)],
+                    AbilitySet::EMPTY,
+                ),
+                "||(&mut u8, u64)",
+            ),
+            (
+                make_function_tag(vec![Reference(U8), Value(U64)], vec![], AbilitySet::EMPTY),
+                "|&u8, u64|()",
+            ),
+            (
+                make_struct_tag(AccountAddress::ONE, "a", "A", vec![make_function_tag(
+                    vec![Value(make_function_tag(
+                        vec![Value(make_function_tag(
+                            vec![],
+                            vec![],
+                            AbilitySet::singleton(Ability::Copy),
+                        ))],
+                        vec![],
+                        AbilitySet::EMPTY,
+                    ))],
+                    vec![FunctionParamOrReturnTag::Value(make_function_tag(
+                        vec![],
+                        vec![],
+                        AbilitySet::ALL,
+                    ))],
+                    AbilitySet::EMPTY,
+                )]),
+                "0x1::a::A<||||() has copy|()|(||() has copy + drop + store + key)>",
+            ),
+        ];
+
+        for (tag, string) in data {
+            assert_eq!(string, tag.to_canonical_string().as_str());
+        }
+    }
+
+    proptest! {
+        #[test]
+        fn test_to_canonical_string_is_unique(tags in vec(any::<TypeTag>(), 1..100)) {
+            let mut seen = HashMap::new();
+            for tag in &tags {
+                let s = tag.to_canonical_string();
+                if let Some(other_tag) = seen.insert(s.clone(), tag) {
+                    prop_assert!(
+                        other_tag == tag,
+                        "Collision for tags {:?} and {:?}: {}",
+                        other_tag,
+                        tag,
+                        s,
+                    );
+                }
+            }
+        }
+    }
+
     #[test]
     fn test_tag_iter() {
         let tag = TypeTag::from_str("vector<0x1::a::A>>>")
diff --git a/third_party/move/move-core/types/src/parser.rs b/third_party/move/move-core/types/src/parser.rs
index c2cb68c9a9e3e..6785f8b8b742e 100644
--- a/third_party/move/move-core/types/src/parser.rs
+++ b/third_party/move/move-core/types/src/parser.rs
@@ -639,7 +639,7 @@ mod tests {
         for text in valid {
             let st = parse_struct_tag(text).expect("valid StructTag");
             assert_eq!(
-                st.to_string().replace(' ', ""),
+                st.to_canonical_string().replace(' ', ""),
                 text.replace(' ', ""),
                 "text: {:?}, StructTag: {:?}",
                 text,
diff --git a/third_party/move/move-core/types/src/value.rs b/third_party/move/move-core/types/src/value.rs
index 5265fd39b32d5..d7e18c968c3ca 100644
--- a/third_party/move/move-core/types/src/value.rs
+++ b/third_party/move/move-core/types/src/value.rs
@@ -270,28 +270,6 @@ pub enum MoveTypeLayout {
     Function,
 }
 
-impl MoveTypeLayout {
-    /// Determines whether the layout is serialization compatible with the other layout
-    /// (that is, any value serialized with this layout can be deserialized by the other).
-    pub fn is_compatible_with(&self, other: &Self) -> bool {
-        use MoveTypeLayout::*;
-        match (self, other) {
-            (Vector(t1), Vector(t2)) => t1.is_compatible_with(t2),
-            (Struct(s1), Struct(s2)) => s1.is_compatible_with(s2),
-            // For all other cases, equality is used
-            (t1, t2) => t1 == t2,
-        }
-    }
-
-    pub fn is_compatible_with_slice(this: &[Self], other: &[Self]) -> bool {
-        this.len() == other.len()
-            && this
-                .iter()
-                .zip(other)
-                .all(|(t1, t2)| t1.is_compatible_with(t2))
-    }
-}
-
 impl MoveValue {
     pub fn simple_deserialize(blob: &[u8], ty: &MoveTypeLayout) -> AResult<Self> {
         Ok(bcs::from_bytes_seed(ty, blob)?)
@@ -510,31 +488,6 @@ impl MoveStructLayout {
         Self::WithVariants(variants)
     }
 
-    /// Determines whether the layout is serialization compatible with the other layout
-    /// (that is, any value serialized with this layout can be deserialized by the other).
- /// This only will consider runtime variants, decorated variants are only compatible - /// if equal. - pub fn is_compatible_with(&self, other: &Self) -> bool { - use MoveStructLayout::*; - match (self, other) { - (RuntimeVariants(variants1), RuntimeVariants(variants2)) => { - variants1.len() <= variants2.len() - && variants1.iter().zip(variants2).all(|(fields1, fields2)| { - MoveTypeLayout::is_compatible_with_slice(fields1, fields2) - }) - }, - (Runtime(fields1), Runtime(fields2)) => { - fields1.len() == fields2.len() - && fields1 - .iter() - .zip(fields2) - .all(|(t1, t2)| t1.is_compatible_with(t2)) - }, - // All other cases require equality - (s1, s2) => s1 == s2, - } - } - pub fn fields(&self, variant: Option) -> &[MoveTypeLayout] { match self { Self::Runtime(vals) => vals, @@ -903,9 +856,9 @@ impl serde::Serialize for MoveStruct { // Unfortunately, we can't serialize this in the logical way: as a Serde struct named `type` with a field for // each of `fields` because serde insists that struct and field names be `'static &str`'s let mut t = serializer.serialize_struct(MOVE_STRUCT_NAME, 2)?; - // serialize type as string (e.g., 0x0::ModuleName::StructName) instead of (e.g. + // serialize type as string (e.g., 0x0::ModuleName::StructName) instead of (e.g. // { address: 0x0...0, module: ModuleName, name: StructName, type_args: [TypeArg1, TypeArg2]}) - t.serialize_field(MOVE_STRUCT_TYPE, &type_.to_string())?; + t.serialize_field(MOVE_STRUCT_TYPE, &type_.to_canonical_string())?; t.serialize_field(MOVE_STRUCT_FIELDS, &MoveFields(fields))?; t.end() }, @@ -973,7 +926,7 @@ impl fmt::Display for MoveStructLayout { } }, Self::WithTypes { type_, fields } => { - write!(f, "Type: {}", type_)?; + write!(f, "Type: {}", type_.to_canonical_string())?; write!(f, "Fields:")?; for field in fields { write!(f, "{}, ", field)? 
@@ -1036,75 +989,3 @@ impl TryInto for &MoveStructLayout { } } } - -impl fmt::Display for MoveValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - MoveValue::U8(u) => write!(f, "{}u8", u), - MoveValue::U16(u) => write!(f, "{}u16", u), - MoveValue::U32(u) => write!(f, "{}u32", u), - MoveValue::U64(u) => write!(f, "{}u64", u), - MoveValue::U128(u) => write!(f, "{}u128", u), - MoveValue::U256(u) => write!(f, "{}u256", u), - MoveValue::Bool(false) => write!(f, "false"), - MoveValue::Bool(true) => write!(f, "true"), - MoveValue::Address(a) => write!(f, "{}", a.to_hex_literal()), - MoveValue::Signer(a) => write!(f, "signer({})", a.to_hex_literal()), - MoveValue::Vector(v) => fmt_list(f, "vector[", v, "]"), - MoveValue::Struct(s) => fmt::Display::fmt(s, f), - MoveValue::Closure(c) => fmt::Display::fmt(c, f), - } - } -} - -impl fmt::Display for MoveStruct { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - MoveStruct::Runtime(v) => fmt_list(f, "struct[", v, "]"), - MoveStruct::RuntimeVariant(tag, v) => fmt_list(f, &format!("variant#{}[", tag), v, "]"), - MoveStruct::WithFields(fields) => { - fmt_list(f, "{", fields.iter().map(DisplayFieldBinding), "}") - }, - MoveStruct::WithTypes { - _type_: type_, - _fields: fields, - } => { - fmt::Display::fmt(type_, f)?; - fmt_list(f, " {", fields.iter().map(DisplayFieldBinding), "}") - }, - MoveStruct::WithVariantFields(name, _tag, fields) => fmt_list( - f, - &format!("{}{{", name), - fields.iter().map(DisplayFieldBinding), - "}", - ), - } - } -} - -struct DisplayFieldBinding<'a>(&'a (Identifier, MoveValue)); - -impl fmt::Display for DisplayFieldBinding<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let DisplayFieldBinding((field, value)) = self; - write!(f, "{}: {}", field, value) - } -} - -fn fmt_list( - f: &mut fmt::Formatter<'_>, - begin: &str, - items: impl IntoIterator, - end: &str, -) -> fmt::Result { - write!(f, "{}", begin)?; - let mut items = items.into_iter(); - if let Some(x) = items.next() { - write!(f, "{}", x)?; - for x in items { - write!(f, ", {}", x)?; - } - } - write!(f, "{}", end)?; - Ok(()) -} diff --git a/third_party/move/move-core/types/src/vm_status.rs b/third_party/move/move-core/types/src/vm_status.rs index 1a3dda4e2b6a7..785cf1a7f353e 100644 --- a/third_party/move/move-core/types/src/vm_status.rs +++ b/third_party/move/move-core/types/src/vm_status.rs @@ -210,7 +210,9 @@ impl VMStatus { StatusCode::EXECUTION_LIMIT_REACHED | StatusCode::IO_LIMIT_REACHED | StatusCode::STORAGE_LIMIT_REACHED - | StatusCode::TOO_MANY_DELAYED_FIELDS, + | StatusCode::TOO_MANY_DELAYED_FIELDS + | StatusCode::UNABLE_TO_CAPTURE_DELAYED_FIELDS + | StatusCode::VM_MAX_VALUE_DEPTH_REACHED, .. } | VMStatus::Error { @@ -218,7 +220,9 @@ impl VMStatus { StatusCode::EXECUTION_LIMIT_REACHED | StatusCode::IO_LIMIT_REACHED | StatusCode::STORAGE_LIMIT_REACHED - | StatusCode::TOO_MANY_DELAYED_FIELDS, + | StatusCode::TOO_MANY_DELAYED_FIELDS + | StatusCode::UNABLE_TO_CAPTURE_DELAYED_FIELDS + | StatusCode::VM_MAX_VALUE_DEPTH_REACHED, .. } => Ok(KeptVMStatus::MiscellaneousError), @@ -884,12 +888,17 @@ pub enum StatusCode { STRUCT_VARIANT_MISMATCH = 4038, // An unimplemented functionality in the VM. UNIMPLEMENTED_FUNCTIONALITY = 4039, + // Modules are cyclic (module A uses module B which uses module A). Detected at runtime in case + // module loading is performed lazily. + RUNTIME_CYCLIC_MODULE_DEPENDENCY = 4040, + // Returned when a function value is trying to capture a delayed field. 
This is not allowed + // because layouts for values with delayed fields are not serializable. + UNABLE_TO_CAPTURE_DELAYED_FIELDS = 4041, // Reserved error code for future use. Always keep this buffer of well-defined new codes. - RESERVED_RUNTIME_ERROR_1 = 4040, - RESERVED_RUNTIME_ERROR_2 = 4041, - RESERVED_RUNTIME_ERROR_3 = 4042, - RESERVED_RUNTIME_ERROR_4 = 4043, + RESERVED_RUNTIME_ERROR_1 = 4042, + RESERVED_RUNTIME_ERROR_2 = 4043, + RESERVED_RUNTIME_ERROR_3 = 4044, // A reserved status to represent an unknown vm status. // this is u64::MAX, but we can't pattern match on that, so put the hardcoded value in diff --git a/third_party/move/move-model/bytecode/src/astifier.rs b/third_party/move/move-model/bytecode/src/astifier.rs index 4293e836c9a1e..c2306f95bcd36 100644 --- a/third_party/move/move-model/bytecode/src/astifier.rs +++ b/third_party/move/move-model/bytecode/src/astifier.rs @@ -525,11 +525,18 @@ impl Generator { // any blocks after that loop. This is a requirement for the algorithm to work. for header in &ctx.loop_headers { for after_loop_label in &ctx.after_loop_labels[header] { + let dest_block = ctx.block_of_label(*after_loop_label); + let edge_filter = |_: BlockId, _: BlockId| true; + let reachable_from_dest = ctx.forward_cfg.reachable_blocks(dest_block, edge_filter); for loop_block_label in &ctx.loop_labels[header] { - top_sort.add_dependency( - ctx.block_of_label(*loop_block_label), - ctx.block_of_label(*after_loop_label), - ) + // Only when the new virtual edge does not introduce a cycle, we add it! + let source_block = ctx.block_of_label(*loop_block_label); + if !reachable_from_dest.contains(&source_block) { + top_sort.add_dependency( + ctx.block_of_label(*loop_block_label), + ctx.block_of_label(*after_loop_label), + ) + } } } } diff --git a/third_party/move/move-model/src/builder/builtins.rs b/third_party/move/move-model/src/builder/builtins.rs index e189c9a3db327..12d038b5867e3 100644 --- a/third_party/move/move-model/src/builder/builtins.rs +++ b/third_party/move/move-model/src/builder/builtins.rs @@ -7,6 +7,7 @@ use crate::{ ast::{Operation, TraceKind, Value}, builder::model_builder::{ConstEntry, EntryVisibility, ModelBuilder, SpecOrBuiltinFunEntry}, + metadata::LanguageVersion, model::{Parameter, TypeParameter, TypeParameterKind}, ty::{Constraint, PrimitiveType, ReferenceKind, Type}, }; @@ -165,6 +166,37 @@ pub(crate) fn declare_builtins(trans: &mut ModelBuilder) { visibility, ); } + }; + + // Declare the specification arithm ops, based on Num type. + declare_arithm_ops( + trans, + &[], + &BTreeMap::new(), + Type::new_prim(PrimitiveType::Num), + Spec, // visible only in the spec language + ); + // For the implementation arithm ops, we use a generic function with a constraint, + // conceptually: `fun _+_(x: A, y: A): A where A: u8|u16|..|u256`. + declare_arithm_ops( + trans, + &[param_t_decl.clone()], + &[( + 0, + Constraint::SomeNumber(PrimitiveType::all_int_types().into_iter().collect()), + )] + .into_iter() + .collect(), + param_t.clone(), + Impl, // visible only in the impl language + ); + + // Builtin function for comparison operations + let declare_cmp_ops = |trans: &mut ModelBuilder, + type_params: &[TypeParameter], + type_constraints: &BTreeMap, + ty: Type, + visibility: EntryVisibility| { for (op, oper) in [ (Lt, Operation::Lt), (Le, Operation::Le), @@ -185,28 +217,49 @@ pub(crate) fn declare_builtins(trans: &mut ModelBuilder) { } }; - // Declare the specification arithm ops, based on Num type. - declare_arithm_ops( + // Declare the specification cmp ops. 
+ // Only Num type is supported for cmp ops in spec. + declare_cmp_ops( trans, &[], &BTreeMap::new(), Type::new_prim(PrimitiveType::Num), Spec, // visible only in the spec language ); - // For the implementation arithm ops, we use a generic function with a constraint, - // conceptually: `fun _+_(x: A, y: A): A where A: u8|u16|..|u256`. - declare_arithm_ops( - trans, - &[param_t_decl.clone()], - &[( - 0, - Constraint::SomeNumber(PrimitiveType::all_int_types().into_iter().collect()), - )] - .into_iter() - .collect(), - param_t.clone(), - Impl, // visible only in the impl language - ); + // Declare the implementation cmp ops + if trans + .env + .language_version() + .is_at_least(LanguageVersion::V2_2) + { + // For LanguageVersion::V2_2 and later, we support comparison on all non-reference types. + // - integer types supported by the VM natively + // - other types supported by the `compare` native function + // - implicitly through compiler rewrite at the AST level + declare_cmp_ops( + trans, + &[param_t_decl.clone()], + &[(0, Constraint::NoReference)].into_iter().collect(), + param_t.clone(), + Impl, // visible only in the impl language + ); + } else { + // For LanguageVersion::V2_1 and earlier, we support only integer types. + // We use a generic function with a constraint, conceptually: + // `fun _cmp_(x: A, y: A): bool where A: u8|u16|..|u256`. + declare_cmp_ops( + trans, + &[param_t_decl.clone()], + &[( + 0, + Constraint::SomeNumber(PrimitiveType::all_int_types().into_iter().collect()), + )] + .into_iter() + .collect(), + param_t.clone(), + Impl, // visible only in the impl language + ); + } declare_bin(trans, Range, Operation::Range, num_t, num_t, range_t, Spec); diff --git a/third_party/move/move-model/src/lib.rs b/third_party/move/move-model/src/lib.rs index 5e75c98db19b5..b74745af0bb1d 100644 --- a/third_party/move/move-model/src/lib.rs +++ b/third_party/move/move-model/src/lib.rs @@ -269,7 +269,10 @@ pub fn run_model_builder_with_options_and_compilation_flags< let mut visited_modules = BTreeSet::new(); // Extract the module dependency closure for the vector module let mut vector_and_its_dependencies = BTreeSet::new(); + // Extract the module dependency closure for the std::cmp module + let mut cmp_and_its_dependencies = BTreeSet::new(); let mut seen_vector = false; + let mut seen_cmp = false; for (_, mident, mdef) in &expansion_ast.modules { let src_file_hash = mdef.loc.file_hash(); if !dep_files.contains(&src_file_hash) { @@ -284,6 +287,15 @@ pub fn run_model_builder_with_options_and_compilation_flags< &mut vector_and_its_dependencies, ); } + if !seen_cmp && is_cmp(*mident) { + seen_cmp = true; + // Collect the cmp module and its dependencies. + collect_related_modules_recursive( + mident, + &expansion_ast.modules, + &mut cmp_and_its_dependencies, + ); + } } for sdef in expansion_ast.scripts.values() { let src_file_hash = sdef.loc.file_hash(); @@ -307,6 +319,9 @@ pub fn run_model_builder_with_options_and_compilation_flags< // E.g., index operation on a vector results in a call to `vector::borrow`. // TODO(#15483): consider refactoring code to avoid this special case. (vector_and_its_dependencies.contains(&mident.value) + // We also need to always include the `cmp` module and its dependencies, + // so that we can use interfaces offered by `cmp` to support comparison, Lt/Le/Gt/Ge, over non-integer types. 
+ || cmp_and_its_dependencies.contains(&mident.value) || visited_modules.contains(&mident.value)) .then(|| { mdef.is_source_module = true; @@ -327,6 +342,12 @@ fn is_vector(module_ident: ModuleIdent_) -> bool { && module_ident.module.0.value.as_str() == "vector" } +/// Is `module_ident` the `0x1::cmp` module? +fn is_cmp(module_ident: ModuleIdent_) -> bool { + module_ident.address.into_addr_bytes().into_inner() == AccountAddress::ONE + && module_ident.module.0.value.as_str() == well_known::CMP_MODULE +} + fn run_move_checker(env: &mut GlobalEnv, program: E::Program) { let mut builder = ModelBuilder::new(env); for (module_count, (module_id, module_def)) in program diff --git a/third_party/move/move-model/src/model.rs b/third_party/move/move-model/src/model.rs index 7e7903c951e66..fd0214e7afa9e 100644 --- a/third_party/move/move-model/src/model.rs +++ b/third_party/move/move-model/src/model.rs @@ -607,6 +607,8 @@ pub struct GlobalEnv { /// Whether the v2 compiler has generated this model. /// TODO: replace with a proper version number once we have this in file format pub(crate) generated_by_v2: bool, + /// A set of types that are instantiated in cmp module. + pub cmp_types: RefCell>, } /// A helper type for implementing fmt::Display depending on GlobalEnv @@ -668,6 +670,7 @@ impl GlobalEnv { address_alias_map: Default::default(), everything_is_target: Default::default(), generated_by_v2: false, + cmp_types: RefCell::new(Default::default()), } } @@ -1281,25 +1284,18 @@ impl GlobalEnv { // Comparison of Diagnostic values that tries to match program ordering so we // can display them to the user in a more natural order. fn cmp_diagnostic(diag1: &Diagnostic, diag2: &Diagnostic) -> Ordering { - let labels_ordering = GlobalEnv::cmp_labels(&diag1.labels, &diag2.labels); - if Ordering::Equal == labels_ordering { - let sev_ordering = diag1 + GlobalEnv::cmp_labels(&diag1.labels, &diag2.labels).then_with(|| { + diag1 .severity .partial_cmp(&diag2.severity) - .expect("Severity provides a total ordering for valid severity enum values"); - if Ordering::Equal == sev_ordering { - let message_ordering = diag1.message.cmp(&diag2.message); - if Ordering::Equal == message_ordering { - diag1.code.cmp(&diag2.code) - } else { - message_ordering - } - } else { - sev_ordering - } - } else { - labels_ordering - } + .expect("Severity provides a total ordering for valid severity enum values") + .then_with(|| { + diag1 + .message + .cmp(&diag2.message) + .then_with(|| diag1.code.cmp(&diag2.code)) + }) + }) } // Label comparison that tries to match program ordering. `FileId` is already set in visitation @@ -1307,25 +1303,13 @@ impl GlobalEnv { // marking nested regions, we want the innermost region, so we order first by end of labelled // code region, then in reverse by start of region. fn cmp_label(label1: &Label, label2: &Label) -> Ordering { - let file_ordering = label1.file_id.cmp(&label2.file_id); - if Ordering::Equal == file_ordering { - // First order by end of region. - let end1 = label1.range.end; - let end2 = label2.range.end; - let end_ordering = end1.cmp(&end2); - if Ordering::Equal == end_ordering { - let start1 = label1.range.start; - let start2 = label2.range.start; - - // For nested regions with same end, show inner-most region first. - // Swap 1 and 2 in comparing starts. 
- start2.cmp(&start1) - } else { - end_ordering - } - } else { - file_ordering - } + label1.file_id.cmp(&label2.file_id).then_with(|| { + label1 + .range + .end + .cmp(&label2.range.end) + .then_with(|| label2.range.start.cmp(&label1.range.start)) + }) } // Label comparison within a list of labels for a given diagnostic, which orders by priority @@ -1346,12 +1330,14 @@ impl GlobalEnv { fn cmp_labels(labels1: &[Label], labels2: &[Label]) -> Ordering { let mut sorted_labels1 = labels1.iter().collect_vec(); sorted_labels1.sort_by(|l1, l2| GlobalEnv::cmp_label_priority(l1, l2)); + let sorted_labels1_len = sorted_labels1.len(); let mut sorted_labels2 = labels2.iter().collect_vec(); sorted_labels2.sort_by(|l1, l2| GlobalEnv::cmp_label_priority(l1, l2)); + let sorted_labels2_len = sorted_labels2.len(); std::iter::zip(sorted_labels1, sorted_labels2) .map(|(l1, l2)| GlobalEnv::cmp_label(l1, l2)) - .find(|r| Ordering::Equal != *r) - .unwrap_or(Ordering::Equal) + .fold(Ordering::Equal, Ordering::then) + .then_with(|| sorted_labels1_len.cmp(&sorted_labels2_len)) } /// Writes accumulated diagnostics that pass through `filter` @@ -1362,12 +1348,8 @@ impl GlobalEnv { { let mut shown = BTreeSet::new(); self.diags.borrow_mut().sort_by(|a, b| { - let reported_ordering = a.1.cmp(&b.1); - if Ordering::Equal == reported_ordering { - GlobalEnv::cmp_diagnostic(&a.0, &b.0) - } else { - reported_ordering - } + a.1.cmp(&b.1) + .then_with(|| GlobalEnv::cmp_diagnostic(&a.0, &b.0)) }); for (diag, reported) in self.diags.borrow_mut().iter_mut().filter(|(d, reported)| { !reported @@ -3606,6 +3588,10 @@ impl<'env> ModuleEnv<'env> { || self.is_module_in_ext("table") || self.is_module_in_ext("table_with_length") } + + pub fn is_cmp(&self) -> bool { + self.is_module_in_std("cmp") + } } // ================================================================================================= diff --git a/third_party/move/move-model/src/ty.rs b/third_party/move/move-model/src/ty.rs index eba11991548e9..ba2f78d53cea6 100644 --- a/third_party/move/move-model/src/ty.rs +++ b/third_party/move/move-model/src/ty.rs @@ -21,7 +21,7 @@ use move_binary_format::{ }; use move_core_types::{ ability::{Ability, AbilitySet}, - language_storage::{FunctionTag, StructTag, TypeTag}, + language_storage::{FunctionParamOrReturnTag, FunctionTag, StructTag, TypeTag}, u256::U256, }; use num::BigInt; @@ -1086,6 +1086,89 @@ impl Type { matches!(self, Type::Var(_)) } + /// Returns all internal types contained in this type (including itself), skipping reference types. 
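+    /// For example, `vector<u8>` yields `[u8, vector<u8>]`; a struct type additionally yields
+    /// its non-phantom type arguments and the instantiated types of its fields. The Boogie
+    /// backend uses this to collect the type instantiations that need generated `cmp` support.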
+ pub fn get_all_contained_types_with_skip_reference(&self, env: &GlobalEnv) -> Vec { + match self { + Type::Primitive(_) => vec![self.clone()], + Type::Tuple(ts) => ts + .iter() + .flat_map(|t| t.get_all_contained_types_with_skip_reference(env)) + .collect(), + Type::Vector(et) => { + let mut types = et.get_all_contained_types_with_skip_reference(env); + types.push(self.clone()); + types + }, + Type::Struct(_, _, ts) => { + let struct_env = self.get_struct(env).unwrap().0; + let mut new_types = ts + .iter() + .zip(struct_env.data.type_params.iter()) + .filter(|(_, param)| !param.1.is_phantom) + .flat_map(|(t, _)| t.get_all_contained_types_with_skip_reference(env)) + .collect_vec(); + new_types.push(self.clone()); + if struct_env.has_variants() { + for variant in struct_env.get_variants() { + for field in struct_env.get_fields_of_variant(variant) { + new_types.extend( + field + .get_type() + .instantiate(ts) + .get_all_contained_types_with_skip_reference(env), + ); + } + } + } else { + for field in struct_env.get_fields() { + new_types.extend( + field + .get_type() + .instantiate(ts) + .get_all_contained_types_with_skip_reference(env), + ); + } + } + new_types + }, + Type::Fun(arg, result, _) => { + let mut types = arg.get_all_contained_types_with_skip_reference(env); + types.extend(result.get_all_contained_types_with_skip_reference(env)); + types + }, + Type::Reference(_, bt) => { + let mut types = bt.get_all_contained_types_with_skip_reference(env); + types.push(self.clone()); + types + }, + Type::TypeDomain(bt) => { + let mut types = bt.get_all_contained_types_with_skip_reference(env); + types.push(self.clone()); + types + }, + Type::ResourceDomain(_, _, Some(bt)) => { + let mut types = bt + .iter() + .flat_map(|t| t.get_all_contained_types_with_skip_reference(env)) + .collect_vec(); + types.push(self.clone()); + types + }, + Type::ResourceDomain(_, _, None) => { + vec![self.clone()] + }, + Type::Var(..) => { + vec![self.clone()] + }, + Type::Error => { + vec![self.clone()] + }, + Type::TypeParameter(..) => { + vec![self.clone()] + }, + } + } + /// Returns true if this is any number type. 
pub fn is_number(&self) -> bool { if let Type::Primitive(p) = self { @@ -1496,8 +1579,22 @@ impl Type { results, abilities, } = fun.as_ref(); - let from_vec = |ts: &[TypeTag]| { - Type::tuple(ts.iter().map(|t| Type::from_type_tag(t, env)).collect_vec()) + let from_vec = |ts: &[FunctionParamOrReturnTag]| { + Type::tuple( + ts.iter() + .map(|t| match t { + FunctionParamOrReturnTag::Reference(t) => Reference( + ReferenceKind::Immutable, + Box::new(Type::from_type_tag(t, env)), + ), + FunctionParamOrReturnTag::MutableReference(t) => Reference( + ReferenceKind::Mutable, + Box::new(Type::from_type_tag(t, env)), + ), + FunctionParamOrReturnTag::Value(t) => Type::from_type_tag(t, env), + }) + .collect_vec(), + ) }; Fun( Box::new(from_vec(args)), diff --git a/third_party/move/move-model/src/well_known.rs b/third_party/move/move-model/src/well_known.rs index 68c67c087dc50..c9cc893ee4d78 100644 --- a/third_party/move/move-model/src/well_known.rs +++ b/third_party/move/move-model/src/well_known.rs @@ -42,6 +42,8 @@ pub const VECTOR_FUNCS_WITH_BYTECODE_INSTRS: &[&str] = &[ "swap", ]; +pub const CMP_MODULE: &str = "cmp"; + pub const TYPE_NAME_MOVE: &str = "type_info::type_name"; pub const TYPE_NAME_SPEC: &str = "type_info::$type_name"; pub const TYPE_INFO_MOVE: &str = "type_info::type_of"; diff --git a/third_party/move/move-prover/boogie-backend/src/bytecode_translator.rs b/third_party/move/move-prover/boogie-backend/src/bytecode_translator.rs index 23d8f831b6449..05c48f192cefb 100644 --- a/third_party/move/move-prover/boogie-backend/src/bytecode_translator.rs +++ b/third_party/move/move-prover/boogie-backend/src/bytecode_translator.rs @@ -150,6 +150,51 @@ impl<'env> BoogieTranslator<'env> { } } + #[allow(clippy::literal_string_with_formatting_args)] + fn emit_function(&self, writer: &CodeWriter, signature: &str, body_fn: impl Fn()) { + self.emit_function_with_attr(writer, "{:inline} ", signature, body_fn) + } + + #[allow(clippy::literal_string_with_formatting_args)] + fn emit_procedure(&self, writer: &CodeWriter, signature: &str, body_fn: impl Fn()) { + self.emit_procedure_with_attr(writer, "{:inline 1} ", signature, body_fn) + } + + fn emit_with_attr( + &self, + writer: &CodeWriter, + sig: &str, + attr: &str, + signature: &str, + body_fn: impl Fn(), + ) { + emitln!(writer, "{} {}{} {{", sig, attr, signature); + writer.indent(); + body_fn(); + writer.unindent(); + emitln!(writer, "}"); + } + + fn emit_function_with_attr( + &self, + writer: &CodeWriter, + attr: &str, + signature: &str, + body_fn: impl Fn(), + ) { + self.emit_with_attr(writer, "function", attr, signature, body_fn) + } + + fn emit_procedure_with_attr( + &self, + writer: &CodeWriter, + attr: &str, + signature: &str, + body_fn: impl Fn(), + ) { + self.emit_with_attr(writer, "procedure", attr, signature, body_fn) + } + #[allow(clippy::literal_string_with_formatting_args)] pub fn translate(&mut self) { let writer = self.writer; @@ -257,6 +302,39 @@ impl<'env> BoogieTranslator<'env> { // declare the memory variable for this type emitln!(writer, "var {}_$memory: $Memory {};", suffix, param_type); + + // If cmp module is included, emit cmp functions for generic types + if env + .get_modules() + .any(|m| m.get_full_name_str() == "0x1::cmp") + { + self.emit_function( + writer, + &format!( + "$1_cmp_$compare'{}'(v1: {}, v2: {}): $1_cmp_Ordering", + suffix, param_type, param_type + ), + || { + emitln!( + writer, + "if $IsEqual'{}'(v1, v2) then $1_cmp_Ordering_Equal()", + suffix + ); + emitln!(writer, "else $Arbitrary_value_of'$1_cmp_Ordering'()"); + 
}, + ); + + self.emit_procedure( + writer, + &format!( + "$1_cmp_compare'{}'(v1: {}, v2: {}) returns ($ret0: $1_cmp_Ordering)", + suffix, param_type, param_type + ), + || { + emitln!(writer, "$ret0 := $1_cmp_$compare'{}'(v1, v2);", suffix); + }, + ); + } } emitln!(writer); @@ -511,7 +589,8 @@ impl StructTranslator<'_> { } // Emit $IsValid function for `variant`. - self.emit_function_with_attr( + self.parent.emit_function_with_attr( + writer, "", // not inlined! &format!("$IsValid'{}'(s: {}): bool", suffix_variant, struct_name), || { @@ -538,7 +617,8 @@ impl StructTranslator<'_> { // Emit $IsValid function for struct. fn emit_is_valid_struct(&self, struct_env: &StructEnv, struct_name: &str, emit_fn: impl Fn()) { let writer = self.parent.writer; - self.emit_function_with_attr( + self.parent.emit_function_with_attr( + writer, "", // not inlined! &format!("$IsValid'{}'(s: {}): bool", struct_name, struct_name), || { @@ -642,6 +722,121 @@ impl StructTranslator<'_> { emitln!(writer, "else false"); } + /// Emit the function cmp::compare for enum + fn emit_cmp_for_enum(&self, struct_env: &StructEnv, struct_name: &str) { + let writer = self.parent.writer; + let suffix: String = boogie_type_suffix_for_struct(struct_env, self.type_inst, false); + self.emit_function( + &format!( + "$1_cmp_$compare'{}'(v1: {}, v2: {}): $1_cmp_Ordering", + suffix, struct_name, struct_name + ), + || { + let mut else_symbol = ""; + for (pos_1, v1) in struct_env.get_variants().collect_vec().iter().enumerate() { + for (pos_2, v2) in struct_env.get_variants().collect_vec().iter().enumerate() { + if pos_2 <= pos_1 { + continue; + } + let struct_variant_name_1 = + boogie_struct_variant_name(struct_env, self.type_inst, *v1); + let struct_variant_name_2 = + boogie_struct_variant_name(struct_env, self.type_inst, *v2); + let cmp_order_less = format!( + "{} if v1 is {} && v2 is {} then $1_cmp_Ordering_Less()", + else_symbol, struct_variant_name_1, struct_variant_name_2 + ); + let cmp_order_greater = format!( + "else if v1 is {} && v2 is {} then $1_cmp_Ordering_Greater()", + struct_variant_name_2, struct_variant_name_1 + ); + if else_symbol.is_empty() { + else_symbol = "else"; + } + emitln!(writer, "{}", cmp_order_less); + emitln!(writer, "{}", cmp_order_greater); + } + } + for variant in struct_env.get_variants().collect_vec().iter() { + let struct_variant_name_1 = + boogie_struct_variant_name(struct_env, self.type_inst, *variant); + let suffix_variant = + boogie_type_suffix_for_struct_variant(struct_env, self.type_inst, variant); + let cmp_order = format!( + "{} if v1 is {} && v2 is {} then $1_cmp_$compare'{}'(v1, v2)", + else_symbol, struct_variant_name_1, struct_variant_name_1, suffix_variant + ); + emitln!(writer, "{}", cmp_order); + } + emitln!(writer, "else $Arbitrary_value_of'$1_cmp_Ordering'()"); + }, + ); + for variant in struct_env.get_variants().collect_vec().iter() { + self.emit_cmp_for_enum_variant(struct_env, *variant, struct_name); + } + } + + /// Emit the function cmp::compare for each enum variant + fn emit_cmp_for_enum_variant( + &self, + struct_env: &StructEnv, + variant: Symbol, + struct_name: &str, + ) { + let writer = self.parent.writer; + let suffix_variant = + boogie_type_suffix_for_struct_variant(struct_env, self.type_inst, &variant); + self.emit_function( + &format!( + "$1_cmp_$compare'{}'(v1: {}, v2: {}): $1_cmp_Ordering", + suffix_variant, struct_name, struct_name + ), + || { + if struct_env + .get_fields_of_variant(variant) + .collect_vec() + .is_empty() + { + emitln!(writer, "$1_cmp_Ordering_Equal()"); 
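+                    // (A variant without fields carries no data beyond its tag, so two values
+                    // of the same variant always compare equal.)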
+ } else { + for (pos, field) in struct_env.get_fields_of_variant(variant).enumerate() { + let bv_flag = self.field_bv_flag(&field); + let field_type_name = boogie_type_suffix_bv( + self.parent.env, + &self.inst(&field.get_type()), + bv_flag, + ); + let cmp_field_call = format!( + "$1_cmp_$compare'{}'(v1->{}, v2->{})", + field_type_name, + boogie_field_sel(&field), + boogie_field_sel(&field) + ); + let cmp_field_call_less = + format!("{} == $1_cmp_Ordering_Less()", cmp_field_call); + let cmp_field_call_greater = + format!("{} == $1_cmp_Ordering_Greater()", cmp_field_call); + emitln!(writer, "if {}", cmp_field_call_less); + emitln!(writer, "then $1_cmp_Ordering_Less()"); + emitln!(writer, "else if {}", cmp_field_call_greater); + emitln!(writer, "then $1_cmp_Ordering_Greater()"); + if pos + < struct_env + .get_fields_of_variant(variant) + .collect_vec() + .len() + - 1 + { + emitln!(writer, "else"); + } else { + emitln!(writer, "else $1_cmp_Ordering_Equal()"); + } + } + } + }, + ); + } + /// Return whether a field involves bitwise operations pub fn field_bv_flag(&self, field_env: &FieldEnv) -> bool { let global_state = &self @@ -805,21 +1000,90 @@ impl StructTranslator<'_> { emitln!(writer, "var {}: $Memory {};", memory_name, struct_name); } + // Emit compare function and procedure + let cmp_struct_types = self.parent.env.cmp_types.borrow(); + for cmp_struct_type in cmp_struct_types.iter() { + if let Some((cur_struct, inst)) = cmp_struct_type.get_struct(env) { + if cur_struct.get_id() == struct_env.get_id() && inst == self.type_inst { + if !struct_env.has_variants() { + let suffix = + boogie_type_suffix_for_struct(struct_env, self.type_inst, false); + self.emit_function( + &format!( + "$1_cmp_$compare'{}'(v1: {}, v2: {}): $1_cmp_Ordering", + suffix, struct_name, struct_name + ), + || { + for (pos, field) in struct_env.get_fields().enumerate() { + let bv_flag = self.field_bv_flag(&field); + let suffix_ty = boogie_type_suffix_bv( + self.parent.env, + &self.inst(&field.get_type()), + bv_flag, + ); + let cmp_field_call = format!( + "$1_cmp_$compare'{}'(v1->{}, v2->{})", + suffix_ty, + boogie_field_sel(&field), + boogie_field_sel(&field) + ); + let cmp_field_call_less = + format!("{} == $1_cmp_Ordering_Less()", cmp_field_call); + let cmp_field_call_greater = + format!("{} == $1_cmp_Ordering_Greater()", cmp_field_call); + emitln!(writer, "if {}", cmp_field_call_less); + emitln!(writer, "then $1_cmp_Ordering_Less()"); + emitln!(writer, "else if {}", cmp_field_call_greater); + emitln!(writer, "then $1_cmp_Ordering_Greater()"); + if pos < struct_env.get_field_count() - 1 { + emitln!(writer, "else"); + } else { + emitln!(writer, "else $1_cmp_Ordering_Equal()"); + } + } + }, + ); + self.emit_procedure( + &format!( + "$1_cmp_compare'{}'(v1: {}, v2: {}) returns ($ret0: $1_cmp_Ordering)", + suffix, struct_name, struct_name + ), + || { + emitln!(writer, "$ret0 := $1_cmp_$compare'{}'(v1, v2);", suffix); + }, + ); + } else { + self.emit_cmp_for_enum(struct_env, &struct_name); + let suffix: String = + boogie_type_suffix_for_struct(struct_env, self.type_inst, false); + self.emit_procedure( + &format!( + "$1_cmp_compare'{}'(v1: {}, v2: {}) returns ($ret0: $1_cmp_Ordering)", + suffix, struct_name, struct_name + ), + || { + emitln!(writer, "$ret0 := $1_cmp_$compare'{}'(v1, v2);", suffix); + }, + ); + } + break; + } + } + } + emitln!(writer); } #[allow(clippy::literal_string_with_formatting_args)] fn emit_function(&self, signature: &str, body_fn: impl Fn()) { - self.emit_function_with_attr("{:inline} ", signature, 
body_fn) + self.parent + .emit_function(self.parent.writer, signature, body_fn); } - fn emit_function_with_attr(&self, attr: &str, signature: &str, body_fn: impl Fn()) { - let writer = self.parent.writer; - emitln!(writer, "function {}{} {{", attr, signature); - writer.indent(); - body_fn(); - writer.unindent(); - emitln!(writer, "}"); + #[allow(clippy::literal_string_with_formatting_args)] + fn emit_procedure(&self, signature: &str, body_fn: impl Fn()) { + self.parent + .emit_procedure(self.parent.writer, signature, body_fn); } } @@ -1622,6 +1886,11 @@ impl FunctionTranslator<'_> { } else { let dest_bv_flag = !dests.is_empty() && compute_flag(dests[0]); let bv_flag = !srcs.is_empty() && compute_flag(srcs[0]); + if module_env.is_cmp() { + fun_name = boogie_function_bv_name(&callee_env, inst, &[ + bv_flag || dest_bv_flag, + ]); + } // Handle the case where the return value of length is assigned to a bv int because // length always returns a non-bv result if module_env.is_std_vector() { diff --git a/third_party/move/move-prover/boogie-backend/src/lib.rs b/third_party/move/move-prover/boogie-backend/src/lib.rs index 906bc6f7b6af8..137a3ab5b1666 100644 --- a/third_party/move/move-prover/boogie-backend/src/lib.rs +++ b/third_party/move/move-prover/boogie-backend/src/lib.rs @@ -55,6 +55,7 @@ const TABLE_ARRAY_THEORY: &[u8] = include_bytes!("prelude/table-array-theory.bpl // TODO use named addresses const BCS_MODULE: &str = "0x1::bcs"; const EVENT_MODULE: &str = "0x1::event"; +const CMP_MODULE: &str = "0x1::cmp"; mod boogie_helpers; pub mod boogie_wrapper; @@ -208,7 +209,6 @@ pub fn add_prelude( .into_iter() .collect_vec(); all_types.append(&mut bv_all_types); - context.insert("uninterpreted_instances", &all_types); // obtain bv number types appearing in the program, which is currently used to generate cast functions for bv types let number_types = mono_info @@ -321,6 +321,68 @@ pub fn add_prelude( let event_instances = filter_native_ensure_one_inst(EVENT_MODULE); context.insert("event_instances", &event_instances); + // handle cmp module + let filter_native_with_contained_types_with_bv_flag = |module: &str, bv_flag: bool| { + mono_info + .native_inst + .iter() + .filter(|(id, _)| env.get_module(**id).get_full_name_str() == module) + .flat_map(|(_, insts)| { + insts.iter().map(|inst| { + inst.iter() + .flat_map(|i| i.get_all_contained_types_with_skip_reference(env)) + .map(|i| (i.clone(), TypeInfo::new(env, options, &i, bv_flag))) + .collect::>() + }) + }) + .sorted() + .collect_vec() + }; + let filter_native_with_contained_types = |module: &str| { + let mut filtered = filter_native_with_contained_types_with_bv_flag(module, false); + let mut filtered_bv = filter_native_with_contained_types_with_bv_flag(module, true); + filtered.append(&mut filtered_bv); + filtered.into_iter().flatten().collect_vec() + }; + let mut cmp_instances = filter_native_with_contained_types(CMP_MODULE); + cmp_instances.sort(); + cmp_instances.dedup(); + let mut cmp_struct_types = vec![]; + let mut cmp_int_types = all_types + .clone() + .into_iter() + .filter(|ty| ty.name == "int") + .collect_vec(); + for (ty, ty_info) in &cmp_instances { + if ty.is_struct() { + cmp_struct_types.push(ty.clone()); + } + if ty.is_number() && !ty_info.suffix.contains("bv") && !cmp_int_types.contains(ty_info) { + cmp_int_types.push(ty_info.clone()); + } + } + cmp_int_types.sort(); + cmp_int_types.dedup(); + cmp_struct_types.sort(); + cmp_struct_types.dedup(); + context.insert("cmp_int_instances", &cmp_int_types); + 
env.cmp_types.borrow_mut().extend(cmp_struct_types); + + let filter_cmp_instances_with_name_prefix = |name_prefix: &str| { + cmp_instances + .clone() + .into_iter() + .filter(|ty| ty.1.name.starts_with(name_prefix)) + .map(|ty| ty.1) + .collect_vec() + }; + let cmp_vector_instances = filter_cmp_instances_with_name_prefix("Vec"); + context.insert("cmp_vector_instances", &cmp_vector_instances); + let cmp_table_instances = filter_cmp_instances_with_name_prefix("Table"); + context.insert("cmp_table_instances", &cmp_table_instances); + + context.insert("uninterpreted_instances", &all_types); + // TODO: we have defined {{std}} for adaptable resolution of stdlib addresses but // not used it yet in the templates. let std_addr = format!("${}", env.get_stdlib_address().expect_numerical()); diff --git a/third_party/move/move-prover/boogie-backend/src/prelude/prelude.bpl b/third_party/move/move-prover/boogie-backend/src/prelude/prelude.bpl index 7b279ac98870e..cc0e0a031f21e 100644 --- a/third_party/move/move-prover/boogie-backend/src/prelude/prelude.bpl +++ b/third_party/move/move-prover/boogie-backend/src/prelude/prelude.bpl @@ -32,7 +32,9 @@ options provided to the prover. {%- set S = "'" ~ instance.suffix ~ "'" -%} {%- set T = instance.name -%} +{%-if S != "'$1_cmp_Ordering'" %} function $Arbitrary_value_of{{S}}(): {{T}}; +{% endif %} {% endfor %} diff --git a/third_party/move/move-prover/boogie-backend/src/spec_translator.rs b/third_party/move/move-prover/boogie-backend/src/spec_translator.rs index 6ca781281636c..b2002cf79a561 100644 --- a/third_party/move/move-prover/boogie-backend/src/spec_translator.rs +++ b/third_party/move/move-prover/boogie-backend/src/spec_translator.rs @@ -1251,8 +1251,9 @@ impl SpecTranslator<'_> { .env .get_extension::() .expect("global number operation state"); - let is_vector_table_module = module_env.is_std_vector() || module_env.is_table(); - let bv_flag = if is_vector_table_module && !args.is_empty() { + let is_vector_table_cmp_module = + module_env.is_std_vector() || module_env.is_table() || module_env.is_cmp(); + let bv_flag = if is_vector_table_cmp_module && !args.is_empty() { global_state.get_node_num_oper(args[0].node_id()) == Bitwise } else { global_state.get_node_num_oper(node_id) == Bitwise @@ -1441,12 +1442,6 @@ impl SpecTranslator<'_> { args: &[Exp], ) { let struct_env = self.env.get_module(module_id).into_struct(struct_id); - if struct_env.is_intrinsic() { - self.env.error( - &self.env.get_node_loc(node_id), - "cannot test variants of intrinsic struct", - ); - } let struct_type = &self.get_node_type(args[0].node_id()); let (_, _, _) = struct_type.skip_reference().require_struct(); let inst = self.env.get_node_instantiation(node_id); diff --git a/third_party/move/move-stdlib/src/natives/bcs.rs b/third_party/move/move-stdlib/src/natives/bcs.rs index 3288dee47929a..1904d06de6cb8 100644 --- a/third_party/move/move-stdlib/src/natives/bcs.rs +++ b/third_party/move/move-stdlib/src/natives/bcs.rs @@ -13,7 +13,7 @@ use move_vm_types::{ loaded_data::runtime_types::Type, natives::function::NativeResult, pop_arg, - value_serde::ValueSerDeContext, + value_serde::{FunctionValueExtension, ValueSerDeContext}, values::{values_impl::Reference, Value}, }; use smallvec::smallvec; @@ -65,7 +65,8 @@ fn native_to_bytes( let val = ref_to_val.read_ref()?; let function_value_extension = context.function_value_extension(); - let serialized_value = match ValueSerDeContext::new() + let max_value_nest_depth = function_value_extension.max_value_nest_depth(); + let serialized_value = 
match ValueSerDeContext::new(max_value_nest_depth) .with_legacy_signer() .with_func_args_deserialization(&function_value_extension) .serialize(&val, &layout)? diff --git a/third_party/move/move-stdlib/src/natives/debug.rs b/third_party/move/move-stdlib/src/natives/debug.rs index 45e6bb1b1dbc7..43461fadab997 100644 --- a/third_party/move/move-stdlib/src/natives/debug.rs +++ b/third_party/move/move-stdlib/src/natives/debug.rs @@ -460,8 +460,11 @@ mod testing { )?; } }, - MoveValue::Closure(clos) => { - write!(out, "{}", clos).map_err(fmt_error_to_partial_vm_error)?; + MoveValue::Closure(_) => { + // Using non-Aptos Move stdlib with function values is not supported. In general, + // this debug implementation should be removed and replaced by the new one from + // Aptos Move. + return Err(PartialVMError::new(StatusCode::FEATURE_NOT_ENABLED)); }, MoveValue::Struct(move_struct) => match move_struct { MoveStruct::WithTypes { @@ -512,7 +515,8 @@ mod testing { let str = move_value_as_escaped_string(val)?; write!(out, "\"{}\"", str).map_err(fmt_error_to_partial_vm_error)? } else { - write!(out, "{} ", type_tag).map_err(fmt_error_to_partial_vm_error)?; + write!(out, "{} ", type_tag.to_canonical_string()) + .map_err(fmt_error_to_partial_vm_error)?; write!(out, "{}", STRUCT_BEGIN).map_err(fmt_error_to_partial_vm_error)?; // For each field, print its name and value (and type) diff --git a/third_party/move/move-stdlib/tests/type_name_tests.move b/third_party/move/move-stdlib/tests/type_name_tests.move index f587a85f5471e..64295e669e2f6 100644 --- a/third_party/move/move-stdlib/tests/type_name_tests.move +++ b/third_party/move/move-stdlib/tests/type_name_tests.move @@ -19,20 +19,18 @@ module 0xA::type_name_tests { assert!(into_string(get>()) == string(b"vector"), 0) } - // Note: these tests assume a 16 byte address length, and will fail on platforms where addresses are 20 or 32 bytes #[test] fun test_structs() { - assert!(into_string(get()) == string(b"000000000000000000000000000000000000000000000000000000000000000a::type_name_tests::TestStruct"), 0); - assert!(into_string(get()) == string(b"0000000000000000000000000000000000000000000000000000000000000001::ascii::String"), 0); - assert!(into_string(get>()) == string(b"0000000000000000000000000000000000000000000000000000000000000001::option::Option"), 0); - assert!(into_string(get()) == string(b"0000000000000000000000000000000000000000000000000000000000000001::string::String"), 0); + assert!(into_string(get()) == string(b"0xa::type_name_tests::TestStruct"), 0); + assert!(into_string(get()) == string(b"0x1::ascii::String"), 0); + assert!(into_string(get>()) == string(b"0x1::option::Option"), 0); + assert!(into_string(get()) == string(b"0x1::string::String"), 0); } - // Note: these tests assume a 16 byte address length, and will fail on platforms where addresses are 20 or 32 bytes #[test] fun test_generics() { - assert!(into_string(get>()) == string(b"000000000000000000000000000000000000000000000000000000000000000a::type_name_tests::TestGenerics<0000000000000000000000000000000000000000000000000000000000000001::string::String>"), 0); - assert!(into_string(get>>()) == string(b"vector<000000000000000000000000000000000000000000000000000000000000000a::type_name_tests::TestGenerics>"), 0); - assert!(into_string(get>>()) == string(b"0000000000000000000000000000000000000000000000000000000000000001::option::Option<000000000000000000000000000000000000000000000000000000000000000a::type_name_tests::TestGenerics>"), 0); + assert!(into_string(get>()) == 
string(b"0xa::type_name_tests::TestGenerics<0x1::string::String>"), 0); + assert!(into_string(get>>()) == string(b"vector<0xa::type_name_tests::TestGenerics>"), 0); + assert!(into_string(get>>()) == string(b"0x1::option::Option<0xa::type_name_tests::TestGenerics>"), 0); } } diff --git a/third_party/move/move-vm/integration-tests/src/tests/module_storage_tests.rs b/third_party/move/move-vm/integration-tests/src/tests/module_storage_tests.rs index 00424243732fe..32a78221a5a97 100644 --- a/third_party/move/move-vm/integration-tests/src/tests/module_storage_tests.rs +++ b/third_party/move/move-vm/integration-tests/src/tests/module_storage_tests.rs @@ -56,7 +56,7 @@ fn test_module_does_not_exist() { let result = module_storage.check_module_exists(&AccountAddress::ZERO, ident_str!("a")); assert!(!assert_ok!(result)); - let result = module_storage.fetch_module_size_in_bytes(&AccountAddress::ZERO, ident_str!("a")); + let result = module_storage.unmetered_get_module_size(&AccountAddress::ZERO, ident_str!("a")); assert_none!(assert_ok!(result)); let result = module_storage.fetch_module_metadata(&AccountAddress::ZERO, ident_str!("a")); diff --git a/third_party/move/move-vm/runtime/src/config.rs b/third_party/move/move-vm/runtime/src/config.rs index 944c385ced8ce..98be2e5f49051 100644 --- a/third_party/move/move-vm/runtime/src/config.rs +++ b/third_party/move/move-vm/runtime/src/config.rs @@ -3,7 +3,9 @@ use move_binary_format::deserializer::DeserializerConfig; use move_bytecode_verifier::VerifierConfig; -use move_vm_types::loaded_data::runtime_types::TypeBuilder; +use move_vm_types::{ + loaded_data::runtime_types::TypeBuilder, values::DEFAULT_MAX_VM_VALUE_NESTED_DEPTH, +}; use serde::Serialize; /// Dynamic config options for the Move VM. @@ -29,6 +31,7 @@ pub struct VMConfig { pub ty_builder: TypeBuilder, pub use_call_tree_and_instruction_cache: bool, pub enable_lazy_loading: bool, + pub enable_depth_checks: bool, } impl Default for VMConfig { @@ -38,7 +41,7 @@ impl Default for VMConfig { deserializer_config: DeserializerConfig::default(), paranoid_type_checks: false, check_invariant_in_swap_loc: true, - max_value_nest_depth: Some(128), + max_value_nest_depth: Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH), layout_max_size: 256, layout_max_depth: 128, type_max_cost: 0, @@ -48,6 +51,7 @@ impl Default for VMConfig { ty_builder: TypeBuilder::with_limits(128, 20), use_call_tree_and_instruction_cache: true, enable_lazy_loading: false, + enable_depth_checks: true, } } } diff --git a/third_party/move/move-vm/runtime/src/data_cache.rs b/third_party/move/move-vm/runtime/src/data_cache.rs index 2c4dd7d741db2..9dd342b4744b7 100644 --- a/third_party/move/move-vm/runtime/src/data_cache.rs +++ b/third_party/move/move-vm/runtime/src/data_cache.rs @@ -22,7 +22,7 @@ use move_core_types::{ use move_vm_types::{ loaded_data::runtime_types::Type, resolver::ResourceResolver, - value_serde::ValueSerDeContext, + value_serde::{FunctionValueExtension, ValueSerDeContext}, values::{GlobalValue, Value}, }; use std::collections::btree_map::{BTreeMap, Entry}; @@ -77,7 +77,8 @@ impl TransactionDataCache { let resource_converter = |value: Value, layout: MoveTypeLayout, _: bool| -> PartialVMResult { let function_value_extension = FunctionValueExtensionAdapter { module_storage }; - ValueSerDeContext::new() + let max_value_nest_depth = function_value_extension.max_value_nest_depth(); + ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(&function_value_extension) .serialize(&value, &layout)? 
.map(Into::into) @@ -170,13 +171,17 @@ impl TransactionDataCache { let function_value_extension = FunctionValueExtensionAdapter { module_storage }; let value = match data { Some(blob) => { - let val = ValueSerDeContext::new() + let max_value_nest_depth = function_value_extension.max_value_nest_depth(); + let val = ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(&function_value_extension) .with_delayed_fields_serde() .deserialize(&blob, &layout) .ok_or_else(|| { - let msg = - format!("Failed to deserialize resource {} at {}!", struct_tag, addr); + let msg = format!( + "Failed to deserialize resource {} at {}!", + struct_tag.to_canonical_string(), + addr + ); PartialVMError::new(StatusCode::FAILED_TO_DESERIALIZE_RESOURCE) .with_message(msg) })?; diff --git a/third_party/move/move-vm/runtime/src/interpreter.rs b/third_party/move/move-vm/runtime/src/interpreter.rs index 21fed7be73522..62f3ed09f94c6 100644 --- a/third_party/move/move-vm/runtime/src/interpreter.rs +++ b/third_party/move/move-vm/runtime/src/interpreter.rs @@ -20,8 +20,8 @@ use crate::{ verify_pack_closure, FullRuntimeTypeCheck, NoRuntimeTypeCheck, RuntimeTypeCheck, }, storage::{ - dependencies_gas_charging::check_dependencies_and_charge_gas, - depth_formula_calculator::DepthFormulaCalculator, + dependencies_gas_charging::check_dependencies_and_charge_gas, loader::traits::Loader, + ty_depth_checker::TypeDepthChecker, }, trace, LoadedFunction, ModuleStorage, RuntimeEnvironment, }; @@ -39,7 +39,6 @@ use move_core_types::{ language_storage::TypeTag, vm_status::{StatusCode, StatusType}, }; -use move_vm_metrics::{Timer, VM_TIMER}; use move_vm_types::{ debug_write, debug_writeln, gas::{GasMeter, SimpleInstruction}, @@ -86,7 +85,7 @@ pub(crate) trait InterpreterDebugInterface { /// /// An `Interpreter` instance is a stand alone execution context for a function. /// It mimics execution on a single thread, with an call stack and an operand stack. -pub(crate) struct InterpreterImpl<'ctx> { +pub(crate) struct InterpreterImpl<'ctx, LoaderImpl> { /// Operand stack, where Move `Value`s are stored for stack operations. pub(crate) operand_stack: Stack, /// The stack of active functions. @@ -97,6 +96,8 @@ pub(crate) struct InterpreterImpl<'ctx> { access_control: AccessControlState, /// Reentrancy checker. reentrancy_checker: ReentrancyChecker, + /// Checks depth of types of values. Used to bound packing too deep structs or vectors. + ty_depth_checker: &'ctx TypeDepthChecker<'ctx, LoaderImpl>, } struct TypeWithRuntimeEnvironment<'a, 'b> { @@ -118,6 +119,7 @@ impl Interpreter { args: Vec, data_cache: &mut TransactionDataCache, module_storage: &impl ModuleStorage, + ty_depth_checker: &TypeDepthChecker, resource_resolver: &impl ResourceResolver, gas_meter: &mut impl GasMeter, traversal_context: &mut TraversalContext, @@ -128,6 +130,7 @@ impl Interpreter { args, data_cache, module_storage, + ty_depth_checker, resource_resolver, gas_meter, traversal_context, @@ -136,7 +139,10 @@ impl Interpreter { } } -impl InterpreterImpl<'_> { +impl InterpreterImpl<'_, LoaderImpl> +where + LoaderImpl: Loader, +{ /// Entrypoint into the interpreter. All external calls need to be routed through this /// function. 
pub(crate) fn entrypoint( @@ -144,6 +150,7 @@ impl InterpreterImpl<'_> { args: Vec, data_cache: &mut TransactionDataCache, module_storage: &impl ModuleStorage, + ty_depth_checker: &TypeDepthChecker, resource_resolver: &impl ResourceResolver, gas_meter: &mut impl GasMeter, traversal_context: &mut TraversalContext, @@ -155,6 +162,7 @@ impl InterpreterImpl<'_> { vm_config: module_storage.runtime_environment().vm_config(), access_control: AccessControlState::default(), reentrancy_checker: ReentrancyChecker::default(), + ty_depth_checker, }; let function = Rc::new(function); @@ -185,7 +193,10 @@ impl InterpreterImpl<'_> { } } - fn load_generic_function( + /// Loads a generic function with instantiated type arguments. Does not perform any checks if + /// the function is callable (i.e., visible to the caller). The visibility check should be done + /// at the call-site. + fn load_generic_function_no_visibility_checks( &mut self, module_storage: &impl ModuleStorage, current_frame: &Frame, @@ -198,15 +209,12 @@ impl InterpreterImpl<'_> { let function = current_frame .build_loaded_function_from_instantiation_and_ty_args(module_storage, idx, ty_args) .map_err(|e| self.set_location(e))?; - - if self.vm_config.paranoid_type_checks { - self.check_friend_or_private_call(¤t_frame.function, &function)?; - } - Ok(function) } - fn load_function( + /// Loads a non-generic function. Does not perform any checks if the function is callable + /// (i.e., visible to the caller). The visibility check should be done at the call-site. + fn load_function_no_visibility_checks( &mut self, module_storage: &impl ModuleStorage, current_frame: &Frame, @@ -215,11 +223,6 @@ impl InterpreterImpl<'_> { let function = current_frame .build_loaded_function_from_handle_and_ty_args(module_storage, fh_idx, vec![]) .map_err(|e| self.set_location(e))?; - - if self.vm_config.paranoid_type_checks { - self.check_friend_or_private_call(¤t_frame.function, &function)?; - } - Ok(function) } @@ -316,6 +319,7 @@ impl InterpreterImpl<'_> { resource_resolver, module_storage, gas_meter, + traversal_context, ) .map_err(|err| self.attach_state_if_invariant_violation(err, ¤t_frame))?; @@ -372,11 +376,12 @@ impl InterpreterImpl<'_> { (Rc::clone(&entry.0), Rc::clone(&entry.1)) }, btree_map::Entry::Vacant(entry) => { - let function = Rc::new(self.load_function( - module_storage, - ¤t_frame, - fh_idx, - )?); + let function = + Rc::new(self.load_function_no_visibility_checks( + module_storage, + ¤t_frame, + fh_idx, + )?); let frame_cache = FrameTypeCache::make_rc_for_function(&function); @@ -392,7 +397,7 @@ impl InterpreterImpl<'_> { } } } else { - let function = Rc::::new(self.load_function( + let function = Rc::new(self.load_function_no_visibility_checks( module_storage, ¤t_frame, fh_idx, @@ -401,16 +406,17 @@ impl InterpreterImpl<'_> { (function, frame_cache) }; + RTTCheck::check_call_visibility( + ¤t_frame.function, + &function, + CallType::Regular, + ) + .map_err(|err| set_err_info!(current_frame, err))?; + // Charge gas - let module_id = function.module_id().ok_or_else(|| { - let err = PartialVMError::new_invariant_violation( - "Failed to get native function module id", - ); - set_err_info!(current_frame, err) - })?; gas_meter .charge_call( - module_id, + function.owner_as_module()?.self_id(), function.name(), self.operand_stack .last_n(function.param_tys().len()) @@ -467,13 +473,14 @@ impl InterpreterImpl<'_> { (Rc::clone(&entry.0), Rc::clone(&entry.1)) }, btree_map::Entry::Vacant(entry) => { - let function = - Rc::::new(self.load_generic_function( + 
let function = Rc::::new( + self.load_generic_function_no_visibility_checks( module_storage, ¤t_frame, gas_meter, idx, - )?); + )?, + ); let frame_cache = FrameTypeCache::make_rc_for_function(&function); @@ -489,28 +496,29 @@ impl InterpreterImpl<'_> { } } } else { - let function = Rc::::new(self.load_generic_function( - module_storage, - ¤t_frame, - gas_meter, - idx, - )?); + let function = Rc::::new( + self.load_generic_function_no_visibility_checks( + module_storage, + ¤t_frame, + gas_meter, + idx, + )?, + ); let frame_cache = FrameTypeCache::make_rc(); (function, frame_cache) }; - let module_id = function - .module_id() - .ok_or_else(|| { - PartialVMError::new_invariant_violation( - "Failed to get native function module id", - ) - }) - .map_err(|e| set_err_info!(current_frame, e))?; + RTTCheck::check_call_visibility( + ¤t_frame.function, + &function, + CallType::Regular, + ) + .map_err(|err| set_err_info!(current_frame, err))?; + // Charge gas gas_meter .charge_call_generic( - module_id, + function.owner_as_module()?.self_id(), function.name(), function .ty_args() @@ -568,12 +576,15 @@ impl InterpreterImpl<'_> { let module_id = lazy_function.with_name_and_ty_args( |module_opt, _func_name, ty_arg_tags| { let Some(module_id) = module_opt else { - // TODO(#15664): currently we need the module id for gas charging - // of calls, so we can't proceed here without one. But we want - // to be able to let scripts use closures. + // Note: + // Module ID of a function should always exist because functions + // are defined in modules. The only way to have `None` here is + // when function is a script entrypoint. Note that in this case, + // entrypoint function cannot be packed as a closure, nor there + // can be any lambda-lifting in the script. let err = PartialVMError::new_invariant_violation(format!( "module id required to charge gas for function `{}`", - lazy_function.to_stable_string() + lazy_function.to_canonical_string() )); return Err(set_err_info!(current_frame, err)); }; @@ -603,9 +614,16 @@ impl InterpreterImpl<'_> { // Resolve the function. This may lead to loading the code related // to this function. let callee = lazy_function - .with_resolved_function(module_storage, |f| Ok(f.clone())) + .as_resolved(module_storage) .map_err(|e| set_err_info!(current_frame, e))?; + RTTCheck::check_call_visibility( + ¤t_frame.function, + &callee, + CallType::ClosureDynamicDispatch, + ) + .map_err(|err| set_err_info!(current_frame, err))?; + // Charge gas for call and for the parameters. The current APIs // require an ExactSizeIterator to be passed for charge_call, so // some acrobatics is needed (sigh). @@ -627,10 +645,6 @@ impl InterpreterImpl<'_> { ) .map_err(|e| set_err_info!(current_frame, e))?; - // In difference to regular calls, we skip visibility check. - // It is possible to call a private function of another module via - // a closure. 
- // Call function if callee.is_native() { self.call_native::( @@ -863,15 +877,6 @@ impl InterpreterImpl<'_> { } } - let mut native_context = NativeContext::new( - self, - data_cache, - resource_resolver, - module_storage, - extensions, - gas_meter.balance_internal(), - traversal_context, - ); let native_function = function.get_native()?; gas_meter.charge_native_function_before_execution( @@ -882,10 +887,17 @@ impl InterpreterImpl<'_> { args.iter(), )?; + let mut native_context = NativeContext::new( + self, + data_cache, + resource_resolver, + module_storage, + extensions, + gas_meter, + traversal_context, + ); let result = native_function(&mut native_context, ty_args.to_vec(), args)?; - gas_meter.charge_heap_memory(native_context.heap_memory_usage())?; - // Note(Gas): The order by which gas is charged / error gets returned MUST NOT be modified // here or otherwise it becomes an incompatible change!!! match result { @@ -952,19 +964,11 @@ impl InterpreterImpl<'_> { ty_args, )?; - if target_func.is_friend_or_private() - || target_func.module_id() == function.module_id() - { - return Err(PartialVMError::new(StatusCode::RUNTIME_DISPATCH_ERROR) - .with_message( - "Invoking private or friend function during dispatch".to_string(), - )); - } - - if target_func.is_native() { - return Err(PartialVMError::new(StatusCode::RUNTIME_DISPATCH_ERROR) - .with_message("Invoking native function during dispatch".to_string())); - } + RTTCheck::check_call_visibility( + function, + &target_func, + CallType::NativeDynamicDispatch, + )?; // Checking type of the dispatch target function // @@ -1035,38 +1039,6 @@ impl InterpreterImpl<'_> { } } - /// Make sure only private/friend function can only be invoked by modules under the same address. - fn check_friend_or_private_call( - &self, - caller: &LoadedFunction, - callee: &LoadedFunction, - ) -> VMResult<()> { - if callee.is_friend_or_private() { - match (caller.module_id(), callee.module_id()) { - (Some(caller_id), Some(callee_id)) => { - if caller_id.address() == callee_id.address() { - Ok(()) - } else { - Err(self.set_location(PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) - .with_message( - format!("Private/Friend function invokation error, caller: {:?}::{:?}, callee: {:?}::{:?}", caller_id, caller.name(), callee_id, callee.name()), - ))) - } - }, - _ => Err(self.set_location( - PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) - .with_message(format!( - "Private/Friend function invokation error caller: {:?}, callee {:?}", - caller.name(), - callee.name() - )), - )), - } - } else { - Ok(()) - } - } - /// Perform a binary operation to two values at the top of the stack. fn binop(&mut self, f: F) -> PartialVMResult<()> where @@ -1108,9 +1080,11 @@ impl InterpreterImpl<'_> { /// Creates a data cache entry for the specified address-type pair. Charges gas for the number /// of bytes loaded. fn create_and_charge_data_cache_entry( + &self, resource_resolver: &impl ResourceResolver, module_storage: &impl ModuleStorage, gas_meter: &mut impl GasMeter, + _traversal_context: &mut TraversalContext, addr: AccountAddress, ty: &Type, ) -> PartialVMResult { @@ -1134,18 +1108,21 @@ impl InterpreterImpl<'_> { /// Loads a resource from the data store and return the number of bytes read from the storage. 
fn load_resource<'c>( + &self, data_cache: &'c mut TransactionDataCache, resource_resolver: &impl ResourceResolver, module_storage: &impl ModuleStorage, gas_meter: &mut impl GasMeter, + traversal_context: &mut TraversalContext, addr: AccountAddress, ty: &Type, ) -> PartialVMResult<&'c mut GlobalValue> { if !data_cache.contains_resource(&addr, ty) { - let entry = Self::create_and_charge_data_cache_entry( + let entry = self.create_and_charge_data_cache_entry( resource_resolver, module_storage, gas_meter, + traversal_context, addr, ty, )?; @@ -1163,19 +1140,22 @@ impl InterpreterImpl<'_> { resource_resolver: &impl ResourceResolver, module_storage: &impl ModuleStorage, gas_meter: &mut impl GasMeter, + traversal_context: &mut TraversalContext, addr: AccountAddress, ty: &Type, ) -> PartialVMResult<()> { let runtime_environment = module_storage.runtime_environment(); - let res = Self::load_resource( - data_cache, - resource_resolver, - module_storage, - gas_meter, - addr, - ty, - )? - .borrow_global(); + let res = self + .load_resource( + data_cache, + resource_resolver, + module_storage, + gas_meter, + traversal_context, + addr, + ty, + )? + .borrow_global(); gas_meter.charge_borrow_global( is_mut, is_generic, @@ -1241,15 +1221,17 @@ impl InterpreterImpl<'_> { resource_resolver: &impl ResourceResolver, module_storage: &impl ModuleStorage, gas_meter: &mut impl GasMeter, + traversal_context: &mut TraversalContext, addr: AccountAddress, ty: &Type, ) -> PartialVMResult<()> { let runtime_environment = module_storage.runtime_environment(); - let gv = Self::load_resource( + let gv = self.load_resource( data_cache, resource_resolver, module_storage, gas_meter, + traversal_context, addr, ty, )?; @@ -1275,19 +1257,22 @@ impl InterpreterImpl<'_> { resource_resolver: &impl ResourceResolver, module_storage: &impl ModuleStorage, gas_meter: &mut impl GasMeter, + traversal_context: &mut TraversalContext, addr: AccountAddress, ty: &Type, ) -> PartialVMResult<()> { let runtime_environment = module_storage.runtime_environment(); - let resource = match Self::load_resource( - data_cache, - resource_resolver, - module_storage, - gas_meter, - addr, - ty, - )? - .move_from() + let resource = match self + .load_resource( + data_cache, + resource_resolver, + module_storage, + gas_meter, + traversal_context, + addr, + ty, + )? 
+ .move_from() { Ok(resource) => { gas_meter.charge_move_from( @@ -1326,16 +1311,18 @@ impl InterpreterImpl<'_> { resource_resolver: &impl ResourceResolver, module_storage: &impl ModuleStorage, gas_meter: &mut impl GasMeter, + traversal_context: &mut TraversalContext, addr: AccountAddress, ty: &Type, resource: Value, ) -> PartialVMResult<()> { let runtime_environment = module_storage.runtime_environment(); - let gv = Self::load_resource( + let gv = self.load_resource( data_cache, resource_resolver, module_storage, gas_meter, + traversal_context, addr, ty, )?; @@ -1432,10 +1419,10 @@ impl InterpreterImpl<'_> { debug_write!(buf, "<")?; let mut it = ty_tags.iter(); if let Some(tag) = it.next() { - debug_write!(buf, "{}", tag)?; + debug_write!(buf, "{}", tag.to_canonical_string())?; for tag in it { debug_write!(buf, ", ")?; - debug_write!(buf, "{}", tag)?; + debug_write!(buf, "{}", tag.to_canonical_string())?; } } debug_write!(buf, ">")?; @@ -1526,7 +1513,10 @@ impl InterpreterImpl<'_> { } } -impl InterpreterDebugInterface for InterpreterImpl<'_> { +impl InterpreterDebugInterface for InterpreterImpl<'_, LoaderImpl> +where + LoaderImpl: Loader, +{ #[allow(dead_code)] fn debug_print_stack_trace( &self, @@ -1715,100 +1705,6 @@ impl CallStack { } } -fn check_depth_of_type(module_storage: &impl ModuleStorage, ty: &Type) -> PartialVMResult<()> { - let _timer = VM_TIMER.timer_with_label("Interpreter::check_depth_of_type"); - - // Start at 1 since we always call this right before we add a new node to the value's depth. - let max_depth = match module_storage - .runtime_environment() - .vm_config() - .max_value_nest_depth - { - Some(max_depth) => max_depth, - None => return Ok(()), - }; - check_depth_of_type_impl(module_storage, ty, max_depth, 1)?; - Ok(()) -} - -fn check_depth_of_type_impl( - module_storage: &impl ModuleStorage, - ty: &Type, - max_depth: u64, - depth: u64, -) -> PartialVMResult { - macro_rules! check_depth { - ($additional_depth:expr) => {{ - let new_depth = depth.saturating_add($additional_depth); - if new_depth > max_depth { - return Err(PartialVMError::new(StatusCode::VM_MAX_VALUE_DEPTH_REACHED)); - } else { - new_depth - } - }}; - } - - // Calculate depth of the type itself - let ty_depth = match ty { - Type::Bool - | Type::U8 - | Type::U16 - | Type::U32 - | Type::U64 - | Type::U128 - | Type::U256 - | Type::Address - | Type::Signer => check_depth!(0), - // Even though this is recursive this is OK since the depth of this recursion is - // bounded by the depth of the type arguments, which we have already checked. - Type::Reference(ty) | Type::MutableReference(ty) => { - check_depth_of_type_impl(module_storage, ty, max_depth, check_depth!(1))? - }, - Type::Vector(ty) => { - check_depth_of_type_impl(module_storage, ty, max_depth, check_depth!(1))? - }, - Type::Struct { idx, .. } => { - let formula = - DepthFormulaCalculator::new(module_storage).calculate_depth_of_struct(idx)?; - check_depth!(formula.solve(&[])) - }, - // NB: substitution must be performed before calling this function - Type::StructInstantiation { idx, ty_args, .. } => { - // Calculate depth of all type arguments, and make sure they themselves are not too deep. 
- let ty_arg_depths = ty_args - .iter() - .map(|ty| { - // Ty args should be fully resolved and not need any type arguments - check_depth_of_type_impl(module_storage, ty, max_depth, check_depth!(0)) - }) - .collect::>>()?; - let formula = - DepthFormulaCalculator::new(module_storage).calculate_depth_of_struct(idx)?; - check_depth!(formula.solve(&ty_arg_depths)) - }, - Type::Function { args, results, .. } => { - let mut ty_max_depth = depth; - for ty in args.iter().chain(results) { - ty_max_depth = ty_max_depth.max(check_depth_of_type_impl( - module_storage, - ty, - max_depth, - check_depth!(1), - )?); - } - ty_max_depth - }, - Type::TyParam(_) => { - return Err( - PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) - .with_message("Type parameter should be fully resolved".to_string()), - ) - }, - }; - - Ok(ty_depth) -} - /// An `ExitCode` from `execute_code_unit`. #[derive(Debug)] enum ExitCode { @@ -1822,11 +1718,12 @@ impl Frame { /// Execute a Move function until a return or a call opcode is found. fn execute_code( &mut self, - interpreter: &mut InterpreterImpl, + interpreter: &mut InterpreterImpl, data_cache: &mut TransactionDataCache, resource_resolver: &impl ResourceResolver, module_storage: &impl ModuleStorage, gas_meter: &mut impl GasMeter, + traversal_context: &mut TraversalContext, ) -> VMResult { self.execute_code_impl::( interpreter, @@ -1834,6 +1731,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, ) .map_err(|e| { let e = if cfg!(feature = "testing") || cfg!(feature = "stacktrace") { @@ -1847,11 +1745,12 @@ impl Frame { fn execute_code_impl( &mut self, - interpreter: &mut InterpreterImpl, + interpreter: &mut InterpreterImpl, data_cache: &mut TransactionDataCache, resource_resolver: &impl ResourceResolver, module_storage: &impl ModuleStorage, gas_meter: &mut impl GasMeter, + traversal_context: &mut TraversalContext, ) -> PartialVMResult { use SimpleInstruction as S; @@ -2121,11 +2020,15 @@ impl Frame { interpreter.operand_stack.push(field_ref)?; }, Bytecode::Pack(sd_idx) => { - let get_field_count_charge_gas_and_check_depth = + let mut get_field_count_charge_gas_and_check_depth = || -> PartialVMResult { let field_count = self.field_count(*sd_idx); let struct_type = self.get_struct_ty(*sd_idx); - check_depth_of_type(module_storage, &struct_type)?; + interpreter.ty_depth_checker.check_depth_of_type( + gas_meter, + traversal_context, + &struct_type, + )?; Ok(field_count) }; @@ -2156,7 +2059,11 @@ impl Frame { Bytecode::PackVariant(idx) => { let info = self.get_struct_variant_at(*idx); let struct_type = self.create_struct_ty(&info.definition_struct_type); - check_depth_of_type(module_storage, &struct_type)?; + interpreter.ty_depth_checker.check_depth_of_type( + gas_meter, + traversal_context, + &struct_type, + )?; gas_meter.charge_pack_variant( false, interpreter @@ -2186,7 +2093,11 @@ impl Frame { let (ty, ty_count) = frame_cache.get_struct_type(*si_idx, self)?; gas_meter.charge_create_ty(ty_count)?; - check_depth_of_type(module_storage, ty)?; + interpreter.ty_depth_checker.check_depth_of_type( + gas_meter, + traversal_context, + ty, + )?; Ok(self.field_instantiation_count(*si_idx)) }; @@ -2228,7 +2139,11 @@ impl Frame { let (ty, ty_count) = frame_cache.get_struct_variant_type(*si_idx, self)?; gas_meter.charge_create_ty(ty_count)?; - check_depth_of_type(module_storage, ty)?; + interpreter.ty_depth_checker.check_depth_of_type( + gas_meter, + traversal_context, + ty, + )?; let info = 
self.get_struct_variant_instantiation_at(*si_idx); gas_meter.charge_pack_variant( @@ -2278,7 +2193,11 @@ impl Frame { let (ty, ty_count) = frame_cache.get_struct_type(*si_idx, self)?; gas_meter.charge_create_ty(ty_count)?; - check_depth_of_type(module_storage, ty)?; + interpreter.ty_depth_checker.check_depth_of_type( + gas_meter, + traversal_context, + ty, + )?; let struct_ = interpreter.operand_stack.pop_as::()?; @@ -2301,7 +2220,11 @@ impl Frame { let (ty, ty_count) = frame_cache.get_struct_variant_type(*si_idx, self)?; gas_meter.charge_create_ty(ty_count)?; - check_depth_of_type(module_storage, ty)?; + interpreter.ty_depth_checker.check_depth_of_type( + gas_meter, + traversal_context, + ty, + )?; let struct_ = interpreter.operand_stack.pop_as::()?; @@ -2340,6 +2263,13 @@ impl Frame { .push(reference.test_variant(info.variant)?)?; }, Bytecode::PackClosure(fh_idx, mask) => { + gas_meter.charge_pack_closure( + false, + interpreter + .operand_stack + .last_n(mask.captured_count() as usize)?, + )?; + let function = self .build_loaded_function_from_handle_and_ty_args( module_storage, @@ -2347,9 +2277,12 @@ impl Frame { vec![], ) .map(Rc::new)?; + let captured = interpreter.operand_stack.popn(mask.captured_count())?; let lazy_function = LazyLoadedFunction::new_resolved( - module_storage.runtime_environment(), + module_storage, + gas_meter, + traversal_context, function.clone(), *mask, )?; @@ -2367,6 +2300,13 @@ impl Frame { } }, Bytecode::PackClosureGeneric(fi_idx, mask) => { + gas_meter.charge_pack_closure( + true, + interpreter + .operand_stack + .last_n(mask.captured_count() as usize)?, + )?; + let ty_args = self.instantiate_generic_function(Some(gas_meter), *fi_idx)?; let function = self @@ -2376,9 +2316,12 @@ impl Frame { ty_args, ) .map(Rc::new)?; + let captured = interpreter.operand_stack.popn(mask.captured_count())?; let lazy_function = LazyLoadedFunction::new_resolved( - module_storage.runtime_environment(), + module_storage, + gas_meter, + traversal_context, function.clone(), *mask, )?; @@ -2561,6 +2504,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, addr, &ty, )?; @@ -2578,6 +2522,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, addr, ty, )?; @@ -2591,6 +2536,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, addr, &ty, )?; @@ -2605,6 +2551,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, addr, ty, )?; @@ -2618,6 +2565,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, addr, &ty, )?; @@ -2632,6 +2580,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, addr, ty, )?; @@ -2651,6 +2600,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, addr, &ty, resource, @@ -2672,6 +2622,7 @@ impl Frame { resource_resolver, module_storage, gas_meter, + traversal_context, addr, ty, resource, @@ -2693,7 +2644,11 @@ impl Frame { Bytecode::VecPack(si, num) => { let (ty, ty_count) = frame_cache.get_signature_index_type(*si, self)?; gas_meter.charge_create_ty(ty_count)?; - check_depth_of_type(module_storage, ty)?; + interpreter.ty_depth_checker.check_depth_of_type( + gas_meter, + traversal_context, + ty, + )?; gas_meter.charge_vec_pack( make_ty!(ty), interpreter.operand_stack.last_n(*num as usize)?, diff --git a/third_party/move/move-vm/runtime/src/lib.rs b/third_party/move/move-vm/runtime/src/lib.rs index 47a517b585b89..c0960a0ef73f8 100644 --- 
a/third_party/move/move-vm/runtime/src/lib.rs +++ b/third_party/move/move-vm/runtime/src/lib.rs @@ -44,6 +44,23 @@ pub use storage::{ unsync_code_storage::{AsUnsyncCodeStorage, UnsyncCodeStorage}, unsync_module_storage::{AsUnsyncModuleStorage, BorrowedOrOwned, UnsyncModuleStorage}, }, + loader::{eager::EagerLoader, lazy::LazyLoader, traits::Loader}, module_storage::{ambassador_impl_ModuleStorage, AsFunctionValueExtension, ModuleStorage}, publishing::{StagingModuleStorage, VerifiedModuleBundle}, }; + +#[macro_export] +macro_rules! dispatch_loader { + ($module_storage:expr, $loader:ident, $dispatch:stmt) => { + if $crate::WithRuntimeEnvironment::runtime_environment($module_storage) + .vm_config() + .enable_lazy_loading + { + let $loader = $crate::LazyLoader::new($module_storage); + $dispatch + } else { + let $loader = $crate::EagerLoader::new($module_storage); + $dispatch + } + }; +} diff --git a/third_party/move/move-vm/runtime/src/loader/function.rs b/third_party/move/move-vm/runtime/src/loader/function.rs index af5d44c14eb6a..df72f4565b3d5 100644 --- a/third_party/move/move-vm/runtime/src/loader/function.rs +++ b/third_party/move/move-vm/runtime/src/loader/function.rs @@ -4,9 +4,10 @@ use crate::{ loader::{access_specifier_loader::load_access_specifier, Module, Script}, + module_traversal::TraversalContext, native_functions::{NativeFunction, NativeFunctions, UnboxedNativeFunction}, storage::ty_layout_converter::{LayoutConverter, StorageLayoutConverter}, - ModuleStorage, RuntimeEnvironment, + ModuleStorage, }; use better_any::{Tid, TidAble, TidExt}; use move_binary_format::{ @@ -18,7 +19,7 @@ use move_binary_format::{ }, }; use move_core_types::{ - ability::{Ability, AbilitySet}, + ability::AbilitySet, function::ClosureMask, identifier::{IdentStr, Identifier}, language_storage, @@ -27,13 +28,14 @@ use move_core_types::{ vm_status::StatusCode, }; use move_vm_types::{ + gas::DependencyGasMeter, loaded_data::{ runtime_access_specifier::AccessSpecifier, runtime_types::{StructIdentifier, Type}, }, values::{AbstractFunction, SerializedFunctionData}, }; -use std::{cell::RefCell, cmp::Ordering, fmt::Debug, rc::Rc, sync::Arc}; +use std::{cell::RefCell, cmp::Ordering, fmt::Debug, mem, rc::Rc, sync::Arc}; /// A runtime function definition representation. pub struct Function { @@ -115,11 +117,15 @@ impl LoadedFunction { /// `LoadedFunction`. This is wrapped into a Rc so one can clone the /// function while sharing the loading state. #[derive(Clone, Tid)] -pub(crate) struct LazyLoadedFunction(pub(crate) Rc>); +pub(crate) struct LazyLoadedFunction { + pub(crate) state: Rc>, +} #[derive(Clone)] pub(crate) enum LazyLoadedFunctionState { Unresolved { + // Note: this contains layouts from storage, which may be outdated (e.g., storing only old + // enum variant layouts even when the enum has been upgraded to contain more variants). data: SerializedFunctionData, }, Resolved { @@ -131,29 +137,101 @@ pub(crate) enum LazyLoadedFunctionState { // unresolved case, the type argument tags are stored with the serialized data. ty_args: Vec, mask: ClosureMask, + // Layouts for captured arguments. The invariant is that these are always set for storable + // closures at construction time. Non-storable closures just have None as they will not be + // serialized anyway.
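+ // Illustrative sketch: given this invariant, serialization never needs to + // construct layouts on the fly; it can simply unwrap the precomputed ones, + // roughly: + //     captured_layouts.clone().ok_or_else(|| /* non-storable closure */ err)? + // which is what the FunctionValueExtension implementation does later in + // this diff.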
+ captured_layouts: Option>, + }, } impl LazyLoadedFunction { pub(crate) fn new_unresolved(data: SerializedFunctionData) -> Self { - Self(Rc::new(RefCell::new(LazyLoadedFunctionState::Unresolved { - data, - }))) + Self { + state: Rc::new(RefCell::new(LazyLoadedFunctionState::Unresolved { data })), + } } pub(crate) fn new_resolved( - runtime_environment: &RuntimeEnvironment, + module_storage: &impl ModuleStorage, + gas_meter: &mut impl DependencyGasMeter, + traversal_context: &mut TraversalContext, fun: Rc, mask: ClosureMask, ) -> PartialVMResult { + let runtime_environment = module_storage.runtime_environment(); let ty_args = fun .ty_args .iter() .map(|t| runtime_environment.ty_to_ty_tag(t)) .collect::>>()?; - Ok(Self(Rc::new(RefCell::new( - LazyLoadedFunctionState::Resolved { fun, ty_args, mask }, - )))) + + // When building a closure, if it captures arguments, and it is persistent (i.e., it may + // be stored to storage), pre-compute layouts which will be stored alongside the captured + // arguments. This way layouts always exist for storable closures and there is no need to + // construct them at serialization time. This makes loading and metering logic much simpler + // while adding layout construction overhead only for storable closures. + let captured_layouts = fun + .function + .is_persistent() + .then(|| { + // In case there are delayed fields when constructing captured layouts, we need to + // fail early so that capturing them is not allowed at all. + Self::construct_captured_layouts( + module_storage, + gas_meter, + traversal_context, + &fun, + mask, + )? + .ok_or_else(|| { + PartialVMError::new(StatusCode::UNABLE_TO_CAPTURE_DELAYED_FIELDS) + .with_message("Function values cannot capture delayed fields".to_string()) + }) + }) + .transpose()?; + + Ok(Self { + state: Rc::new(RefCell::new(LazyLoadedFunctionState::Resolved { + fun, + ty_args, + mask, + captured_layouts, + })), + }) + } + + /// For a given function and a mask, constructs a vector of layouts for the captured arguments. + /// Returns [None] if there are any captured delayed fields in the layouts (i.e., the captured + /// values are neither serializable nor "displayable"). For all other failures, an error is + /// returned. + pub(crate) fn construct_captured_layouts( + module_storage: &impl ModuleStorage, + _gas_meter: &mut impl DependencyGasMeter, + _traversal_context: &mut TraversalContext, + fun: &LoadedFunction, + mask: ClosureMask, + ) -> PartialVMResult>> { + let ty_converter = StorageLayoutConverter::new(module_storage); + let ty_builder = &module_storage.runtime_environment().vm_config().ty_builder; + + mask.extract(fun.param_tys(), true) + .into_iter() + .map(|ty| { + let (layout, contains_delayed_fields) = if fun.ty_args.is_empty() { + ty_converter.type_to_type_layout_with_identifier_mappings(ty)? + } else { + let ty = ty_builder.create_ty_with_subst(ty, &fun.ty_args)?; + ty_converter.type_to_type_layout_with_identifier_mappings(&ty)? + }; + + // Do not allow delayed fields to be serialized. + if contains_delayed_fields { + return Ok(None); + } + + Ok(Some(layout)) + }) + .collect::>>>() } pub(crate) fn expect_this_impl( @@ -173,7 +251,7 @@ impl LazyLoadedFunction { &self, action: impl FnOnce(Option<&ModuleId>, &IdentStr, &[TypeTag]) -> T, ) -> T { - match &*self.0.borrow() { + match &*self.state.borrow() { LazyLoadedFunctionState::Unresolved { data: SerializedFunctionData { @@ -190,17 +268,15 @@ impl LazyLoadedFunction { } } - /// Executed an action with the resolved loaded function.
If the function hasn't been - /// loaded yet, it will be loaded now. - #[allow(unused)] - pub(crate) fn with_resolved_function( + /// If the function hasn't been resolved (loaded) yet, loads it. The gas is also charged for + /// function loading and any other module accesses. + pub(crate) fn as_resolved( &self, - storage: &dyn ModuleStorage, - action: impl FnOnce(Rc) -> PartialVMResult, - ) -> PartialVMResult { - let mut state = self.0.borrow_mut(); - match &mut *state { - LazyLoadedFunctionState::Resolved { fun, .. } => action(fun.clone()), + module_storage: &impl ModuleStorage, + ) -> PartialVMResult> { + let mut state = self.state.borrow_mut(); + Ok(match &mut *state { + LazyLoadedFunctionState::Resolved { fun, .. } => fun.clone(), LazyLoadedFunctionState::Unresolved { data: SerializedFunctionData { @@ -212,85 +288,26 @@ impl LazyLoadedFunction { captured_layouts, }, } => { - let fun = - Self::resolve(storage, module_id, fun_id, ty_args, *mask, captured_layouts)?; - let result = action(fun.clone()); + let fun = module_storage + .load_function(module_id, fun_id, ty_args) + .map(Rc::new) + .map_err(|err| err.to_partial())?; + *state = LazyLoadedFunctionState::Resolved { - fun, - ty_args: ty_args.clone(), + fun: fun.clone(), + ty_args: mem::take(ty_args), mask: *mask, + captured_layouts: Some(mem::take(captured_layouts)), }; - result + fun }, - } - } - - /// Resolves a function into a loaded function. This verifies existence of the named - /// function as well as whether it has the type used for deserializing the captured values. - fn resolve( - module_storage: &dyn ModuleStorage, - module_id: &ModuleId, - fun_id: &IdentStr, - ty_args: &[TypeTag], - mask: ClosureMask, - captured_layouts: &[MoveTypeLayout], - ) -> PartialVMResult> { - let function = module_storage - .load_function(module_id, fun_id, ty_args) - .map_err(|err| err.to_partial())?; - - // Verify that the function argument types match the layouts used for deserialization. - // This is only done in paranoid mode. Since integrity of storage - // and guarantee of public function, this should not able to fail. - if module_storage - .runtime_environment() - .vm_config() - .paranoid_type_checks - { - // TODO(#15664): Determine whether we need to charge gas here. - let captured_arg_types = mask.extract(function.param_tys(), true); - let converter = StorageLayoutConverter::new(module_storage); - if captured_arg_types.len() != captured_layouts.len() { - return Err(PartialVMError::new(StatusCode::FUNCTION_RESOLUTION_FAILURE) - .with_message( - "captured argument count does not match declared parameters".to_string(), - )); - } - - let ty_builder = &module_storage.runtime_environment().vm_config().ty_builder; - for (actual_arg_ty, serialized_layout) in - captured_arg_types.into_iter().zip(captured_layouts) - { - // We do not allow function values to capture any delayed fields, for now. Note - // that this is enforced at serialization time. Here we cannot enforce it because - // function value could have stored an old version of an enum without an aggregator - // but the new layout has the new variant with the aggregator. In any case, the - // serializer will fail on this resolved closure if there is an attempt to put it - // back into storage. - let actual_arg_layout = if function.ty_args().is_empty() { - converter.type_to_type_layout(actual_arg_ty)? - } else { - let actual_arg_ty = - ty_builder.create_ty_with_subst(actual_arg_ty, function.ty_args())?; - converter.type_to_type_layout(&actual_arg_ty)? 
- }; - - if !serialized_layout.is_compatible_with(&actual_arg_layout) { - return Err(PartialVMError::new(StatusCode::FUNCTION_RESOLUTION_FAILURE) - .with_message( - "stored captured argument layout does not match declared parameters" - .to_string(), - )); - } - } - } - Ok(Rc::new(function)) + }) } } impl AbstractFunction for LazyLoadedFunction { fn closure_mask(&self) -> ClosureMask { - let state = self.0.borrow(); + let state = self.state.borrow(); match &*state { LazyLoadedFunctionState::Resolved { mask, .. } => *mask, LazyLoadedFunctionState::Unresolved { @@ -316,10 +333,10 @@ impl AbstractFunction for LazyLoadedFunction { Ok(Box::new(self.clone())) } - fn to_stable_string(&self) -> String { + fn to_canonical_string(&self) -> String { self.with_name_and_ty_args(|module_id, fun_id, ty_args| { let prefix = if let Some(m) = module_id { - format!("0x{}::{}::", m.address(), m.name()) + format!("{}::{}", m.address(), m.name()) } else { "".to_string() }; @@ -332,7 +349,7 @@ impl AbstractFunction for LazyLoadedFunction { .iter() .map(|t| t.to_canonical_string()) .collect::>() - .join(",") + .join(", ") ) }; format!("{}::{}{}", prefix, fun_id, ty_args_str) @@ -346,10 +363,6 @@ impl LoadedFunction { &self.ty_args } - pub fn abilities(&self) -> AbilitySet { - self.function.abilities() - } - /// Returns the corresponding module id of this function, i.e., its address and module name. pub fn module_id(&self) -> Option<&ModuleId> { match &self.owner { @@ -378,7 +391,7 @@ impl LoadedFunction { /// Returns true if the loaded function has friend or private visibility. pub fn is_friend_or_private(&self) -> bool { - self.function.is_friend() || self.function.is_private() + self.is_friend() || self.is_private() } /// Returns true if the loaded function has public visibility. @@ -386,6 +399,16 @@ impl LoadedFunction { self.function.is_public() } + /// Returns true if the loaded function has friend visibility. + pub fn is_friend(&self) -> bool { + self.function.is_friend() + } + + /// Returns true if the loaded function has private visibility. + pub fn is_private(&self) -> bool { + self.function.is_private() + } + /// Returns an error if the loaded function is **NOT** an entry function. pub fn is_entry_or_err(&self) -> VMResult<()> { if !self.function.is_entry() { @@ -576,28 +599,6 @@ impl Function { self.has_module_reentrancy_lock } - /// Creates the function type instance for this function. This requires cloning - /// the parameter and result types. - pub fn create_function_type(&self) -> Type { - Type::Function { - args: self.param_tys.clone(), - results: self.return_tys.clone(), - abilities: self.abilities(), - } - } - - /// Returns the abilities associated with this function, without consideration of any captured - /// closure arguments. By default, this is copy and drop, and if the function signature cannot - /// be changed (i.e., the function has `#[persistent]` attribute or is public), also store. 
- pub fn abilities(&self) -> AbilitySet { - let result = AbilitySet::singleton(Ability::Copy).add(Ability::Drop); - if self.is_persistent() { - result.add(Ability::Store) - } else { - result - } - } - pub fn is_native(&self) -> bool { self.is_native } diff --git a/third_party/move/move-vm/runtime/src/loader/modules.rs b/third_party/move/move-vm/runtime/src/loader/modules.rs index 0597fafb5e1d6..5846a7d546154 100644 --- a/third_party/move/move-vm/runtime/src/loader/modules.rs +++ b/third_party/move/move-vm/runtime/src/loader/modules.rs @@ -32,7 +32,7 @@ use move_vm_types::loaded_data::{ struct_name_indexing::{StructNameIndex, StructNameIndexMap}, }; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, BTreeSet, HashMap}, fmt::Debug, ops::Deref, sync::Arc, @@ -93,6 +93,10 @@ pub struct Module { // `VecMutBorrow(SignatureIndex)`, the `SignatureIndex` maps to a single `SignatureToken`, and // hence, a single type. pub(crate) single_signature_token_map: BTreeMap, + + // Friends of this module. Needed for re-entrancy visibility checks if lazy loading is enabled. + // Particularly, if a callee has friend visibility, the caller's module must be in this set. + pub(crate) friends: BTreeSet, } #[derive(Clone, Debug)] @@ -153,6 +157,10 @@ impl Module { let _timer = VM_TIMER.timer_with_label("Module::new"); let id = module.self_id(); + let friends = module + .immediate_friends_iter() + .map(|(addr, name)| ModuleId::new(*addr, name.to_owned())) + .collect::>(); let mut structs = vec![]; let mut struct_instantiations = vec![]; @@ -373,9 +381,48 @@ impl Module { function_map, struct_map, single_signature_token_map, + friends, }) } + /// Creates a new Module instance for testing purposes. + /// This method creates a minimal Module with empty contents. + #[cfg(any(test, feature = "testing"))] + pub fn new_for_test(module_id: ModuleId) -> Self { + use move_binary_format::file_format::empty_module; + + // Start with an empty module + let mut empty_module = empty_module(); + + // Update the module ID + empty_module.identifiers[0] = module_id.name().to_owned(); + empty_module.address_identifiers[0] = *module_id.address(); + + // Create necessary empty collections + let module_arc = Arc::new(empty_module); + + Self { + id: module_id, + size: 0, + module: module_arc, + structs: vec![], + struct_instantiations: vec![], + struct_variant_infos: vec![], + struct_variant_instantiation_infos: vec![], + function_refs: vec![], + function_defs: vec![], + function_instantiations: vec![], + field_handles: vec![], + field_instantiations: vec![], + variant_field_infos: vec![], + variant_field_instantiation_infos: vec![], + function_map: HashMap::new(), + struct_map: HashMap::new(), + single_signature_token_map: BTreeMap::new(), + friends: BTreeSet::new(), + } + } + fn make_struct_type( module: &CompiledModule, struct_def: &StructDefinition, diff --git a/third_party/move/move-vm/runtime/src/module_traversal.rs b/third_party/move/move-vm/runtime/src/module_traversal.rs index e16758dd9eb62..dc2c78d35ed1f 100644 --- a/third_party/move/move-vm/runtime/src/module_traversal.rs +++ b/third_party/move/move-vm/runtime/src/module_traversal.rs @@ -64,6 +64,26 @@ impl<'a> TraversalContext<'a> { !addr.is_special() && self.visited.insert((addr, name), ()).is_none() } + /// If the address of the specified module id is not special, adds the address-name pair to the + /// visited set and returns true. If the address is special, or if the set already contains the + /// pair, returns false. 
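+    /// +    /// Illustrative sketch of the intended call pattern (cf. `LazyLoader::charge_module` +    /// later in this diff): +    /// ```ignore +    /// if traversal_context.visit_if_not_special_module_id(&module_id) { +    ///     // First access within this context: charge for the module's size. +    ///     gas_meter.charge_dependency(false, addr, name, NumBytes::new(size as u64))?; +    /// } +    /// ```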
+ pub fn visit_if_not_special_module_id(&mut self, module_id: &ModuleId) -> bool { + let addr = module_id.address(); + if addr.is_special() { + return false; + } + + let name = module_id.name(); + if self.visited.contains_key(&(addr, name)) { + false + } else { + let module_id = self.referenced_module_ids.alloc(module_id.clone()); + self.visited + .insert((module_id.address(), module_id.name()), ()); + true + } + } + /// No-op if address is visited, otherwise returns an invariant violation error. fn check_visited_impl(&self, addr: &AccountAddress, name: &IdentStr) -> PartialVMResult<()> { if self.visited.contains_key(&(addr, name)) { @@ -128,8 +148,9 @@ mod test { let mut traversal_context = TraversalContext::new(&traversal_storage); let special = AccountAddress::ONE; - let non_special = AccountAddress::from_hex_literal("0x123").unwrap(); - assert!(special.is_special() && !non_special.is_special()); + let non_special_1 = AccountAddress::from_hex_literal("0x123").unwrap(); + let non_special_2 = AccountAddress::from_hex_literal("0x234").unwrap(); + assert!(special.is_special() && !non_special_1.is_special() && !non_special_2.is_special()); let allocated_module_id = |addr| { let module_id = ModuleId::new(addr, ident_str!("foo").to_owned()); @@ -145,25 +166,35 @@ mod test { .expect_err("0x1 is special address and should not be visited"); assert!(!traversal_context.visit_if_not_special_address(special.address(), special.name())); + assert!(!traversal_context.visit_if_not_special_module_id(special)); assert!(traversal_context.visited.is_empty()); traversal_context .legacy_check_visited(special.address(), special.name()) .expect_err("0x1 is special address but we don't allow them to be non-visited"); - let non_special = allocated_module_id(non_special); + let non_special_1 = allocated_module_id(non_special_1); + let non_special_2 = ModuleId::new(non_special_2, ident_str!("foo").to_owned()); traversal_context - .check_is_special_or_visited(non_special.address(), non_special.name()) + .check_is_special_or_visited(non_special_1.address(), non_special_1.name()) .expect_err("0x123 is non-special address and have not been visited"); + traversal_context + .check_is_special_or_visited(non_special_2.address(), non_special_2.name()) + .expect_err("0x234 is non-special address and have not been visited"); assert!(traversal_context - .visit_if_not_special_address(non_special.address(), non_special.name())); - assert_eq!(traversal_context.visited.len(), 1); + .visit_if_not_special_address(non_special_1.address(), non_special_1.name())); + assert!(traversal_context.visit_if_not_special_module_id(&non_special_2)); + assert_eq!(traversal_context.visited.len(), 2); traversal_context - .check_is_special_or_visited(non_special.address(), non_special.name()) + .check_is_special_or_visited(non_special_1.address(), non_special_1.name()) .expect("0x123 is non-special address but have been visited"); + traversal_context + .check_is_special_or_visited(non_special_2.address(), non_special_2.name()) + .expect("0x234 is non-special address but have been visited"); // Double insertion: should not be visiting anymore. 
assert!(!traversal_context - .visit_if_not_special_address(non_special.address(), non_special.name())); + .visit_if_not_special_address(non_special_1.address(), non_special_1.name())); + assert!(!traversal_context.visit_if_not_special_module_id(&non_special_2)); } } diff --git a/third_party/move/move-vm/runtime/src/move_vm.rs b/third_party/move/move-vm/runtime/src/move_vm.rs index 5b0e8efda6d57..ccdeb834c0355 100644 --- a/third_party/move/move-vm/runtime/src/move_vm.rs +++ b/third_party/move/move-vm/runtime/src/move_vm.rs @@ -4,10 +4,15 @@ use crate::{ data_cache::TransactionDataCache, + dispatch_loader, interpreter::Interpreter, module_traversal::TraversalContext, native_extensions::NativeContextExtensions, - storage::ty_layout_converter::{LayoutConverter, StorageLayoutConverter}, + storage::{ + loader::traits::Loader, + ty_depth_checker::TypeDepthChecker, + ty_layout_converter::{LayoutConverter, StorageLayoutConverter}, + }, AsFunctionValueExtension, LoadedFunction, ModuleStorage, }; use move_binary_format::{ @@ -20,7 +25,7 @@ use move_vm_types::{ gas::GasMeter, loaded_data::runtime_types::Type, resolver::ResourceResolver, - value_serde::ValueSerDeContext, + value_serde::{FunctionValueExtension, ValueSerDeContext}, values::{Locals, Reference, VMValueCast, Value}, }; use std::borrow::Borrow; @@ -60,6 +65,32 @@ impl MoveVM { extensions: &mut NativeContextExtensions, module_storage: &impl ModuleStorage, resource_resolver: &impl ResourceResolver, + ) -> VMResult { + dispatch_loader!(module_storage, loader, { + Self::execute_loaded_function_impl( + function, + serialized_args, + data_cache, + &loader, + gas_meter, + traversal_context, + extensions, + module_storage, + resource_resolver, + ) + }) + } + + pub fn execute_loaded_function_impl( + function: LoadedFunction, + serialized_args: Vec>, + data_cache: &mut TransactionDataCache, + loader: &impl Loader, + gas_meter: &mut impl GasMeter, + traversal_context: &mut TraversalContext, + extensions: &mut NativeContextExtensions, + module_storage: &impl ModuleStorage, + resource_resolver: &impl ResourceResolver, ) -> VMResult { let vm_config = module_storage.runtime_environment().vm_config(); let ty_builder = &vm_config.ty_builder; @@ -80,11 +111,13 @@ impl MoveVM { let return_values = { let _timer = VM_TIMER.timer_with_label("Interpreter::entrypoint"); + let ty_depth_checker = TypeDepthChecker::new(loader); Interpreter::entrypoint( function, deserialized_args, data_cache, module_storage, + &ty_depth_checker, resource_resolver, gas_meter, traversal_context, @@ -146,7 +179,8 @@ fn deserialize_arg( } let function_value_extension = module_storage.as_function_value_extension(); - ValueSerDeContext::new() + let max_value_nest_depth = function_value_extension.max_value_nest_depth(); + ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(&function_value_extension) .deserialize(arg.borrow(), &layout) .ok_or_else(deserialization_error) @@ -226,7 +260,8 @@ fn serialize_return_value( } let function_value_extension = module_storage.as_function_value_extension(); - let bytes = ValueSerDeContext::new() + let max_value_nest_depth = function_value_extension.max_value_nest_depth(); + let bytes = ValueSerDeContext::new(max_value_nest_depth) .with_func_args_deserialization(&function_value_extension) .serialize(&value, &layout)? 
.ok_or_else(serialization_error)?; diff --git a/third_party/move/move-vm/runtime/src/native_functions.rs b/third_party/move/move-vm/runtime/src/native_functions.rs index 6b442c86d41c8..4c9f6e84d28ae 100644 --- a/third_party/move/move-vm/runtime/src/native_functions.rs +++ b/third_party/move/move-vm/runtime/src/native_functions.rs @@ -3,28 +3,43 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ + ambassador_impl_ModuleStorage, ambassador_impl_WithRuntimeEnvironment, + check_dependencies_and_charge_gas, data_cache::TransactionDataCache, interpreter::InterpreterDebugInterface, + loader::{LazyLoadedFunction, LazyLoadedFunctionState}, module_traversal::TraversalContext, native_extensions::NativeContextExtensions, storage::{ module_storage::FunctionValueExtensionAdapter, ty_layout_converter::{LayoutConverter, StorageLayoutConverter}, }, - ModuleStorage, + Function, LoadedFunction, Module, ModuleStorage, RuntimeEnvironment, WithRuntimeEnvironment, +}; +use ambassador::delegate_to_methods; +use bytes::Bytes; +use move_binary_format::{ + errors::{ExecutionState, PartialVMError, PartialVMResult, VMResult}, + CompiledModule, }; -use move_binary_format::errors::{ExecutionState, PartialVMError, PartialVMResult}; use move_core_types::{ account_address::AccountAddress, gas_algebra::{InternalGas, NumBytes}, - identifier::Identifier, - language_storage::TypeTag, + identifier::{IdentStr, Identifier}, + language_storage::{ModuleId, TypeTag}, + metadata::Metadata, value::MoveTypeLayout, vm_status::StatusCode, }; use move_vm_types::{ - loaded_data::runtime_types::Type, natives::function::NativeResult, resolver::ResourceResolver, - values::Value, + gas::{ambassador_impl_DependencyGasMeter, DependencyGasMeter, NativeGasMeter}, + loaded_data::{ + runtime_types::{StructType, Type}, + struct_name_indexing::StructNameIndex, + }, + natives::function::NativeResult, + resolver::ResourceResolver, + values::{AbstractFunction, Value}, }; use std::{ collections::{HashMap, VecDeque}, @@ -98,32 +113,25 @@ impl NativeFunctions { } } -pub struct NativeContext<'a, 'b> { +pub struct NativeContext<'a, 'b, 'c> { interpreter: &'a dyn InterpreterDebugInterface, data_store: &'a mut TransactionDataCache, resource_resolver: &'a dyn ResourceResolver, module_storage: &'a dyn ModuleStorage, extensions: &'a mut NativeContextExtensions<'b>, - gas_balance: InternalGas, - traversal_context: &'a TraversalContext<'a>, - - /// Counter used to record the (conceptual) heap memory usage by a native functions, - /// measured in abstract memory unit. - /// - /// This is a hack to emulate memory usage tracking, before we could refactor native functions - /// and allow them to access the gas meter directly. 
- heap_memory_usage: u64, + gas_meter: &'a mut dyn NativeGasMeter, + traversal_context: &'a mut TraversalContext<'c>, } -impl<'a, 'b> NativeContext<'a, 'b> { +impl<'a, 'b, 'c> NativeContext<'a, 'b, 'c> { pub(crate) fn new( interpreter: &'a dyn InterpreterDebugInterface, data_store: &'a mut TransactionDataCache, resource_resolver: &'a dyn ResourceResolver, module_storage: &'a dyn ModuleStorage, extensions: &'a mut NativeContextExtensions<'b>, - gas_balance: InternalGas, - traversal_context: &'a TraversalContext<'a>, + gas_meter: &'a mut dyn NativeGasMeter, + traversal_context: &'a mut TraversalContext<'c>, ) -> Self { Self { interpreter, @@ -131,15 +139,13 @@ impl<'a, 'b> NativeContext<'a, 'b> { resource_resolver, module_storage, extensions, - gas_balance, + gas_meter, traversal_context, - - heap_memory_usage: 0, } } } -impl<'b> NativeContext<'_, 'b> { +impl<'b, 'c> NativeContext<'_, 'b, 'c> { pub fn print_stack_trace(&self, buf: &mut String) -> PartialVMResult<()> { self.interpreter .debug_print_stack_trace(buf, self.module_storage.runtime_environment()) @@ -209,19 +215,30 @@ impl<'b> NativeContext<'_, 'b> { self.interpreter.get_stack_frames(count) } - pub fn gas_balance(&self) -> InternalGas { - self.gas_balance + pub fn legacy_gas_budget(&self) -> InternalGas { + self.gas_meter.legacy_gas_budget_in_native_context() } - pub fn use_heap_memory(&mut self, amount: u64) { - self.heap_memory_usage = self.heap_memory_usage.saturating_add(amount); + /// Returns the gas meter used for execution. Even if native functions cannot use it to + /// charge gas (feature-gating), the gas meter can still be used to query the remaining balance. + pub fn gas_meter(&mut self) -> &mut dyn NativeGasMeter { + self.gas_meter } - pub fn heap_memory_usage(&self) -> u64 { - self.heap_memory_usage + pub fn charge_gas_for_dependencies(&mut self, module_id: ModuleId) -> VMResult<()> { + let arena_id = self + .traversal_context + .referenced_module_ids + .alloc(module_id); + check_dependencies_and_charge_gas( + self.module_storage, + self.gas_meter, + self.traversal_context, + [(arena_id.address(), arena_id.name())], + ) } - pub fn traversal_context(&self) -> &TraversalContext { + pub fn traversal_context(&self) -> &TraversalContext<'c> { self.traversal_context } @@ -230,4 +247,65 @@ impl<'b> NativeContext<'_, 'b> { module_storage: self.module_storage, } } + + /// Returns a vector of layouts for captured arguments. Used to format captured arguments as + /// strings. Returns [Ok(None)] in case layouts contain delayed fields (i.e., the values cannot + /// be formatted as strings). + pub fn get_captured_layouts_for_string_utils( + &mut self, + fun: &dyn AbstractFunction, + ) -> PartialVMResult>> { + Ok( + match &*LazyLoadedFunction::expect_this_impl(fun)?.state.borrow() { + LazyLoadedFunctionState::Unresolved { data, .. } => { + Some(data.captured_layouts.clone()) + }, + LazyLoadedFunctionState::Resolved { + fun, + mask, + captured_layouts, + .. + } => match captured_layouts.as_ref() { + Some(captured_layouts) => Some(captured_layouts.clone()), + None => LazyLoadedFunction::construct_captured_layouts( + &ModuleStorageWrapper { + module_storage: self.module_storage, + }, + &mut DependencyGasMeterWrapper { + gas_meter: self.gas_meter, + }, + self.traversal_context, + fun, + *mask, + )?, + }, + }, + ) + } +} + +// Wrappers to use trait objects where static dispatch is expected.
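+// Illustrative sketch of why these are needed: `construct_captured_layouts` takes +// `&impl ModuleStorage`, which a `&dyn ModuleStorage` cannot satisfy directly, so a +// call site first wraps the trait object: +//     let storage = ModuleStorageWrapper { module_storage }; +//     LazyLoadedFunction::construct_captured_layouts(&storage, /* ... */); +// as `get_captured_layouts_for_string_utils` above does.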
+struct ModuleStorageWrapper<'a> { + module_storage: &'a dyn ModuleStorage, +} + +#[delegate_to_methods] +#[delegate(WithRuntimeEnvironment, target_ref = "inner")] +#[delegate(ModuleStorage, target_ref = "inner")] +impl<'a> ModuleStorageWrapper<'a> { + fn inner(&self) -> &dyn ModuleStorage { + self.module_storage + } +} + +struct DependencyGasMeterWrapper<'a> { + gas_meter: &'a mut dyn DependencyGasMeter, +} + +#[delegate_to_methods] +#[delegate(DependencyGasMeter, target_mut = "inner_mut")] +impl<'a> DependencyGasMeterWrapper<'a> { + fn inner_mut(&mut self) -> &mut dyn DependencyGasMeter { + self.gas_meter + } } diff --git a/third_party/move/move-vm/runtime/src/runtime_type_checks.rs b/third_party/move/move-vm/runtime/src/runtime_type_checks.rs index 8c4fc1f117a10..f2ea8378fdca9 100644 --- a/third_party/move/move-vm/runtime/src/runtime_type_checks.rs +++ b/third_party/move/move-vm/runtime/src/runtime_type_checks.rs @@ -1,12 +1,15 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{frame::Frame, frame_type_cache::FrameTypeCache, interpreter::Stack, LoadedFunction}; +use crate::{ + frame::Frame, frame_type_cache::FrameTypeCache, interpreter::Stack, + reentrancy_checker::CallType, LoadedFunction, +}; use move_binary_format::{errors::*, file_format::Bytecode}; use move_core_types::{ ability::{Ability, AbilitySet}, function::ClosureMask, - vm_status::StatusCode, + vm_status::{sub_status::unknown_invariant_violation::EPARANOID_FAILURE, StatusCode}, }; use move_vm_types::loaded_data::runtime_types::{Type, TypeBuilder}; @@ -32,6 +35,57 @@ pub(crate) trait RuntimeTypeCheck { /// For any other checks are performed externally fn should_perform_checks() -> bool; + + /// Performs a runtime check of whether the caller is allowed to call the callee, for any type + /// of call, including native dynamic dispatch or calling a closure. + fn check_call_visibility( + caller: &LoadedFunction, + callee: &LoadedFunction, + call_type: CallType, + ) -> PartialVMResult<()> { + match call_type { + CallType::Regular => { + // We only need to check cross-contract calls. + if caller.module_id() == callee.module_id() { + return Ok(()); + } + Self::check_cross_module_regular_call_visibility(caller, callee) + }, + CallType::ClosureDynamicDispatch => { + // In contrast to regular calls, we skip the visibility check. It is possible to call + // a private function of another module via a closure. + Ok(()) + }, + CallType::NativeDynamicDispatch => { + // Dynamic dispatch may fail at runtime and this is ok. Hence, these errors are not + // invariant violations as they cannot be checked at compile- or load-time. + // + // Note: native dispatch cannot call into the same module, otherwise the reentrancy + // check is broken. For more details, see AIP-73: + // https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-73.md + if callee.is_friend_or_private() || callee.module_id() == caller.module_id() { + return Err(PartialVMError::new(StatusCode::RUNTIME_DISPATCH_ERROR) + .with_message( + "Invoking private or friend function during dispatch".to_string(), + )); + } + + if callee.is_native() { + return Err(PartialVMError::new(StatusCode::RUNTIME_DISPATCH_ERROR) + .with_message("Invoking native function during dispatch".to_string())); + } + Ok(()) + }, + } + } + + /// Performs a runtime check of whether the caller is allowed to call a cross-module callee. + /// Applies only to regular static calls (no dynamic dispatch!), with the caller and callee + /// coming from different modules.
+ fn check_cross_module_regular_call_visibility( + caller: &LoadedFunction, + callee: &LoadedFunction, + ) -> PartialVMResult<()>; } fn verify_pack<'a>( @@ -173,6 +227,13 @@ impl RuntimeTypeCheck for NoRuntimeTypeCheck { fn should_perform_checks() -> bool { false } + + fn check_cross_module_regular_call_visibility( + _caller: &LoadedFunction, + _callee: &LoadedFunction, + ) -> PartialVMResult<()> { + Ok(()) + } } impl RuntimeTypeCheck for FullRuntimeTypeCheck { @@ -784,4 +845,42 @@ impl RuntimeTypeCheck for FullRuntimeTypeCheck { fn should_perform_checks() -> bool { true } + + fn check_cross_module_regular_call_visibility( + caller: &LoadedFunction, + callee: &LoadedFunction, + ) -> PartialVMResult<()> { + if callee.is_private() { + let msg = format!( + "Function {}::{} cannot be called because it is private", + callee.module_or_script_id(), + callee.name() + ); + return Err( + PartialVMError::new_invariant_violation(msg).with_sub_status(EPARANOID_FAILURE) + ); + } + + if callee.is_friend() { + let callee_module = callee.owner_as_module().map_err(|err| err.to_partial())?; + if !caller + .module_id() + .is_some_and(|id| callee_module.friends.contains(id)) + { + let msg = format!( + "Function {}::{} cannot be called because it has friend visibility, but {} \ + is not {}'s friend", + callee.module_or_script_id(), + callee.name(), + caller.module_or_script_id(), + callee.module_or_script_id() + ); + return Err( + PartialVMError::new_invariant_violation(msg).with_sub_status(EPARANOID_FAILURE) + ); + } + } + + Ok(()) + } } diff --git a/third_party/move/move-vm/runtime/src/storage/dependencies_gas_charging.rs b/third_party/move/move-vm/runtime/src/storage/dependencies_gas_charging.rs index d70af398304cc..065b1ba04b45f 100644 --- a/third_party/move/move-vm/runtime/src/storage/dependencies_gas_charging.rs +++ b/third_party/move/move-vm/runtime/src/storage/dependencies_gas_charging.rs @@ -13,7 +13,10 @@ use move_core_types::{ language_storage::{ModuleId, TypeTag}, }; use move_vm_metrics::{Timer, VM_TIMER}; -use move_vm_types::{gas::GasMeter, module_linker_error}; +use move_vm_types::{ + gas::{DependencyGasMeter, GasMeter}, + module_linker_error, +}; use std::collections::BTreeSet; pub fn check_script_dependencies_and_check_gas( @@ -80,8 +83,8 @@ pub fn check_type_tag_dependencies_and_charge_gas( /// `ModuleId`, a.k.a. heap allocations, as much as possible, which is critical for /// performance. pub fn check_dependencies_and_charge_gas<'a, I>( - module_storage: &impl ModuleStorage, - gas_meter: &mut impl GasMeter, + module_storage: &dyn ModuleStorage, + gas_meter: &mut dyn DependencyGasMeter, traversal_context: &mut TraversalContext<'a>, ids: I, ) -> VMResult<()> @@ -99,7 +102,7 @@ where while let Some((addr, name)) = stack.pop() { let size = module_storage - .fetch_module_size_in_bytes(addr, name)? + .unmetered_get_module_size(addr, name)? 
.ok_or_else(|| module_linker_error!(addr, name))?; gas_meter .charge_dependency(false, addr, name, NumBytes::new(size as u64)) diff --git a/third_party/move/move-vm/runtime/src/storage/depth_formula_calculator.rs b/third_party/move/move-vm/runtime/src/storage/depth_formula_calculator.rs deleted file mode 100644 index d11d90d424164..0000000000000 --- a/third_party/move/move-vm/runtime/src/storage/depth_formula_calculator.rs +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) The Move Contributors -// SPDX-License-Identifier: Apache-2.0 - -use crate::ModuleStorage; -use move_binary_format::{ - errors::{PartialVMError, PartialVMResult}, - file_format::TypeParameterIndex, -}; -use move_core_types::vm_status::StatusCode; -use move_vm_types::loaded_data::{ - runtime_types::{DepthFormula, StructLayout, Type}, - struct_name_indexing::StructNameIndex, -}; -use std::collections::{BTreeMap, HashMap}; - -/// Calculates [DepthFormula] for struct types. Stores a cache of visited formulas. -pub(crate) struct DepthFormulaCalculator<'a, M> { - module_storage: &'a M, - visited_formulas: HashMap, -} - -impl<'a, M> DepthFormulaCalculator<'a, M> -where - M: ModuleStorage, -{ - pub(crate) fn new(module_storage: &'a M) -> Self { - Self { - module_storage, - visited_formulas: HashMap::new(), - } - } - - pub(crate) fn calculate_depth_of_struct( - &mut self, - struct_name_idx: &StructNameIndex, - ) -> PartialVMResult { - if let Some(depth_formula) = self.visited_formulas.get(struct_name_idx) { - return Ok(depth_formula.clone()); - } - - let struct_type = self - .module_storage - .fetch_struct_ty_by_idx(struct_name_idx)?; - let formulas = match &struct_type.layout { - StructLayout::Single(fields) => fields - .iter() - .map(|(_, field_ty)| self.calculate_depth_of_type(field_ty)) - .collect::>>()?, - StructLayout::Variants(variants) => variants - .iter() - .flat_map(|variant| variant.1.iter().map(|(_, ty)| ty)) - .map(|field_ty| self.calculate_depth_of_type(field_ty)) - .collect::>>()?, - }; - - let formula = DepthFormula::normalize(formulas); - if self - .visited_formulas - .insert(*struct_name_idx, formula.clone()) - .is_some() - { - // Same thread has put this entry previously, which means there is a recursion. - let struct_name = self - .module_storage - .runtime_environment() - .struct_name_index_map() - .idx_to_struct_name_ref(*struct_name_idx)?; - return Err( - PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message( - format!( - "Depth formula for struct '{}' is already cached by the same thread", - struct_name.as_ref(), - ), - ), - ); - } - Ok(formula) - } - - fn calculate_depth_of_type(&mut self, ty: &Type) -> PartialVMResult { - Ok(match ty { - Type::Bool - | Type::U8 - | Type::U64 - | Type::U128 - | Type::Address - | Type::Signer - | Type::U16 - | Type::U32 - | Type::U256 => DepthFormula::constant(1), - Type::Vector(ty) => { - let mut inner = self.calculate_depth_of_type(ty)?; - inner.scale(1); - inner - }, - Type::Reference(ty) | Type::MutableReference(ty) => { - let mut inner = self.calculate_depth_of_type(ty)?; - inner.scale(1); - inner - }, - Type::TyParam(ty_idx) => DepthFormula::type_parameter(*ty_idx), - Type::Struct { idx, .. } => { - let mut struct_formula = self.calculate_depth_of_struct(idx)?; - debug_assert!(struct_formula.terms.is_empty()); - struct_formula.scale(1); - struct_formula - }, - Type::StructInstantiation { idx, ty_args, .. 
} => { - let ty_arg_map = ty_args - .iter() - .enumerate() - .map(|(idx, ty)| { - let var = idx as TypeParameterIndex; - Ok((var, self.calculate_depth_of_type(ty)?)) - }) - .collect::>>()?; - let struct_formula = self.calculate_depth_of_struct(idx)?; - let mut subst_struct_formula = struct_formula.subst(ty_arg_map)?; - subst_struct_formula.scale(1); - subst_struct_formula - }, - Type::Function { - args, - results, - abilities: _, - } => { - let mut inner = DepthFormula::normalize( - args.iter() - .chain(results) - .map(|arg_ty| self.calculate_depth_of_type(arg_ty)) - .collect::>>()?, - ); - inner.scale(1); - inner - }, - }) - } -} diff --git a/third_party/move/move-vm/runtime/src/storage/loader/eager.rs b/third_party/move/move-vm/runtime/src/storage/loader/eager.rs new file mode 100644 index 0000000000000..42003635648ab --- /dev/null +++ b/third_party/move/move-vm/runtime/src/storage/loader/eager.rs @@ -0,0 +1,66 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + module_traversal::TraversalContext, + storage::loader::traits::{Loader, StructDefinitionLoader}, + ModuleStorage, RuntimeEnvironment, WithRuntimeEnvironment, +}; +use move_binary_format::errors::PartialVMResult; +use move_vm_types::{ + gas::DependencyGasMeter, + loaded_data::{runtime_types::StructType, struct_name_indexing::StructNameIndex}, +}; +use std::sync::Arc; + +/// Eager loader implementation used prior to lazy loading. It uses eager module verification by +/// loading and verifying the transitive closure of module's dependencies and friends. The gas is +/// metered at "entrypoints" (entry function or a script, dynamic dispatch) for the whole closure +/// at once. +pub struct EagerLoader<'a, T> { + module_storage: &'a T, +} + +impl<'a, T> EagerLoader<'a, T> +where + T: ModuleStorage, +{ + /// Returns a new eager loader. 
+ pub fn new(module_storage: &'a T) -> Self { + Self { module_storage } + } +} + +impl<'a, T> WithRuntimeEnvironment for EagerLoader<'a, T> +where + T: ModuleStorage, +{ + fn runtime_environment(&self) -> &RuntimeEnvironment { + self.module_storage.runtime_environment() + } +} + +impl<'a, T> StructDefinitionLoader for EagerLoader<'a, T> +where + T: ModuleStorage, +{ + fn load_struct_definition( + &self, + _gas_meter: &mut impl DependencyGasMeter, + _traversal_context: &mut TraversalContext, + idx: &StructNameIndex, + ) -> PartialVMResult> { + let struct_name = self + .runtime_environment() + .struct_name_index_map() + .idx_to_struct_name_ref(*idx)?; + + self.module_storage.fetch_struct_ty( + struct_name.module.address(), + struct_name.module.name(), + struct_name.name.as_ident_str(), + ) + } +} + +impl<'a, T> Loader for EagerLoader<'a, T> where T: ModuleStorage {} diff --git a/third_party/move/move-vm/runtime/src/storage/loader/lazy.rs b/third_party/move/move-vm/runtime/src/storage/loader/lazy.rs new file mode 100644 index 0000000000000..e15b41f411507 --- /dev/null +++ b/third_party/move/move-vm/runtime/src/storage/loader/lazy.rs @@ -0,0 +1,91 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + module_traversal::TraversalContext, + storage::loader::traits::{Loader, StructDefinitionLoader}, + ModuleStorage, RuntimeEnvironment, WithRuntimeEnvironment, +}; +use move_binary_format::errors::PartialVMResult; +use move_core_types::{gas_algebra::NumBytes, language_storage::ModuleId}; +use move_vm_types::{ + gas::DependencyGasMeter, + loaded_data::{runtime_types::StructType, struct_name_indexing::StructNameIndex}, + module_linker_error, +}; +use std::sync::Arc; + +/// Loader implementation used after lazy loading is enabled. Every module access is metered +/// dynamically (if it is the first access to a module within the current [TraversalContext], gas +/// is charged). Module verification is lazy: there is no loading of the transitive closure of the +/// module's dependencies and friends when accessing a verified module, a function definition or a +/// struct definition. +pub struct LazyLoader<'a, T> { + module_storage: &'a T, +} + +impl<'a, T> LazyLoader<'a, T> +where + T: ModuleStorage, +{ + /// Returns a new lazy loader. + pub fn new(module_storage: &'a T) -> Self { + Self { module_storage } + } + + /// Charges gas for the module load if the module has not been loaded already. + fn charge_module( + &self, + gas_meter: &mut impl DependencyGasMeter, + traversal_context: &mut TraversalContext, + module_id: &ModuleId, + ) -> PartialVMResult<()> { + if traversal_context.visit_if_not_special_module_id(module_id) { + let addr = module_id.address(); + let name = module_id.name(); + + let size = self + .module_storage + .unmetered_get_module_size(addr, name) + .map_err(|err| err.to_partial())?
+ .ok_or_else(|| module_linker_error!(addr, name).to_partial())?; + gas_meter.charge_dependency(false, addr, name, NumBytes::new(size as u64))?; + } + Ok(()) + } +} + +impl<'a, T> WithRuntimeEnvironment for LazyLoader<'a, T> +where + T: ModuleStorage, +{ + fn runtime_environment(&self) -> &RuntimeEnvironment { + self.module_storage.runtime_environment() + } +} + +impl<'a, T> StructDefinitionLoader for LazyLoader<'a, T> +where + T: ModuleStorage, +{ + fn load_struct_definition( + &self, + gas_meter: &mut impl DependencyGasMeter, + traversal_context: &mut TraversalContext, + idx: &StructNameIndex, + ) -> PartialVMResult> { + let struct_name = self + .runtime_environment() + .struct_name_index_map() + .idx_to_struct_name_ref(*idx)?; + + self.charge_module(gas_meter, traversal_context, &struct_name.module)?; + self.module_storage.fetch_struct_ty( + struct_name.module.address(), + struct_name.module.name(), + struct_name.name.as_ident_str(), + ) + } +} + +impl<'a, T> Loader for LazyLoader<'a, T> where T: ModuleStorage {} diff --git a/third_party/move/move-vm/runtime/src/storage/loader/mod.rs b/third_party/move/move-vm/runtime/src/storage/loader/mod.rs new file mode 100644 index 0000000000000..d09d46c287c0e --- /dev/null +++ b/third_party/move/move-vm/runtime/src/storage/loader/mod.rs @@ -0,0 +1,8 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod eager; +pub(crate) mod lazy; +#[cfg(test)] +pub(crate) mod test_utils; +pub(crate) mod traits; diff --git a/third_party/move/move-vm/runtime/src/storage/loader/test_utils.rs b/third_party/move/move-vm/runtime/src/storage/loader/test_utils.rs new file mode 100644 index 0000000000000..b165ea16f6364 --- /dev/null +++ b/third_party/move/move-vm/runtime/src/storage/loader/test_utils.rs @@ -0,0 +1,172 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + module_traversal::TraversalContext, storage::loader::traits::StructDefinitionLoader, + RuntimeEnvironment, WithRuntimeEnvironment, +}; +use claims::assert_none; +use move_binary_format::errors::{PartialVMError, PartialVMResult}; +use move_core_types::{ + ability::AbilitySet, identifier::Identifier, language_storage::ModuleId, vm_status::StatusCode, +}; +use move_vm_types::{ + gas::DependencyGasMeter, + loaded_data::{ + runtime_types::{AbilityInfo, StructIdentifier, StructLayout, StructType, Type}, + struct_name_indexing::StructNameIndex, + }, +}; +use smallbitvec::SmallBitVec; +use std::{cell::RefCell, collections::HashMap, str::FromStr, sync::Arc}; + +/// Creates a dummy struct definition. +pub(crate) fn struct_definition( + idx: StructNameIndex, + fields: Vec<(Identifier, Type)>, +) -> StructType { + StructType { + idx, + layout: StructLayout::Single(fields), + // Below is irrelevant for tests. + ty_params: vec![], + phantom_ty_params_mask: SmallBitVec::default(), + abilities: AbilitySet::EMPTY, + } +} + +/// Creates a dummy enum definition. +pub(crate) fn enum_definition( + idx: StructNameIndex, + variants: Vec<(Identifier, Vec<(Identifier, Type)>)>, +) -> StructType { + StructType { + idx, + layout: StructLayout::Variants(variants), + // Below is irrelevant for tests. + ty_params: vec![], + phantom_ty_params_mask: SmallBitVec::default(), + abilities: AbilitySet::EMPTY, + } +} + +/// Creates a dummy struct (or enum) type. +pub(crate) fn struct_ty(idx: StructNameIndex) -> Type { + Type::Struct { + idx, + // Below is irrelevant for tests. 
+ ability: AbilityInfo::struct_(AbilitySet::EMPTY), + } +} + +/// Creates a dummy generic struct (or enum) type. +pub(crate) fn generic_struct_ty(idx: StructNameIndex, ty_args: Vec) -> Type { + Type::StructInstantiation { + idx, + ty_args: triomphe::Arc::new(ty_args), + // Below is irrelevant for tests. + ability: AbilityInfo::struct_(AbilitySet::EMPTY), + } +} + +/// Mocks struct definition loading, by holding a cache of [StructType]s and runtime environment. +pub(crate) struct MockStructDefinitionLoader { + runtime_environment: RuntimeEnvironment, + struct_definitions: RefCell>>, +} + +impl MockStructDefinitionLoader { + /// Returns an index for a struct name. The struct name is added to the environment. + pub(crate) fn get_struct_identifier(&self, struct_name: &str) -> StructNameIndex { + let struct_identifier = StructIdentifier { + module: ModuleId::from_str("0x1::foo").unwrap(), + name: Identifier::from_str(struct_name).unwrap(), + }; + self.runtime_environment + .struct_name_to_idx_for_test(struct_identifier) + .unwrap() + } + + /// Adds a dummy struct to the mock cache. + pub(crate) fn add_struct<'a>( + &self, + struct_name: &str, + field_struct_names: impl IntoIterator, + ) { + let fields = field_struct_names + .into_iter() + .map(|(name, field_ty)| { + let field_name = Identifier::from_str(name).unwrap(); + (field_name, field_ty) + }) + .collect(); + + let idx = self.get_struct_identifier(struct_name); + let struct_definition = struct_definition(idx, fields); + + assert_none!(self + .struct_definitions + .borrow_mut() + .insert(struct_definition.idx, Arc::new(struct_definition))); + } + + /// Adds a dummy enum to the mock cache. + pub(crate) fn add_enum<'a>( + &self, + struct_name: &str, + variant_field_struct_names: impl IntoIterator)>, + ) { + let variants = variant_field_struct_names + .into_iter() + .map(|(variant, fields)| { + let variant_name = Identifier::from_str(variant).unwrap(); + let fields = fields + .into_iter() + .map(|(name, field_ty)| { + let field_name = Identifier::from_str(name).unwrap(); + (field_name, field_ty) + }) + .collect::>(); + (variant_name, fields) + }) + .collect(); + + let idx = self.get_struct_identifier(struct_name); + let struct_definition = enum_definition(idx, variants); + + assert_none!(self + .struct_definitions + .borrow_mut() + .insert(struct_definition.idx, Arc::new(struct_definition))); + } +} + +impl Default for MockStructDefinitionLoader { + fn default() -> Self { + Self { + runtime_environment: RuntimeEnvironment::new(vec![]), + struct_definitions: RefCell::new(HashMap::new()), + } + } +} + +impl WithRuntimeEnvironment for MockStructDefinitionLoader { + fn runtime_environment(&self) -> &RuntimeEnvironment { + &self.runtime_environment + } +} + +impl StructDefinitionLoader for MockStructDefinitionLoader { + fn load_struct_definition( + &self, + _gas_meter: &mut impl DependencyGasMeter, + _traversal_context: &mut TraversalContext, + idx: &StructNameIndex, + ) -> PartialVMResult> { + self.struct_definitions + .borrow() + .get(idx) + .cloned() + .ok_or_else(|| PartialVMError::new(StatusCode::LINKER_ERROR)) + } +} diff --git a/third_party/move/move-vm/runtime/src/storage/loader/traits.rs b/third_party/move/move-vm/runtime/src/storage/loader/traits.rs new file mode 100644 index 0000000000000..2f9a0572846ed --- /dev/null +++ b/third_party/move/move-vm/runtime/src/storage/loader/traits.rs @@ -0,0 +1,28 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{module_traversal::TraversalContext, 
WithRuntimeEnvironment}; +use move_binary_format::errors::PartialVMResult; +use move_vm_types::{ + gas::DependencyGasMeter, + loaded_data::{runtime_types::StructType, struct_name_indexing::StructNameIndex}, +}; +use std::sync::Arc; + +/// Provides access to struct definitions. +pub trait StructDefinitionLoader: WithRuntimeEnvironment { + /// Returns the struct definition corresponding to the specified index. The function may also + /// charge gas for loading the module where the struct is defined. Returns an error if such + /// metering fails, or if the struct or the module where it is defined does not exist. + fn load_struct_definition( + &self, + gas_meter: &mut impl DependencyGasMeter, + traversal_context: &mut TraversalContext, + idx: &StructNameIndex, + ) -> PartialVMResult>; +} + +/// Encapsulates all possible module accesses in a safe, gas-metered way. This trait (and the +/// more fine-grained traits it builds on) should be used when working with modules, functions, +/// structs, and other module information. +pub trait Loader: StructDefinitionLoader {} diff --git a/third_party/move/move-vm/runtime/src/storage/mod.rs b/third_party/move/move-vm/runtime/src/storage/mod.rs index 441c4d5283c8b..e9d326665be81 100644 --- a/third_party/move/move-vm/runtime/src/storage/mod.rs +++ b/third_party/move/move-vm/runtime/src/storage/mod.rs @@ -1,7 +1,8 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -pub(crate) mod depth_formula_calculator; +pub(crate) mod loader; +pub(crate) mod ty_depth_checker; pub(crate) mod ty_tag_converter; mod verified_module_cache; diff --git a/third_party/move/move-vm/runtime/src/storage/module_storage.rs b/third_party/move/move-vm/runtime/src/storage/module_storage.rs index b20a30afeada9..64b48543ef7dc 100644 --- a/third_party/move/move-vm/runtime/src/storage/module_storage.rs +++ b/third_party/move/move-vm/runtime/src/storage/module_storage.rs @@ -4,7 +4,6 @@ use crate::{ loader::{Function, LazyLoadedFunction, LazyLoadedFunctionState, LoadedFunctionOwner, Module}, logging::expect_no_verification_errors, - storage::ty_layout_converter::{LayoutConverter, StorageLayoutConverter}, LoadedFunction, WithRuntimeEnvironment, }; use ambassador::delegatable_trait; @@ -59,7 +58,10 @@ pub trait ModuleStorage: WithRuntimeEnvironment { /// Returns the size of a module in bytes, or [None] otherwise. An error is returned if /// there is a storage error. - fn fetch_module_size_in_bytes( + /// + /// Note: this API is not metered! It is only used to get the size of a module so that metering + /// can actually be implemented before loading a module. + fn unmetered_get_module_size( &self, address: &AccountAddress, module_name: &IdentStr, @@ -274,7 +276,7 @@ where .map(|(module, _)| module.extension().bytes().clone())) } - fn fetch_module_size_in_bytes( + fn unmetered_get_module_size( &self, address: &AccountAddress, module_name: &IdentStr, @@ -552,6 +554,7 @@ where /// Avoids the orphan rule to implement external [FunctionValueExtension] for any generic type that /// implements [ModuleStorage]. pub struct FunctionValueExtensionAdapter<'a> { + #[allow(dead_code)] pub(crate) module_storage: &'a dyn ModuleStorage, } @@ -579,40 +582,23 @@ impl FunctionValueExtension for FunctionValueExtensionAdapter<'_> { &self, fun: &dyn AbstractFunction, ) -> PartialVMResult { - match &*LazyLoadedFunction::expect_this_impl(fun)?.0.borrow() { + match &*LazyLoadedFunction::expect_this_impl(fun)?.state.borrow() { LazyLoadedFunctionState::Unresolved { data, ..
} => Ok(data.clone()), - LazyLoadedFunctionState::Resolved { fun, mask, ty_args } => { - let ty_converter = StorageLayoutConverter::new(self.module_storage); - let ty_builder = &self - .module_storage - .runtime_environment() - .vm_config() - .ty_builder; - - let captured_layouts = mask - .extract(fun.param_tys(), true) - .into_iter() - .map(|ty| { - let (layout, contains_delayed_fields) = if fun.ty_args.is_empty() { - ty_converter.type_to_type_layout_with_identifier_mappings(ty)? - } else { - let ty = ty_builder.create_ty_with_subst(ty, &fun.ty_args)?; - ty_converter.type_to_type_layout_with_identifier_mappings(&ty)? - }; - - // Do not allow delayed fields to be serialized. - if contains_delayed_fields { - let err = PartialVMError::new(StatusCode::VALUE_SERIALIZATION_ERROR) - .with_message( - "Function values that capture delayed fields cannot be serialized" - .to_string(), - ); - return Err(err); - } - - Ok(layout) - }) - .collect::>>()?; + LazyLoadedFunctionState::Resolved { + fun, + mask, + ty_args, + captured_layouts, + } => { + // If there are no captured layouts, then this closure is non-storable, i.e., the + // function is not persistent (neither public nor marked with the #[persistent] + // attribute). This means that anonymous lambda-lifted functions cannot be + // serialized either. + let captured_layouts = captured_layouts.as_ref().cloned().ok_or_else(|| { + let msg = "Captured layouts must always be computed for storable closures"; + PartialVMError::new(StatusCode::VALUE_SERIALIZATION_ERROR) + .with_message(msg.to_string()) + })?; Ok(SerializedFunctionData { format_version: FUNCTION_DATA_SERIALIZATION_FORMAT_V1, @@ -632,4 +618,12 @@ impl FunctionValueExtension for FunctionValueExtensionAdapter<'_> { }, } } + + fn max_value_nest_depth(&self) -> Option { + let vm_config = self.module_storage.runtime_environment().vm_config(); + vm_config + .enable_depth_checks + .then_some(vm_config.max_value_nest_depth) + .flatten() + } } diff --git a/third_party/move/move-vm/runtime/src/storage/ty_depth_checker.rs b/third_party/move/move-vm/runtime/src/storage/ty_depth_checker.rs new file mode 100644 index 0000000000000..6119fe181cfc2 --- /dev/null +++ b/third_party/move/move-vm/runtime/src/storage/ty_depth_checker.rs @@ -0,0 +1,711 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{module_traversal::TraversalContext, storage::loader::traits::StructDefinitionLoader}; +use move_binary_format::{ + errors::{PartialVMError, PartialVMResult}, + file_format::TypeParameterIndex, +}; +use move_core_types::vm_status::StatusCode; +use move_vm_metrics::{Timer, VM_TIMER}; +use move_vm_types::{ + gas::DependencyGasMeter, + loaded_data::{ + runtime_types::{DepthFormula, StructIdentifier, StructLayout, Type}, + struct_name_indexing::StructNameIndex, + }, +}; +use std::{ + cell::RefCell, + collections::{BTreeMap, HashMap, HashSet}, + sync::Arc, +}; + +/// Checks depths for instantiated types in order to bound value size. The idea is that if the +/// depth of the type is bounded, so is the depth of the corresponding value. Note that this is +/// no longer the case with function values enabled: captured arguments are not visible in the type, +/// but do increase the value depth. As a result, it is possible to have a shallow function type, +/// while the value stores a long chain of nested function values via captured arguments. +/// TODO: consider deprecating since values are also bounded dynamically now. +/// +/// For structs, stores a cache of formulas.
diff --git a/third_party/move/move-vm/runtime/src/storage/ty_depth_checker.rs b/third_party/move/move-vm/runtime/src/storage/ty_depth_checker.rs
new file mode 100644
index 0000000000000..6119fe181cfc2
--- /dev/null
+++ b/third_party/move/move-vm/runtime/src/storage/ty_depth_checker.rs
@@ -0,0 +1,711 @@
+// Copyright (c) The Move Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{module_traversal::TraversalContext, storage::loader::traits::StructDefinitionLoader};
+use move_binary_format::{
+    errors::{PartialVMError, PartialVMResult},
+    file_format::TypeParameterIndex,
+};
+use move_core_types::vm_status::StatusCode;
+use move_vm_metrics::{Timer, VM_TIMER};
+use move_vm_types::{
+    gas::DependencyGasMeter,
+    loaded_data::{
+        runtime_types::{DepthFormula, StructIdentifier, StructLayout, Type},
+        struct_name_indexing::StructNameIndex,
+    },
+};
+use std::{
+    cell::RefCell,
+    collections::{BTreeMap, HashMap, HashSet},
+    sync::Arc,
+};
+
+/// Checks depths for instantiated types in order to bound value size. The idea is that if the
+/// depth of the type is bounded, so is the depth of the corresponding value. Note that this is
+/// no longer the case with function values enabled: captured arguments are not visible in the
+/// type, but do increase the value depth. As a result, it is possible to have a shallow function
+/// type, while the value stores a long chain of nested function values via captured arguments.
+/// TODO: consider deprecating since values are also bounded dynamically now.
+///
+/// For structs, stores a cache of formulas. The cache is used for performance (avoid repeated
+/// formula construction within a single transaction).
+///
+/// While a formula is being constructed, also checks for cycles between struct definitions. That
+/// is, cases like
+/// ```text
+/// struct A { b: B, }
+/// struct B { a: A, }
+/// ```
+/// are not allowed and constructing a formula for these types will fail.
+pub(crate) struct TypeDepthChecker<'a, T> {
+    struct_definition_loader: &'a T,
+    /// If set, stores the maximum allowed depth of a type. Otherwise, the checker is a no-op.
+    maybe_max_depth: Option<u64>,
+    /// Caches formulas visited so far.
+    formula_cache: RefCell<HashMap<StructNameIndex, DepthFormula>>,
+}
+
+impl<'a, T> TypeDepthChecker<'a, T>
+where
+    T: StructDefinitionLoader,
+{
+    /// Creates a new depth checker for the specified loader to query struct definitions if needed.
+    pub(crate) fn new(struct_definition_loader: &'a T) -> Self {
+        let maybe_max_depth = struct_definition_loader
+            .runtime_environment()
+            .vm_config()
+            .max_value_nest_depth;
+        Self {
+            struct_definition_loader,
+            maybe_max_depth,
+            formula_cache: RefCell::new(HashMap::new()),
+        }
+    }
+
+    /// Checks the depth of a type. If the type is too deep, returns an error. Note that the type
+    /// must be non-generic, i.e., all type substitutions must be performed. If needed, the check
+    /// traverses multiple modules where inner structs and their fields are defined.
+    pub(crate) fn check_depth_of_type(
+        &self,
+        gas_meter: &mut impl DependencyGasMeter,
+        traversal_context: &mut TraversalContext,
+        ty: &Type,
+    ) -> PartialVMResult<()> {
+        let max_depth = match self.maybe_max_depth {
+            Some(max_depth) => max_depth,
+            None => return Ok(()),
+        };
+
+        let _timer = VM_TIMER.timer_with_label("check_depth_of_type");
+
+        // Start at 1 since we always call this right before we add a new node to the value's
+        // depth.
+        self.recursive_check_depth_of_type(gas_meter, traversal_context, ty, max_depth, 1)?;
+        Ok(())
+    }
+}
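+
+// Example (sketch, not part of the API surface): a caller that is about to create a value of an
+// instantiated type first runs the check below; `loader`, `gas_meter`, and `traversal_context`
+// are assumed to be in scope.
+//
+//     let checker = TypeDepthChecker::new(&loader);
+//     checker.check_depth_of_type(&mut gas_meter, &mut traversal_context, &ty)?;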
check_depth { + ($additional_depth:expr) => {{ + let new_depth = depth.saturating_add($additional_depth); + if new_depth > max_depth { + return Err(PartialVMError::new(StatusCode::VM_MAX_VALUE_DEPTH_REACHED)); + } else { + new_depth + } + }}; + } + + let ty_depth = match ty { + Type::Bool + | Type::U8 + | Type::U16 + | Type::U32 + | Type::U64 + | Type::U128 + | Type::U256 + | Type::Address + | Type::Signer => check_depth!(0), + // For function types, we ignore the return/argument types because they do not bound + // value size, and we do not to error on a false positive (function operates on a + // nested value, but does not capture it). + Type::Function { .. } => check_depth!(0), + Type::Reference(ty) | Type::MutableReference(ty) => self + .recursive_check_depth_of_type( + gas_meter, + traversal_context, + ty, + max_depth, + check_depth!(1), + )?, + Type::Vector(ty) => self.recursive_check_depth_of_type( + gas_meter, + traversal_context, + ty, + max_depth, + check_depth!(1), + )?, + Type::Struct { idx, .. } => { + let formula = visit_struct!(idx); + check_depth!(formula.solve(&[])) + }, + Type::StructInstantiation { idx, ty_args, .. } => { + let ty_arg_depths = ty_args + .iter() + .map(|ty| { + self.recursive_check_depth_of_type( + gas_meter, + traversal_context, + ty, + max_depth, + check_depth!(0), + ) + }) + .collect::>>()?; + + let formula = visit_struct!(idx); + check_depth!(formula.solve(&ty_arg_depths)) + }, + Type::TyParam(_) => { + return Err( + PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) + .with_message("Type parameter should be fully resolved".to_string()), + ) + }, + }; + + Ok(ty_depth) + } + + /// Calculates the depth formula for a (possibly generic) struct or enum. Returns an error if + /// the struct definition is recursive: i.e., some struct A uses struct B that uses struct A. + fn calculate_struct_depth_formula( + &self, + gas_meter: &mut impl DependencyGasMeter, + traversal_context: &mut TraversalContext, + currently_visiting: &mut HashSet, + idx: &StructNameIndex, + ) -> PartialVMResult { + // If the struct is being visited, we found a recursive definition. + if currently_visiting.contains(idx) { + let struct_name = self.get_struct_name(idx)?; + let msg = format!( + "Definition of struct {}::{}::{} is recursive: failed to construct its depth formula", + struct_name.module.address, struct_name.module.name, struct_name.name + ); + return Err( + PartialVMError::new(StatusCode::RUNTIME_CYCLIC_MODULE_DEPENDENCY).with_message(msg), + ); + } + + // Otherwise, check if we cached it. + if let Some(formula) = self.formula_cache.borrow().get(idx) { + return Ok(formula.clone()); + } + + // Struct has not been visited, mark as being visited. + assert!(currently_visiting.insert(*idx)); + + // Recursively visit its fields to construct the formula. 
+
+    /// Calculates the depth formula for a (possibly generic) struct or enum. Returns an error if
+    /// the struct definition is recursive, i.e., some struct A uses struct B that uses struct A.
+    fn calculate_struct_depth_formula(
+        &self,
+        gas_meter: &mut impl DependencyGasMeter,
+        traversal_context: &mut TraversalContext,
+        currently_visiting: &mut HashSet<StructNameIndex>,
+        idx: &StructNameIndex,
+    ) -> PartialVMResult<DepthFormula> {
+        // If the struct is being visited, we found a recursive definition.
+        if currently_visiting.contains(idx) {
+            let struct_name = self.get_struct_name(idx)?;
+            let msg = format!(
+                "Definition of struct {}::{}::{} is recursive: failed to construct its depth formula",
+                struct_name.module.address, struct_name.module.name, struct_name.name
+            );
+            return Err(
+                PartialVMError::new(StatusCode::RUNTIME_CYCLIC_MODULE_DEPENDENCY).with_message(msg),
+            );
+        }
+
+        // Otherwise, check if we cached it.
+        if let Some(formula) = self.formula_cache.borrow().get(idx) {
+            return Ok(formula.clone());
+        }
+
+        // Struct has not been visited, mark as being visited.
+        assert!(currently_visiting.insert(*idx));
+
+        // Recursively visit its fields to construct the formula.
+        let struct_definition = self.struct_definition_loader.load_struct_definition(
+            gas_meter,
+            traversal_context,
+            idx,
+        )?;
+
+        let formulas = match &struct_definition.layout {
+            StructLayout::Single(fields) => fields
+                .iter()
+                .map(|(_, field_ty)| {
+                    self.calculate_type_depth_formula(
+                        gas_meter,
+                        traversal_context,
+                        currently_visiting,
+                        field_ty,
+                    )
+                })
+                .collect::<PartialVMResult<Vec<_>>>()?,
+            StructLayout::Variants(variants) => variants
+                .iter()
+                .flat_map(|variant| variant.1.iter().map(|(_, ty)| ty))
+                .map(|field_ty| {
+                    self.calculate_type_depth_formula(
+                        gas_meter,
+                        traversal_context,
+                        currently_visiting,
+                        field_ty,
+                    )
+                })
+                .collect::<PartialVMResult<Vec<_>>>()?,
+        };
+        let formula = DepthFormula::normalize(formulas);
+
+        // Add the formula to the cache and remove it from the currently visited set.
+        assert!(currently_visiting.remove(idx));
+        let prev = self
+            .formula_cache
+            .borrow_mut()
+            .insert(*idx, formula.clone());
+        if prev.is_some() {
+            // Clear cache if there is an invariant violation, to be safe.
+            self.formula_cache.borrow_mut().clear();
+            let struct_name = self.get_struct_name(idx)?;
+            let msg = format!(
+                "Depth formula for struct {}::{}::{} is already cached",
+                struct_name.module.address, struct_name.module.name, struct_name.name
+            );
+            return Err(PartialVMError::new_invariant_violation(msg));
+        }
+
+        Ok(formula)
+    }
+
+    /// Calculates the depth formula of the specified [Type]. There are no constraints on the
+    /// type; it can even be a generic type parameter.
+    fn calculate_type_depth_formula(
+        &self,
+        gas_meter: &mut impl DependencyGasMeter,
+        traversal_context: &mut TraversalContext,
+        currently_visiting: &mut HashSet<StructNameIndex>,
+        ty: &Type,
+    ) -> PartialVMResult<DepthFormula> {
+        Ok(match ty {
+            Type::Bool
+            | Type::U8
+            | Type::U64
+            | Type::U128
+            | Type::Address
+            | Type::Signer
+            | Type::U16
+            | Type::U32
+            | Type::U256 => DepthFormula::constant(1),
+            // For function types, we ignore the return/argument types because they do not bound
+            // the value size, and we do not want to error on a false positive (a function may
+            // operate on a nested value without capturing it). Hence, we simply return a
+            // constant here.
+            Type::Function { .. } => DepthFormula::constant(1),
+            Type::Vector(ty) => self
+                .calculate_type_depth_formula(gas_meter, traversal_context, currently_visiting, ty)?
+                .scale(1),
+            Type::Reference(ty) | Type::MutableReference(ty) => self
+                .calculate_type_depth_formula(gas_meter, traversal_context, currently_visiting, ty)?
+                .scale(1),
+            Type::TyParam(ty_idx) => DepthFormula::type_parameter(*ty_idx),
+            Type::Struct { idx, .. } => {
+                let struct_formula = self.calculate_struct_depth_formula(
+                    gas_meter,
+                    traversal_context,
+                    currently_visiting,
+                    idx,
+                )?;
+                debug_assert!(struct_formula.terms.is_empty());
+                struct_formula.scale(1)
+            },
+            Type::StructInstantiation { idx, ty_args, .. } => {
+                let ty_arg_map = ty_args
+                    .iter()
+                    .enumerate()
+                    .map(|(idx, ty)| {
+                        let var = idx as TypeParameterIndex;
+                        Ok((
+                            var,
+                            self.calculate_type_depth_formula(
+                                gas_meter,
+                                traversal_context,
+                                currently_visiting,
+                                ty,
+                            )?,
+                        ))
+                    })
+                    .collect::<PartialVMResult<BTreeMap<_, _>>>()?;
+                let struct_formula = self.calculate_struct_depth_formula(
+                    gas_meter,
+                    traversal_context,
+                    currently_visiting,
+                    idx,
+                )?;
+                struct_formula.subst(ty_arg_map)?.scale(1)
+            },
+        })
+    }
+
+    /// Returns the struct name for the specified index.
+ fn get_struct_name(&self, idx: &StructNameIndex) -> PartialVMResult> { + self.struct_definition_loader + .runtime_environment() + .struct_name_index_map() + .idx_to_struct_name_ref(*idx) + } +} + +// Test-only interfaces below. +#[cfg(test)] +impl<'a, T> TypeDepthChecker<'a, T> +where + T: StructDefinitionLoader, +{ + /// Creates a new depth checker for the specified loader and with specified maximum depth. + fn new_with_max_depth(struct_definition_loader: &'a T, max_depth: u64) -> Self { + Self { + struct_definition_loader, + maybe_max_depth: Some(max_depth), + formula_cache: RefCell::new(HashMap::new()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + module_traversal::TraversalStorage, + storage::loader::test_utils::{generic_struct_ty, struct_ty, MockStructDefinitionLoader}, + }; + use claims::{assert_err, assert_ok}; + use move_vm_types::gas::UnmeteredGasMeter; + + #[test] + fn test_struct_definition_not_found_cache_consistency() { + let mut gas_meter = UnmeteredGasMeter; + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + + let loader = MockStructDefinitionLoader::default(); + + // Create structs A and B with definition of C missing: + // + // struct A {} + // struct B { c: C, } + let a = loader.get_struct_identifier("A"); + let b = loader.get_struct_identifier("B"); + let c = loader.get_struct_identifier("C"); + loader.add_struct("A", vec![]); + loader.add_struct("B", vec![("c", struct_ty(c))]); + + let checker = TypeDepthChecker::new(&loader); + let mut currently_visiting = HashSet::new(); + + assert_ok!(checker.calculate_struct_depth_formula( + &mut gas_meter, + &mut traversal_context, + &mut currently_visiting, + &a + )); + assert!(currently_visiting.is_empty()); + assert_eq!(checker.formula_cache.borrow().len(), 1); + + let err = assert_err!(checker.calculate_struct_depth_formula( + &mut gas_meter, + &mut traversal_context, + &mut currently_visiting, + &b + )); + assert_eq!(err.major_status(), StatusCode::LINKER_ERROR); + assert_eq!(checker.formula_cache.borrow().len(), 1); + } + + #[test] + fn test_runtime_cyclic_module_dependency() { + let mut gas_meter = UnmeteredGasMeter; + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + + let loader = MockStructDefinitionLoader::default(); + + // No cycles (structs and enums): + // + // struct A {} + // enum B { + // V1 { x: bool, }, + // V2 { a: A, }, + // } + let a = loader.get_struct_identifier("A"); + let b = loader.get_struct_identifier("B"); + loader.add_struct("A", vec![]); + loader.add_enum("B", vec![ + ("V1", vec![("x", Type::Bool)]), + ("V2", vec![("a", struct_ty(a))]), + ]); + + // Cycles between structs C and D, and between E and itself: + // + // struct C { d: D, } + // struct D { c: C, } + // struct E { e: E, } + let c = loader.get_struct_identifier("C"); + let d = loader.get_struct_identifier("D"); + let e = loader.get_struct_identifier("E"); + loader.add_struct("C", vec![("d", struct_ty(d))]); + loader.add_struct("D", vec![("c", struct_ty(c))]); + loader.add_struct("E", vec![("e", struct_ty(e))]); + + // Cycles between enums F and G. 
+ // + // enum F { + // V0 { g: G, }, + // } + // enum G { + // V0 { fs: vector, }, + // } + let f = loader.get_struct_identifier("F"); + let g = loader.get_struct_identifier("G"); + loader.add_enum("F", vec![("V0", vec![("g", struct_ty(g))])]); + loader.add_enum("G", vec![("V0", vec![( + "fs", + Type::Vector(triomphe::Arc::new(struct_ty(f))), + )])]); + + let checker = TypeDepthChecker::new(&loader); + let mut currently_visiting = HashSet::new(); + + for idx in [a, b] { + assert_ok!(checker.calculate_struct_depth_formula( + &mut gas_meter, + &mut traversal_context, + &mut currently_visiting, + &idx + )); + assert!(currently_visiting.is_empty()); + } + + assert_eq!(checker.formula_cache.borrow().len(), 2); + + for idx in [c, d, e, f, g] { + let err = assert_err!(checker.calculate_struct_depth_formula( + &mut gas_meter, + &mut traversal_context, + &mut currently_visiting, + &idx + )); + assert_eq!( + err.major_status(), + StatusCode::RUNTIME_CYCLIC_MODULE_DEPENDENCY + ); + } + assert_eq!(checker.formula_cache.borrow().len(), 2); + } + + #[test] + fn test_runtime_cyclic_module_dependency_generic() { + let mut gas_meter = UnmeteredGasMeter; + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + + let loader = MockStructDefinitionLoader::default(); + + // Simple struct to store in the checker cache to check cache consistency. + let a = loader.get_struct_identifier("A"); + loader.add_struct("A", vec![]); + + // Cycle between generic struct B and C: + // + // struct B { c: C } + // struct C { x: T, b: B } + let b = loader.get_struct_identifier("B"); + let c = loader.get_struct_identifier("C"); + loader.add_struct("B", vec![( + "c", + generic_struct_ty(c, vec![Type::TyParam(0)]), + )]); + loader.add_struct("C", vec![ + ("x", Type::TyParam(0)), + ("b", generic_struct_ty(b, vec![Type::TyParam(0)])), + ]); + + // Cycle between generic enum and generic struct: + // + // struct D { x: T, e: E, } + // enum E { + // V0 { ds: vector>, }, + // } + let d = loader.get_struct_identifier("D"); + let e = loader.get_struct_identifier("E"); + loader.add_struct("D", vec![ + ("x", Type::TyParam(0)), + ("e", generic_struct_ty(e, vec![Type::U8])), + ]); + loader.add_enum("E", vec![("V0", vec![( + "ds", + Type::Vector(triomphe::Arc::new(struct_ty(d))), + )])]); + + let checker = TypeDepthChecker::new(&loader); + let mut currently_visiting = HashSet::new(); + + assert_ok!(checker.calculate_struct_depth_formula( + &mut gas_meter, + &mut traversal_context, + &mut currently_visiting, + &a + )); + + assert!(currently_visiting.is_empty()); + assert_eq!(checker.formula_cache.borrow().len(), 1); + + for idx in [b, c, d, e] { + let err = assert_err!(checker.calculate_struct_depth_formula( + &mut gas_meter, + &mut traversal_context, + &mut currently_visiting, + &idx, + )); + assert_eq!( + err.major_status(), + StatusCode::RUNTIME_CYCLIC_MODULE_DEPENDENCY + ); + assert_eq!(checker.formula_cache.borrow().len(), 1); + } + } + + #[test] + fn test_runtime_non_cyclic_module_dependencies_generic() { + let mut gas_meter = UnmeteredGasMeter; + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + + let loader = MockStructDefinitionLoader::default(); + + // These structs use definitions recursively but in type arguments, which is safe. 
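+        // (Recursion through type arguments always terminates: an instantiation such as A<A<u8>>
+        // is a finite type term, unlike a field edge that refers back to the defining struct.)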
+ // + // struct A { x: T, } + // struct B { xs: A>>, } + // struct C { x: T, } + // struct D { xs: A>>, } + + let a = loader.get_struct_identifier("A"); + let b = loader.get_struct_identifier("B"); + let c = loader.get_struct_identifier("C"); + let d = loader.get_struct_identifier("D"); + + let a_u8_ty = generic_struct_ty(a, vec![Type::U8]); + let a_a_u8_ty = generic_struct_ty(a, vec![a_u8_ty.clone()]); + let a_a_a_u8_ty = generic_struct_ty(a, vec![a_a_u8_ty]); + let c_a_u8_ty = generic_struct_ty(c, vec![a_u8_ty]); + let a_c_a_u8_ty = generic_struct_ty(a, vec![c_a_u8_ty]); + + loader.add_struct("A", vec![("x", Type::TyParam(0))]); + loader.add_struct("B", vec![("xs", a_a_a_u8_ty)]); + loader.add_struct("C", vec![("x", Type::TyParam(0))]); + loader.add_struct("D", vec![("xs", a_c_a_u8_ty)]); + + let checker = TypeDepthChecker::new(&loader); + let mut currently_visiting = HashSet::new(); + + for idx in [a, b, c, d] { + assert_ok!(checker.calculate_struct_depth_formula( + &mut gas_meter, + &mut traversal_context, + &mut currently_visiting, + &idx, + )); + assert!(currently_visiting.is_empty()); + } + + assert_eq!(checker.formula_cache.borrow().len(), 4); + } + + #[test] + fn test_runtime_non_cyclic_module_dependencies() { + let mut gas_meter = UnmeteredGasMeter; + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + + let loader = MockStructDefinitionLoader::default(); + + let a = loader.get_struct_identifier("A"); + let b = loader.get_struct_identifier("B"); + let c = loader.get_struct_identifier("C"); + + // These structs are not recursive. + // + // struct C {} + // struct B { c: C, } + // struct A { c: C, b: B, } + + loader.add_struct("C", vec![]); + loader.add_struct("B", vec![("c", struct_ty(c))]); + loader.add_struct("A", vec![("c", struct_ty(c)), ("b", struct_ty(b))]); + + for idx in [a, b, c] { + let checker = TypeDepthChecker::new(&loader); + let mut currently_visiting = HashSet::new(); + + assert_ok!(checker.calculate_struct_depth_formula( + &mut gas_meter, + &mut traversal_context, + &mut currently_visiting, + &idx, + )); + } + } + + #[test] + fn test_ty_to_deep() { + let mut gas_meter = UnmeteredGasMeter; + let traversal_storage = TraversalStorage::new(); + let mut traversal_context = TraversalContext::new(&traversal_storage); + + let loader = MockStructDefinitionLoader::default(); + + let a = loader.get_struct_identifier("A"); + let b = loader.get_struct_identifier("B"); + let c = loader.get_struct_identifier("C"); + + loader.add_struct("C", vec![("dummy", Type::Bool)]); + loader.add_struct("B", vec![("c", struct_ty(c))]); + loader.add_struct("A", vec![("b", struct_ty(b))]); + + let checker = TypeDepthChecker::new_with_max_depth(&loader, 2); + + assert_ok!(checker.check_depth_of_type(&mut gas_meter, &mut traversal_context, &Type::U8)); + + let vec_u8_ty = Type::Vector(triomphe::Arc::new(Type::U8)); + assert_ok!(checker.check_depth_of_type(&mut gas_meter, &mut traversal_context, &vec_u8_ty)); + + let vec_vec_u8_ty = Type::Vector(triomphe::Arc::new(vec_u8_ty.clone())); + assert_err!(checker.check_depth_of_type( + &mut gas_meter, + &mut traversal_context, + &vec_vec_u8_ty + )); + let ref_vec_u8_ty = Type::Reference(Box::new(vec_u8_ty)); + assert_err!(checker.check_depth_of_type( + &mut gas_meter, + &mut traversal_context, + &ref_vec_u8_ty + )); + + assert_ok!(checker.check_depth_of_type( + &mut gas_meter, + &mut traversal_context, + &struct_ty(c) + )); + assert_err!(checker.check_depth_of_type( + &mut 
gas_meter, + &mut traversal_context, + &struct_ty(b) + )); + assert_err!(checker.check_depth_of_type( + &mut gas_meter, + &mut traversal_context, + &struct_ty(a) + )); + } +} diff --git a/third_party/move/move-vm/runtime/src/storage/ty_tag_converter.rs b/third_party/move/move-vm/runtime/src/storage/ty_tag_converter.rs index 9e3929df3bb8a..597f64f33d17c 100644 --- a/third_party/move/move-vm/runtime/src/storage/ty_tag_converter.rs +++ b/third_party/move/move-vm/runtime/src/storage/ty_tag_converter.rs @@ -5,7 +5,7 @@ use crate::{config::VMConfig, RuntimeEnvironment}; use hashbrown::{hash_map::Entry, HashMap}; use move_binary_format::errors::{PartialVMError, PartialVMResult}; use move_core_types::{ - language_storage::{FunctionTag, StructTag, TypeTag}, + language_storage::{FunctionParamOrReturnTag, FunctionTag, StructTag, TypeTag}, vm_status::StatusCode, }; use move_vm_types::loaded_data::{runtime_types::Type, struct_name_indexing::StructNameIndex}; @@ -282,7 +282,8 @@ impl<'a> TypeTagConverter<'a> { TypeTag::Struct(Box::new(struct_tag)) }, - // Functions: recurse + // Functions: recursively construct tags for argument and return types. Note that these + // can be references, unlike regular tags. Type::Function { args, results, @@ -290,9 +291,23 @@ impl<'a> TypeTagConverter<'a> { } => { let to_vec = |ts: &[Type], gas_ctx: &mut PseudoGasContext| - -> PartialVMResult> { + -> PartialVMResult> { ts.iter() - .map(|t| self.ty_to_ty_tag_impl(t, gas_ctx)) + .map(|t| { + Ok(match t { + Type::Reference(t) => FunctionParamOrReturnTag::Reference( + self.ty_to_ty_tag_impl(t, gas_ctx)?, + ), + Type::MutableReference(t) => { + FunctionParamOrReturnTag::MutableReference( + self.ty_to_ty_tag_impl(t, gas_ctx)?, + ) + }, + t => FunctionParamOrReturnTag::Value( + self.ty_to_ty_tag_impl(t, gas_ctx)?, + ), + }) + }) .collect() }; TypeTag::Function(Box::new(FunctionTag { diff --git a/third_party/move/move-vm/test-utils/src/gas_schedule.rs b/third_party/move/move-vm/test-utils/src/gas_schedule.rs index bbbc41411155e..3cdc79c7dc026 100644 --- a/third_party/move/move-vm/test-utils/src/gas_schedule.rs +++ b/third_party/move/move-vm/test-utils/src/gas_schedule.rs @@ -30,7 +30,7 @@ use move_core_types::{ vm_status::StatusCode, }; use move_vm_types::{ - gas::{GasMeter, SimpleInstruction}, + gas::{DependencyGasMeter, GasMeter, NativeGasMeter, SimpleInstruction}, views::{TypeView, ValueView}, }; use once_cell::sync::Lazy; @@ -195,6 +195,32 @@ impl GasStatus { } } +impl DependencyGasMeter for GasStatus { + fn charge_dependency( + &mut self, + _is_new: bool, + _addr: &AccountAddress, + _name: &IdentStr, + _size: NumBytes, + ) -> PartialVMResult<()> { + Ok(()) + } +} + +impl NativeGasMeter for GasStatus { + fn legacy_gas_budget_in_native_context(&self) -> InternalGas { + self.gas_left + } + + fn charge_native_execution(&mut self, _amount: InternalGas) -> PartialVMResult<()> { + Ok(()) + } + + fn use_heap_memory_in_native_context(&mut self, _amount: u64) -> PartialVMResult<()> { + Ok(()) + } +} + impl GasMeter for GasStatus { fn balance_internal(&self) -> InternalGas { self.gas_left @@ -320,6 +346,24 @@ impl GasMeter for GasStatus { ) } + fn charge_pack_closure( + &mut self, + is_generic: bool, + args: impl ExactSizeIterator, + ) -> PartialVMResult<()> { + let field_count = AbstractMemorySize::new(args.len() as u64); + self.charge_instr_with_size( + if is_generic { + Opcodes::PACK_CLOSURE_GENERIC + } else { + Opcodes::PACK_CLOSURE + }, + args.fold(field_count, |acc, val| { + acc + val.legacy_abstract_memory_size() + }), + ) + } 
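+    // Note: the size charged above is `args.len()` plus the sum of the legacy abstract memory
+    // sizes of all captured values, mirroring how PACK is charged for struct fields.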
+ fn charge_read_ref(&mut self, ref_val: impl ValueView) -> PartialVMResult<()> { self.charge_instr_with_size(Opcodes::READ_REF, ref_val.legacy_abstract_memory_size()) } @@ -509,20 +553,6 @@ impl GasMeter for GasStatus { fn charge_create_ty(&mut self, _num_nodes: NumTypeNodes) -> PartialVMResult<()> { Ok(()) } - - fn charge_dependency( - &mut self, - _is_new: bool, - _addr: &AccountAddress, - _name: &IdentStr, - _size: NumBytes, - ) -> PartialVMResult<()> { - Ok(()) - } - - fn charge_heap_memory(&mut self, _amount: u64) -> PartialVMResult<()> { - Ok(()) - } } pub fn new_from_instructions(mut instrs: Vec<(Bytecode, GasCost)>) -> CostTable { diff --git a/third_party/move/move-vm/transactional-tests/tests/display/print_values.exp b/third_party/move/move-vm/transactional-tests/tests/display/print_values.exp index 94f8b5b43faef..442f9b501a4e1 100644 --- a/third_party/move/move-vm/transactional-tests/tests/display/print_values.exp +++ b/third_party/move/move-vm/transactional-tests/tests/display/print_values.exp @@ -1,49 +1,55 @@ processed 17 tasks -task 1 'run'. lines 87-87: +task 1 'run'. lines 88-88: return values: true -task 2 'run'. lines 89-89: +task 2 'run'. lines 90-90: return values: 0 -task 3 'run'. lines 91-91: +task 3 'run'. lines 92-92: return values: 1 -task 4 'run'. lines 93-93: +task 4 'run'. lines 94-94: return values: 2 -task 5 'run'. lines 95-95: +task 5 'run'. lines 96-96: return values: 3 -task 6 'run'. lines 97-97: +task 6 'run'. lines 98-98: return values: 4 -task 7 'run'. lines 99-99: +task 7 'run'. lines 100-100: return values: 0000000000000000000000000000000000000000000000000000000000000006 -task 8 'run'. lines 101-101: +task 8 'run'. lines 102-102: return values: [1, 2] -task 9 'run'. lines 103-103: +task 9 'run'. lines 104-104: return values: { 0, 0000000000000000000000000000000000000000000000000000000000000042 } -task 10 'run'. lines 105-105: +task 10 'run'. lines 106-106: return values: { { 1, 23 }, [1, 2, 3] } -task 11 'run'. lines 107-107: -return values: 0x0x0000000000000000000000000000000000000000000000000000000000000042::print_values::::__lambda__1__return_anonymous(..) +task 11 'run'. lines 108-108: +Error: Function execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: None, + location: undefined, + indices: [], + offsets: [], +} -task 12 'run'. lines 109-109: -return values: 0x0x0000000000000000000000000000000000000000000000000000000000000042::print_values::::sum(..) +task 12 'run'. lines 110-110: +return values: 0x0000000000000000000000000000000000000000000000000000000000000042::print_values::sum(..) -task 13 'run'. lines 111-111: -return values: 0x0x0000000000000000000000000000000000000000000000000000000000000042::print_values::::sum(U64(5), ..) +task 13 'run'. lines 112-112: +return values: 0x0000000000000000000000000000000000000000000000000000000000000042::print_values::sum(U64(5), ..) -task 14 'run'. lines 113-113: -return values: 0x0x0000000000000000000000000000000000000000000000000000000000000042::print_values::::sum(_, U64(10), ..) +task 14 'run'. lines 114-114: +return values: 0x0000000000000000000000000000000000000000000000000000000000000042::print_values::sum(_, U64(10), ..) -task 15 'run'. lines 115-115: -return values: 0x0x0000000000000000000000000000000000000000000000000000000000000042::print_values::::sum(U64(2), U64(1), ..) +task 15 'run'. lines 116-116: +return values: 0x0000000000000000000000000000000000000000000000000000000000000042::print_values::sum(U64(2), U64(1), ..) -task 16 'run'. 
lines 117-117: -return values: 0x0x0000000000000000000000000000000000000000000000000000000000000042::print_values::::__lambda__1__return_closure_capture_struct((container: [(container: [U16(1), U8(23)]), (container: [1, 2, 3])]), ..) +task 16 'run'. lines 118-118: +return values: 0x0000000000000000000000000000000000000000000000000000000000000042::print_values::return_struct(..) diff --git a/third_party/move/move-vm/transactional-tests/tests/display/print_values.move b/third_party/move/move-vm/transactional-tests/tests/display/print_values.move index 29b25f8f93618..5627bfff8efa7 100644 --- a/third_party/move/move-vm/transactional-tests/tests/display/print_values.move +++ b/third_party/move/move-vm/transactional-tests/tests/display/print_values.move @@ -58,6 +58,8 @@ module 0x42::print_values { a + b } + // Note: fails with invariant violation because MoveVM serializes the return value, and returned value is not + // serializable because it is an anonymous lambda-lifted function. public fun return_anonymous(): || { || {} } @@ -79,8 +81,7 @@ module 0x42::print_values { } public fun return_closure_capture_struct(): ||B { - let b = B { a: A::V1 { x: 23 }, data: vector[1, 2, 3] }; - || { b } + return_struct } } diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_function_call.exp b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_function_call.exp deleted file mode 100644 index 2355ce3a2e25c..0000000000000 --- a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_function_call.exp +++ /dev/null @@ -1,10 +0,0 @@ -processed 3 tasks - -task 2 'run'. lines 24-31: -Error: Script execution failed with VMError: { - major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, - sub_status: None, - location: script, - indices: [], - offsets: [], -} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_function_call.mvir b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_function_call.mvir deleted file mode 100644 index 62edd9cfc1618..0000000000000 --- a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_function_call.mvir +++ /dev/null @@ -1,31 +0,0 @@ -//# publish -module 0x2.A { - struct C has drop { x: u64 } - - make(): Self.C { - label b0: - return C { x: 0}; - } -} - -//# publish -module 0x3.B { - import 0x2.A; - - public make(): A.C { - let v: A.C; - label b0: - v = A.make(); - - return move(v); - } -} - -//# run --signers 0x1 -import 0x3.B; -import 0x2.A; -main(account: signer) { -label b0: - _ = B.make(); - return; -} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call.exp b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call.exp new file mode 100644 index 0000000000000..00f6b1128c4fc --- /dev/null +++ b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call.exp @@ -0,0 +1,46 @@ +processed 13 tasks + +task 4 'run'. lines 72-72: +Error: Function execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: 0x2::a, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 0)], +} + +task 7 'run'. 
lines 78-78: +Error: Function execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: 0x2::c, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 0)], +} + +task 8 'run'. lines 80-80: +Error: Function execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: 0x2::c, + indices: [], + offsets: [(FunctionDefinitionIndex(1), 0)], +} + +task 10 'run'. lines 84-90: +Error: Script execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: script, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 0)], +} + +task 11 'run'. lines 92-98: +Error: Script execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: script, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 0)], +} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call.mvir b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call.mvir new file mode 100644 index 0000000000000..73757b18a8819 --- /dev/null +++ b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call.mvir @@ -0,0 +1,106 @@ +//# publish +module 0x2.a { + // Empty module for 0x2::b to link against when declaring as a friend. +} + +//# publish +module 0x2.b { + friend 0x2.a; + + private_function() { + label b0: + return; + } + + public(friend) friend_function() { + label b0: + return; + } + + public public_function() { + label b0: + return; + } +} + +//# publish +module 0x2.a { + import 0x2.b; + + public call_private_function() { + label b0: + b.private_function(); + return; + } + + public call_friend_function() { + label b0: + b.friend_function(); + return; + } + + public call_public_function() { + label b0: + b.public_function(); + return; + } +} + +//# publish +module 0x2.c { + import 0x2.b; + + public call_private_function() { + label b0: + b.private_function(); + return; + } + + public call_friend_function() { + label b0: + b.friend_function(); + return; + } + + public call_public_function() { + label b0: + b.public_function(); + return; + } +} + +//# run 0x2::a::call_private_function + +//# run 0x2::a::call_friend_function + +//# run 0x2::a::call_public_function + +//# run 0x2::c::call_private_function + +//# run 0x2::c::call_friend_function + +//# run 0x2::c::call_public_function + +//# run --signers 0x1 +import 0x2.b; +main(account: signer) { +label b0: + b.private_function(); + return; +} + +//# run --signers 0x1c +import 0x2.b; +main(account: signer) { +label b0: + b.friend_function(); + return; +} + +//# run --signers 0x1 +import 0x2.b; +main(account: signer) { +label b0: + b.public_function(); + return; +} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_generic.exp b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_generic.exp new file mode 100644 index 0000000000000..00f6b1128c4fc --- /dev/null +++ b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_generic.exp @@ -0,0 +1,46 @@ +processed 13 tasks + +task 4 'run'. 
lines 72-72: +Error: Function execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: 0x2::a, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 0)], +} + +task 7 'run'. lines 78-78: +Error: Function execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: 0x2::c, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 0)], +} + +task 8 'run'. lines 80-80: +Error: Function execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: 0x2::c, + indices: [], + offsets: [(FunctionDefinitionIndex(1), 0)], +} + +task 10 'run'. lines 84-90: +Error: Script execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: script, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 0)], +} + +task 11 'run'. lines 92-98: +Error: Script execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: script, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 0)], +} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_generic.mvir b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_generic.mvir new file mode 100644 index 0000000000000..7169a6e0df282 --- /dev/null +++ b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_generic.mvir @@ -0,0 +1,106 @@ +//# publish +module 0x2.a { + // Empty module for 0x2::b to link against when declaring as a friend. +} + +//# publish +module 0x2.b { + friend 0x2.a; + + private_function() { + label b0: + return; + } + + public(friend) friend_function() { + label b0: + return; + } + + public public_function() { + label b0: + return; + } +} + +//# publish +module 0x2.a { + import 0x2.b; + + public call_private_function() { + label b0: + b.private_function(); + return; + } + + public call_friend_function() { + label b0: + b.friend_function(); + return; + } + + public call_public_function() { + label b0: + b.public_function(); + return; + } +} + +//# publish +module 0x2.c { + import 0x2.b; + + public call_private_function() { + label b0: + b.private_function(); + return; + } + + public call_friend_function() { + label b0: + b.friend_function(); + return; + } + + public call_public_function() { + label b0: + b.public_function(); + return; + } +} + +//# run 0x2::a::call_private_function + +//# run 0x2::a::call_friend_function + +//# run 0x2::a::call_public_function + +//# run 0x2::c::call_private_function + +//# run 0x2::c::call_friend_function + +//# run 0x2::c::call_public_function + +//# run --signers 0x1 +import 0x2.b; +main(account: signer) { +label b0: + b.private_function(); + return; +} + +//# run --signers 0x1 +import 0x2.b; +main(account: signer) { +label b0: + b.friend_function(); + return; +} + +//# run --signers 0x1 +import 0x2.b; +main(account: signer) { +label b0: + b.public_function(); + return; +} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_native.exp b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_native.exp new file mode 100644 index 0000000000000..14dffb4ad58f1 --- /dev/null +++ 
b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_native.exp @@ -0,0 +1,19 @@ +processed 3 tasks + +task 1 'run'. lines 14-14: +Error: Function execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: 0x2::a, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 3)], +} + +task 2 'run'. lines 16-24: +Error: Script execution failed with VMError: { + major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, + sub_status: Some(1), + location: script, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 3)], +} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_native.mvir b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_native.mvir new file mode 100644 index 0000000000000..8103799c0acbc --- /dev/null +++ b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_module_call_native.mvir @@ -0,0 +1,24 @@ +//# publish +module 0x2.a { + import 0x1.string; + + public test() { + let b: vector; + label b0: + b = vec_pack_0(); + _ = string.internal_check_utf8(&b); + return; +} +} + +//# run 0x2::a::test + +//# run --signers 0x1 +import 0x1.string; +main(account: signer) { + let b: vector; +label b0: + b = vec_pack_0(); + _ = string.internal_check_utf8(&b); + return; +} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_native_function_call.exp b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_native_function_call.exp deleted file mode 100644 index dc82e72d6ec94..0000000000000 --- a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_native_function_call.exp +++ /dev/null @@ -1,10 +0,0 @@ -processed 2 tasks - -task 1 'run'. lines 13-19: -Error: Script execution failed with VMError: { - major_status: UNKNOWN_INVARIANT_VIOLATION_ERROR, - sub_status: None, - location: script, - indices: [], - offsets: [], -} diff --git a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_native_function_call.mvir b/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_native_function_call.mvir deleted file mode 100644 index a657f6024d99d..0000000000000 --- a/third_party/move/move-vm/transactional-tests/tests/paranoid-tests/encapsulation_safety/cross_native_function_call.mvir +++ /dev/null @@ -1,19 +0,0 @@ -//# publish -module 0x2.A { - import 0x1.string; - public test() { - let b: vector; - label b0: - b = vec_pack_0(); - _ = string.internal_check_utf8(&b); - return; - } -} - -//# run --signers 0x1 -import 0x2.A; -main(account: signer) { -label b0: - A.test(); - return; -} diff --git a/third_party/move/move-vm/types/src/gas.rs b/third_party/move/move-vm/types/src/gas.rs index f37961d0c231f..2409ad5496a44 100644 --- a/third_party/move/move-vm/types/src/gas.rs +++ b/third_party/move/move-vm/types/src/gas.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::views::{TypeView, ValueView}; +use ambassador::delegatable_trait; use move_binary_format::{ errors::PartialVMResult, file_format::CodeOffset, file_format_common::Opcodes, }; @@ -138,9 +139,38 @@ impl SimpleInstruction { } } +/// Metering API for module or script dependencies. Defined as a stand-alone trait so it can be +/// used in native context. 
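+///
+/// For example (sketch only), a meter that merely accumulates the bytes of loaded dependencies
+/// could implement it as follows, where `ByteCountingMeter` is hypothetical:
+///
+/// ```ignore
+/// impl DependencyGasMeter for ByteCountingMeter {
+///     fn charge_dependency(
+///         &mut self,
+///         _is_new: bool,
+///         _addr: &AccountAddress,
+///         _name: &IdentStr,
+///         size: NumBytes,
+///     ) -> PartialVMResult<()> {
+///         self.total_bytes = self.total_bytes.saturating_add(u64::from(size));
+///         Ok(())
+///     }
+/// }
+/// ```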
+///
+/// Note: because native functions are trait objects, it is not possible to make [GasMeter] a
+/// trait object as well, since some of its APIs are generic.
+#[delegatable_trait]
+pub trait DependencyGasMeter {
+    fn charge_dependency(
+        &mut self,
+        is_new: bool,
+        addr: &AccountAddress,
+        name: &IdentStr,
+        size: NumBytes,
+    ) -> PartialVMResult<()>;
+}
+
+/// Gas meter used to meter native functions.
+pub trait NativeGasMeter: DependencyGasMeter {
+    /// Returns the remaining gas budget of the meter. Same as [GasMeter::balance_internal] and
+    /// will be removed in the future.
+    fn legacy_gas_budget_in_native_context(&self) -> InternalGas;
+
+    /// Charges the given gas amount. Only used if metering in native context is enabled.
+    fn charge_native_execution(&mut self, amount: InternalGas) -> PartialVMResult<()>;
+
+    /// Tracks heap memory usage.
+    fn use_heap_memory_in_native_context(&mut self, amount: u64) -> PartialVMResult<()>;
+}
+
 /// Trait that defines a generic gas meter interface, allowing clients of the Move VM to implement
 /// their own metering scheme.
-pub trait GasMeter {
+pub trait GasMeter: NativeGasMeter {
     fn balance_internal(&self) -> InternalGas;
 
     /// Charge an instruction and fail if not enough gas units are left.
@@ -212,6 +242,12 @@ pub trait GasMeter {
         self.charge_unpack(is_generic, args)
     }
 
+    fn charge_pack_closure(
+        &mut self,
+        is_generic: bool,
+        args: impl ExactSizeIterator<Item = impl ValueView> + Clone,
+    ) -> PartialVMResult<()>;
+
     fn charge_read_ref(&mut self, val: impl ValueView) -> PartialVMResult<()>;
 
     fn charge_write_ref(
@@ -330,24 +366,38 @@ pub trait GasMeter {
     ) -> PartialVMResult<()>;
 
     fn charge_create_ty(&mut self, num_nodes: NumTypeNodes) -> PartialVMResult<()>;
-
-    fn charge_dependency(
-        &mut self,
-        is_new: bool,
-        addr: &AccountAddress,
-        name: &IdentStr,
-        size: NumBytes,
-    ) -> PartialVMResult<()>;
-
-    /// A special interface for the VM to signal to the gas meter that certain internal ops or
-    /// native functions have used additional heap memory that needs to be metered.
-    fn charge_heap_memory(&mut self, amount: u64) -> PartialVMResult<()>;
 }
 
 /// A dummy gas meter that does not meter anything.
 /// Charge operations will always succeed.
pub struct UnmeteredGasMeter; +impl DependencyGasMeter for UnmeteredGasMeter { + fn charge_dependency( + &mut self, + _is_new: bool, + _addr: &AccountAddress, + _name: &IdentStr, + _size: NumBytes, + ) -> PartialVMResult<()> { + Ok(()) + } +} + +impl NativeGasMeter for UnmeteredGasMeter { + fn legacy_gas_budget_in_native_context(&self) -> InternalGas { + u64::MAX.into() + } + + fn charge_native_execution(&mut self, _amount: InternalGas) -> PartialVMResult<()> { + Ok(()) + } + + fn use_heap_memory_in_native_context(&mut self, _amount: u64) -> PartialVMResult<()> { + Ok(()) + } +} + impl GasMeter for UnmeteredGasMeter { fn balance_internal(&self) -> InternalGas { u64::MAX.into() @@ -433,6 +483,14 @@ impl GasMeter for UnmeteredGasMeter { Ok(()) } + fn charge_pack_closure( + &mut self, + _is_generic: bool, + _args: impl ExactSizeIterator, + ) -> PartialVMResult<()> { + Ok(()) + } + fn charge_read_ref(&mut self, _val: impl ValueView) -> PartialVMResult<()> { Ok(()) } @@ -577,18 +635,4 @@ impl GasMeter for UnmeteredGasMeter { fn charge_create_ty(&mut self, _num_nodes: NumTypeNodes) -> PartialVMResult<()> { Ok(()) } - - fn charge_dependency( - &mut self, - _is_new: bool, - _addr: &AccountAddress, - _name: &IdentStr, - _size: NumBytes, - ) -> PartialVMResult<()> { - Ok(()) - } - - fn charge_heap_memory(&mut self, _amount: u64) -> PartialVMResult<()> { - Ok(()) - } } diff --git a/third_party/move/move-vm/types/src/loaded_data/runtime_types.rs b/third_party/move/move-vm/types/src/loaded_data/runtime_types.rs index 9707308a4b1a6..b2ae927b85b2d 100644 --- a/third_party/move/move-vm/types/src/loaded_data/runtime_types.rs +++ b/third_party/move/move-vm/types/src/loaded_data/runtime_types.rs @@ -16,7 +16,7 @@ use move_binary_format::{ use move_core_types::{ ability::{Ability, AbilitySet}, identifier::Identifier, - language_storage::{FunctionTag, ModuleId, StructTag, TypeTag}, + language_storage::{FunctionParamOrReturnTag, FunctionTag, ModuleId, StructTag, TypeTag}, vm_status::{sub_status::unknown_invariant_violation::EPARANOID_FAILURE, StatusCode}, }; use serde::Serialize; @@ -32,7 +32,6 @@ use std::{ }; use triomphe::Arc as TriompheArc; -#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug)] /// A formula describing the value depth of a type, using (the depths of) the type parameters as inputs. /// /// It has the form of `max(CBase, T1 + C1, T2 + C2, ..)` where `Ti` is the depth of the ith type parameter @@ -40,6 +39,7 @@ use triomphe::Arc as TriompheArc; /// /// This form has a special property: when you compute the max of multiple formulae, you can normalize /// them into a single formula. 
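 ///
 /// For example (illustrative): `max(max(1, T0 + 2), max(3, T0 + 1))` normalizes to
 /// `max(3, T0 + 2)`: constants fold via `max`, and each parameter keeps its largest added
 /// constant.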
+#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug)] pub struct DepthFormula { pub terms: Vec<(TypeParameterIndex, u64)>, // Ti + Ci pub constant: Option, // Cbase @@ -93,14 +93,13 @@ impl DepthFormula { formulas.push(DepthFormula::constant(*constant)) } for (t_i, c_i) in terms { - let Some(mut u_form) = map.remove(t_i) else { + let Some(u_form) = map.remove(t_i) else { return Err( PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .with_message(format!("{t_i:?} missing mapping")), ); }; - u_form.scale(*c_i); - formulas.push(u_form) + formulas.push(u_form.scale(*c_i)) } Ok(DepthFormula::normalize(formulas)) } @@ -114,14 +113,15 @@ impl DepthFormula { depth } - pub fn scale(&mut self, c: u64) { - let Self { terms, constant } = self; + pub fn scale(mut self, c: u64) -> Self { + let Self { terms, constant } = &mut self; for (_t_i, c_i) in terms { *c_i = (*c_i).saturating_add(c); } if let Some(cbase) = constant.as_mut() { *cbase = (*cbase).saturating_add(c); } + self } } @@ -1322,9 +1322,23 @@ impl TypeBuilder { results, abilities, } = fun.as_ref(); - let mut to_list = |ts: &[TypeTag]| { + let mut to_list = |ts: &[FunctionParamOrReturnTag]| { ts.iter() - .map(|t| self.create_ty_impl(t, resolver, count, depth + 1)) + .map(|t| { + // Note: for reference or mutable reference tags, we add 1 more level + // of depth, hence adding 2 to the counter. + Ok(match t { + FunctionParamOrReturnTag::Reference(t) => Reference(Box::new( + self.create_ty_impl(t, resolver, count, depth + 2)?, + )), + FunctionParamOrReturnTag::MutableReference(t) => MutableReference( + Box::new(self.create_ty_impl(t, resolver, count, depth + 2)?), + ), + FunctionParamOrReturnTag::Value(t) => { + self.create_ty_impl(t, resolver, count, depth + 1)? + }, + }) + }) .collect::>>() }; Function { diff --git a/third_party/move/move-vm/types/src/value_serde.rs b/third_party/move/move-vm/types/src/value_serde.rs index e532e3bf3e5d1..2aa2ee51f7cc5 100644 --- a/third_party/move/move-vm/types/src/value_serde.rs +++ b/third_party/move/move-vm/types/src/value_serde.rs @@ -31,6 +31,9 @@ pub trait FunctionValueExtension { &self, fun: &dyn AbstractFunction, ) -> PartialVMResult; + + /// Returns the maximum allowed nesting depth of a VM value. + fn max_value_nest_depth(&self) -> Option; } /// An extension to (de)serializer to lookup information about delayed fields. @@ -66,10 +69,6 @@ impl DelayedFieldsExtension<'_> { #[derive(Clone)] pub(crate) struct FunctionValueExtensionWithContext<'a> { extension: &'a dyn FunctionValueExtension, - /// Marker to indicate that function value serialization failed. Used to ensure we propagate - /// error status code correctly, as otherwise any serialization failure is treated as an - /// invariant violation. - function_value_serialization_error: RefCell>, } impl<'a> FunctionValueExtensionWithContext<'a> { @@ -78,11 +77,7 @@ impl<'a> FunctionValueExtensionWithContext<'a> { &self, fun: &dyn AbstractFunction, ) -> PartialVMResult { - self.extension - .get_serialization_data(fun) - .inspect_err(|err| { - *self.function_value_serialization_error.borrow_mut() = Some(err.clone()); - }) + self.extension.get_serialization_data(fun) } /// Creates a function from serialized data. @@ -100,16 +95,18 @@ pub struct ValueSerDeContext<'a> { pub(crate) function_extension: Option>, pub(crate) delayed_fields_extension: Option>, pub(crate) legacy_signer: bool, + /// Maximum allowed depth of a VM value. Enforced by serializer. 
+ pub(crate) max_value_nested_depth: Option, } impl<'a> ValueSerDeContext<'a> { /// Default (de)serializer that disallows delayed fields. - #[allow(clippy::new_without_default)] - pub fn new() -> Self { + pub fn new(max_value_nested_depth: Option) -> Self { Self { function_extension: None, delayed_fields_extension: None, legacy_signer: false, + max_value_nested_depth, } } @@ -125,10 +122,7 @@ impl<'a> ValueSerDeContext<'a> { mut self, extension: &'a dyn FunctionValueExtension, ) -> Self { - self.function_extension = Some(FunctionValueExtensionWithContext { - extension, - function_value_serialization_error: RefCell::new(None), - }); + self.function_extension = Some(FunctionValueExtensionWithContext { extension }); self } @@ -148,7 +142,18 @@ impl<'a> ValueSerDeContext<'a> { function_extension: self.function_extension.clone(), delayed_fields_extension: None, legacy_signer: self.legacy_signer, + max_value_nested_depth: self.max_value_nested_depth, + } + } + + pub(crate) fn check_depth(&self, depth: u64) -> PartialVMResult<()> { + if self + .max_value_nested_depth + .map_or(false, |max_depth| depth > max_depth) + { + return Err(PartialVMError::new(StatusCode::VM_MAX_VALUE_DEPTH_REACHED)); } + Ok(()) } /// Custom (de)serializer such that: @@ -189,6 +194,7 @@ impl<'a> ValueSerDeContext<'a> { ctx: &self, layout, value: &value.0, + depth: 1, }; match bcs::to_bytes(&value).ok() { @@ -206,17 +212,6 @@ impl<'a> ValueSerDeContext<'a> { )); } } - - // Check if the error is because of function value serialization. - if let Some(function_extension) = self.function_extension { - if let Some(err) = function_extension - .function_value_serialization_error - .into_inner() - { - return Err(err); - } - } - Ok(None) }, } @@ -229,6 +224,7 @@ impl<'a> ValueSerDeContext<'a> { ctx: &self, layout, value: &value.0, + depth: 1, }; bcs::serialized_size(&value).map_err(|e| { PartialVMError::new(StatusCode::VALUE_SERIALIZATION_ERROR).with_message(format!( diff --git a/third_party/move/move-vm/types/src/values/function_values_impl.rs b/third_party/move/move-vm/types/src/values/function_values_impl.rs index 6c5d0e01743be..066f2651b3c6e 100644 --- a/third_party/move/move-vm/types/src/values/function_values_impl.rs +++ b/third_party/move/move-vm/types/src/values/function_values_impl.rs @@ -31,7 +31,7 @@ pub trait AbstractFunction: for<'a> Tid<'a> { fn closure_mask(&self) -> ClosureMask; fn cmp_dyn(&self, other: &dyn AbstractFunction) -> PartialVMResult; fn clone_dyn(&self) -> PartialVMResult>; - fn to_stable_string(&self) -> String; + fn to_canonical_string(&self) -> String; } /// A closure, consisting of an abstract function descriptor and the captured arguments. @@ -84,7 +84,7 @@ impl Closure { impl Debug for Closure { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let Self(fun, captured) = self; - write!(f, "Closure({}, {:?})", fun.to_stable_string(), captured) + write!(f, "Closure({}, {:?})", fun.to_canonical_string(), captured) } } @@ -94,7 +94,7 @@ impl Display for Closure { let captured = fun .closure_mask() .format_arguments(captured.iter().map(|v| v.to_string()).collect()); - write!(f, "{}({})", fun.to_stable_string(), captured.join(", ")) + write!(f, "{}({})", fun.to_canonical_string(), captured.join(", ")) } } @@ -130,6 +130,7 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, (), Closure> { ctx: self.ctx, layout: &layout, value, + depth: self.depth + 1, })? 
} seq.end() diff --git a/third_party/move/move-vm/types/src/values/mod.rs b/third_party/move/move-vm/types/src/values/mod.rs index 31b773cffd3da..c86401cd697cc 100644 --- a/third_party/move/move-vm/types/src/values/mod.rs +++ b/third_party/move/move-vm/types/src/values/mod.rs @@ -10,6 +10,8 @@ mod value_tests; #[cfg(test)] mod serialization_tests; +#[cfg(test)] +mod value_depth_tests; #[cfg(all(test, feature = "fuzzing"))] mod value_prop_tests; diff --git a/third_party/move/move-vm/types/src/values/serialization_tests.rs b/third_party/move/move-vm/types/src/values/serialization_tests.rs index b19cdc41cc9e8..b2c99c07c4553 100644 --- a/third_party/move/move-vm/types/src/values/serialization_tests.rs +++ b/third_party/move/move-vm/types/src/values/serialization_tests.rs @@ -18,7 +18,7 @@ mod tests { account_address::AccountAddress, function::{ClosureMask, MoveClosure, FUNCTION_DATA_SERIALIZATION_FORMAT_V1}, identifier::Identifier, - language_storage::{FunctionTag, ModuleId, StructTag, TypeTag}, + language_storage::{FunctionParamOrReturnTag, FunctionTag, ModuleId, StructTag, TypeTag}, u256, value::{IdentifierMappingKind, MoveStruct, MoveStructLayout, MoveTypeLayout, MoveValue}, }; @@ -99,11 +99,11 @@ mod tests { )), ]; for value in good_values { - let blob = ValueSerDeContext::new() + let blob = ValueSerDeContext::new(None) .serialize(&value, &layout) .unwrap() .expect("serialization succeeds"); - let de_value = ValueSerDeContext::new() + let de_value = ValueSerDeContext::new(None) .deserialize(&blob, &layout) .expect("deserialization succeeds"); assert!( @@ -113,7 +113,7 @@ mod tests { } let bad_tag_value = Value::struct_(Struct::pack_variant(3, [Value::u64(42)])); assert!( - ValueSerDeContext::new() + ValueSerDeContext::new(None) .serialize(&bad_tag_value, &layout) .unwrap() .is_none(), @@ -121,7 +121,7 @@ mod tests { ); let bad_struct_value = Value::struct_(Struct::pack([Value::u64(42)])); assert!( - ValueSerDeContext::new() + ValueSerDeContext::new(None) .serialize(&bad_struct_value, &layout) .unwrap() .is_none(), @@ -184,7 +184,7 @@ mod tests { RustEnum::BoolNumber(true, 13), ]; for (move_value, rust_value) in move_values.into_iter().zip(rust_values) { - let from_move = ValueSerDeContext::new() + let from_move = ValueSerDeContext::new(None) .serialize(&move_value, &layout) .unwrap() .expect("from move succeeds"); @@ -192,7 +192,7 @@ mod tests { assert_eq!(to_rust, rust_value); let from_rust = bcs::to_bytes(&rust_value).expect("from rust succeeds"); - let to_move = ValueSerDeContext::new() + let to_move = ValueSerDeContext::new(None) .deserialize(&from_rust, &layout) .expect("to move succeeds"); assert!( @@ -214,13 +214,15 @@ mod tests { vec![ TypeTag::Address, TypeTag::Function(Box::new(FunctionTag { - args: vec![TypeTag::Struct(Box::new(StructTag { - address: AccountAddress::TEN, - module: Identifier::new("mod").unwrap(), - name: Identifier::new("st").unwrap(), - type_args: vec![TypeTag::Signer], - }))], - results: vec![TypeTag::Address], + args: vec![FunctionParamOrReturnTag::Value(TypeTag::Struct(Box::new( + StructTag { + address: AccountAddress::TEN, + module: Identifier::new("mod").unwrap(), + name: Identifier::new("st").unwrap(), + type_args: vec![TypeTag::Signer], + }, + )))], + results: vec![FunctionParamOrReturnTag::Value(TypeTag::Address)], abilities: AbilitySet::PUBLIC_FUNCTIONS, })), ] @@ -382,7 +384,7 @@ mod tests { unimplemented!() } - fn to_stable_string(&self) -> String { + fn to_canonical_string(&self) -> String { // Needed for assertion failure printing "".to_string() } 
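As the hunks above and below show, every test now spells out the optional depth limit when building the (de)serialization context. A hedged sketch of the two call shapes, with `value` and `layout` standing in for any of the pairs used in these tests:

    // No depth limit enforced:
    let unlimited = ValueSerDeContext::new(None).serialize(&value, &layout)?;

    // Enforce a maximum nesting depth of 128; values deeper than the limit fail to serialize:
    let limited = ValueSerDeContext::new(Some(128)).serialize(&value, &layout)?;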
@@ -407,11 +409,11 @@ mod tests { .expect_create_from_serialization_data() .returning(move |data| Ok(Box::new(MockAbstractFunction::new_from_data(data)))); let value = Value::closure(Box::new(fun), captured); - let blob = assert_ok!(ValueSerDeContext::new() + let blob = assert_ok!(ValueSerDeContext::new(None) .with_func_args_deserialization(&ext_mock) .serialize(&value, &fun_layout)) .expect("serialization result not None"); - let de_value = ValueSerDeContext::new() + let de_value = ValueSerDeContext::new(None) .with_func_args_deserialization(&ext_mock) .deserialize_or_err(&blob, &fun_layout); (value, de_value) @@ -550,11 +552,11 @@ mod tests { ), ]; for (value, layout) in good_values_layouts_sizes { - let bytes = assert_some!(assert_ok!(ValueSerDeContext::new() + let bytes = assert_some!(assert_ok!(ValueSerDeContext::new(None) .with_delayed_fields_serde() .serialize(&value, &layout))); - let size = assert_ok!(ValueSerDeContext::new() + let size = assert_ok!(ValueSerDeContext::new(None) .with_delayed_fields_serde() .serialized_size(&value, &layout)); assert_eq!(size, bytes.len()); @@ -570,7 +572,7 @@ mod tests { (Value::u64(12), Native(Aggregator, Box::new(U64))), ]; for (value, layout) in bad_values_layouts_sizes { - assert_err!(ValueSerDeContext::new() + assert_err!(ValueSerDeContext::new(None) .with_delayed_fields_serde() .serialized_size(&value, &layout)); } @@ -585,13 +587,13 @@ mod tests { let bytes = move_value.simple_serialize().unwrap(); let vm_value = Value::master_signer(AccountAddress::ZERO); - let vm_bytes = ValueSerDeContext::new() + let vm_bytes = ValueSerDeContext::new(None) .serialize(&vm_value, &MoveTypeLayout::Signer) .unwrap() .unwrap(); // VM Value Roundtrip - assert!(ValueSerDeContext::new() + assert!(ValueSerDeContext::new(None) .deserialize(&vm_bytes, &MoveTypeLayout::Signer) .unwrap() .equals(&vm_value) @@ -605,20 +607,20 @@ mod tests { // Permissioned Signer Roundtrip let vm_value = Value::permissioned_signer(AccountAddress::ZERO, AccountAddress::ONE); - let vm_bytes = ValueSerDeContext::new() + let vm_bytes = ValueSerDeContext::new(None) .serialize(&vm_value, &MoveTypeLayout::Signer) .unwrap() .unwrap(); // VM Value Roundtrip - assert!(ValueSerDeContext::new() + assert!(ValueSerDeContext::new(None) .deserialize(&vm_bytes, &MoveTypeLayout::Signer) .unwrap() .equals(&vm_value) .unwrap()); // Cannot serialize permissioned signer into bytes with legacy signer - assert!(ValueSerDeContext::new() + assert!(ValueSerDeContext::new(None) .with_legacy_signer() .serialize(&vm_value, &MoveTypeLayout::Signer) .unwrap() @@ -631,14 +633,14 @@ mod tests { let bytes = move_value.simple_serialize().unwrap(); let vm_value = Value::master_signer(AccountAddress::ZERO); - let vm_bytes = ValueSerDeContext::new() + let vm_bytes = ValueSerDeContext::new(None) .with_legacy_signer() .serialize(&vm_value, &MoveTypeLayout::Signer) .unwrap() .unwrap(); // VM Value Roundtrip - assert!(ValueSerDeContext::new() + assert!(ValueSerDeContext::new(None) .with_legacy_signer() .deserialize(&vm_bytes, &MoveTypeLayout::Signer) .is_none()); diff --git a/third_party/move/move-vm/types/src/values/value_depth_tests.rs b/third_party/move/move-vm/types/src/values/value_depth_tests.rs new file mode 100644 index 0000000000000..bd62cd4084eea --- /dev/null +++ b/third_party/move/move-vm/types/src/values/value_depth_tests.rs @@ -0,0 +1,243 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + value_serde::{MockFunctionValueExtension, ValueSerDeContext}, + 
values::{AbstractFunction, GlobalValue, SerializedFunctionData, Struct, StructRef, Value}, +}; +use better_any::{Tid, TidAble, TidExt}; +use claims::{assert_err, assert_none, assert_ok, assert_some}; +use move_binary_format::errors::PartialVMResult; +use move_core_types::{ + account_address::AccountAddress, + function::{ClosureMask, FUNCTION_DATA_SERIALIZATION_FORMAT_V1}, + ident_str, + language_storage::ModuleId, + value::{MoveStructLayout, MoveTypeLayout}, + vm_status::StatusCode, +}; +use std::{cmp::Ordering, fmt::Debug}; + +#[derive(Clone, Tid)] +struct MockFunction { + data: SerializedFunctionData, +} + +impl MockFunction { + fn closure( + mask: ClosureMask, + captured: impl IntoIterator, + captured_layouts: impl IntoIterator, + ) -> Value { + let data = SerializedFunctionData { + format_version: FUNCTION_DATA_SERIALIZATION_FORMAT_V1, + module_id: ModuleId::new(AccountAddress::ONE, ident_str!("mock").to_owned()), + fun_id: ident_str!("mock").to_owned(), + ty_args: vec![], + mask, + captured_layouts: captured_layouts.into_iter().collect(), + }; + Value::closure(Box::new(Self { data }), captured) + } +} + +impl AbstractFunction for MockFunction { + fn closure_mask(&self) -> ClosureMask { + self.data.mask + } + + fn cmp_dyn(&self, _other: &dyn AbstractFunction) -> PartialVMResult { + Ok(Ordering::Equal) + } + + fn clone_dyn(&self) -> PartialVMResult> { + Ok(Box::new(self.clone())) + } + + fn to_canonical_string(&self) -> String { + "0x1::mock::mock".to_string() + } +} + +#[test] +fn test_equals() { + test_binop_with_max_depth(|l, r, max_depth| l.equals_with_depth(r, max_depth)); +} + +#[test] +fn test_compare() { + test_binop_with_max_depth(|l, r, max_depth| l.compare_with_depth(r, max_depth)); +} + +#[test] +fn test_copy_value() { + test_unop_with_max_depth(|v, max_depth| v.copy_value_with_depth(max_depth)); + + // Special-case: reference clone Rcs, so their depth can be larger. + let v = assert_ok!(GlobalValue::cached(Value::struct_(Struct::pack(vec![ + Value::u8(0) + ])))); + let v_ref = assert_ok!(v.borrow_global()); + assert_ok!(v_ref.copy_value_with_depth(3)); + assert_ok!(v_ref.copy_value_with_depth(2)); + assert_ok!(v_ref.copy_value_with_depth(1)); +} + +#[test] +fn test_read_ref() { + let v = assert_ok!(GlobalValue::cached(Value::struct_(Struct::pack(vec![ + Value::u8(0) + ])))); + let v_ref = assert_ok!(assert_ok!(v.borrow_global()).value_as::()); + + // Note: reading a reference will clone the value, so here it is a clone of a struct with 1 + // field of depth 2. + assert_ok!(v_ref.read_ref_with_depth(2)); + + let v_ref = assert_ok!(assert_ok!(v.borrow_global()).value_as::()); + let err = assert_err!(v_ref.read_ref_with_depth(1)); + assert_eq!(err.major_status(), StatusCode::VM_MAX_VALUE_DEPTH_REACHED); +} + +#[test] +fn test_serialization() { + use MoveStructLayout::*; + use MoveTypeLayout as L; + + let mut extension = MockFunctionValueExtension::new(); + extension + .expect_get_serialization_data() + .returning(move |af| Ok(af.downcast_ref::().unwrap().data.clone())); + + let depth_1_ok = [ + (Value::u64(0), L::U64), + (Value::vector_u8(vec![0, 1]), L::Vector(Box::new(L::U8))), + ( + MockFunction::closure(ClosureMask::empty(), vec![], vec![]), + L::Function, + ), + ]; + let depth_2_ok = [ + ( + Value::struct_(Struct::pack(vec![Value::u16(0)])), + L::Struct(Runtime(vec![L::U16])), + ), + ( + Value::vector_for_testing_only(vec![Value::vector_u8(vec![0, 1])]), + L::Vector(Box::new(L::Vector(Box::new(L::U8)))), + ), + ( + // Serialize first variant, so the depth is 2. 
+ Value::struct_(Struct::pack(vec![Value::u16(0), Value::bool(true)])), + L::Struct(RuntimeVariants(vec![vec![L::Bool], vec![L::Vector( + Box::new(L::Vector(Box::new(L::U8))), + )]])), + ), + ( + MockFunction::closure(ClosureMask::empty(), vec![Value::u16(0)], vec![L::U16]), + L::Function, + ), + ]; + let depth_3_ok = [( + // Serialize second variant, so the depth is 3. + Value::struct_(Struct::pack(vec![ + Value::u16(1), + Value::vector_for_testing_only(vec![Value::vector_u8(vec![1, 2])]), + ])), + L::Struct(RuntimeVariants(vec![vec![L::Bool], vec![L::Vector( + Box::new(L::Vector(Box::new(L::U8))), + )]])), + )]; + + let ctx = |max_depth: u64| { + ValueSerDeContext::new(Some(max_depth)).with_func_args_deserialization(&extension) + }; + + for (v, l) in &depth_1_ok { + assert_some!(assert_ok!(ctx(1).serialize(v, l))); + assert_ok!(ctx(1).serialized_size(v, l)); + } + + for (v, l) in &depth_2_ok { + assert_some!(assert_ok!(ctx(2).serialize(v, l))); + assert_ok!(ctx(2).serialized_size(v, l)); + assert_none!(assert_ok!(ctx(1).serialize(v, l))); + assert_err!(ctx(1).serialized_size(v, l)); + } + + for (v, l) in &depth_3_ok { + assert_some!(assert_ok!(ctx(3).serialize(v, l))); + assert_ok!(ctx(3).serialized_size(v, l)); + assert_none!(assert_ok!(ctx(2).serialize(v, l))); + assert_err!(ctx(2).serialized_size(v, l)); + assert_none!(assert_ok!(ctx(1).serialize(v, l))); + assert_err!(ctx(1).serialized_size(v, l)); + } +} + +fn test_binop_with_max_depth(f: F) +where + T: Debug, + F: Fn(&Value, &Value, u64) -> PartialVMResult, +{ + let v = Value::u8(0); + assert_ok!(f(&v, &v, 1)); + + let v = Value::vector_u8(vec![0, 1]); + assert_ok!(f(&v, &v, 1)); + + let v = Value::vector_for_testing_only(vec![Value::vector_u8(vec![0, 1])]); + let err = assert_err!(f(&v, &v, 1)); + assert_eq!(err.major_status(), StatusCode::VM_MAX_VALUE_DEPTH_REACHED); + + let v = Value::struct_(Struct::pack(vec![Value::u8(0)])); + let err = assert_err!(f(&v, &v, 1)); + assert_eq!(err.major_status(), StatusCode::VM_MAX_VALUE_DEPTH_REACHED); + + let v = MockFunction::closure(ClosureMask::empty(), vec![], vec![]); + assert_ok!(f(&v, &v, 1)); + + let v = MockFunction::closure(ClosureMask::new_for_leading(1), vec![Value::u8(0)], vec![ + MoveTypeLayout::U8, + ]); + let err = assert_err!(f(&v, &v, 1)); + assert_eq!(err.major_status(), StatusCode::VM_MAX_VALUE_DEPTH_REACHED); + + // Create a reference to struct with 1 field (3 nodes). 
+ let v = assert_ok!(GlobalValue::cached(Value::struct_(Struct::pack(vec![ + Value::u8(0) + ])))); + let v_ref = assert_ok!(v.borrow_global()); + assert_ok!(f(&v_ref, &v_ref, 3)); + let err = assert_err!(f(&v_ref, &v_ref, 2)); + assert_eq!(err.major_status(), StatusCode::VM_MAX_VALUE_DEPTH_REACHED); +} + +fn test_unop_with_max_depth(f: F) +where + T: Debug, + F: Fn(&Value, u64) -> PartialVMResult, +{ + let v = Value::u8(0); + assert_ok!(f(&v, 1)); + + let v = Value::vector_u8(vec![0, 1]); + assert_ok!(f(&v, 1)); + + let v = Value::vector_for_testing_only(vec![Value::vector_u8(vec![0, 1])]); + let err = assert_err!(f(&v, 1)); + assert_eq!(err.major_status(), StatusCode::VM_MAX_VALUE_DEPTH_REACHED); + + let v = Value::struct_(Struct::pack(vec![Value::u8(0)])); + let err = assert_err!(f(&v, 1)); + assert_eq!(err.major_status(), StatusCode::VM_MAX_VALUE_DEPTH_REACHED); + + let v = MockFunction::closure(ClosureMask::empty(), vec![], vec![]); + assert_ok!(f(&v, 1)); + + let v = MockFunction::closure(ClosureMask::new_for_leading(1), vec![Value::u8(0)], vec![ + MoveTypeLayout::U8, + ]); + let err = assert_err!(f(&v, 1)); + assert_eq!(err.major_status(), StatusCode::VM_MAX_VALUE_DEPTH_REACHED); +} diff --git a/third_party/move/move-vm/types/src/values/value_prop_tests.rs b/third_party/move/move-vm/types/src/values/value_prop_tests.rs index 24e2ed8b3422d..979371208bc66 100644 --- a/third_party/move/move-vm/types/src/values/value_prop_tests.rs +++ b/third_party/move/move-vm/types/src/values/value_prop_tests.rs @@ -9,8 +9,8 @@ use proptest::prelude::*; proptest! { #[test] fn serializer_round_trip((layout, value) in layout_and_value_strategy()) { - let blob = ValueSerDeContext::new().serialize(&value, &layout).unwrap().expect("must serialize"); - let value_deserialized = ValueSerDeContext::new().deserialize(&blob, &layout).expect("must deserialize"); + let blob = ValueSerDeContext::new(None).serialize(&value, &layout).unwrap().expect("must serialize"); + let value_deserialized = ValueSerDeContext::new(None).deserialize(&blob, &layout).expect("must deserialize"); assert!(value.equals(&value_deserialized).unwrap()); let move_value = value.as_move_value(&layout); diff --git a/third_party/move/move-vm/types/src/values/values_impl.rs b/third_party/move/move-vm/types/src/values/values_impl.rs index 1ddfd9940c29e..c5c0f6e09d3a5 100644 --- a/third_party/move/move-vm/types/src/values/values_impl.rs +++ b/third_party/move/move-vm/types/src/values/values_impl.rs @@ -40,6 +40,15 @@ use std::{ rc::Rc, }; +/// Values can be recursive, and so it is important that we do not use recursive algorithms over +/// deeply nested values as it can cause stack overflow. Since it is not always possible to avoid +/// recursion, we opt for a reasonable limit on VM value depth. It is defined in Move VM config, +/// but since it is difficult to propagate config context everywhere, we use this constant. +/// +/// IMPORTANT: When changing this constant, make sure it is in-sync with one in VM config (it is +/// used there now). 
+pub const DEFAULT_MAX_VM_VALUE_NESTED_DEPTH: u64 = 128; + /*************************************************************************************** * * Internal Types @@ -395,9 +404,10 @@ impl ValueImpl { * **************************************************************************************/ impl ValueImpl { - fn copy_value(&self) -> PartialVMResult { + fn copy_value(&self, depth: u64, max_depth: Option) -> PartialVMResult { use ValueImpl::*; + check_depth(depth, max_depth)?; Ok(match self { Invalid => Invalid, @@ -410,12 +420,15 @@ impl ValueImpl { Bool(x) => Bool(*x), Address(x) => Address(*x), - ContainerRef(r) => ContainerRef(r.copy_value()), - IndexedRef(r) => IndexedRef(r.copy_value()), + // Note: refs copy only clones Rc, so no need to increment depth. + ContainerRef(r) => ContainerRef(r.copy_by_ref()), + IndexedRef(r) => IndexedRef(r.copy_by_ref()), - // When cloning a container, we need to make sure we make a deep - // copy of the data instead of a shallow copy of the Rc. - Container(c) => Container(c.copy_value()?), + // When cloning a container, we need to make sure we make a deep copy of the data + // instead of a shallow copy of the Rc. Note that we do not increment the depth here + // because we have done it when entering this value. Inside the container, depth will + // be further incremented for nested values. + Container(c) => Container(c.copy_value(depth, max_depth)?), // Native values can be copied because this is how read_ref operates, // and copying is an internal API. @@ -424,7 +437,7 @@ impl ValueImpl { ClosureValue(Closure(fun, captured)) => { let captured = captured .iter() - .map(|v| v.copy_value()) + .map(|v| v.copy_value(depth + 1, max_depth)) .collect::>()?; ClosureValue(Closure(fun.clone_dyn()?, captured)) }, @@ -433,12 +446,12 @@ impl ValueImpl { } impl Container { - fn copy_value(&self) -> PartialVMResult { + fn copy_value(&self, depth: u64, max_depth: Option) -> PartialVMResult { let copy_rc_ref_vec_val = |r: &Rc>>| { Ok(Rc::new(RefCell::new( r.borrow() .iter() - .map(|v| v.copy_value()) + .map(|v| v.copy_value(depth + 1, max_depth)) .collect::>()?, ))) }; @@ -485,16 +498,16 @@ impl Container { } impl IndexedRef { - fn copy_value(&self) -> Self { + fn copy_by_ref(&self) -> Self { Self { idx: self.idx, - container_ref: self.container_ref.copy_value(), + container_ref: self.container_ref.copy_by_ref(), } } } impl ContainerRef { - fn copy_value(&self) -> Self { + fn copy_by_ref(&self) -> Self { match self { Self::Local(container) => Self::Local(container.copy_by_ref()), Self::Global { status, container } => Self::Global { @@ -505,6 +518,13 @@ impl ContainerRef { } } +#[cfg(test)] +impl Value { + pub fn copy_value_with_depth(&self, max_depth: u64) -> PartialVMResult { + Ok(Self(self.0.copy_value(1, Some(max_depth))?)) + } +} + /*************************************************************************************** * * Equality @@ -523,9 +543,10 @@ impl ContainerRef { **************************************************************************************/ impl ValueImpl { - fn equals(&self, other: &Self) -> PartialVMResult { + fn equals(&self, other: &Self, depth: u64, max_depth: Option) -> PartialVMResult { use ValueImpl::*; + check_depth(depth, max_depth)?; let res = match (self, other) { (U8(l), U8(r)) => l == r, (U16(l), U16(r)) => l == r, @@ -536,10 +557,11 @@ impl ValueImpl { (Bool(l), Bool(r)) => l == r, (Address(l), Address(r)) => l == r, - (Container(l), Container(r)) => l.equals(r)?, + (Container(l), Container(r)) => l.equals(r, depth, max_depth)?, - 
(ContainerRef(l), ContainerRef(r)) => l.equals(r)?, - (IndexedRef(l), IndexedRef(r)) => l.equals(r)?, + // We count references as +1 in nesting, hence increasing the depth. + (ContainerRef(l), ContainerRef(r)) => l.equals(r, depth + 1, max_depth)?, + (IndexedRef(l), IndexedRef(r)) => l.equals(r, depth + 1, max_depth)?, // Disallow equality for delayed values. The rationale behind this // semantics is that identifiers might not be deterministic, and @@ -556,7 +578,7 @@ impl ValueImpl { && captured1.len() == captured2.len() { for (v1, v2) in captured1.iter().zip(captured2.iter()) { - if !v1.equals(v2)? { + if !v1.equals(v2, depth + 1, max_depth)? { return Ok(false); } } @@ -592,9 +614,15 @@ impl ValueImpl { Ok(res) } - fn compare(&self, other: &Self) -> PartialVMResult { + fn compare( + &self, + other: &Self, + depth: u64, + max_depth: Option, + ) -> PartialVMResult { use ValueImpl::*; + check_depth(depth, max_depth)?; let res = match (self, other) { (U8(l), U8(r)) => l.cmp(r), (U16(l), U16(r)) => l.cmp(r), @@ -605,10 +633,11 @@ impl ValueImpl { (Bool(l), Bool(r)) => l.cmp(r), (Address(l), Address(r)) => l.cmp(r), - (Container(l), Container(r)) => l.compare(r)?, + (Container(l), Container(r)) => l.compare(r, depth, max_depth)?, - (ContainerRef(l), ContainerRef(r)) => l.compare(r)?, - (IndexedRef(l), IndexedRef(r)) => l.compare(r)?, + // We count references as +1 in nesting, hence increasing the depth. + (ContainerRef(l), ContainerRef(r)) => l.compare(r, depth + 1, max_depth)?, + (IndexedRef(l), IndexedRef(r)) => l.compare(r, depth + 1, max_depth)?, // Disallow comparison for delayed values. // (see `ValueImpl::equals` above for details on reasoning behind it) @@ -621,7 +650,7 @@ impl ValueImpl { let o = fun1.cmp_dyn(fun2.as_ref())?; if o == Ordering::Equal { for (v1, v2) in captured1.iter().zip(captured2.iter()) { - let o = v1.compare(v2)?; + let o = v1.compare(v2, depth + 1, max_depth)?; if o != Ordering::Equal { return Ok(o); } @@ -660,7 +689,7 @@ impl ValueImpl { } impl Container { - fn equals(&self, other: &Self) -> PartialVMResult { + fn equals(&self, other: &Self, depth: u64, max_depth: Option) -> PartialVMResult { use Container::*; let res = match (self, other) { @@ -672,7 +701,7 @@ impl Container { return Ok(false); } for (v1, v2) in l.iter().zip(r.iter()) { - if !v1.equals(v2)? { + if !v1.equals(v2, depth + 1, max_depth)? { return Ok(false); } } @@ -710,7 +739,12 @@ impl Container { Ok(res) } - fn compare(&self, other: &Self) -> PartialVMResult { + fn compare( + &self, + other: &Self, + depth: u64, + max_depth: Option, + ) -> PartialVMResult { use Container::*; let res = match (self, other) { @@ -719,7 +753,7 @@ impl Container { let r = &r.borrow(); for (v1, v2) in l.iter().zip(r.iter()) { - let value_cmp = v1.compare(v2)?; + let value_cmp = v1.compare(v2, depth + 1, max_depth)?; if value_cmp.is_ne() { return Ok(value_cmp); } @@ -761,19 +795,30 @@ impl Container { } impl ContainerRef { - fn equals(&self, other: &Self) -> PartialVMResult { - self.container().equals(other.container()) + fn equals(&self, other: &Self, depth: u64, max_depth: Option) -> PartialVMResult { + // Note: the depth passed in accounts for the container. + check_depth(depth, max_depth)?; + self.container().equals(other.container(), depth, max_depth) } - fn compare(&self, other: &Self) -> PartialVMResult { - self.container().compare(other.container()) + fn compare( + &self, + other: &Self, + depth: u64, + max_depth: Option, + ) -> PartialVMResult { + // Note: the depth passed in accounts for the container. 
+ check_depth(depth, max_depth)?; + self.container() + .compare(other.container(), depth, max_depth) } } impl IndexedRef { - fn equals(&self, other: &Self) -> PartialVMResult { + fn equals(&self, other: &Self, depth: u64, max_depth: Option) -> PartialVMResult { use Container::*; + check_depth(depth, max_depth)?; let res = match ( self.container_ref.container(), other.container_ref.container(), @@ -787,7 +832,9 @@ impl IndexedRef { | (Struct(r1), Locals(r2)) | (Locals(r1), Vec(r2)) | (Locals(r1), Struct(r2)) - | (Locals(r1), Locals(r2)) => r1.borrow()[self.idx].equals(&r2.borrow()[other.idx])?, + | (Locals(r1), Locals(r2)) => { + r1.borrow()[self.idx].equals(&r2.borrow()[other.idx], depth + 1, max_depth)? + }, (VecU8(r1), VecU8(r2)) => r1.borrow()[self.idx] == r2.borrow()[other.idx], (VecU16(r1), VecU16(r2)) => r1.borrow()[self.idx] == r2.borrow()[other.idx], @@ -872,7 +919,12 @@ impl IndexedRef { Ok(res) } - fn compare(&self, other: &Self) -> PartialVMResult { + fn compare( + &self, + other: &Self, + depth: u64, + max_depth: Option, + ) -> PartialVMResult { use Container::*; let res = match ( @@ -888,7 +940,9 @@ impl IndexedRef { | (Struct(r1), Locals(r2)) | (Locals(r1), Vec(r2)) | (Locals(r1), Struct(r2)) - | (Locals(r1), Locals(r2)) => r1.borrow()[self.idx].compare(&r2.borrow()[other.idx])?, + | (Locals(r1), Locals(r2)) => { + r1.borrow()[self.idx].compare(&r2.borrow()[other.idx], depth + 1, max_depth)? + }, (VecU8(r1), VecU8(r2)) => r1.borrow()[self.idx].cmp(&r2.borrow()[other.idx]), (VecU16(r1), VecU16(r2)) => r1.borrow()[self.idx].cmp(&r2.borrow()[other.idx]), @@ -976,11 +1030,25 @@ impl IndexedRef { impl Value { pub fn equals(&self, other: &Self) -> PartialVMResult { - self.0.equals(&other.0) + self.0 + .equals(&other.0, 1, Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH)) } pub fn compare(&self, other: &Self) -> PartialVMResult { - self.0.compare(&other.0) + self.0 + .compare(&other.0, 1, Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH)) + } + + // Test-only API to test depth checks. + #[cfg(test)] + pub fn equals_with_depth(&self, other: &Self, max_depth: u64) -> PartialVMResult { + self.0.equals(&other.0, 1, Some(max_depth)) + } + + // Test-only API to test depth checks. 
+ #[cfg(test)] + pub fn compare_with_depth(&self, other: &Self, max_depth: u64) -> PartialVMResult { + self.0.compare(&other.0, 1, Some(max_depth)) } } @@ -993,18 +1061,20 @@ impl Value { **************************************************************************************/ impl ContainerRef { - fn read_ref(self) -> PartialVMResult { - Ok(Value(ValueImpl::Container(self.container().copy_value()?))) + fn read_ref(self, depth: u64, max_depth: Option) -> PartialVMResult { + Ok(Value(ValueImpl::Container( + self.container().copy_value(depth, max_depth)?, + ))) } } impl IndexedRef { - fn read_ref(self) -> PartialVMResult { + fn read_ref(self, depth: u64, max_depth: Option) -> PartialVMResult { use Container::*; let res = match self.container_ref.container() { - Vec(r) => r.borrow()[self.idx].copy_value()?, - Struct(r) => r.borrow()[self.idx].copy_value()?, + Vec(r) => r.borrow()[self.idx].copy_value(depth + 1, max_depth)?, + Struct(r) => r.borrow()[self.idx].copy_value(depth + 1, max_depth)?, VecU8(r) => ValueImpl::U8(r.borrow()[self.idx]), VecU16(r) => ValueImpl::U16(r.borrow()[self.idx]), @@ -1015,7 +1085,7 @@ impl IndexedRef { VecBool(r) => ValueImpl::Bool(r.borrow()[self.idx]), VecAddress(r) => ValueImpl::Address(r.borrow()[self.idx]), - Locals(r) => r.borrow()[self.idx].copy_value()?, + Locals(r) => r.borrow()[self.idx].copy_value(depth + 1, max_depth)?, }; Ok(Value(res)) @@ -1023,23 +1093,33 @@ impl IndexedRef { } impl ReferenceImpl { - fn read_ref(self) -> PartialVMResult { + fn read_ref(self, depth: u64, max_depth: Option) -> PartialVMResult { match self { - Self::ContainerRef(r) => r.read_ref(), - Self::IndexedRef(r) => r.read_ref(), + Self::ContainerRef(r) => r.read_ref(depth, max_depth), + Self::IndexedRef(r) => r.read_ref(depth, max_depth), } } } impl StructRef { pub fn read_ref(self) -> PartialVMResult { - self.0.read_ref() + self.0.read_ref(1, Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH)) + } + + #[cfg(test)] + pub fn read_ref_with_depth(self, max_depth: u64) -> PartialVMResult { + self.0.read_ref(1, Some(max_depth)) } } impl Reference { pub fn read_ref(self) -> PartialVMResult { - self.0.read_ref() + self.0.read_ref(1, Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH)) + } + + #[cfg(test)] + pub fn read_ref_with_depth(self, max_depth: u64) -> PartialVMResult { + self.0.read_ref(1, Some(max_depth)) } } @@ -1459,7 +1539,7 @@ impl ContainerRef { | ValueImpl::ClosureValue(_) | ValueImpl::DelayedFieldID { .. } => ValueImpl::IndexedRef(IndexedRef { idx, - container_ref: self.copy_value(), + container_ref: self.copy_by_ref(), }), ValueImpl::ContainerRef(_) | ValueImpl::Invalid | ValueImpl::IndexedRef(_) => { @@ -1480,7 +1560,7 @@ impl ContainerRef { | Container::VecAddress(_) | Container::VecBool(_) => ValueImpl::IndexedRef(IndexedRef { idx, - container_ref: self.copy_value(), + container_ref: self.copy_by_ref(), }), }) } @@ -1618,13 +1698,23 @@ impl Locals { } pub fn copy_loc(&self, idx: usize) -> PartialVMResult { + self.copy_loc_impl(idx, Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH)) + } + + // Test-only API to test depth checks. 
+ #[cfg(test)] + pub fn copy_loc_with_depth(&self, idx: usize, max_depth: u64) -> PartialVMResult { + self.copy_loc_impl(idx, Some(max_depth)) + } + + fn copy_loc_impl(&self, idx: usize, max_depth: Option) -> PartialVMResult { let v = self.0.borrow(); match v.get(idx) { Some(ValueImpl::Invalid) => Err(PartialVMError::new( StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, ) .with_message(format!("cannot copy invalid value at index {}", idx))), - Some(v) => Ok(Value(v.copy_value()?)), + Some(v) => Ok(Value(v.copy_value(1, max_depth)?)), None => Err( PartialVMError::new(StatusCode::VERIFIER_INVARIANT_VIOLATION).with_message( format!("local index out of bounds: got {}, len: {}", idx, v.len()), @@ -3736,6 +3826,7 @@ pub(crate) struct SerializationReadyValue<'c, 'l, 'v, L, V> { pub(crate) layout: &'l L, // Value to serialize. pub(crate) value: &'v V, + pub(crate) depth: u64, } fn invariant_violation(message: String) -> S::Error { @@ -3748,6 +3839,7 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveTypeLayout, Va fn serialize(&self, serializer: S) -> Result { use MoveTypeLayout as L; + self.ctx.check_depth(self.depth).map_err(S::Error::custom)?; match (self.layout, self.value) { // Primitive types. (L::U8, ValueImpl::U8(x)) => serializer.serialize_u8(*x), @@ -3765,6 +3857,9 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveTypeLayout, Va ctx: self.ctx, layout: struct_layout, value: &*r.borrow(), + // Note: for struct, we increment depth for fields in the corresponding + // serializer. + depth: self.depth, }) .serialize(serializer) }, @@ -3774,6 +3869,9 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveTypeLayout, Va ctx: self.ctx, layout: &(), value: clos, + // Note: for functions, we increment depth for captured arguments in the + // corresponding serializer. + depth: self.depth, } .serialize(serializer), @@ -3797,6 +3895,7 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveTypeLayout, Va ctx: self.ctx, layout, value, + depth: self.depth + 1, })?; } t.end() @@ -3842,6 +3941,7 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveTypeLayout, Va ctx: self.ctx, layout: &MoveStructLayout::signer_serialization_layout(), value: &*r.borrow(), + depth: self.depth, }) .serialize(serializer) } @@ -3875,6 +3975,7 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveTypeLayout, Va ctx: &ctx, layout: layout.as_ref(), value: &value.0, + depth: self.depth, }; value.serialize(serializer) }, @@ -3928,6 +4029,7 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveStructLayout, ctx: self.ctx, layout: &variant_layouts[0], value: &values[0], + depth: self.depth + 1, }, ), _ => { @@ -3942,6 +4044,7 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveStructLayout, ctx: self.ctx, layout, value, + depth: self.depth + 1, })? } t.end() @@ -3961,6 +4064,7 @@ impl serde::Serialize for SerializationReadyValue<'_, '_, '_, MoveStructLayout, ctx: self.ctx, layout: field_layout, value, + depth: self.depth + 1, })?; } t.end() @@ -4312,7 +4416,11 @@ impl Value { pub fn deserialize_constant(constant: &Constant) -> Option { let layout = Self::constant_sig_token_to_layout(&constant.type_)?; - ValueSerDeContext::new().deserialize(&constant.data, &layout) + // INVARIANT: + // For constants, layout depth is bounded and cannot contain function values. Hence, + // serialization depth is bounded. We still enable depth checks as a precaution. 
+ ValueSerDeContext::new(Some(DEFAULT_MAX_VM_VALUE_NESTED_DEPTH)) + .deserialize(&constant.data, &layout) } } @@ -4335,26 +4443,28 @@ impl Drop for Locals { * **************************************************************************************/ impl Container { - fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: usize) { + fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: u64) -> PartialVMResult<()> { use Container::*; match self { Locals(_) => unreachable!("Should not ba able to visit a Locals container directly"), Vec(r) => { let r = r.borrow(); - if visitor.visit_vec(depth, r.len()) { + if visitor.visit_vec(depth, r.len())? { for val in r.iter() { - val.visit_impl(visitor, depth + 1); + val.visit_impl(visitor, depth + 1)?; } } + Ok(()) }, Struct(r) => { let r = r.borrow(); - if visitor.visit_struct(depth, r.len()) { + if visitor.visit_struct(depth, r.len())? { for val in r.iter() { - val.visit_impl(visitor, depth + 1); + val.visit_impl(visitor, depth + 1)?; } } + Ok(()) }, VecU8(r) => visitor.visit_vec_u8(depth, &r.borrow()), VecU16(r) => visitor.visit_vec_u16(depth, &r.borrow()), @@ -4367,7 +4477,12 @@ impl Container { } } - fn visit_indexed(&self, visitor: &mut impl ValueVisitor, depth: usize, idx: usize) { + fn visit_indexed( + &self, + visitor: &mut impl ValueVisitor, + depth: u64, + idx: usize, + ) -> PartialVMResult<()> { use Container::*; match self { @@ -4385,18 +4500,19 @@ impl Container { } impl Closure { - fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: usize) { + fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: u64) -> PartialVMResult<()> { let Self(_, captured) = self; - if visitor.visit_closure(depth, captured.len()) { + if visitor.visit_closure(depth, captured.len())? { for val in captured { - val.visit_impl(visitor, depth + 1); + val.visit_impl(visitor, depth + 1)?; } } + Ok(()) } } impl ContainerRef { - fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: usize) { + fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: u64) -> PartialVMResult<()> { use ContainerRef::*; let (container, is_global) = match self { @@ -4404,14 +4520,15 @@ impl ContainerRef { Global { container, .. } => (container, false), }; - if visitor.visit_ref(depth, is_global) { - container.visit_impl(visitor, depth + 1); + if visitor.visit_ref(depth, is_global)? { + container.visit_impl(visitor, depth + 1)?; } + Ok(()) } } impl IndexedRef { - fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: usize) { + fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: u64) -> PartialVMResult<()> { use ContainerRef::*; let (container, is_global) = match &self.container_ref { @@ -4419,19 +4536,19 @@ impl IndexedRef { Global { container, .. } => (container, false), }; - if visitor.visit_ref(depth, is_global) { - container.visit_indexed(visitor, depth, self.idx) + if visitor.visit_ref(depth, is_global)? 
{ + container.visit_indexed(visitor, depth, self.idx)?; } + Ok(()) } } impl ValueImpl { - fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: usize) { + fn visit_impl(&self, visitor: &mut impl ValueVisitor, depth: u64) -> PartialVMResult<()> { use ValueImpl::*; match self { Invalid => unreachable!("Should not be able to visit an invalid value"), - U8(val) => visitor.visit_u8(depth, *val), U16(val) => visitor.visit_u16(depth, *val), U32(val) => visitor.visit_u32(depth, *val), @@ -4440,64 +4557,46 @@ impl ValueImpl { U256(val) => visitor.visit_u256(depth, *val), Bool(val) => visitor.visit_bool(depth, *val), Address(val) => visitor.visit_address(depth, *val), - Container(c) => c.visit_impl(visitor, depth), - ContainerRef(r) => r.visit_impl(visitor, depth), IndexedRef(r) => r.visit_impl(visitor, depth), - ClosureValue(c) => c.visit_impl(visitor, depth), - DelayedFieldID { id } => visitor.visit_delayed(depth, *id), } } } impl ValueView for ValueImpl { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { self.visit_impl(visitor, 0) } } impl ValueView for Value { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { self.0.visit(visitor) } } impl ValueView for Struct { - fn visit(&self, visitor: &mut impl ValueVisitor) { - if visitor.visit_struct(0, self.fields.len()) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { + if visitor.visit_struct(0, self.fields.len())? { for val in self.fields.iter() { - val.visit_impl(visitor, 1); + val.visit_impl(visitor, 1)?; } } + Ok(()) } } impl ValueView for Vector { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { self.0.visit_impl(visitor, 0) } } -impl ValueView for IntegerValue { - fn visit(&self, visitor: &mut impl ValueVisitor) { - use IntegerValue::*; - - match self { - U8(val) => visitor.visit_u8(0, *val), - U16(val) => visitor.visit_u16(0, *val), - U32(val) => visitor.visit_u32(0, *val), - U64(val) => visitor.visit_u64(0, *val), - U128(val) => visitor.visit_u128(0, *val), - U256(val) => visitor.visit_u256(0, *val), - } - } -} - impl ValueView for Reference { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { use ReferenceImpl::*; match &self.0 { @@ -4508,19 +4607,13 @@ impl ValueView for Reference { } impl ValueView for VectorRef { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { self.0.visit_impl(visitor, 0) } } impl ValueView for StructRef { - fn visit(&self, visitor: &mut impl ValueVisitor) { - self.0.visit_impl(visitor, 0) - } -} - -impl ValueView for SignerRef { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { self.0.visit_impl(visitor, 0) } } @@ -4541,7 +4634,7 @@ impl Vector { } impl ValueView for ElemView<'_> { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { self.container.visit_indexed(visitor, 0, self.idx) } } @@ -4560,7 +4653,7 @@ impl Reference { struct ValueBehindRef<'b>(&'b ReferenceImpl); impl ValueView for ValueBehindRef<'_> { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { use 
ReferenceImpl::*; match self.0 { @@ -4581,13 +4674,14 @@ impl GlobalValue { struct Wrapper<'b>(&'b Rc>>); impl ValueView for Wrapper<'_> { - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { let r = self.0.borrow(); - if visitor.visit_struct(0, r.len()) { + if visitor.visit_struct(0, r.len())? { for val in r.iter() { - val.visit_impl(visitor, 1); + val.visit_impl(visitor, 1)?; } } + Ok(()) } } @@ -4856,3 +4950,10 @@ fn try_get_variant_field_layouts<'a>( } None } + +fn check_depth(depth: u64, max_depth: Option) -> PartialVMResult<()> { + if max_depth.map_or(false, |max_depth| depth > max_depth) { + return Err(PartialVMError::new(StatusCode::VM_MAX_VALUE_DEPTH_REACHED)); + } + Ok(()) +} diff --git a/third_party/move/move-vm/types/src/views.rs b/third_party/move/move-vm/types/src/views.rs index 120e712adc89f..e68cc23434f49 100644 --- a/third_party/move/move-vm/types/src/views.rs +++ b/third_party/move/move-vm/types/src/views.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{delayed_values::delayed_field_id::DelayedFieldID, values::LEGACY_CLOSURE_SIZE}; +use move_binary_format::errors::PartialVMResult; use move_core_types::{ account_address::AccountAddress, gas_algebra::AbstractMemorySize, language_storage::TypeTag, }; @@ -21,7 +22,7 @@ pub trait TypeView { /// This is used to expose certain info to clients (e.g. the gas meter), /// usually in a lazily evaluated fashion. pub trait ValueView { - fn visit(&self, visitor: &mut impl ValueVisitor); + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()>; /// Returns the abstract memory size of the value. /// @@ -36,99 +37,129 @@ pub trait ValueView { struct Acc(AbstractMemorySize); impl ValueVisitor for Acc { - fn visit_delayed(&mut self, _depth: usize, _id: DelayedFieldID) { + fn visit_delayed(&mut self, _depth: u64, _id: DelayedFieldID) -> PartialVMResult<()> { // TODO[agg_v2](cleanup): `legacy_abstract_memory_size` is not used // anyway, so this function will be removed soon (hopefully). // Contributions are appreciated! 
+ Ok(()) } - fn visit_u8(&mut self, _depth: usize, _val: u8) { + fn visit_u8(&mut self, _depth: u64, _val: u8) -> PartialVMResult<()> { self.0 += LEGACY_CONST_SIZE; + Ok(()) } - fn visit_u16(&mut self, _depth: usize, _val: u16) { + fn visit_u16(&mut self, _depth: u64, _val: u16) -> PartialVMResult<()> { self.0 += LEGACY_CONST_SIZE; + Ok(()) } - fn visit_u32(&mut self, _depth: usize, _val: u32) { + fn visit_u32(&mut self, _depth: u64, _val: u32) -> PartialVMResult<()> { self.0 += LEGACY_CONST_SIZE; + Ok(()) } - fn visit_u64(&mut self, _depth: usize, _val: u64) { + fn visit_u64(&mut self, _depth: u64, _val: u64) -> PartialVMResult<()> { self.0 += LEGACY_CONST_SIZE; + Ok(()) } - fn visit_u128(&mut self, _depth: usize, _val: u128) { + fn visit_u128(&mut self, _depth: u64, _val: u128) -> PartialVMResult<()> { self.0 += LEGACY_CONST_SIZE; + Ok(()) } - fn visit_u256(&mut self, _depth: usize, _val: move_core_types::u256::U256) { + fn visit_u256( + &mut self, + _depth: u64, + _val: move_core_types::u256::U256, + ) -> PartialVMResult<()> { self.0 += LEGACY_CONST_SIZE; + Ok(()) } - fn visit_bool(&mut self, _depth: usize, _val: bool) { + fn visit_bool(&mut self, _depth: u64, _val: bool) -> PartialVMResult<()> { self.0 += LEGACY_CONST_SIZE; + Ok(()) } - fn visit_address(&mut self, _depth: usize, _val: AccountAddress) { + fn visit_address(&mut self, _depth: u64, _val: AccountAddress) -> PartialVMResult<()> { self.0 += AbstractMemorySize::new(AccountAddress::LENGTH as u64); + Ok(()) } - fn visit_struct(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_struct(&mut self, _depth: u64, _len: usize) -> PartialVMResult { self.0 += LEGACY_STRUCT_SIZE; - true + Ok(true) } - fn visit_closure(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_closure(&mut self, _depth: u64, _len: usize) -> PartialVMResult { self.0 += LEGACY_CLOSURE_SIZE; - true + Ok(true) } - fn visit_vec(&mut self, _depth: usize, _len: usize) -> bool { + fn visit_vec(&mut self, _depth: u64, _len: usize) -> PartialVMResult { self.0 += LEGACY_STRUCT_SIZE; - true + Ok(true) } - fn visit_vec_u8(&mut self, _depth: usize, vals: &[u8]) { + fn visit_vec_u8(&mut self, _depth: u64, vals: &[u8]) -> PartialVMResult<()> { self.0 += (size_of_val(vals) as u64).into(); + Ok(()) } - fn visit_vec_u16(&mut self, _depth: usize, vals: &[u16]) { + fn visit_vec_u16(&mut self, _depth: u64, vals: &[u16]) -> PartialVMResult<()> { self.0 += (size_of_val(vals) as u64).into(); + Ok(()) } - fn visit_vec_u32(&mut self, _depth: usize, vals: &[u32]) { + fn visit_vec_u32(&mut self, _depth: u64, vals: &[u32]) -> PartialVMResult<()> { self.0 += (size_of_val(vals) as u64).into(); + Ok(()) } - fn visit_vec_u64(&mut self, _depth: usize, vals: &[u64]) { + fn visit_vec_u64(&mut self, _depth: u64, vals: &[u64]) -> PartialVMResult<()> { self.0 += (size_of_val(vals) as u64).into(); + Ok(()) } - fn visit_vec_u128(&mut self, _depth: usize, vals: &[u128]) { + fn visit_vec_u128(&mut self, _depth: u64, vals: &[u128]) -> PartialVMResult<()> { self.0 += (size_of_val(vals) as u64).into(); + Ok(()) } - fn visit_vec_u256(&mut self, _depth: usize, vals: &[move_core_types::u256::U256]) { + fn visit_vec_u256( + &mut self, + _depth: u64, + vals: &[move_core_types::u256::U256], + ) -> PartialVMResult<()> { self.0 += (size_of_val(vals) as u64).into(); + Ok(()) } - fn visit_vec_bool(&mut self, _depth: usize, vals: &[bool]) { + fn visit_vec_bool(&mut self, _depth: u64, vals: &[bool]) -> PartialVMResult<()> { self.0 += (size_of_val(vals) as u64).into(); + Ok(()) } - fn 
visit_vec_address(&mut self, _depth: usize, vals: &[AccountAddress]) { + fn visit_vec_address( + &mut self, + _depth: u64, + vals: &[AccountAddress], + ) -> PartialVMResult<()> { self.0 += (size_of_val(vals) as u64).into(); + Ok(()) } - fn visit_ref(&mut self, _depth: usize, _is_global: bool) -> bool { + fn visit_ref(&mut self, _depth: u64, _is_global: bool) -> PartialVMResult { self.0 += LEGACY_REFERENCE_SIZE; - false + Ok(false) } } let mut acc = Acc(0.into()); - self.visit(&mut acc); + self.visit(&mut acc) + .expect("Legacy function: should not fail"); acc.0 } @@ -136,76 +167,86 @@ pub trait ValueView { /// Trait that defines a visitor that could be used to traverse a value recursively. pub trait ValueVisitor { - fn visit_delayed(&mut self, depth: usize, id: DelayedFieldID); - fn visit_u8(&mut self, depth: usize, val: u8); - fn visit_u16(&mut self, depth: usize, val: u16); - fn visit_u32(&mut self, depth: usize, val: u32); - fn visit_u64(&mut self, depth: usize, val: u64); - fn visit_u128(&mut self, depth: usize, val: u128); - fn visit_u256(&mut self, depth: usize, val: move_core_types::u256::U256); - fn visit_bool(&mut self, depth: usize, val: bool); - fn visit_address(&mut self, depth: usize, val: AccountAddress); - - fn visit_struct(&mut self, depth: usize, len: usize) -> bool; - fn visit_closure(&mut self, depth: usize, len: usize) -> bool; - fn visit_vec(&mut self, depth: usize, len: usize) -> bool; - - fn visit_ref(&mut self, depth: usize, is_global: bool) -> bool; - - fn visit_vec_u8(&mut self, depth: usize, vals: &[u8]) { - self.visit_vec(depth, vals.len()); + fn visit_delayed(&mut self, depth: u64, id: DelayedFieldID) -> PartialVMResult<()>; + fn visit_u8(&mut self, depth: u64, val: u8) -> PartialVMResult<()>; + fn visit_u16(&mut self, depth: u64, val: u16) -> PartialVMResult<()>; + fn visit_u32(&mut self, depth: u64, val: u32) -> PartialVMResult<()>; + fn visit_u64(&mut self, depth: u64, val: u64) -> PartialVMResult<()>; + fn visit_u128(&mut self, depth: u64, val: u128) -> PartialVMResult<()>; + fn visit_u256(&mut self, depth: u64, val: move_core_types::u256::U256) -> PartialVMResult<()>; + fn visit_bool(&mut self, depth: u64, val: bool) -> PartialVMResult<()>; + fn visit_address(&mut self, depth: u64, val: AccountAddress) -> PartialVMResult<()>; + fn visit_struct(&mut self, depth: u64, len: usize) -> PartialVMResult; + fn visit_closure(&mut self, depth: u64, len: usize) -> PartialVMResult; + fn visit_vec(&mut self, depth: u64, len: usize) -> PartialVMResult; + fn visit_ref(&mut self, depth: u64, is_global: bool) -> PartialVMResult; + + fn visit_vec_u8(&mut self, depth: u64, vals: &[u8]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; for val in vals { - self.visit_u8(depth + 1, *val); + self.visit_u8(depth + 1, *val)?; } + Ok(()) } - fn visit_vec_u16(&mut self, depth: usize, vals: &[u16]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u16(&mut self, depth: u64, vals: &[u16]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; for val in vals { - self.visit_u16(depth + 1, *val); + self.visit_u16(depth + 1, *val)?; } + Ok(()) } - fn visit_vec_u32(&mut self, depth: usize, vals: &[u32]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u32(&mut self, depth: u64, vals: &[u32]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; for val in vals { - self.visit_u32(depth + 1, *val); + self.visit_u32(depth + 1, *val)?; } + Ok(()) } - fn visit_vec_u64(&mut self, depth: usize, vals: &[u64]) { - self.visit_vec(depth, vals.len()); + fn 
visit_vec_u64(&mut self, depth: u64, vals: &[u64]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; for val in vals { - self.visit_u64(depth + 1, *val); + self.visit_u64(depth + 1, *val)?; } + Ok(()) } - fn visit_vec_u128(&mut self, depth: usize, vals: &[u128]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u128(&mut self, depth: u64, vals: &[u128]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; for val in vals { - self.visit_u128(depth + 1, *val); + self.visit_u128(depth + 1, *val)?; } + Ok(()) } - fn visit_vec_u256(&mut self, depth: usize, vals: &[move_core_types::u256::U256]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_u256( + &mut self, + depth: u64, + vals: &[move_core_types::u256::U256], + ) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; for val in vals { - self.visit_u256(depth + 1, *val); + self.visit_u256(depth + 1, *val)?; } + Ok(()) } - fn visit_vec_bool(&mut self, depth: usize, vals: &[bool]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_bool(&mut self, depth: u64, vals: &[bool]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; for val in vals { - self.visit_bool(depth + 1, *val); + self.visit_bool(depth + 1, *val)?; } + Ok(()) } - fn visit_vec_address(&mut self, depth: usize, vals: &[AccountAddress]) { - self.visit_vec(depth, vals.len()); + fn visit_vec_address(&mut self, depth: u64, vals: &[AccountAddress]) -> PartialVMResult<()> { + self.visit_vec(depth, vals.len())?; for val in vals { - self.visit_address(depth + 1, *val); + self.visit_address(depth + 1, *val)?; } + Ok(()) } } @@ -217,7 +258,7 @@ where ::legacy_abstract_memory_size(*self) } - fn visit(&self, visitor: &mut impl ValueVisitor) { + fn visit(&self, visitor: &mut impl ValueVisitor) -> PartialVMResult<()> { ::visit(*self, visitor) } } diff --git a/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs b/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs index 05dd883784212..0b8c8b7cbac5c 100644 --- a/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs +++ b/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs @@ -47,7 +47,11 @@ use move_vm_test_utils::{ gas_schedule::{CostTable, Gas, GasStatus}, InMemoryStorage, }; -use move_vm_types::{resolver::ResourceResolver, value_serde::ValueSerDeContext, values::Value}; +use move_vm_types::{ + resolver::ResourceResolver, + value_serde::{FunctionValueExtension, ValueSerDeContext}, + values::Value, +}; use once_cell::sync::Lazy; use std::{ collections::{BTreeMap, BTreeSet}, @@ -376,8 +380,10 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { fn deserialize(&self, bytes: &[u8], layout: &MoveTypeLayout) -> Option { let module_storage = self.storage.as_unsync_module_storage(); - ValueSerDeContext::new() - .with_func_args_deserialization(&module_storage.as_function_value_extension()) + let function_extension = module_storage.as_function_value_extension(); + let max_value_nest_depth = function_extension.max_value_nest_depth(); + ValueSerDeContext::new(max_value_nest_depth) + .with_func_args_deserialization(&function_extension) .deserialize(bytes, layout) } } diff --git a/third_party/move/tools/move-decompiler/tests/nested_loops.exp b/third_party/move/tools/move-decompiler/tests/nested_loops.exp new file mode 100644 index 0000000000000..cdff30260333f --- /dev/null +++ b/third_party/move/tools/move-decompiler/tests/nested_loops.exp @@ -0,0 +1,235 @@ + +module 0x99::m { + fun 
nested_for_loops() { + let _t4; + let _t3; + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + _t2 = false; + 'l0: loop { + if (_t2) _t1 = _t1 + 1 else _t2 = true; + if (!(_t1 < 10)) break; + _t0 = _t0 + 1; + _t3 = _t1; + _t4 = false; + loop { + if (_t4) _t3 = _t3 + 1 else _t4 = true; + if (!(_t3 < 10)) continue 'l0; + _t0 = _t0 + 1 + }; + break + }; + } + fun nested_for_while_loop_loops() { + let _t4; + let _t3; + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + _t2 = false; + 'l0: loop { + if (_t2) _t1 = _t1 + 1 else _t2 = true; + if (!(_t1 < 5)) break; + _t0 = _t0 + 1; + _t3 = _t1; + 'l1: loop { + if (!(_t3 < 10)) continue 'l0; + _t0 = _t0 + 1; + _t4 = _t3; + _t3 = _t3 + 1; + loop { + _t0 = _t0 + 1; + if (_t4 > 10) continue 'l1; + _t4 = _t4 + 1 + }; + break + }; + break + }; + } + fun nested_for_while_loops() { + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + _t2 = false; + 'l0: loop { + if (_t2) _t1 = _t1 + 1 else _t2 = true; + if (!(_t1 < 5)) break; + _t0 = _t0 + 1; + loop { + if (!(_t0 < 5)) continue 'l0; + _t0 = _t0 + 10 + }; + break + }; + } + fun nested_loop_for_loops() { + let _t4; + let _t3; + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + 'l0: loop { + _t0 = _t0 + 1; + _t2 = 0; + if (_t0 > 3) break; + _t3 = 0; + _t4 = false; + loop { + if (_t4) _t3 = _t3 + 1 else _t4 = true; + if (!(_t3 < 5)) continue 'l0; + _t2 = _t2 + 1; + _t1 = _t1 + 1 + }; + break + }; + } + fun nested_loop_loops() { + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + 'l0: loop { + _t0 = _t0 + 1; + _t2 = 0; + if (_t0 > 3) break; + loop { + _t2 = _t2 + 1; + _t1 = _t1 + 1; + if (_t2 > 7) continue 'l0 + }; + break + }; + } + fun nested_loop_while_loops() { + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + 'l0: loop { + _t0 = _t0 + 1; + _t2 = 0; + if (_t0 > 3) break; + loop { + if (!(_t2 < 7)) continue 'l0; + _t2 = _t2 + 1; + _t1 = _t1 + 1 + }; + break + }; + } + fun nested_while_loops() { + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + 'l0: while (_t0 < 3) { + _t0 = _t0 + 1; + _t2 = 0; + loop { + if (!(_t2 < 7)) continue 'l0; + _t2 = _t2 + 1; + _t1 = _t1 + 1 + }; + break + }; + } + fun three_layer_for_loops() { + let _t6; + let _t5; + let _t4; + let _t3; + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + _t2 = false; + 'l0: loop { + if (_t2) _t1 = _t1 + 1 else _t2 = true; + if (!(_t1 < 10)) break; + _t0 = _t0 + 1; + _t3 = _t1; + _t4 = false; + 'l1: loop { + if (_t4) _t3 = _t3 + 1 else _t4 = true; + if (!(_t3 < 10)) continue 'l0; + _t0 = _t0 + 1; + _t5 = _t3; + _t6 = false; + loop { + if (_t6) _t5 = _t5 + 1 else _t6 = true; + if (!(_t5 < 10)) continue 'l1; + _t0 = _t0 + 1 + }; + break + }; + break + }; + } + fun three_layer_loop_loops() { + let _t3; + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + 'l0: loop { + _t0 = _t0 + 1; + _t2 = _t1; + if (_t1 > 10) break; + _t1 = _t1 + 1; + 'l1: loop { + _t0 = _t0 + 1; + _t3 = _t2; + if (_t2 > 10) continue 'l0; + _t2 = _t2 + 1; + loop { + _t0 = _t0 + 1; + if (_t3 > 10) continue 'l1; + _t3 = _t3 + 1 + }; + break + }; + break + }; + } + fun three_layer_while_loops() { + let _t3; + let _t2; + let _t1; + let _t0; + _t0 = 0; + _t1 = 0; + 'l0: while (_t1 < 10) { + _t0 = _t0 + 1; + _t2 = _t1; + _t1 = _t1 + 1; + 'l1: loop { + if (!(_t2 < 10)) continue 'l0; + _t0 = _t0 + 1; + _t3 = _t2; + _t2 = _t2 + 1; + loop { + if (!(_t3 < 10)) continue 'l1; + _t0 = _t0 + 1; + _t3 = _t3 + 1 + }; + break + }; + break + }; + } +} diff --git a/third_party/move/tools/move-decompiler/tests/nested_loops.move 
b/third_party/move/tools/move-decompiler/tests/nested_loops.move new file mode 100644 index 0000000000000..b298bab4c0138 --- /dev/null +++ b/third_party/move/tools/move-decompiler/tests/nested_loops.move @@ -0,0 +1,163 @@ +//* Test cases with nested loops +module 0x99::m { + fun nested_for_loops() { + let y = 0; + for (i in 0..10) { + y = y + 1; + for (j in i..10) { + y = y + 1; + }; + }; + } + + fun nested_while_loops() { + let x = 0; + let z = 0; + let y; + while (x < 3) { + x = x + 1; + y = 0; + while (y < 7) { + y = y + 1; + z = z + 1; + } + }; + } + + fun nested_loop_loops () { + let x = 0; + let z = 0; + let y; + loop { + x = x + 1; + y = 0; + if (x > 3) + break; + loop { + y = y + 1; + z = z + 1; + if (y > 7) + break; + }; + }; + } + + fun nested_for_while_loops() { + let y = 0; + for (i in 0..5) { + y = y + 1; + while (y < 5) { + y = y + 10; + }; + }; + } + + fun nested_loop_while_loops() { + let x = 0; + let z = 0; + let y; + loop { + x = x + 1; + y = 0; + if (x > 3) + break; + while (y < 7) { + y = y + 1; + z = z + 1; + } + }; + } + + fun nested_loop_for_loops() { + let x = 0; + let z = 0; + let y; + loop { + x = x + 1; + y = 0; + if (x > 3) + break; + for (i in 0..5) { + y = y + 1; + z = z + 1; + } + }; + } + + fun three_layer_for_loops(){ + let y = 0; + for (i in 0..10) { + y = y + 1; + for (j in i..10) { + y = y + 1; + for (k in j..10) { + y = y + 1; + }; + }; + }; + } + + fun three_layer_while_loops(){ + let y = 0; + let i = 0; + while(i < 10) { + y = y + 1; + let j = i; + i = i + 1; + while(j < 10) { + y = y + 1; + let k = j; + j = j + 1; + while(k < 10) { + y = y + 1; + k = k + 1; + }; + }; + }; + } + + fun three_layer_loop_loops(){ + let y = 0; + let i = 0; + loop { + y = y + 1; + let j = i; + if (i > 10) + break; + i = i + 1; + loop { + y = y + 1; + let k = j; + if (j > 10) + break; + j = j + 1; + loop { + y = y + 1; + if (k > 10) + break; + k = k + 1; + }; + }; + }; + } + + fun nested_for_while_loop_loops(){ + let y = 0; + let i = 0; + for(i in 0..5) { + y = y + 1; + let j = i; + while(j < 10) { + y = y + 1; + let k = j; + j = j + 1; + loop { + y = y + 1; + if (k > 10) + break; + k = k + 1; + }; + }; + }; + } +} diff --git a/third_party/move/tools/move-package/src/compilation/model_builder.rs b/third_party/move/tools/move-package/src/compilation/model_builder.rs index 6762736742d05..c5511f85a5074 100644 --- a/third_party/move/tools/move-package/src/compilation/model_builder.rs +++ b/third_party/move/tools/move-package/src/compilation/model_builder.rs @@ -140,6 +140,13 @@ impl ModelBuilder { options.known_attributes.clone_from(known_attributes); options.skip_attribute_checks = skip_attribute_checks; options.compile_verify_code = true; + options.experiments.clone_from( + &self + .resolution_graph + .build_options + .compiler_config + .experiments, + ); let mut error_writer = StandardStream::stderr(ColorChoice::Auto); move_compiler_v2::run_move_compiler_for_analysis(&mut error_writer, options) }, diff --git a/third_party/move/tools/move-resource-viewer/src/fat_type.rs b/third_party/move/tools/move-resource-viewer/src/fat_type.rs index 4e5577e7e97f1..0de1e174ed5e1 100644 --- a/third_party/move/tools/move-resource-viewer/src/fat_type.rs +++ b/third_party/move/tools/move-resource-viewer/src/fat_type.rs @@ -9,7 +9,7 @@ use move_core_types::{ ability::AbilitySet, account_address::AccountAddress, identifier::Identifier, - language_storage::{FunctionTag, StructTag, TypeTag}, + language_storage::{FunctionParamOrReturnTag, FunctionTag, StructTag, TypeTag}, value::{MoveStructLayout, 
MoveTypeLayout}, vm_status::StatusCode, }; @@ -218,7 +218,17 @@ impl FatFunctionType { pub fn fun_tag(&self, limiter: &mut Limiter) -> PartialVMResult { let tag_slice = |limiter: &mut Limiter, tys: &[FatType]| { tys.iter() - .map(|ty| ty.type_tag(limiter)) + .map(|ty| { + Ok(match ty { + FatType::Reference(ty) => { + FunctionParamOrReturnTag::Reference(ty.type_tag(limiter)?) + }, + FatType::MutableReference(ty) => { + FunctionParamOrReturnTag::MutableReference(ty.type_tag(limiter)?) + }, + ty => FunctionParamOrReturnTag::Value(ty.type_tag(limiter)?), + }) + }) .collect::>>() }; Ok(FunctionTag { @@ -424,9 +434,24 @@ impl From<&StructTag> for FatStructType { } } +impl From<&FunctionParamOrReturnTag> for FatType { + fn from(tag: &FunctionParamOrReturnTag) -> FatType { + use FatType::*; + match tag { + FunctionParamOrReturnTag::Reference(tag) => Reference(Box::new(tag.into())), + FunctionParamOrReturnTag::MutableReference(tag) => { + MutableReference(Box::new(tag.into())) + }, + FunctionParamOrReturnTag::Value(tag) => tag.into(), + } + } +} + impl From<&FunctionTag> for FatFunctionType { fn from(fun_tag: &FunctionTag) -> FatFunctionType { - let into_slice = |tys: &[TypeTag]| tys.iter().map(|ty| ty.into()).collect::>(); + let into_slice = |tys: &[FunctionParamOrReturnTag]| { + tys.iter().map(|ty| ty.into()).collect::>() + }; FatFunctionType { args: into_slice(&fun_tag.args), results: into_slice(&fun_tag.results), diff --git a/third_party/move/tools/move-resource-viewer/src/lib.rs b/third_party/move/tools/move-resource-viewer/src/lib.rs index 0f4851a7074f3..059574f85f9b8 100644 --- a/third_party/move/tools/move-resource-viewer/src/lib.rs +++ b/third_party/move/tools/move-resource-viewer/src/lib.rs @@ -372,11 +372,6 @@ impl MoveValueAnnotator { sig: &SignatureToken, limit: &mut Limiter, ) -> anyhow::Result { - let resolve_slice = |toks: &[SignatureToken], limit: &mut Limiter| { - toks.iter() - .map(|tok| self.resolve_signature(module, tok, limit)) - .collect::>>() - }; Ok(match sig { SignatureToken::Bool => FatType::Bool, SignatureToken::U8 => FatType::U8, @@ -391,6 +386,39 @@ impl MoveValueAnnotator { FatType::Vector(Box::new(self.resolve_signature(module, ty, limit)?)) }, SignatureToken::Function(args, results, abilities) => { + let resolve_slice = |toks: &[SignatureToken], limit: &mut Limiter| { + toks.iter() + .map(|tok| { + // Function type can have references as immediate argument or return + // types. + Ok(match tok { + SignatureToken::Reference(t) => FatType::Reference(Box::new( + self.resolve_signature(module, t, limit)?, + )), + SignatureToken::MutableReference(t) => FatType::MutableReference( + Box::new(self.resolve_signature(module, t, limit)?), + ), + SignatureToken::Bool + | SignatureToken::U8 + | SignatureToken::U64 + | SignatureToken::U128 + | SignatureToken::Address + | SignatureToken::Signer + | SignatureToken::Vector(_) + | SignatureToken::Function(_, _, _) + | SignatureToken::Struct(_) + | SignatureToken::StructInstantiation(_, _) + | SignatureToken::TypeParameter(_) + | SignatureToken::U16 + | SignatureToken::U32 + | SignatureToken::U256 => { + self.resolve_signature(module, tok, limit)? 
+ }, + }) + }) + .collect::>>() + }; + FatType::Function(Box::new(FatFunctionType { args: resolve_slice(args, limit)?, results: resolve_slice(results, limit)?, @@ -402,7 +430,10 @@ impl MoveValueAnnotator { }, SignatureToken::StructInstantiation(idx, toks) => { let struct_ty = self.resolve_struct_handle(module, *idx, limit)?; - let args = resolve_slice(toks, limit)?; + let args = toks + .iter() + .map(|tok| self.resolve_signature(module, tok, limit)) + .collect::>>()?; FatType::Struct(Box::new( struct_ty .subst(&args, limit) @@ -457,8 +488,8 @@ impl MoveValueAnnotator { TypeTag::U128 => FatType::U128, TypeTag::Vector(ty) => FatType::Vector(Box::new(self.resolve_type_impl(ty, limit)?)), TypeTag::Function(..) => { - // TODO(#15664) implement functions for fat types" - todo!("functions for fat types") + // TODO(#15664) implement functions for fat types + bail!("TODO: support functions for fat types") }, }) } @@ -756,7 +787,7 @@ fn pretty_print_struct( indent: u64, ) -> std::fmt::Result { pretty_print_ability_modifiers(f, value.abilities)?; - write!(f, "{}", value.ty_tag)?; + write!(f, "{}", value.ty_tag.to_canonical_string())?; if let Some((_, name)) = &value.variant_info { write!(f, "::{}", name)?; } @@ -817,7 +848,7 @@ fn pretty_print_closure( for ty in ty_args { f.write_str(last_sep)?; last_sep = ", "; - write!(f, "{}", ty)? + write!(f, "{}", ty.to_canonical_string())? } write!(f, ">")? } diff --git a/third_party/move/tools/move-unit-test/src/extensions.rs b/third_party/move/tools/move-unit-test/src/extensions.rs index 05b1058540ef4..0d929a067af15 100644 --- a/third_party/move/tools/move-unit-test/src/extensions.rs +++ b/third_party/move/tools/move-unit-test/src/extensions.rs @@ -84,7 +84,7 @@ fn print_table_extension( "new tables {}", cs.new_tables .iter() - .map(|(k, v)| format!("{}<{},{}>", k, v.key_type, v.value_type)) + .map(|(handle, table)| format!("({handle}: {table})")) .join(", ") ) .unwrap(); diff --git a/types/src/access_path.rs b/types/src/access_path.rs index fd2f537040970..06e92e516357e 100644 --- a/types/src/access_path.rs +++ b/types/src/access_path.rs @@ -89,10 +89,10 @@ impl fmt::Display for Path { write!(f, "Code({})", module_id) }, Path::Resource(struct_tag) => { - write!(f, "Resource({})", struct_tag) + write!(f, "Resource({})", struct_tag.to_canonical_string()) }, Path::ResourceGroup(struct_tag) => { - write!(f, "ResourceGroup({})", struct_tag) + write!(f, "ResourceGroup({})", struct_tag.to_canonical_string()) }, } } diff --git a/types/src/account_config/constants/addresses.rs b/types/src/account_config/constants/addresses.rs index c942549c12c83..1f2ca58bf4876 100644 --- a/types/src/account_config/constants/addresses.rs +++ b/types/src/account_config/constants/addresses.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::account_address::AccountAddress; -pub use move_core_types::language_storage::CORE_CODE_ADDRESS; +pub use move_core_types::language_storage::{CORE_CODE_ADDRESS, EXPERIMENTAL_CODE_ADDRESS}; pub fn aptos_test_root_address() -> AccountAddress { AccountAddress::from_hex_literal("0xA550C18") diff --git a/types/src/block_executor/config.rs b/types/src/block_executor/config.rs index f98f6f6c8009a..07e7968c9c3f9 100644 --- a/types/src/block_executor/config.rs +++ b/types/src/block_executor/config.rs @@ -4,6 +4,8 @@ use crate::on_chain_config::BlockGasLimitType; use serde::{Deserialize, Serialize}; +const DEFAULT_GAS_PRICE_TO_BURN: u64 = 90; + /// Local, per-node configurations for module cache. 
While caches can be persisted across multiple /// block executions, these configurations allow to specify cache sizes, etc. #[derive(Clone, Debug)] @@ -65,14 +67,20 @@ pub struct BlockExecutorConfigFromOnchain { pub block_gas_limit_type: BlockGasLimitType, enable_per_block_gas_limit: bool, per_block_gas_limit: Option, + gas_price_to_burn: Option, } impl BlockExecutorConfigFromOnchain { - pub fn new(block_gas_limit_type: BlockGasLimitType, enable_per_block_gas_limit: bool) -> Self { + pub fn new( + block_gas_limit_type: BlockGasLimitType, + enable_per_block_gas_limit: bool, + gas_price_to_burn: Option, + ) -> Self { Self { block_gas_limit_type, enable_per_block_gas_limit, per_block_gas_limit: None, + gas_price_to_burn, } } @@ -81,6 +89,7 @@ impl BlockExecutorConfigFromOnchain { block_gas_limit_type: BlockGasLimitType::NoLimit, enable_per_block_gas_limit: false, per_block_gas_limit: None, + gas_price_to_burn: None, } } @@ -90,6 +99,7 @@ impl BlockExecutorConfigFromOnchain { .map_or(BlockGasLimitType::NoLimit, BlockGasLimitType::Limit), enable_per_block_gas_limit: false, per_block_gas_limit: None, + gas_price_to_burn: None, } } @@ -105,11 +115,12 @@ impl BlockExecutorConfigFromOnchain { conflict_penalty_window: 8, use_module_publishing_block_conflict: true, include_user_txn_size_in_block_output: true, - add_block_limit_outcome_onchain: false, + add_block_limit_outcome_onchain: true, use_granular_resource_group_conflicts: false, }, enable_per_block_gas_limit: false, per_block_gas_limit: None, + gas_price_to_burn: None, } } @@ -118,6 +129,7 @@ impl BlockExecutorConfigFromOnchain { block_gas_limit_type: self.block_gas_limit_type, enable_per_block_gas_limit: self.enable_per_block_gas_limit, per_block_gas_limit: block_gas_limit_override, + gas_price_to_burn: self.gas_price_to_burn, } } @@ -128,6 +140,10 @@ impl BlockExecutorConfigFromOnchain { None } } + + pub fn gas_price_to_burn(&self) -> u64 { + self.gas_price_to_burn.unwrap_or(DEFAULT_GAS_PRICE_TO_BURN) + } } /// Configuration for the BlockExecutor. diff --git a/types/src/block_executor/partitioner.rs b/types/src/block_executor/partitioner.rs index 5ca67f3266463..cef3921d00bf2 100644 --- a/types/src/block_executor/partitioner.rs +++ b/types/src/block_executor/partitioner.rs @@ -3,8 +3,8 @@ use crate::transaction::{ analyzed_transaction::{AnalyzedTransaction, StorageLocation}, - signature_verified_transaction::{into_signature_verified_block, SignatureVerifiedTransaction}, - Transaction, + signature_verified_transaction::SignatureVerifiedTransaction, + AuxiliaryInfo, Transaction, }; use aptos_crypto::HashValue; use serde::{Deserialize, Serialize}; @@ -433,28 +433,66 @@ impl TransactionWithDependencies { pub struct ExecutableBlock { pub block_id: HashValue, pub transactions: ExecutableTransactions, + pub auxiliary_info: Vec, } impl ExecutableBlock { - pub fn new(block_id: HashValue, transactions: ExecutableTransactions) -> Self { + pub fn new( + block_id: HashValue, + transactions: ExecutableTransactions, + auxiliary_info: Vec, + ) -> Self { + match &transactions { + ExecutableTransactions::Unsharded(txns) => { + assert!(txns.len() == auxiliary_info.len()); + }, + ExecutableTransactions::Sharded(_) => { + // Not supporting auxiliary info here because the sharded executor is only for + // benchmark purpose right now. + // TODO: Revisit when we need it. 
+ assert!(auxiliary_info.is_empty()); + }, + } Self { block_id, transactions, + auxiliary_info, } } } impl From<(HashValue, Vec)> for ExecutableBlock { fn from((block_id, transactions): (HashValue, Vec)) -> Self { - Self::new(block_id, ExecutableTransactions::Unsharded(transactions)) + let auxiliary_info = transactions + .iter() + .map(|_| AuxiliaryInfo::new_empty()) + .collect(); + Self::new( + block_id, + ExecutableTransactions::Unsharded(transactions), + auxiliary_info, + ) } } -impl From<(HashValue, Vec)> for ExecutableBlock { - fn from((block_id, transactions): (HashValue, Vec)) -> Self { +impl + From<( + HashValue, + Vec, + Vec, + )> for ExecutableBlock +{ + fn from( + (block_id, transactions, auxiliary_info): ( + HashValue, + Vec, + Vec, + ), + ) -> Self { Self::new( block_id, - ExecutableTransactions::Unsharded(into_signature_verified_block(transactions)), + ExecutableTransactions::Unsharded(transactions), + auxiliary_info, ) } } diff --git a/types/src/lib.rs b/types/src/lib.rs index 4a50857d1f44d..cc7f4b40b436d 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -35,6 +35,7 @@ pub mod on_chain_config; pub mod proof; #[cfg(any(test, feature = "fuzzing"))] pub mod proptest_types; +pub mod quorum_store; pub mod randomness; pub mod serde_helper; pub mod stake_pool; diff --git a/types/src/mempool_status.rs b/types/src/mempool_status.rs index 33ef3583a8fb1..b541747e68815 100644 --- a/types/src/mempool_status.rs +++ b/types/src/mempool_status.rs @@ -64,6 +64,8 @@ pub enum MempoolStatusCode { // transaction didn't pass vm_validation VmError = 5, UnknownStatus = 6, + // The transaction filter has rejected the transaction + RejectedByFilter = 7, } impl TryFrom for MempoolStatusCode { @@ -78,6 +80,7 @@ impl TryFrom for MempoolStatusCode { 4 => Ok(MempoolStatusCode::InvalidUpdate), 5 => Ok(MempoolStatusCode::VmError), 6 => Ok(MempoolStatusCode::UnknownStatus), + 7 => Ok(MempoolStatusCode::RejectedByFilter), _ => Err("invalid StatusCode"), } } diff --git a/types/src/on_chain_config/aptos_features.rs b/types/src/on_chain_config/aptos_features.rs index 505a4a05ff9bc..e905f653a134b 100644 --- a/types/src/on_chain_config/aptos_features.rs +++ b/types/src/on_chain_config/aptos_features.rs @@ -239,9 +239,8 @@ impl FeatureFlag { FeatureFlag::JWK_CONSENSUS_PER_KEY_MODE, FeatureFlag::TRANSACTION_PAYLOAD_V2, FeatureFlag::ORDERLESS_TRANSACTIONS, - // TODO(grao): Enable priority fee feature flags. - // FeatureFlag::CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION, - // FeatureFlag::DISTRIBUTE_TRANSACTION_FEE, + FeatureFlag::CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION, + FeatureFlag::DISTRIBUTE_TRANSACTION_FEE, ] } } diff --git a/types/src/on_chain_config/execution_config.rs b/types/src/on_chain_config/execution_config.rs index aabd6a4da4638..37f6066b23175 100644 --- a/types/src/on_chain_config/execution_config.rs +++ b/types/src/on_chain_config/execution_config.rs @@ -19,6 +19,7 @@ pub enum OnChainExecutionConfig { // Reminder: Add V4 and future versions here, after Missing (order matters for enums). V4(ExecutionConfigV4), V5(ExecutionConfigV5), + V6(ExecutionConfigV6), } /// The public interface that exposes all values with safe fallback. 
@@ -32,6 +33,7 @@ impl OnChainExecutionConfig { OnChainExecutionConfig::V3(config) => config.transaction_shuffler_type.clone(), OnChainExecutionConfig::V4(config) => config.transaction_shuffler_type.clone(), OnChainExecutionConfig::V5(config) => config.transaction_shuffler_type.clone(), + OnChainExecutionConfig::V6(config) => config.transaction_shuffler_type.clone(), } } @@ -48,6 +50,7 @@ impl OnChainExecutionConfig { .map_or(BlockGasLimitType::NoLimit, BlockGasLimitType::Limit), OnChainExecutionConfig::V4(config) => config.block_gas_limit_type.clone(), OnChainExecutionConfig::V5(config) => config.block_gas_limit_type.clone(), + OnChainExecutionConfig::V6(config) => config.block_gas_limit_type.clone(), } } @@ -59,6 +62,19 @@ impl OnChainExecutionConfig { | OnChainExecutionConfig::V3(_) | OnChainExecutionConfig::V4(_) => false, OnChainExecutionConfig::V5(config) => config.enable_per_block_gas_limit, + OnChainExecutionConfig::V6(config) => config.enable_per_block_gas_limit, + } + } + + pub fn gas_price_to_burn(&self) -> Option { + match self { + OnChainExecutionConfig::Missing + | OnChainExecutionConfig::V1(_) + | OnChainExecutionConfig::V2(_) + | OnChainExecutionConfig::V3(_) + | OnChainExecutionConfig::V4(_) + | OnChainExecutionConfig::V5(_) => None, + OnChainExecutionConfig::V6(config) => Some(config.gas_price_to_burn), } } @@ -66,6 +82,7 @@ impl OnChainExecutionConfig { BlockExecutorConfigFromOnchain::new( self.block_gas_limit_type(), self.enable_per_block_gas_limit(), + self.gas_price_to_burn(), ) } @@ -79,17 +96,19 @@ impl OnChainExecutionConfig { OnChainExecutionConfig::V3(config) => config.transaction_deduper_type.clone(), OnChainExecutionConfig::V4(config) => config.transaction_deduper_type.clone(), OnChainExecutionConfig::V5(config) => config.transaction_deduper_type.clone(), + OnChainExecutionConfig::V6(config) => config.transaction_deduper_type.clone(), } } /// The default values to use for new networks, e.g., devnet, forge. /// Features that are ready for deployment can be enabled here. 
pub fn default_for_genesis() -> Self { - OnChainExecutionConfig::V5(ExecutionConfigV5 { + OnChainExecutionConfig::V6(ExecutionConfigV6 { transaction_shuffler_type: TransactionShufflerType::default_for_genesis(), block_gas_limit_type: BlockGasLimitType::default_for_genesis(), enable_per_block_gas_limit: false, transaction_deduper_type: TransactionDeduperType::TxnHashAndAuthenticatorV1, + gas_price_to_burn: 90, }) } @@ -167,6 +186,15 @@ pub struct ExecutionConfigV5 { pub transaction_deduper_type: TransactionDeduperType, } +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +pub struct ExecutionConfigV6 { + pub transaction_shuffler_type: TransactionShufflerType, + pub block_gas_limit_type: BlockGasLimitType, + pub enable_per_block_gas_limit: bool, + pub transaction_deduper_type: TransactionDeduperType, + pub gas_price_to_burn: u64, +} + #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] #[serde(rename_all = "snake_case")] // cannot use tag = "type" as nested enums cannot work, and bcs doesn't support it pub enum TransactionShufflerType { diff --git a/types/src/proof/unit_tests/proof_test.rs b/types/src/proof/unit_tests/proof_test.rs index e7fe455443e70..18a5c5cdacf61 100644 --- a/types/src/proof/unit_tests/proof_test.rs +++ b/types/src/proof/unit_tests/proof_test.rs @@ -288,6 +288,7 @@ fn test_verify_transaction() { Some(state_root1_hash), /* gas_used = */ 0, /* major_status = */ ExecutionStatus::Success, + None, ); let txn_info1_hash = txn_info1.hash(); @@ -321,6 +322,7 @@ fn test_verify_transaction() { Some(state_root1_hash), /* gas_used = */ 0, /* major_status = */ ExecutionStatus::Success, + None, ); let proof = TransactionInfoWithProof::new(ledger_info_to_transaction_info_proof, fake_txn_info); assert!(proof.verify(&ledger_info, 1).is_err()); @@ -368,7 +370,7 @@ fn test_accumulator_extension_proof() { #[test] fn test_transaction_info_list_with_proof() { // Create transaction info list proof - let transaction_info_list_proof = create_single_transaction_info_proof(None, None, None); + let transaction_info_list_proof = create_single_transaction_info_proof(None, None, None, None); // Verify first transaction version must match the proof let empty_ledger_info = LedgerInfo::new(BlockInfo::empty(), HashValue::zero()); @@ -411,7 +413,7 @@ fn test_transaction_list_with_proof() { transactions.clone(), Some(vec![vec![event.clone()]]), Some(1), - create_single_transaction_info_proof(None, None, None), + create_single_transaction_info_proof(None, None, None, None), ); // Verify first transaction version must match the proof @@ -427,7 +429,7 @@ fn test_transaction_list_with_proof() { // Verify transaction hashes match but info root hash verification fails (ledger info expected zero root hash) let transaction_list_proof = - create_single_transaction_info_proof(Some(transactions[0].hash()), None, None); + create_single_transaction_info_proof(Some(transactions[0].hash()), None, None, None); let transaction_list_with_proof = TransactionListWithProof::new( transactions.clone(), Some(vec![vec![event.clone()]]), @@ -451,6 +453,7 @@ fn test_transaction_list_with_proof() { Some(transactions[0].hash()), Some(event.hash()), None, + None, ); let transaction_list_with_proof = TransactionListWithProof::new( transactions, @@ -500,6 +503,8 @@ fn test_transaction_and_output_list_with_proof() { Some(txn_hash), Some(event_root_hash), Some(write_set_hash), + // TODO(grao): Test a real one. 
+ None, ); // Verify first transaction version must match the proof @@ -515,6 +520,7 @@ fn test_transaction_and_output_list_with_proof() { Some(txn_hash), None, Some(write_set_hash), + None, ); let ledger_info = create_ledger_info_at_version0(root_hash); transaction_output_list_proof @@ -528,6 +534,7 @@ fn test_transaction_and_output_list_with_proof() { Some(txn_hash), Some(event_root_hash), None, + None, ); let ledger_info = create_ledger_info_at_version0(root_hash); transaction_output_list_proof @@ -541,6 +548,7 @@ fn test_transaction_and_output_list_with_proof() { Some(txn_hash), Some(event_root_hash), Some(write_set_hash), + None, ); let ledger_info = create_ledger_info_at_version0(root_hash); transaction_output_list_proof @@ -559,9 +567,14 @@ fn create_txn_output_list_with_proof( transaction_hash: Option, event_root_hash: Option, state_change_hash: Option, + auxiliary_info_hash: Option, ) -> (HashValue, TransactionOutputListWithProof) { - let transaction_info_list_proof = - create_single_transaction_info_proof(transaction_hash, event_root_hash, state_change_hash); + let transaction_info_list_proof = create_single_transaction_info_proof( + transaction_hash, + event_root_hash, + state_change_hash, + auxiliary_info_hash, + ); let root_hash = transaction_info_list_proof.transaction_infos[0].hash(); let transaction_output_list_proof = TransactionOutputListWithProof::new( vec![(transaction.clone(), transaction_output.clone())], @@ -576,11 +589,13 @@ fn create_single_transaction_info_proof( transaction_hash: Option, event_root_hash: Option, state_change_hash: Option, + auxiliary_info_hash: Option, ) -> TransactionInfoListWithProof { let transaction_infos = vec![create_transaction_info( transaction_hash, event_root_hash, state_change_hash, + auxiliary_info_hash, )]; TransactionInfoListWithProof::new(AccumulatorRangeProof::new_empty(), transaction_infos) } @@ -589,6 +604,7 @@ fn create_transaction_info( transaction_hash: Option, event_root_hash: Option, state_change_hash: Option, + auxiliary_info_hash: Option, ) -> TransactionInfo { TransactionInfo::new( transaction_hash.unwrap_or_else(HashValue::random), @@ -597,6 +613,7 @@ fn create_transaction_info( Some(HashValue::random()), 0, ExecutionStatus::MiscellaneousError(None), + auxiliary_info_hash, ) } diff --git a/types/src/quorum_store/mod.rs b/types/src/quorum_store/mod.rs new file mode 100644 index 0000000000000..d1b5b86ede101 --- /dev/null +++ b/types/src/quorum_store/mod.rs @@ -0,0 +1,57 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; +use serde::{Deserialize, Serialize}; +use std::{ + cmp::Ordering, + fmt::{Display, Formatter}, +}; + +/// A unique identifier for a batch of transactions in quorum store +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash, CryptoHasher, BCSCryptoHash, +)] +pub struct BatchId { + pub id: u64, + /// A number that is stored in the DB and updated only if the value does not exist in + /// the DB: (a) at the start of an epoch, or (b) the DB was wiped. When the nonce is updated, + /// id starts again at 0. Using the current system time allows the nonce to be ordering. 
+ pub nonce: u64, +} + +impl BatchId { + pub fn new(nonce: u64) -> Self { + Self { id: 0, nonce } + } + + pub fn new_for_test(id: u64) -> Self { + Self { id, nonce: 0 } + } + + pub fn increment(&mut self) { + self.id += 1; + } +} + +impl PartialOrd for BatchId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for BatchId { + fn cmp(&self, other: &Self) -> Ordering { + match self.nonce.cmp(&other.nonce) { + Ordering::Equal => {}, + ordering => return ordering, + } + self.id.cmp(&other.id) + } +} + +impl Display for BatchId { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "({}, {})", self.id, self.nonce) + } +} diff --git a/types/src/transaction/authenticator.rs b/types/src/transaction/authenticator.rs index a654c2ce068a8..0dc27ca3ea86e 100644 --- a/types/src/transaction/authenticator.rs +++ b/types/src/transaction/authenticator.rs @@ -1047,6 +1047,12 @@ impl MultiKey { "The number of required signatures is 0." ); + ensure!( + public_keys.len() <= MAX_NUM_OF_SIGS, // This max number of signatures is also the max number of public keys. + "The number of public keys is greater than {}.", + MAX_NUM_OF_SIGS + ); + ensure!( public_keys.len() >= signatures_required as usize, "The number of public keys is smaller than the number of required signatures, {} < {}", @@ -2089,4 +2095,21 @@ mod tests { assert!(signed_txn.verify_signature().is_err()); } + + #[test] + fn test_multi_key_with_33_keys_fails() { + let mut keys = Vec::new(); + for _ in 0..33 { + let private_key = Ed25519PrivateKey::generate(&mut rand::thread_rng()); + let public_key = private_key.public_key(); + keys.push(AnyPublicKey::ed25519(public_key)); + } + + let result = MultiKey::new(keys, 3); + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + "The number of public keys is greater than 32." + ); + } } diff --git a/types/src/transaction/block_epilogue.rs b/types/src/transaction/block_epilogue.rs index d7c5f6bd58f2a..9efeda2cc8618 100644 --- a/types/src/transaction/block_epilogue.rs +++ b/types/src/transaction/block_epilogue.rs @@ -21,16 +21,37 @@ pub enum BlockEpiloguePayload { block_id: HashValue, block_end_info: BlockEndInfo, }, + V1 { + block_id: HashValue, + block_end_info: BlockEndInfo, + fee_distribution: FeeDistribution, + }, } impl BlockEpiloguePayload { pub fn try_as_block_end_info(&self) -> Option<&BlockEndInfo> { match self { BlockEpiloguePayload::V0 { block_end_info, .. } => Some(block_end_info), + BlockEpiloguePayload::V1 { block_end_info, .. 
} => Some(block_end_info), } } } +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))] +pub enum FeeDistribution { + V0 { + // Validator index -> Octa + amount: BTreeMap, + }, +} + +impl FeeDistribution { + pub fn new(amount: BTreeMap) -> Self { + Self::V0 { amount } + } +} + #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum BlockEndInfo { V0 { diff --git a/types/src/transaction/mod.rs b/types/src/transaction/mod.rs index 75b7e27de026a..14a6604401537 100644 --- a/types/src/transaction/mod.rs +++ b/types/src/transaction/mod.rs @@ -53,7 +53,7 @@ pub mod use_case; pub mod user_transaction_context; pub mod webauthn; -pub use self::block_epilogue::{BlockEndInfo, BlockEpiloguePayload}; +pub use self::block_epilogue::{BlockEndInfo, BlockEpiloguePayload, FeeDistribution}; use crate::{ block_metadata_ext::BlockMetadataExt, contract_event::TransactionEvent, @@ -1860,6 +1860,7 @@ impl TransactionInfo { state_checkpoint_hash: Option, gas_used: u64, status: ExecutionStatus, + auxiliary_info_hash: Option, ) -> Self { Self::V0(TransactionInfoV0::new( transaction_hash, @@ -1868,6 +1869,7 @@ impl TransactionInfo { state_checkpoint_hash, gas_used, status, + auxiliary_info_hash, )) } @@ -1884,6 +1886,7 @@ impl TransactionInfo { state_checkpoint_hash, gas_used, status, + None, ) } @@ -1896,6 +1899,7 @@ impl TransactionInfo { None, 0, ExecutionStatus::Success, + None, ) } } @@ -1936,8 +1940,8 @@ pub struct TransactionInfoV0 { /// only, like per block. state_checkpoint_hash: Option, - /// Potentially summarizes all evicted items from state. Always `None` for now. - state_cemetery_hash: Option, + /// The hash value summarizing PersistedAuxiliaryInfo. + auxiliary_info_hash: Option, } impl TransactionInfoV0 { @@ -1948,6 +1952,7 @@ impl TransactionInfoV0 { state_checkpoint_hash: Option, gas_used: u64, status: ExecutionStatus, + auxiliary_info_hash: Option, ) -> Self { Self { gas_used, @@ -1956,7 +1961,7 @@ impl TransactionInfoV0 { event_root_hash, state_change_hash, state_checkpoint_hash, - state_cemetery_hash: None, + auxiliary_info_hash, } } @@ -1976,6 +1981,10 @@ impl TransactionInfoV0 { self.state_checkpoint_hash } + pub fn auxiliary_info_hash(&self) -> Option { + self.auxiliary_info_hash + } + pub fn ensure_state_checkpoint_hash(&self) -> Result { self.state_checkpoint_hash .ok_or_else(|| format_err!("State checkpoint hash not present in TransactionInfo")) @@ -2499,13 +2508,25 @@ impl From for Transaction { } impl Transaction { - pub fn block_epilogue(block_id: HashValue, block_end_info: BlockEndInfo) -> Self { + pub fn block_epilogue_v0(block_id: HashValue, block_end_info: BlockEndInfo) -> Self { Self::BlockEpilogue(BlockEpiloguePayload::V0 { block_id, block_end_info, }) } + pub fn block_epilogue_v1( + block_id: HashValue, + block_end_info: BlockEndInfo, + fee_distribution: FeeDistribution, + ) -> Self { + Self::BlockEpilogue(BlockEpiloguePayload::V1 { + block_id, + block_end_info, + fee_distribution, + }) + } + pub fn try_as_signed_user_txn(&self) -> Option<&SignedTransaction> { match self { Transaction::UserTransaction(txn) => Some(txn), @@ -2603,20 +2624,140 @@ pub trait BlockExecutableTransaction: Sync + Send + Clone + 'static { + Debug + DeserializeOwned + Serialize; - type Value: Send + Sync + Debug + Clone + TransactionWrite; + type Value: Send + Sync + Debug + Clone + Eq + PartialEq + TransactionWrite; type Event: Send + Sync + Debug + Clone + TransactionEvent; /// Size of the user transaction in bytes, 0 
otherwise fn user_txn_bytes_len(&self) -> usize; + + /// None if it is not a user transaction. + fn try_as_signed_user_txn(&self) -> Option<&SignedTransaction> { + None + } + + fn from_txn(_txn: Transaction) -> Self { + unimplemented!() + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ViewFunctionError { + // This is to represent errors are from a MoveAbort and has error info from the module metadata to display. + // The ExecutionStatus is used to construct the error message in the same way as MoveAborts for entry functions. + MoveAbort(ExecutionStatus, Option), + // This is a generic error message that takes in a string and display it in the error response. + ErrorMessage(String, Option), +} + +impl std::fmt::Display for ViewFunctionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ViewFunctionError::MoveAbort(status, vm_status) => { + write!( + f, + "Execution status: {:?}, VM status: {:?}", + status, vm_status + ) + }, + ViewFunctionError::ErrorMessage(msg, vm_status) => { + write!(f, "Error: {}, VM status: {:?}", msg, vm_status) + }, + } + } } pub struct ViewFunctionOutput { - pub values: Result>>, + pub values: Result>, ViewFunctionError>, pub gas_used: u64, } impl ViewFunctionOutput { - pub fn new(values: Result>>, gas_used: u64) -> Self { + pub fn new(values: Result>, ViewFunctionError>, gas_used: u64) -> Self { Self { values, gas_used } } + + pub fn new_ok(values: Vec>, gas_used: u64) -> Self { + Self { + values: Ok(values), + gas_used, + } + } + + pub fn new_error_message( + message: String, + vm_status: Option, + gas_used: u64, + ) -> Self { + Self { + values: Err(ViewFunctionError::ErrorMessage(message, vm_status)), + gas_used, + } + } + + pub fn new_move_abort_error( + status: ExecutionStatus, + vm_status: Option, + gas_used: u64, + ) -> Self { + Self { + values: Err(ViewFunctionError::MoveAbort(status, vm_status)), + gas_used, + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct AuxiliaryInfo { + persisted_info: PersistedAuxiliaryInfo, + ephemeral_info: Option, +} + +impl AuxiliaryInfo { + pub fn new( + persisted_info: PersistedAuxiliaryInfo, + ephemeral_info: Option, + ) -> Self { + Self { + persisted_info, + ephemeral_info, + } + } + + pub fn new_empty() -> Self { + Self { + persisted_info: PersistedAuxiliaryInfo::None, + ephemeral_info: None, + } + } + + pub fn into_persisted_info(self) -> PersistedAuxiliaryInfo { + self.persisted_info + } + + pub fn persisted_info(&self) -> &PersistedAuxiliaryInfo { + &self.persisted_info + } + + pub fn ephemeral_info(&self) -> &Option { + &self.ephemeral_info + } +} + +#[derive( + BCSCryptoHash, Clone, Copy, CryptoHasher, Debug, Eq, Serialize, Deserialize, PartialEq, +)] +#[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))] +pub enum PersistedAuxiliaryInfo { + None, + // The index of the transaction in a block (after shuffler, before execution). + // Note that this would be slightly different from the index of transactions that get committed + // onchain, as this considers transactions that may get discarded. + V1 { transaction_index: u32 }, +} + +#[derive(Debug, Clone, Copy)] +pub struct EphemeralAuxiliaryInfo { + // TODO(grao): After execution pool is implemented we might want this information be persisted + // onchain? 
+ pub proposer_index: u64, } diff --git a/types/src/transaction/script.rs b/types/src/transaction/script.rs index c2fc4cea836a4..a2ee80a6b27d9 100644 --- a/types/src/transaction/script.rs +++ b/types/src/transaction/script.rs @@ -155,7 +155,10 @@ impl EntryFunction { self.module.address, self.module.name().to_string(), self.function.to_string(), - self.ty_args.iter().map(|ty| ty.to_string()).collect(), + self.ty_args + .iter() + .map(|ty| ty.to_canonical_string()) + .collect(), self.args.clone(), ) } diff --git a/types/src/transaction/signature_verified_transaction.rs b/types/src/transaction/signature_verified_transaction.rs index c524593a87198..e708ce49504ae 100644 --- a/types/src/transaction/signature_verified_transaction.rs +++ b/types/src/transaction/signature_verified_transaction.rs @@ -4,7 +4,7 @@ use crate::{ contract_event::ContractEvent, state_store::state_key::StateKey, - transaction::{BlockExecutableTransaction, Transaction}, + transaction::{BlockExecutableTransaction, SignedTransaction, Transaction}, write_set::WriteOp, }; use aptos_crypto::{hash::CryptoHash, HashValue}; @@ -98,6 +98,17 @@ impl BlockExecutableTransaction for SignatureVerifiedTransaction { _ => 0, } } + + fn try_as_signed_user_txn(&self) -> Option<&SignedTransaction> { + match self { + SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(txn)) => Some(txn), + _ => None, + } + } + + fn from_txn(txn: Transaction) -> Self { + txn.into() + } } impl From for SignatureVerifiedTransaction { diff --git a/types/src/write_set.rs b/types/src/write_set.rs index 8208f6616344b..c762729da9c43 100644 --- a/types/src/write_set.rs +++ b/types/src/write_set.rs @@ -89,6 +89,7 @@ pub enum BaseStateOp { Modification(StateValue), Deletion(StateValueMetadata), MakeHot { prev_slot: StateSlot }, + Eviction { prev_slot: StateSlot }, } impl BaseStateOp { @@ -99,6 +100,7 @@ impl BaseStateOp { Creation(val) | Modification(val) => Some(val), Deletion(_) => None, MakeHot { prev_slot } => prev_slot.as_state_value_opt(), + Eviction { prev_slot } => prev_slot.as_state_value_opt(), } } @@ -107,7 +109,7 @@ impl BaseStateOp { match self { Creation(_) | Modification(_) | Deletion(_) => Some(WriteOp::ref_cast(self)), - MakeHot { .. } => None, + MakeHot { .. } | Eviction { .. } => None, } } @@ -120,7 +122,7 @@ impl BaseStateOp { match self { Creation(_) | Modification(_) | Deletion(_) => true, - MakeHot { .. } => false, + MakeHot { .. } | Eviction { .. } => false, } } } @@ -140,7 +142,9 @@ impl WriteOp { BaseStateOp::Creation(v) => Creation(v.bytes().clone()), BaseStateOp::Modification(v) => Modification(v.bytes().clone()), BaseStateOp::Deletion { .. } => Deletion, - BaseStateOp::MakeHot { .. } => unreachable!("malformed write op"), + BaseStateOp::MakeHot { .. } | BaseStateOp::Eviction { .. } => { + unreachable!("malformed write op") + }, }, Some(metadata) => match &self.0 { BaseStateOp::Creation(v) => CreationWithMetadata { @@ -152,7 +156,9 @@ impl WriteOp { metadata, }, BaseStateOp::Deletion { .. } => DeletionWithMetadata { metadata }, - BaseStateOp::MakeHot { .. } => unreachable!("malformed write op"), + BaseStateOp::MakeHot { .. } | BaseStateOp::Eviction { .. } => { + unreachable!("malformed write op") + }, }, } } @@ -165,7 +171,8 @@ impl WriteOp { use BaseStateOp::*; match (&mut op.0, other.0) { - (MakeHot {..}, ..) | (.., MakeHot {..}) => unreachable!("malformed write op"), + (MakeHot { .. }, ..) | (.., MakeHot { .. }) + | (Eviction { .. }, ..) | (.., Eviction { .. }) => unreachable!("malformed write op"), (Modification { .. 
} | Creation { .. }, Creation { .. }) // create existing | (Deletion { .. }, Modification { .. } | Deletion { .. }) // delete or modify already deleted => { @@ -236,7 +243,7 @@ impl WriteOp { match &self.0 { Creation(v) | Modification(v) => v.metadata(), Deletion(meta) => meta, - MakeHot { .. } => unreachable!("malformed write op"), + MakeHot { .. } | Eviction { .. } => unreachable!("malformed write op"), } } @@ -246,7 +253,7 @@ impl WriteOp { match &mut self.0 { Creation(v) | Modification(v) => v.metadata_mut(), Deletion(meta) => meta, - MakeHot { .. } => unreachable!("malformed write op"), + MakeHot { .. } | Eviction { .. } => unreachable!("malformed write op"), } } @@ -256,7 +263,7 @@ impl WriteOp { match self.0 { Creation(v) | Modification(v) => v.into_metadata(), Deletion(meta) => meta, - MakeHot { .. } => unreachable!("malformed write op"), + MakeHot { .. } | Eviction { .. } => unreachable!("malformed write op"), } } @@ -306,7 +313,7 @@ impl WriteOp { write_len: get_size().expect("Modification must have size"), }, Deletion { .. } => WriteOpSize::Deletion, - MakeHot { .. } => unreachable!("malformed write op"), + MakeHot { .. } | Eviction { .. } => unreachable!("malformed write op"), } } @@ -324,7 +331,7 @@ impl WriteOp { match &self.0 { Creation(_) | Modification(_) => false, Deletion(_) => true, - MakeHot { .. } => unreachable!("malformed write op"), + MakeHot { .. } | Eviction { .. } => unreachable!("malformed write op"), } } } @@ -455,7 +462,9 @@ impl TransactionWrite for WriteOp { BaseStateOp::Creation { .. } => Creation, BaseStateOp::Modification { .. } => Modification, BaseStateOp::Deletion { .. } => Deletion, - BaseStateOp::MakeHot { .. } => unreachable!("malformed write op"), + BaseStateOp::MakeHot { .. } | BaseStateOp::Eviction { .. } => { + unreachable!("malformed write op") + }, } } @@ -465,7 +474,7 @@ impl TransactionWrite for WriteOp { match &mut self.0 { Creation(v) | Modification(v) => v.set_bytes(bytes), Deletion { .. } => (), - MakeHot { .. } => unreachable!("malformed write op"), + MakeHot { .. } | Eviction { .. } => unreachable!("malformed write op"), } } } @@ -497,7 +506,7 @@ impl Debug for WriteOp { Deletion(metadata) => { write!(f, "Deletion(metadata:{:?})", metadata,) }, - MakeHot { .. } => unreachable!("malformed write op"), + MakeHot { .. } | Eviction { .. } => unreachable!("malformed write op"), } } } @@ -541,6 +550,9 @@ impl Debug for HotStateOp { MakeHot { prev_slot } => { write!(f, "MakeHot(prev_slot:{:?})", prev_slot) }, + Eviction { prev_slot } => { + write!(f, "Eviction(prev_slot:{:?})", prev_slot) + }, Creation(_) | Modification(_) | Deletion(_) => { unreachable!("malformed hot state op") },