diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..39b059596f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +.git +target \ No newline at end of file diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9b1e99a1cb..91ada621c4 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -354,13 +354,12 @@ jobs: cd wasm bash build-release popd - mv wasm/release/kaspa-wasm32-sdk.zip wasm/release/kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip - name: Upload WASM build to GitHub uses: actions/upload-artifact@v4 with: - name: kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip - path: wasm/release/kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip + name: kaspa-wasm32-sdk-${{ env.SHORT_SHA }} + path: wasm/release/ build-release: name: Build Linux Release runs-on: ubuntu-latest @@ -405,4 +404,4 @@ jobs: # Run build script for musl toolchain source musl-toolchain/build.sh # Build for musl - cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-musl + cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet-daemon --release --target x86_64-unknown-linux-musl diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml index 537eeef898..4298b1d68f 100644 --- a/.github/workflows/deploy.yaml +++ b/.github/workflows/deploy.yaml @@ -1,200 +1,186 @@ -name: Build and upload assets -on: - release: - types: [ published ] - -jobs: - build: - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - # Build gnu-linux on ubuntu-18.04 and musl on ubuntu latest - # os: [ ubuntu-18.04, ubuntu-latest, windows-latest, macos-latest ] - os: [ ubuntu-latest, windows-latest, macos-latest ] - name: Building, ${{ matrix.os }} - steps: - - name: Fix CRLF on Windows - if: runner.os == 'Windows' - run: git config --global core.autocrlf false - - - name: Checkout sources - uses: actions/checkout@v3 - - - name: Install Protoc - uses: arduino/setup-protoc@v3 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Install stable toolchain - uses: dtolnay/rust-toolchain@stable - - - name: Cache Cargo Build Outputs - uses: actions/cache@v3 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - - - name: Cache Toolchain - uses: actions/cache@v4 - with: - path: | - ~/x-tools - key: ${{ runner.os }}-musl-${{ hashFiles('**/musl-toolchain/preset.sh') }} - restore-keys: | - ${{ runner.os }}-musl- - - - name: Build on Linux - if: runner.os == 'Linux' - # We're using musl to make the binaries statically linked and portable - run: | - # Run build script for musl toolchain - source musl-toolchain/build.sh - - # Go back to the workspace - cd $GITHUB_WORKSPACE - - # Build for musl - cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-musl - mkdir bin || true - cp target/x86_64-unknown-linux-musl/release/kaspad bin/ - cp target/x86_64-unknown-linux-musl/release/rothschild bin/ - cp target/x86_64-unknown-linux-musl/release/kaspa-wallet bin/ - archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip" - asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip" - zip -r "${archive}" ./bin/* - echo "archive=${archive}" >> $GITHUB_ENV - echo "asset_name=${asset_name}" >> $GITHUB_ENV - - - name: Build on Windows - if: runner.os == 'Windows' - shell: bash - run: | - cargo build --bin 
kaspad --release - cargo build --bin rothschild --release - cargo build --bin kaspa-wallet --release - mkdir bin || true - cp target/release/kaspad.exe bin/ - cp target/release/rothschild.exe bin/ - cp target/release/kaspa-wallet.exe bin/ - archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-win64.zip" - asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-win64.zip" - powershell "Compress-Archive bin/* \"${archive}\"" - echo "archive=${archive}" >> $GITHUB_ENV - echo "asset_name=${asset_name}" >> $GITHUB_ENV - - - name: Build on MacOS - if: runner.os == 'macOS' - run: | - cargo build --bin kaspad --release - cargo build --bin rothschild --release - cargo build --bin kaspa-wallet --release - mkdir bin || true - cp target/release/kaspad bin/ - cp target/release/rothschild bin/ - cp target/release/kaspa-wallet bin/ - archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-osx.zip" - asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-osx.zip" - zip -r "${archive}" ./bin/* - echo "archive=${archive}" >> $GITHUB_ENV - echo "asset_name=${asset_name}" >> $GITHUB_ENV - - - name: Upload release asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ github.event.release.upload_url }} - asset_path: "./${{ env.archive }}" - asset_name: "${{ env.asset_name }}" - asset_content_type: application/zip - - build-wasm: - runs-on: ubuntu-latest - name: Building WASM32 SDK - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - - name: Install Protoc - uses: arduino/setup-protoc@v3 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Install stable toolchain - uses: dtolnay/rust-toolchain@stable - - - name: Install llvm - id: install_llvm - continue-on-error: true - run: | - wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc - sudo apt-get install -y clang-15 lldb-15 lld-15 clangd-15 clang-tidy-15 clang-format-15 clang-tools-15 llvm-15-dev lld-15 lldb-15 llvm-15-tools libomp-15-dev libc++-15-dev libc++abi-15-dev libclang-common-15-dev libclang-15-dev libclang-cpp15-dev libunwind-15-dev - # Make Clang 15 default - sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/lib/llvm-15/bin/clang++ 100 - sudo update-alternatives --install /usr/bin/clang clang /usr/lib/llvm-15/bin/clang 100 - sudo update-alternatives --install /usr/bin/clang-format clang-format /usr/lib/llvm-15/bin/clang-format 100 - sudo update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/lib/llvm-15/bin/clang-tidy 100 - sudo update-alternatives --install /usr/bin/run-clang-tidy run-clang-tidy /usr/lib/llvm-15/bin/run-clang-tidy 100 - # Alias cc to clang - sudo update-alternatives --install /usr/bin/cc cc /usr/lib/llvm-15/bin/clang 0 - sudo update-alternatives --install /usr/bin/c++ c++ /usr/lib/llvm-15/bin/clang++ 0 - - - name: Install gcc-multilib - # gcc-multilib allows clang to find gnu libraries properly - run: sudo apt install -y gcc-multilib - - - name: Install stable toolchain - if: steps.install_llvm.outcome == 'success' && steps.install_llvm.conclusion == 'success' - uses: dtolnay/rust-toolchain@stable - - - name: Install wasm-pack - run: cargo install wasm-pack - - - name: Add wasm32 target - run: rustup target add wasm32-unknown-unknown - - - name: Install NodeJS - uses: actions/setup-node@v4 - with: - node-version: '20' - - - name: Install NodeJS dependencies - run: npm install --global typedoc typescript - - - name: Cache - uses: 
actions/cache@v3 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - - - name: Build WASM32 SDK - run: | - cd wasm - bash build-release - mv release/kaspa-wasm32-sdk.zip ../kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip - - archive="kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip" - asset_name="kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip" - echo "archive=${archive}" >> $GITHUB_ENV - echo "asset_name=${asset_name}" >> $GITHUB_ENV - - - name: Upload WASM32 SDK - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ github.event.release.upload_url }} - asset_path: "./${{ env.archive }}" - asset_name: "${{ env.asset_name }}" - asset_content_type: application/zip +name: Build and upload assets +on: + release: + types: [ published ] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + # Build gnu-linux on ubuntu-18.04 and musl on ubuntu latest + # os: [ ubuntu-18.04, ubuntu-latest, windows-latest, macos-latest ] + os: [ ubuntu-latest, windows-latest, macos-latest ] + name: Building, ${{ matrix.os }} + steps: + - name: Fix CRLF on Windows + if: runner.os == 'Windows' + run: git config --global core.autocrlf false + + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Install Protoc + uses: arduino/setup-protoc@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Install stable toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Cache Cargo Build Outputs + uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache Toolchain + uses: actions/cache@v4 + with: + path: | + ~/x-tools + key: ${{ runner.os }}-musl-${{ hashFiles('**/musl-toolchain/preset.sh') }} + restore-keys: | + ${{ runner.os }}-musl- + + - name: Build on Linux + if: runner.os == 'Linux' + # We're using musl to make the binaries statically linked and portable + run: | + # Run build script for musl toolchain + source musl-toolchain/build.sh + + # Go back to the workspace + cd $GITHUB_WORKSPACE + + # Build for musl + cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet-daemon --release --target x86_64-unknown-linux-musl + mkdir bin || true + cp target/x86_64-unknown-linux-musl/release/kaspad bin/ + cp target/x86_64-unknown-linux-musl/release/rothschild bin/ + cp target/x86_64-unknown-linux-musl/release/kaspa-wallet-daemon bin/ + archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip" + zip -r "${archive}" ./bin/* + echo "archive=${archive}" >> $GITHUB_ENV + + - name: Build on Windows + if: runner.os == 'Windows' + shell: bash + run: | + cargo build --bin kaspad --release + cargo build --bin rothschild --release + cargo build --bin kaspa-wallet-daemon --release + mkdir bin || true + cp target/release/kaspad.exe bin/ + cp target/release/rothschild.exe bin/ + cp target/release/kaspa-wallet-daemon.exe bin/ + archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-win64.zip" + powershell "Compress-Archive bin/* \"${archive}\"" + echo "archive=${archive}" >> $GITHUB_ENV + + - name: Build on MacOS + if: runner.os == 'macOS' + run: | + cargo build --bin kaspad --release + cargo build --bin rothschild --release + cargo build --bin kaspa-wallet-daemon 
--release + mkdir bin || true + cp target/release/kaspad bin/ + cp target/release/rothschild bin/ + cp target/release/kaspa-wallet-daemon bin/ + archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-osx.zip" + zip -r "${archive}" ./bin/* + echo "archive=${archive}" >> $GITHUB_ENV + + - name: Upload release asset + uses: softprops/action-gh-release@v2 + with: + files: ./${{ env.archive }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + build-wasm: + runs-on: ubuntu-latest + name: Building WASM32 SDK + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Install Protoc + uses: arduino/setup-protoc@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Install stable toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Install llvm + id: install_llvm + continue-on-error: true + run: | + wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc + sudo apt-get install -y clang-15 lldb-15 lld-15 clangd-15 clang-tidy-15 clang-format-15 clang-tools-15 llvm-15-dev lld-15 lldb-15 llvm-15-tools libomp-15-dev libc++-15-dev libc++abi-15-dev libclang-common-15-dev libclang-15-dev libclang-cpp15-dev libunwind-15-dev + # Make Clang 15 default + sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/lib/llvm-15/bin/clang++ 100 + sudo update-alternatives --install /usr/bin/clang clang /usr/lib/llvm-15/bin/clang 100 + sudo update-alternatives --install /usr/bin/clang-format clang-format /usr/lib/llvm-15/bin/clang-format 100 + sudo update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/lib/llvm-15/bin/clang-tidy 100 + sudo update-alternatives --install /usr/bin/run-clang-tidy run-clang-tidy /usr/lib/llvm-15/bin/run-clang-tidy 100 + # Alias cc to clang + sudo update-alternatives --install /usr/bin/cc cc /usr/lib/llvm-15/bin/clang 0 + sudo update-alternatives --install /usr/bin/c++ c++ /usr/lib/llvm-15/bin/clang++ 0 + + - name: Install gcc-multilib + # gcc-multilib allows clang to find gnu libraries properly + run: sudo apt install -y gcc-multilib + + - name: Install stable toolchain + if: steps.install_llvm.outcome == 'success' && steps.install_llvm.conclusion == 'success' + uses: dtolnay/rust-toolchain@stable + + - name: Install wasm-pack + run: cargo install wasm-pack + + - name: Add wasm32 target + run: rustup target add wasm32-unknown-unknown + + - name: Install NodeJS + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install NodeJS dependencies + run: npm install --global typedoc typescript + + - name: Cache + uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Build WASM32 SDK + run: | + cd wasm + bash build-release + cd release + zip -q -r kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip kaspa-wasm32-sdk + archive="wasm/release/kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip" + echo "archive=${archive}" >> $GITHUB_ENV + + - name: Upload release asset + uses: softprops/action-gh-release@v2 + with: + files: ./${{ env.archive }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 0199232fe6..53625dde02 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ web-root **/.idea/ /rust-toolchain /.vscode/ +.zed **/db-* /testing/integration/testdata/dags_for_json_tests/goref-mainnet 
/testing/integration/testdata/dags_for_json_tests/goref-1.6M-tx-10K-blocks diff --git a/Cargo.lock b/Cargo.lock index c4e998e45a..e2a2ea78c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11,7 +11,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -57,7 +57,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", - "getrandom", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -324,7 +324,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -335,13 +335,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -486,22 +486,20 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.69.4" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", "cexpr", "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", + "itertools 0.13.0", "proc-macro2", "quote", "regex", - "rustc-hash 1.1.0", + "rustc-hash 2.1.1", "shlex", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -512,9 +510,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "blake2" @@ -578,7 +576,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", "syn_derive", ] @@ -829,9 +827,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.19" +version = "4.5.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" +checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" dependencies = [ "clap_builder", "clap_derive", @@ -839,9 +837,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.19" +version = "4.5.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" +checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" dependencies = [ "anstream", "anstyle", @@ -851,21 +849,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck", "proc-macro2", "quote", - "syn 
2.0.79", + "syn 2.0.110", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "colorchoice" @@ -965,7 +963,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.19", + "clap 4.5.51", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1030,7 +1028,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", "crossterm_winapi", "libc", "mio 0.8.11", @@ -1062,7 +1060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "typenum", ] @@ -1130,7 +1128,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -1139,8 +1137,18 @@ version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.10", + "darling_macro 0.20.10", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", ] [[package]] @@ -1154,7 +1162,21 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.79", + "syn 2.0.110", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.110", ] [[package]] @@ -1163,9 +1185,20 @@ version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core", + "darling_core 0.20.10", "quote", - "syn 2.0.79", + "syn 2.0.110", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", + "quote", + "syn 2.0.110", ] [[package]] @@ -1197,7 +1230,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -1236,10 +1269,10 @@ version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38" dependencies = [ - "darling", + "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -1249,7 +1282,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" dependencies = [ "derive_builder_core", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -1262,7 +1295,7 @@ dependencies = [ 
"proc-macro2", "quote", "rustc_version", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -1390,7 +1423,7 @@ checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c" dependencies = [ "num-traits", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -1467,7 +1500,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -1612,7 +1645,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -1669,6 +1702,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "r-efi", + "wasip2", +] + [[package]] name = "gimli" version = "0.31.1" @@ -2081,7 +2126,7 @@ dependencies = [ "http 0.2.12", "hyper 0.14.30", "log", - "rand", + "rand 0.8.5", "tokio", "url", "xmltree", @@ -2098,7 +2143,7 @@ dependencies = [ "delegate-display", "fancy_constructor", "js-sys", - "uuid 1.10.0", + "uuid 1.18.1", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -2211,15 +2256,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.13.0" @@ -2285,7 +2321,7 @@ dependencies = [ "local-ip-address", "log", "parking_lot", - "rand", + "rand 0.8.5", "rocksdb", "rv", "serde", @@ -2307,15 +2343,15 @@ dependencies = [ "borsh", "bs58", "faster-hex", - "getrandom", + "getrandom 0.2.15", "hmac", "js-sys", "kaspa-consensus-core", "kaspa-utils", "once_cell", "pbkdf2", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "ripemd", "secp256k1", "serde", @@ -2387,7 +2423,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "tokio", ] @@ -2423,7 +2459,7 @@ dependencies = [ "log", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "rand_distr", "rayon", "rocksdb", @@ -2452,7 +2488,7 @@ dependencies = [ "kaspa-txscript", "kaspa-utils", "kaspa-wasm-core", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2470,12 +2506,13 @@ dependencies = [ "arc-swap", "async-trait", "bincode", + "bitflags 2.9.4", "borsh", "cfg-if 1.0.0", "criterion", "faster-hex", "futures-util", - "getrandom", + "getrandom 0.2.15", "itertools 0.13.0", "js-sys", "kaspa-addresses", @@ -2486,7 +2523,7 @@ dependencies = [ "kaspa-muhash", "kaspa-txscript-errors", "kaspa-utils", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2534,7 +2571,7 @@ dependencies = [ "kaspa-hashes", "kaspa-txscript", "kaspa-utils", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2560,7 +2597,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "tokio", ] @@ -2619,7 +2656,7 @@ dependencies = [ "num-traits", "num_cpus", "parking_lot", - "rand", + "rand 0.8.5", "rocksdb", "serde", "smallvec", @@ -2649,7 +2686,7 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand", + "rand 0.8.5", "regex", "rustls", "thiserror", @@ -2669,6 +2706,7 @@ dependencies = [ "faster-hex", "futures", "h2 0.4.6", + "itertools 0.13.0", "kaspa-addresses", "kaspa-consensus-core", "kaspa-core", @@ -2678,7 
+2716,7 @@ dependencies = [ "log", "paste", "prost", - "rand", + "rand 0.8.5", "regex", "thiserror", "tokio", @@ -2715,14 +2753,24 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand", + "rand 0.8.5", "rustls", "thiserror", "tokio", "tokio-stream", "tonic", "triggered", - "uuid 1.10.0", + "uuid 1.18.1", +] + +[[package]] +name = "kaspa-grpc-simple-client-example" +version = "1.0.1" +dependencies = [ + "futures", + "kaspa-grpc-client", + "kaspa-rpc-core", + "tokio", ] [[package]] @@ -2738,7 +2786,7 @@ dependencies = [ "kaspa-utils", "keccak", "once_cell", - "rand", + "rand 0.8.5", "serde", "sha2", "sha3", @@ -2787,7 +2835,7 @@ dependencies = [ "log", "parking_lot", "paste", - "rand", + "rand 0.8.5", "thiserror", "tokio", "triggered", @@ -2804,7 +2852,7 @@ dependencies = [ "kaspa-utils", "malachite-base", "malachite-nz", - "rand_chacha", + "rand_chacha 0.3.1", "serde", "serde-wasm-bindgen", "thiserror", @@ -2855,7 +2903,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "secp256k1", "serde", "smallvec", @@ -2879,8 +2927,8 @@ dependencies = [ "criterion", "kaspa-hashes", "kaspa-math", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rayon", "serde", ] @@ -2910,7 +2958,7 @@ dependencies = [ "log", "parking_lot", "paste", - "rand", + "rand 0.8.5", "serde", "thiserror", "tokio", @@ -2946,11 +2994,11 @@ dependencies = [ "kaspa-utils-tower", "log", "parking_lot", - "rand", + "rand 0.8.5", "thiserror", "tokio", "tokio-stream", - "uuid 1.10.0", + "uuid 1.18.1", ] [[package]] @@ -2973,7 +3021,7 @@ dependencies = [ "log", "parking_lot", "prost", - "rand", + "rand 0.8.5", "seqlock", "serde", "thiserror", @@ -2981,7 +3029,7 @@ dependencies = [ "tokio-stream", "tonic", "tonic-build", - "uuid 1.10.0", + "uuid 1.18.1", ] [[package]] @@ -3058,13 +3106,14 @@ dependencies = [ "kaspa-utils", "log", "paste", - "rand", + "rand 0.8.5", "serde", "serde-wasm-bindgen", "serde_json", + "serde_nested_with", "smallvec", "thiserror", - "uuid 1.10.0", + "uuid 1.18.1", "wasm-bindgen", "workflow-core", "workflow-serializer", @@ -3121,7 +3170,7 @@ dependencies = [ "async-trait", "bincode", "chrono", - "clap 4.5.19", + "clap 4.5.51", "criterion", "crossbeam-channel", "dhat", @@ -3159,7 +3208,7 @@ dependencies = [ "kaspad", "log", "parking_lot", - "rand", + "rand 0.8.5", "rand_distr", "rayon", "rocksdb", @@ -3193,7 +3242,7 @@ dependencies = [ "kaspa-wasm-core", "log", "parking_lot", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde-wasm-bindgen", @@ -3235,7 +3284,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "rlimit", "serde", "serde_json", @@ -3245,7 +3294,7 @@ dependencies = [ "thiserror", "tokio", "triggered", - "uuid 1.10.0", + "uuid 1.18.1", "wasm-bindgen", ] @@ -3280,24 +3329,12 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "rocksdb", "serde", "thiserror", ] -[[package]] -name = "kaspa-wallet" -version = "1.0.1" -dependencies = [ - "async-std", - "async-trait", - "kaspa-cli", - "tokio", - "workflow-log", - "workflow-terminal", -] - [[package]] name = "kaspa-wallet-cli-wasm" version = "1.0.1" @@ -3365,7 +3402,7 @@ dependencies = [ "md-5", "pad", "pbkdf2", - "rand", + "rand 0.8.5", "regex", "ripemd", "secp256k1", @@ -3393,6 +3430,56 @@ dependencies = [ "zeroize", ] +[[package]] +name = "kaspa-wallet-daemon" +version = "1.0.1" +dependencies = [ + "async-std", + "async-trait", + "clap 4.5.19", + "futures-util", + "kaspa-consensus-core", + "kaspa-core", + "kaspa-wallet-core", + 
"kaspa-wallet-grpc-core", + "kaspa-wallet-grpc-server", + "log", + "tokio", + "tonic", + "workflow-core", + "workflow-log", + "workflow-terminal", +] + +[[package]] +name = "kaspa-wallet-grpc-core" +version = "1.0.1" +dependencies = [ + "kaspa-bip32", + "kaspa-rpc-core", + "kaspa-txscript", + "kaspa-wallet-core", + "prost", + "thiserror", + "tonic", + "tonic-build", +] + +[[package]] +name = "kaspa-wallet-grpc-server" +version = "1.0.1" +dependencies = [ + "futures-util", + "kaspa-addresses", + "kaspa-consensus-core", + "kaspa-rpc-core", + "kaspa-wallet-core", + "kaspa-wallet-grpc-core", + "log", + "tokio", + "tonic", +] + [[package]] name = "kaspa-wallet-keys" version = "1.0.1" @@ -3410,7 +3497,7 @@ dependencies = [ "kaspa-txscript-errors", "kaspa-utils", "kaspa-wasm-core", - "rand", + "rand 0.8.5", "ripemd", "secp256k1", "serde", @@ -3525,7 +3612,7 @@ dependencies = [ "kaspa-rpc-core", "kaspa-rpc-macros", "paste", - "rand", + "rand 0.8.5", "regex", "rustls", "serde", @@ -3564,7 +3651,7 @@ name = "kaspa-wrpc-proxy" version = "1.0.1" dependencies = [ "async-trait", - "clap 4.5.19", + "clap 4.5.51", "kaspa-consensus-core", "kaspa-grpc-client", "kaspa-rpc-core", @@ -3616,6 +3703,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "kaspa-wrpc-vcc-v2" +version = "1.0.1" +dependencies = [ + "futures", + "kaspa-addresses", + "kaspa-rpc-core", + "kaspa-wrpc-client", + "tokio", +] + [[package]] name = "kaspa-wrpc-wasm" version = "1.0.1" @@ -3652,7 +3750,7 @@ version = "1.0.1" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", - "clap 4.5.19", + "clap 4.5.51", "dhat", "dirs", "futures-util", @@ -3684,10 +3782,11 @@ dependencies = [ "kaspa-wrpc-server", "log", "num_cpus", - "rand", + "rand 0.8.5", "rayon", "rocksdb", "serde", + "serde_json", "serde_with", "tempfile", "thiserror", @@ -3750,8 +3849,9 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libmimalloc-sys" -version = "0.1.42" -source = "git+https://github.com/purpleprotocol/mimalloc_rust?rev=eff21096d5ee5337ec89e2b7174f1bbb11026c70#eff21096d5ee5337ec89e2b7174f1bbb11026c70" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "667f4fec20f29dfc6bc7357c582d91796c169ad7e2fce709468aefeb2c099870" dependencies = [ "cc", "libc", @@ -3763,21 +3863,20 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", "libc", "redox_syscall", ] [[package]] name = "librocksdb-sys" -version = "0.16.0+8.10.0" +version = "0.17.3+10.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" +checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9" dependencies = [ - "bindgen 0.69.4", + "bindgen 0.72.1", "bzip2-sys", "cc", - "glob", "libc", "libz-sys", "lz4-sys", @@ -3877,7 +3976,7 @@ dependencies = [ "log-mdc", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "serde", "serde-value", "serde_json", @@ -3946,7 +4045,7 @@ dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -3957,7 +4056,7 @@ checksum = "13198c120864097a565ccb3ff947672d969932b7975ebd4085732c9f09435e55" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -3970,7 +4069,7 @@ dependencies = [ "macroific_core", "proc-macro2", "quote", - "syn 
2.0.79", + "syn 2.0.110", ] [[package]] @@ -4051,8 +4150,9 @@ dependencies = [ [[package]] name = "mimalloc" -version = "0.1.46" -source = "git+https://github.com/purpleprotocol/mimalloc_rust?rev=eff21096d5ee5337ec89e2b7174f1bbb11026c70#eff21096d5ee5337ec89e2b7174f1bbb11026c70" +version = "0.1.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1ee66a4b64c74f4ef288bcbb9192ad9c3feaad75193129ac8509af543894fd8" dependencies = [ "libmimalloc-sys", ] @@ -4130,7 +4230,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] @@ -4164,7 +4264,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", "cfg-if 1.0.0", "cfg_aliases 0.1.1", "libc", @@ -4177,7 +4277,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", "cfg-if 1.0.0", "cfg_aliases 0.2.1", "libc", @@ -4446,7 +4546,7 @@ checksum = "70df726c43c645ef1dde24c7ae14692036ebe5457c92c5f0ec4cfceb99634ff6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -4456,7 +4556,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -4498,7 +4598,7 @@ dependencies = [ "order-stat", "peroxide-ad", "puruspe", - "rand", + "rand 0.8.5", "rand_distr", ] @@ -4539,7 +4639,7 @@ checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -4625,7 +4725,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -4660,11 +4760,33 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] @@ -4696,7 +4818,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.79", + "syn 2.0.110", "tempfile", ] @@ -4710,7 +4832,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -4738,7 +4860,7 @@ dependencies = [ "pin-project-lite", "quinn-proto", 
"quinn-udp", - "rustc-hash 2.0.0", + "rustc-hash 2.1.1", "rustls", "socket2", "thiserror", @@ -4753,9 +4875,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", - "rand", + "rand 0.8.5", "ring", - "rustc-hash 2.0.0", + "rustc-hash 2.1.1", "rustls", "slab", "thiserror", @@ -4778,13 +4900,19 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "rand" version = "0.8.5" @@ -4792,8 +4920,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", ] [[package]] @@ -4803,7 +4941,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -4812,7 +4960,16 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.4", ] [[package]] @@ -4822,7 +4979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -4857,7 +5014,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", ] [[package]] @@ -4866,7 +5023,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", "thiserror", ] @@ -4953,7 +5110,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if 1.0.0", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -4980,9 +5137,9 @@ dependencies = [ [[package]] name = 
"rocksdb" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" +checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f" dependencies = [ "libc", "librocksdb-sys", @@ -4993,7 +5150,7 @@ name = "rothschild" version = "1.0.1" dependencies = [ "async-channel 2.3.1", - "clap 4.5.19", + "clap 4.5.51", "criterion", "faster-hex", "itertools 0.13.0", @@ -5007,7 +5164,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "rayon", "secp256k1", "tokio", @@ -5027,9 +5184,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -5046,7 +5203,7 @@ version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys", @@ -5111,7 +5268,7 @@ dependencies = [ "num", "num-traits", "peroxide", - "rand", + "rand 0.8.5", "rand_distr", "special", ] @@ -5152,7 +5309,7 @@ version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ - "rand", + "rand 0.8.5", "secp256k1-sys", "serde", ] @@ -5228,7 +5385,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -5243,6 +5400,18 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_nested_with" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc84538493ef215370434907a7dca8117778d16ac1acd0482ce88a0f5cf19707" +dependencies = [ + "darling 0.21.3", + "proc-macro-error2", + "quote", + "syn 2.0.110", +] + [[package]] name = "serde_repr" version = "0.1.19" @@ -5251,7 +5420,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -5299,10 +5468,10 @@ version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ - "darling", + "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -5402,7 +5571,7 @@ version = "1.0.1" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", - "clap 4.5.19", + "clap 4.5.51", "dhat", "futures", "futures-util", @@ -5419,10 +5588,11 @@ dependencies = [ "kaspa-utils", "log", "num_cpus", - "rand", + "rand 0.8.5", "rand_distr", "rayon", "secp256k1", + "serde_json", "tokio", ] @@ -5534,9 +5704,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" dependencies = [ "proc-macro2", "quote", @@ -5552,7 +5722,7 @@ dependencies = [ 
"proc-macro-error", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -5590,7 +5760,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", "core-foundation", "system-configuration-sys", ] @@ -5664,7 +5834,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -5775,7 +5945,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -5908,7 +6078,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -5922,7 +6092,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -5951,7 +6121,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.4", "bytes", "http 1.1.0", "http-body 1.0.1", @@ -5992,7 +6162,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -6028,7 +6198,7 @@ dependencies = [ "http 1.1.0", "httparse", "log", - "rand", + "rand 0.8.5", "rustls", "rustls-pki-types", "sha1", @@ -6150,17 +6320,18 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] name = "uuid" -version = "1.10.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom", - "rand", + "getrandom 0.3.4", + "js-sys", + "rand 0.9.2", "serde", "wasm-bindgen", ] @@ -6229,6 +6400,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -6253,7 +6433,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", "wasm-bindgen-shared", ] @@ -6288,7 +6468,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6323,7 +6503,7 @@ checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -6427,7 +6607,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -6438,7 +6618,7 @@ checksum = 
"29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] @@ -6637,6 +6817,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + [[package]] name = "workflow-chrome" version = "0.18.0" @@ -6667,10 +6853,10 @@ dependencies = [ "dirs", "faster-hex", "futures", - "getrandom", + "getrandom 0.2.15", "instant", "js-sys", - "rand", + "rand 0.8.5", "rlimit", "serde", "serde-wasm-bindgen", @@ -6798,7 +6984,7 @@ dependencies = [ "futures", "js-sys", "nw-sys", - "rand", + "rand 0.8.5", "serde", "serde-wasm-bindgen", "thiserror", @@ -6848,9 +7034,9 @@ dependencies = [ "downcast-rs", "futures", "futures-util", - "getrandom", + "getrandom 0.2.15", "manual_future", - "rand", + "rand 0.8.5", "serde", "serde_json", "thiserror", @@ -7093,7 +7279,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.110", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d6367a9af2..1241093913 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,9 @@ members = [ "core", "wallet/macros", "wallet/core", - "wallet/native", + "wallet/grpc/core", + "wallet/grpc/server", + "wallet/daemon", "wallet/wasm", "wallet/bip32", "wallet/keys", @@ -36,12 +38,14 @@ members = [ "rpc/grpc/core", "rpc/grpc/client", "rpc/grpc/server", + "rpc/grpc/examples/simple_client", "rpc/wrpc/server", "rpc/wrpc/client", "rpc/wrpc/proxy", "rpc/wrpc/wasm", "rpc/wrpc/examples/subscriber", "rpc/wrpc/examples/simple_client", + "rpc/wrpc/examples/vcc_v2", "mining", "mining/errors", "protocol/p2p", @@ -121,11 +125,13 @@ kaspa-txscript-errors = { version = "1.0.1", path = "crypto/txscript/errors" } kaspa-utils = { version = "1.0.1", path = "utils" } kaspa-utils-tower = { version = "1.0.1", path = "utils/tower" } kaspa-utxoindex = { version = "1.0.1", path = "indexes/utxoindex" } -kaspa-wallet = { version = "1.0.1", path = "wallet/native" } +kaspa-wallet-daemon = { version = "1.0.1", path = "wallet/daemon" } kaspa-wallet-cli-wasm = { version = "1.0.1", path = "wallet/wasm" } kaspa-wallet-keys = { version = "1.0.1", path = "wallet/keys" } kaspa-wallet-pskt = { version = "1.0.1", path = "wallet/pskt" } kaspa-wallet-core = { version = "1.0.1", path = "wallet/core" } +kaspa-wallet-grpc-core = { version = "1.0.0", path = "wallet/grpc/core" } +kaspa-wallet-grpc-server = { version = "1.0.0", path = "wallet/grpc/server" } kaspa-wallet-macros = { version = "1.0.1", path = "wallet/macros" } kaspa-wasm = { version = "1.0.1", path = "wasm" } kaspa-wasm-core = { version = "1.0.1", path = "wasm/core" } @@ -136,6 +142,7 @@ kaspa-wrpc-wasm = { version = "1.0.1", path = "rpc/wrpc/wasm" } kaspa-wrpc-example-subscriber = { version = "1.0.1", path = "rpc/wrpc/examples/subscriber" } kaspad = { version = "1.0.1", path = "kaspad" } kaspa-alloc = { version = "1.0.1", path = "utils/alloc" } +kaspa-wallet = { version = "1.0.1", path = "wallet/native" } # external aes = "0.8.3" @@ -145,9 +152,10 @@ argon2 = "0.5.2" async-channel = "2.0.0" async-std = { version = "1.12.0", features = ['attributes'] } async-stream = "0.3.5" -async-trait = "0.1.74" +async-trait = "0.1.88" base64 = "0.22.1" bincode = { version = "1.3.3", default-features = false } +bitflags = "2.9.4" blake2b_simd = "1.0.2" borsh = { version = "1.5.1", 
features = ["derive", "rc"] } bs58 = { version = "0.5.0", features = ["check"], default-features = false } @@ -156,7 +164,7 @@ cc = "1.0.83" cfb-mode = "0.8.2" cfg-if = "1.0.0" chacha20poly1305 = "0.10.1" -clap = { version = "4.4.7", features = ["derive", "string", "cargo"] } +clap = { version = "4.5.35", features = ["derive", "string", "cargo"] } convert_case = "0.6.0" criterion = { version = "0.5.1", default-features = false } crossbeam-channel = "0.5.8" @@ -212,7 +220,7 @@ num-traits = "0.2.17" once_cell = "1.18.0" pad = "0.1.6" parking_lot = "0.12.1" -paste = "1.0.14" +paste = "1.0.15" pbkdf2 = "0.12.2" portable-atomic = { version = "1.5.1", features = ["float"] } prost = "0.13.2" @@ -224,7 +232,7 @@ rayon = "1.8.0" regex = "1.10.2" ripemd = { version = "0.1.3", default-features = false } rlimit = "0.10.1" -rocksdb = "0.22.0" +rocksdb = "0.24.0" rv = "0.16.4" secp256k1 = { version = "0.29.0", features = [ "global-context", @@ -235,6 +243,7 @@ separator = "0.4.1" seqlock = "0.2.0" serde = { version = "1.0.190", features = ["derive", "rc"] } serde_bytes = "0.11.12" +serde_nested_with = "0.2.5" # helper, can be removed when https://github.com/serde-rs/serde/issues/723 is resolved serde_json = "1.0.107" serde_repr = "0.1.18" serde-value = "0.7.0" @@ -243,7 +252,7 @@ sha1 = "0.10.6" sha2 = "0.10.8" sha3 = "0.10.8" slugify-rs = "0.0.3" -smallvec = { version = "1.11.1", features = ["serde"] } +smallvec = { version = "1.11.1", features = ["serde", "const_generics"] } sorted-insert = "0.2.3" subtle = { version = "2.5.0", default-features = false } sysinfo = "0.31.2" @@ -256,7 +265,7 @@ toml = "0.8.8" tonic = { version = "0.12.3", features = ["tls-webpki-roots", "gzip", "transport"] } tonic-build = { version = "0.12.3", features = ["prost"] } triggered = "0.1.2" -uuid = { version = "1.5.0", features = ["v4", "fast-rng", "serde"] } +uuid = { version = "1.16.0", features = ["v4", "fast-rng", "serde", "js"] } wasm-bindgen = { version = "0.2.100", features = ["serde-serialize"] } wasm-bindgen-futures = "0.4.43" wasm-bindgen-test = "0.3.50" diff --git a/README.md b/README.md index 05d7a07cff..47a2a1b0b7 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@
[![DeepWiki](https://img.shields.io/badge/DeepWiki-kaspanet%2Frusty--kaspa-blue.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/kaspanet/rusty-kaspa) -

-Kaspa On Rust
+Kaspa on Rust

Welcome to the Rust-based implementation of the Kaspa full-node and its ancillary libraries. The contained node release serves as a drop-in replacement to the established Golang node and to date is the recommended node software for the Kaspa network, introducing developers to the possibilities of Rust in the Kaspa network's context. @@ -158,6 +158,52 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of +
+ Building with Docker + + You can build the project using Docker in two ways: a simple single-architecture build, or a multi-architecture build using the provided script. + + #### 1. Simple Docker Build + + To build for your current architecture (e.g., `linux/amd64`): + + ```sh + docker build -f docker/Dockerfile.kaspad -t kaspad:latest . + ``` + + Replace `Dockerfile.kaspad` with the appropriate Dockerfile for your target (`kaspad`, `kaspa-wallet`, `rothschild`, or `simpa`). + + #### 2. Multi-Architecture Build + + To build images for multiple architectures (e.g., `linux/amd64` and `linux/arm64`) and optionally push them to a registry, use the `build-docker-multi-arch.sh` script: + + ```sh + ./build-docker-multi-arch.sh --tag <tag> --artifact <artifact> [--arches "<arches>"] [--push] + ``` + + - `--tag <tag>`: **(required)** The Docker image tag to use. + - `--artifact <artifact>`: The build target/artifact (default: `kaspad`). Must match the Dockerfile name, e.g., `kaspad` for `Dockerfile.kaspad`. + - `--arches "<arches>"`: Space-separated list of architectures (default: `"linux/amd64 linux/arm64"`). + - `--push`: If specified, the built images will be pushed to your Docker registry. + + **Examples:** + + Build and push a multi-arch image for `kaspad`: + + ```sh + ./build-docker-multi-arch.sh --tag myrepo/kaspad:latest --artifact kaspad --push + ``` + + Build a multi-arch image for `kaspa-wallet` without pushing: + + ```sh + ./build-docker-multi-arch.sh --tag kaspa-wallet:test --artifact kaspa-wallet + ``` + + **Note:** + In order to use `build-docker-multi-arch.sh`, you need Docker with Buildx enabled.
+
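After a multi-arch build that was pushed with `--push`, it is worth confirming that the resulting manifest actually covers the requested architectures. A minimal check using stock Docker tooling; the tag `myrepo/kaspad:latest` is simply the illustrative one from the example above:

```sh
# List the platforms contained in the pushed multi-arch manifest;
# both linux/amd64 and linux/arm64 should appear in the output.
docker buildx imagetools inspect myrepo/kaspad:latest
```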
Building WASM32 SDK @@ -241,6 +287,24 @@ The framework is compatible with all major desktop and mobile browsers. ```bash cargo run --release --bin kaspad -- --testnet ``` + +
+ + Start a devnet node + + +Start the devnet node with the following command: + +```bash +cargo run --bin kaspad -- --devnet --enable-unsynced-mining --rpclisten=127.0.0.1 --rpclisten-borsh=127.0.0.1 --utxoindex +``` +* `enable-unsynced-mining` is required when the network isn't synchronized, which is the case on first launch. +* `utxoindex` enables the UTXO index, which is necessary for wallet functionality. +* `rpclisten` and `rpclisten-borsh` are likely to be required by mining software. + +Note: it will take some time for the difficulty to adjust, so you may need to wait a while before blocks are mined consistently. + +
@@ -421,4 +485,12 @@ Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.1 ``` In this command we set the `loglevel` to `INFO`. +
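Since the filter syntax follows the linked env_logger conventions, per-module directives can be appended to the global level. A hedged sketch; the module targets shown here are assumptions for illustration, not taken from this diff:

```bash
# Global INFO, with TRACE enabled for selected modules (target names are illustrative)
cargo run --release --bin kaspad -- --loglevel info,kaspa_rpc_core=trace,kaspa_grpc_core=trace
```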
+ +
+ +Override consensus parameters + +You can experiment with non-standard consensus parameters in non-mainnet environments by supplying a JSON file with `--override-params-file <file-path>`. See [docs/override-params.md](docs/override-params.md) for a more detailed explanation. +
\ No newline at end of file
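Since overrides are only accepted in non-mainnet environments, a typical invocation pairs the flag with a devnet or simnet network flag. A minimal sketch; `my-params.json` is a placeholder file name, and the accepted fields are those described in docs/override-params.md:

```bash
# Run a devnet node with consensus parameters overridden from a local JSON file (file name is illustrative)
cargo run --release --bin kaspad -- --devnet --override-params-file my-params.json
```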
diff --git a/build-docker-multi-arch.sh b/build-docker-multi-arch.sh new file mode 100755 index 0000000000..ec46b1af2e --- /dev/null +++ b/build-docker-multi-arch.sh @@ -0,0 +1,83 @@ +#!/bin/sh + +set -e + +# Parse command line arguments (--tag, --artifact, --arches, --push) +ARCHES="linux/amd64 linux/arm64" +ARTIFACT="kaspad" +while [ $# -gt 0 ]; do + case "$1" in + --tag) + shift + TAG="$1" + ;; + --arches) + shift + ARCHES="$1" + ;; + --push) + PUSH="push" + ;; + --artifact) + shift + ARTIFACT="$1" + ;; + --help|-h) + echo "Usage: $0 --tag <tag> --artifact <artifact> [--arches <arches>] [--push]" + echo "" + echo " --tag <tag>            Docker image tag (required)" + echo " --artifact <artifact>  Build target/artifact (default: \"$ARTIFACT\")" + echo " --arches <arches>      Space-separated list of architectures (default: \"$ARCHES\")" + echo " --push                 Push the built images" + echo " --help, -h             Show this help message" + exit 0 + ;; + *) + break + ;; + esac + shift +done + +if [ -z "$TAG" ]; then + echo "Error: --tag argument is required" + exit 1 +fi + +BUILD_DIR="$(dirname $0)" +docker=docker +id -nG $USER | grep -qw docker || docker="sudo $docker" + +multi_arch_build() { + echo + echo "====================================================" + echo " Running build for $1" + echo "====================================================" + dockerRepo="${DOCKER_REPO_PREFIX}-$1" + dockerRepoArgs= + + if [ "$PUSH" = "push" ]; then + dockerRepoArgs="$dockerRepoArgs --push" + fi + + dockerRepoArgs="$dockerRepoArgs --tag $TAG" + dockerRepoArgs="$dockerRepoArgs -f docker/Dockerfile.$1" + + $docker buildx build --platform=$(echo $ARCHES | sed 's/ /,/g') $dockerRepoArgs \ --tag $TAG "$BUILD_DIR" + echo "====================================================" + echo " Completed build for $1" + echo "====================================================" +} + +echo +echo "====================================================" +echo " Setup multi arch build ($ARCHES)" +echo "====================================================" +$docker buildx create --name mybuilder \ --driver docker-container \ --node mybuilder0 \ --use --bootstrap +$docker buildx create --name=mybuilder --append --node=mybuilder0 --platform=$(echo $ARCHES | sed 's/ /,/g') --bootstrap --use +echo "SUCCESS - doing multi arch build" +multi_arch_build $ARTIFACT diff --git a/cli/src/modules/rpc.rs b/cli/src/modules/rpc.rs index 75bc50f421..6e42922b14 100644 --- a/cli/src/modules/rpc.rs +++ b/cli/src/modules/rpc.rs @@ -290,6 +290,29 @@ impl Rpc { self.println(&ctx, result); } + RpcApiOps::GetVirtualChainFromBlockV2 => { + if argv.is_empty() { + return Err(Error::custom("Missing startHash argument")); + }; + + let start_hash = RpcHash::from_hex(argv.remove(0).as_str())?; + + let verbosity_level_i32 = argv.pop().and_then(|arg| arg.parse::<i32>().ok()).unwrap_or_default(); + let verbosity_level = RpcDataVerbosityLevel::try_from(verbosity_level_i32)?; + + let result = rpc + .get_virtual_chain_from_block_v2_call( + None, + GetVirtualChainFromBlockV2Request { + start_hash, + data_verbosity_level: Some(verbosity_level), + min_confirmation_count: None, + }, + ) + .await; + + self.println(&ctx, result); + } _ => { tprintln!(ctx, "rpc method exists but is not supported by the cli: '{op_str}'\r\n"); return Ok(()); diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs index b220a8ab1b..9f630204c3 100644 --- a/components/addressmanager/src/lib.rs +++ b/components/addressmanager/src/lib.rs @@ -535,7 +535,11 @@ mod address_store_with_cache { assert_eq!(iter.count(), 0); } + // This test is non-deterministic, so it is ignored by default.
+ // This test is non-deterministic, so it is ignored by default. + // Every developer who changes the address manager logic should run this test locally before opening a PR. + // TODO: Maybe tune the statistical parameters to reduce the failure rate? #[test] + #[ignore] fn test_network_distribution_weighting() { kaspa_core::log::try_init_logger("info"); diff --git a/components/consensusmanager/src/session.rs b/components/consensusmanager/src/session.rs index 46782a318b..c77cf730be 100644 --- a/components/consensusmanager/src/session.rs +++ b/components/consensusmanager/src/session.rs @@ -3,7 +3,7 @@ //! We use newtypes in order to simplify changing the underlying lock in the future use kaspa_consensus_core::{ - acceptance_data::AcceptanceData, + acceptance_data::{AcceptanceData, MergesetBlockAcceptanceData}, api::{BlockCount, BlockValidationFutures, ConsensusApi, ConsensusStats, DynConsensus}, block::Block, blockstatus::BlockStatus, @@ -13,8 +13,7 @@ use kaspa_consensus_core::{ mass::{ContextualMasses, NonContextualMasses}, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, trusted::{ExternalGhostdagData, TrustedBlock}, - tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, - utxo::utxo_inquirer::UtxoInquirerError, + tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, TransactionQueryResult, TransactionType, UtxoEntry}, BlockHashSet, BlueWorkType, ChainPath, Hash, }; use kaspa_utils::sync::rwlock::*; @@ -262,7 +261,7 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(move |c| c.get_current_block_color(hash)).await } - /// retention period root refers to the earliest block from which the current node has full header & block data + /// retention period root refers to the earliest block from which the current node has full header & block data pub async fn async_get_retention_period_root(&self) -> Hash { self.clone().spawn_blocking(|c| c.get_retention_period_root()).await } @@ -316,12 +315,27 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(|c| c.get_chain_block_samples()).await } - pub async fn async_get_populated_transaction( + pub async fn async_get_transactions_by_accepting_daa_score( &self, - txid: Hash, - accepting_block_daa_score: u64, - ) -> Result<SignableTransaction, UtxoInquirerError> { - self.clone().spawn_blocking(move |c| c.get_populated_transaction(txid, accepting_block_daa_score)).await + accepting_daa_score: u64, + tx_ids: Option<Vec<TransactionId>>, + tx_type: TransactionType, + ) -> ConsensusResult<TransactionQueryResult> { + self.clone().spawn_blocking(move |c| c.get_transactions_by_accepting_daa_score(accepting_daa_score, tx_ids, tx_type)).await + } + + pub async fn async_get_transactions_by_block_acceptance_data( + &self, + accepting_block: Hash, + block_acceptance_data: MergesetBlockAcceptanceData, + tx_ids: Option<Vec<TransactionId>>, + tx_type: TransactionType, + ) -> ConsensusResult<TransactionQueryResult> { + self.clone() + .spawn_blocking(move |c| { + c.get_transactions_by_block_acceptance_data(accepting_block, block_acceptance_data, tx_ids, tx_type) + }) + .await } /// Returns the antipast of block `hash` from the POV of `context`, i.e. `antipast(hash) ∩ past(context)`.
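A rough sketch of how the new session query could be driven (a non-authoritative example; it assumes a `ConsensusProxy` named `session` and uses the types imported above):

```rust
use kaspa_consensus_core::tx::{TransactionQueryResult, TransactionType};
use kaspa_consensusmanager::ConsensusProxy;

// Count the transactions accepted at a given DAA score. Passing `None` as the
// id filter asks for all accepted transactions (per the API note in this diff).
async fn count_accepted_txs(session: &ConsensusProxy, daa_score: u64) -> usize {
    match session
        .async_get_transactions_by_accepting_daa_score(daa_score, None, TransactionType::Transaction)
        .await
        .expect("query should succeed for a non-pruned score")
    {
        TransactionQueryResult::Transaction(txs) => txs.len(),
        TransactionQueryResult::SignableTransaction(txs) => txs.len(),
    }
}
```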
@@ -369,6 +383,10 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(move |c| c.get_block(hash)).await } + pub async fn async_get_block_body(&self, hash: Hash) -> ConsensusResult>> { + self.clone().spawn_blocking(move |c| c.get_block_body(hash)).await + } + pub async fn async_get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult { self.clone().spawn_blocking(move |c| c.get_block_even_if_header_only(hash)).await } @@ -424,16 +442,16 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(move |c| c.get_missing_block_body_hashes(high)).await } - pub async fn async_pruning_point(&self) -> Hash { - self.clone().spawn_blocking(|c| c.pruning_point()).await + pub async fn async_get_body_missing_anticone(&self) -> Vec { + self.clone().spawn_blocking(move |c| c.get_body_missing_anticone()).await } - pub async fn async_get_daa_window(&self, hash: Hash) -> ConsensusResult> { - self.clone().spawn_blocking(move |c| c.get_daa_window(hash)).await + pub async fn async_clear_body_missing_anticone_set(&self) { + self.clone().spawn_blocking(move |c| c.clear_body_missing_anticone_set()).await } - pub async fn async_get_trusted_block_associated_ghostdag_data_block_hashes(&self, hash: Hash) -> ConsensusResult> { - self.clone().spawn_blocking(move |c| c.get_trusted_block_associated_ghostdag_data_block_hashes(hash)).await + pub async fn async_pruning_point(&self) -> Hash { + self.clone().spawn_blocking(|c| c.pruning_point()).await } pub async fn async_estimate_network_hashes_per_second( @@ -459,6 +477,30 @@ impl ConsensusSessionOwned { pub async fn async_finality_point(&self) -> Hash { self.clone().spawn_blocking(move |c| c.finality_point()).await } + pub async fn async_clear_pruning_utxo_set(&self) { + self.clone().spawn_blocking(move |c| c.clear_pruning_utxo_set()).await + } + pub async fn async_is_pruning_utxoset_stable(&self) -> bool { + self.clone().spawn_blocking(move |c| c.is_pruning_utxoset_stable()).await + } + pub async fn async_is_pruning_point_anticone_fully_synced(&self) -> bool { + self.clone().spawn_blocking(move |c| c.is_pruning_point_anticone_fully_synced()).await + } + pub async fn async_is_consensus_in_transitional_ibd_state(&self) -> bool { + self.clone().spawn_blocking(move |c| c.is_consensus_in_transitional_ibd_state()).await + } + pub async fn async_set_pruning_utxoset_unstable(&self) { + self.clone().spawn_blocking(move |c| c.set_pruning_utxoset_stable_flag(false)).await + } + pub async fn async_set_pruning_utxoset_stable(&self) { + self.clone().spawn_blocking(move |c| c.set_pruning_utxoset_stable_flag(true)).await + } + pub async fn async_verify_is_pruning_sample(&self, candidate_hash: Hash) -> ConsensusResult<()> { + self.clone().spawn_blocking(move |c| c.verify_is_pruning_sample(candidate_hash)).await + } + pub async fn async_intrusive_pruning_point_update(&self, new_pruning_point: Hash, syncer_sink: Hash) -> ConsensusResult<()> { + self.clone().spawn_blocking(move |c| c.intrusive_pruning_point_update(new_pruning_point, syncer_sink)).await + } } pub type ConsensusProxy = ConsensusSessionOwned; diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs index 772d2c9070..42804bc328 100644 --- a/consensus/benches/check_scripts.rs +++ b/consensus/benches/check_scripts.rs @@ -89,7 +89,7 @@ fn benchmark_check_scripts(c: &mut Criterion) { let cache = Cache::new(inputs_count as u64); b.iter(|| { cache.clear(); - check_scripts_sequential(black_box(&cache), black_box(&tx.as_verifiable()), false, false).unwrap(); + 
check_scripts_sequential(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); }) }); @@ -98,7 +98,7 @@ fn benchmark_check_scripts(c: &mut Criterion) { let cache = Cache::new(inputs_count as u64); b.iter(|| { cache.clear(); - check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false, false).unwrap(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); }) }); @@ -110,14 +110,7 @@ fn benchmark_check_scripts(c: &mut Criterion) { let cache = Cache::new(inputs_count as u64); b.iter(|| { cache.clear(); - check_scripts_par_iter_pool( - black_box(&cache), - black_box(&tx.as_verifiable()), - black_box(&pool), - false, - false, - ) - .unwrap(); + check_scripts_par_iter_pool(black_box(&cache), black_box(&tx.as_verifiable()), black_box(&pool)).unwrap(); }) }); } @@ -153,7 +146,7 @@ fn benchmark_check_scripts_with_payload(c: &mut Criterion) { let cache = Cache::new(inputs_count as u64); b.iter(|| { cache.clear(); - check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false, false).unwrap(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); }) }); } diff --git a/consensus/client/src/error.rs b/consensus/client/src/error.rs index e632f517d5..8f4575dd84 100644 --- a/consensus/client/src/error.rs +++ b/consensus/client/src/error.rs @@ -42,6 +42,9 @@ pub enum Error { #[error(transparent)] NetworkType(#[from] kaspa_consensus_core::network::NetworkTypeError), + #[error(transparent)] + CompressedParents(#[from] kaspa_consensus_core::errors::header::CompressedParentsError), + #[error("Error converting property `{0}`: {1}")] Convert(&'static str, String), diff --git a/consensus/client/src/header.rs b/consensus/client/src/header.rs index 7d2e25b393..5b4392ed4d 100644 --- a/consensus/client/src/header.rs +++ b/consensus/client/src/header.rs @@ -233,7 +233,7 @@ impl Header { #[wasm_bindgen(setter = parentsByLevel)] pub fn set_parents_by_level_from_js_value(&mut self, js_value: JsValue) { let array = Array::from(&js_value); - self.inner_mut().parents_by_level = array + let parents = array .iter() .map(|jsv| { Array::from(&jsv) @@ -246,6 +246,8 @@ impl Header { .unwrap_or_else(|err| { panic!("{}", err); }); + + self.inner_mut().parents_by_level = parents.try_into().unwrap(); } #[wasm_bindgen(getter = blueWork)] @@ -272,7 +274,7 @@ impl TryCastFromJs for Header { { Self::resolve(value, || { if let Some(object) = Object::try_from(value.as_ref()) { - let parents_by_level = object + let parents_by_level_vec = object .get_vec("parentsByLevel")? 
.iter() .map(|jsv| { @@ -284,6 +286,8 @@ }) .collect::<Result<Vec<Vec<Hash>>, Error>>()?; + + let parents_by_level = parents_by_level_vec.try_into()?; + let header = native::Header { hash: object.get_value("hash")?.try_into_owned().unwrap_or_default(), version: object.get_u16("version")?, diff --git a/consensus/client/src/outpoint.rs b/consensus/client/src/outpoint.rs index a9b39f5e4f..c2f59092e1 100644 --- a/consensus/client/src/outpoint.rs +++ b/consensus/client/src/outpoint.rs @@ -188,6 +188,12 @@ impl From<&TransactionOutpoint> for cctx::TransactionOutpoint { } } +impl PartialEq<cctx::TransactionOutpoint> for TransactionOutpoint { + fn eq(&self, other: &cctx::TransactionOutpoint) -> bool { + self.inner.transaction_id == other.transaction_id && self.inner.index == other.index + } +} + impl TransactionOutpoint { pub fn simulated() -> Self { Self::new(TransactionId::from_slice(&rand::random::<[u8; kaspa_hashes::HASH_SIZE]>()), 0) diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml index 228b4ac11d..ad493998e3 100644 --- a/consensus/core/Cargo.toml +++ b/consensus/core/Cargo.toml @@ -18,6 +18,7 @@ default = [] arc-swap.workspace = true async-trait.workspace = true borsh.workspace = true +bitflags.workspace = true cfg-if.workspace = true faster-hex.workspace = true futures-util.workspace = true diff --git a/consensus/core/src/acceptance_data.rs b/consensus/core/src/acceptance_data.rs index 2ab2355839..95fcaa20ea 100644 --- a/consensus/core/src/acceptance_data.rs +++ b/consensus/core/src/acceptance_data.rs @@ -3,9 +3,11 @@ use serde::{Deserialize, Serialize}; use crate::tx::TransactionId; +/// Holds the acceptance data of a mergeset: a list of all the merged blocks, each with its accepted transactions pub type AcceptanceData = Vec<MergesetBlockAcceptanceData>; #[derive(Debug, Clone, Serialize, Deserialize)] +/// Holds a merged block with its accepted transactions pub struct MergesetBlockAcceptanceData { pub block_hash: Hash, pub accepted_transactions: Vec, diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index c33a537a76..39c40da352 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -3,7 +3,7 @@ use kaspa_muhash::MuHash; use std::sync::Arc; use crate::{ - acceptance_data::AcceptanceData, + acceptance_data::{AcceptanceData, MergesetBlockAcceptanceData}, api::args::{TransactionValidationArgs, TransactionValidationBatchArgs}, block::{Block, BlockTemplate, TemplateBuildMode, TemplateTransactionSelector, VirtualStateApproxId}, blockstatus::BlockStatus, @@ -20,8 +20,10 @@ use crate::{ mass::{ContextualMasses, NonContextualMasses}, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, - tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, - utxo::utxo_inquirer::UtxoInquirerError, + tx::{ + MutableTransaction, Transaction, TransactionId, TransactionIndexType, TransactionOutpoint, TransactionQueryResult, + TransactionType, UtxoEntry, + }, BlockHashSet, BlueWorkType, ChainPath, }; use kaspa_hashes::Hash; @@ -151,7 +153,7 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - /// retention period root refers to the earliest block from which the current node has full header & block data + /// retention period root refers to the earliest block from which the current node has full header & block data fn get_retention_period_root(&self) -> Hash { unimplemented!() } @@ -162,7 +164,7 @@ /// Gets the virtual chain paths from
`low` to the `sink` hash, or until `chain_path_added_limit` is reached /// - /// Note: + /// Note: /// 1) `chain_path_added_limit` will populate the removed chain path fully, and then the added chain path, up to `chain_path_added_limit` hashes. /// 1.1) use `None` to impose no limit with optimized backward chain iteration, for better performance in cases where batching is not required. fn get_virtual_chain_from_block(&self, low: Hash, chain_path_added_limit: Option<usize>) -> ConsensusResult<ChainPath> { @@ -175,7 +177,32 @@ pub trait ConsensusApi: Send + Sync { /// Returns the fully populated transaction with the given txid which was accepted at the provided accepting_block_daa_score. /// The argument `accepting_block_daa_score` is expected to be the DAA score of the accepting chain block of `txid`. - fn get_populated_transaction(&self, txid: Hash, accepting_block_daa_score: u64) -> Result<SignableTransaction, UtxoInquirerError> { + /// Note: if `tx_ids` is `None`, the function returns all accepted transactions. + fn get_transactions_by_accepting_daa_score( + &self, + accepting_daa_score: u64, + tx_ids: Option<Vec<TransactionId>>, + tx_type: TransactionType, + ) -> ConsensusResult<TransactionQueryResult> { + unimplemented!() + } + + fn get_transactions_by_block_acceptance_data( + &self, + accepting_block: Hash, + block_acceptance_data: MergesetBlockAcceptanceData, + tx_ids: Option<Vec<TransactionId>>, + tx_type: TransactionType, + ) -> ConsensusResult<TransactionQueryResult> { + unimplemented!() + } + + fn get_transactions_by_accepting_block( + &self, + accepting_block: Hash, + tx_ids: Option<Vec<TransactionId>>, + tx_type: TransactionType, + ) -> ConsensusResult<TransactionQueryResult> { unimplemented!() } @@ -208,7 +235,7 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash { + fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction]) -> Hash { unimplemented!() } @@ -284,6 +311,14 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } + fn get_block_transactions(&self, hash: Hash, indices: Option<Vec<TransactionIndexType>>) -> ConsensusResult<Vec<Transaction>> { + unimplemented!() + } + + fn get_block_body(&self, hash: Hash) -> ConsensusResult<Arc<Vec<Transaction>>> { + unimplemented!() + } + fn get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult<Block> { unimplemented!() } @@ -336,19 +371,14 @@ pub trait ConsensusApi: Send + Sync { fn get_missing_block_body_hashes(&self, high: Hash) -> ConsensusResult<Vec<Hash>> { unimplemented!() } - - fn pruning_point(&self) -> Hash { + fn get_body_missing_anticone(&self) -> Vec<Hash> { unimplemented!() } - - // TODO: Delete this function once there's no need for go-kaspad backward compatibility. - fn get_daa_window(&self, hash: Hash) -> ConsensusResult<Vec<Hash>> { + fn clear_body_missing_anticone_set(&self) { unimplemented!() } - // TODO: Think of a better name. - // TODO: Delete this function once there's no need for go-kaspad backward compatibility.
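The batching note above implies a retrieval loop roughly like the following (a sketch under the assumption that `ChainPath` exposes `added`/`removed` hash vectors; the resume logic is illustrative, not authoritative):

```rust
use kaspa_consensus_core::{api::ConsensusApi, ChainPath};
use kaspa_hashes::Hash;

// Collect the added chain path from `low` towards the sink in batches of `limit` hashes.
fn collect_added_chain(c: &dyn ConsensusApi, mut low: Hash, limit: usize) -> Vec<Hash> {
    let mut added = Vec::new();
    loop {
        let path: ChainPath = c.get_virtual_chain_from_block(low, Some(limit)).unwrap();
        added.extend_from_slice(&path.added);
        match path.added.last() {
            // A full batch may mean more blocks remain; resume from the last added block
            Some(&last) if path.added.len() == limit => low = last,
            // A short (or empty) batch means the sink was reached
            _ => break,
        }
    }
    added
}
```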
- fn get_trusted_block_associated_ghostdag_data_block_hashes(&self, hash: Hash) -> ConsensusResult> { + fn pruning_point(&self) -> Hash { unimplemented!() } @@ -371,6 +401,34 @@ pub trait ConsensusApi: Send + Sync { fn finality_point(&self) -> Hash { unimplemented!() } + + fn clear_pruning_utxo_set(&self) { + unimplemented!() + } + + fn set_pruning_utxoset_stable_flag(&self, val: bool) { + unimplemented!() + } + + fn is_pruning_utxoset_stable(&self) -> bool { + unimplemented!() + } + + fn is_pruning_point_anticone_fully_synced(&self) -> bool { + unimplemented!() + } + + fn is_consensus_in_transitional_ibd_state(&self) -> bool { + unimplemented!() + } + + fn verify_is_pruning_sample(&self, candidate_hash: Hash) -> ConsensusResult<()> { + unimplemented!() + } + + fn intrusive_pruning_point_update(&self, new_pruning_point: Hash, syncer_sink: Hash) -> ConsensusResult<()> { + unimplemented!() + } } pub type DynConsensus = Arc; diff --git a/consensus/core/src/block.rs b/consensus/core/src/block.rs index cbd76b42dc..2af0b3dde5 100644 --- a/consensus/core/src/block.rs +++ b/consensus/core/src/block.rs @@ -68,6 +68,8 @@ impl Block { Block::from_header(Header::from_precomputed_hash(hash, parents)) } + /// Check if the block in-memory size is too large to be cached as a pending-validation orphan block. + /// Returns None if the block is too large pub fn asses_for_cache(&self) -> Option<()> { (self.estimate_mem_bytes() < 1_000_000).then_some(()) } diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index 02eabb7114..4b0e050966 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -166,7 +166,7 @@ pub mod perf { impl PerfParams { pub fn adjust_to_consensus_params(&mut self, consensus_params: &Params) { // Allow caching up to 10x over the baseline - self.block_data_cache_size *= consensus_params.bps().upper_bound().clamp(1, 10) as usize; + self.block_data_cache_size *= consensus_params.bps().after().clamp(1, 10) as usize; } } } diff --git a/consensus/core/src/config/genesis.rs b/consensus/core/src/config/genesis.rs index 06d1431ed2..0c7281ed28 100644 --- a/consensus/core/src/config/genesis.rs +++ b/consensus/core/src/config/genesis.rs @@ -1,4 +1,9 @@ -use crate::{block::Block, header::Header, subnets::SUBNETWORK_ID_COINBASE, tx::Transaction}; +use crate::{ + block::Block, + header::{CompressedParents, Header}, + subnets::SUBNETWORK_ID_COINBASE, + tx::Transaction, +}; use kaspa_hashes::{Hash, ZERO_HASH}; use kaspa_muhash::EMPTY_MUHASH; @@ -26,7 +31,7 @@ impl From<&GenesisBlock> for Header { fn from(genesis: &GenesisBlock) -> Self { Header::new_finalized( genesis.version, - Vec::new(), + CompressedParents::default(), genesis.hash_merkle_root, ZERO_HASH, genesis.utxo_commitment, @@ -231,7 +236,7 @@ mod tests { fn test_genesis_hashes() { [GENESIS, TESTNET_GENESIS, TESTNET11_GENESIS, SIMNET_GENESIS, DEVNET_GENESIS].into_iter().for_each(|genesis| { let block: Block = (&genesis).into(); - assert_hashes_eq(calc_hash_merkle_root(block.transactions.iter(), false), block.header.hash_merkle_root); + assert_hashes_eq(calc_hash_merkle_root(block.transactions.iter()), block.header.hash_merkle_root); assert_hashes_eq(block.hash(), genesis.hash); }); } diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index 0773aeaa43..fcee365a6b 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -10,9 +10,10 @@ use crate::{ }; use kaspa_addresses::Prefix; use 
kaspa_math::Uint256; +use serde::{Deserialize, Serialize}; use std::cmp::min; -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct ForkActivation(u64); impl ForkActivation { @@ -139,7 +140,7 @@ impl ForkedParam { } /// Fork params for the Crescendo hardfork -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct CrescendoParams { pub past_median_time_sampled_window_size: u64, pub sampled_difficulty_window_size: u64, @@ -194,6 +195,93 @@ pub const CRESCENDO: CrescendoParams = CrescendoParams { max_script_public_key_len: 10_000, }; +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct OverrideParams { + pub prior_ghostdag_k: Option, + + /// Timestamp deviation tolerance (in seconds) + pub timestamp_deviation_tolerance: Option, + + /// Target time per block (in milliseconds) + pub prior_target_time_per_block: Option, + + /// Size of full blocks window that is inspected to calculate the required difficulty of each block + pub prior_difficulty_window_size: Option, + + /// The minimum size a difficulty window (full or sampled) must have to trigger a DAA calculation + pub min_difficulty_window_size: Option, + + pub prior_max_block_parents: Option, + pub prior_mergeset_size_limit: Option, + pub prior_merge_depth: Option, + pub prior_finality_depth: Option, + pub prior_pruning_depth: Option, + + pub coinbase_payload_script_public_key_max_len: Option, + pub max_coinbase_payload_len: Option, + + pub prior_max_tx_inputs: Option, + pub prior_max_tx_outputs: Option, + pub prior_max_signature_script_len: Option, + pub prior_max_script_public_key_len: Option, + + pub mass_per_tx_byte: Option, + pub mass_per_script_pub_key_byte: Option, + pub mass_per_sig_op: Option, + pub max_block_mass: Option, + + /// The parameter for scaling inverse KAS value to mass units (KIP-0009) + pub storage_mass_parameter: Option, + + /// DAA score after which the pre-deflationary period switches to the deflationary period + pub deflationary_phase_daa_score: Option, + + pub pre_deflationary_phase_base_subsidy: Option, + pub prior_coinbase_maturity: Option, + pub skip_proof_of_work: Option, + pub max_block_level: Option, + pub pruning_proof_m: Option, + + pub crescendo: Option, + pub crescendo_activation: Option, +} + +impl From for OverrideParams { + fn from(p: Params) -> Self { + Self { + prior_ghostdag_k: Some(p.prior_ghostdag_k), + timestamp_deviation_tolerance: Some(p.timestamp_deviation_tolerance), + prior_target_time_per_block: Some(p.prior_target_time_per_block), + prior_difficulty_window_size: Some(p.prior_difficulty_window_size), + min_difficulty_window_size: Some(p.min_difficulty_window_size), + prior_max_block_parents: Some(p.prior_max_block_parents), + prior_mergeset_size_limit: Some(p.prior_mergeset_size_limit), + prior_merge_depth: Some(p.prior_merge_depth), + prior_finality_depth: Some(p.prior_finality_depth), + prior_pruning_depth: Some(p.prior_pruning_depth), + coinbase_payload_script_public_key_max_len: Some(p.coinbase_payload_script_public_key_max_len), + max_coinbase_payload_len: Some(p.max_coinbase_payload_len), + prior_max_tx_inputs: Some(p.prior_max_tx_inputs), + prior_max_tx_outputs: Some(p.prior_max_tx_outputs), + prior_max_signature_script_len: Some(p.prior_max_signature_script_len), + prior_max_script_public_key_len: Some(p.prior_max_script_public_key_len), + mass_per_tx_byte: Some(p.mass_per_tx_byte), + mass_per_script_pub_key_byte: Some(p.mass_per_script_pub_key_byte), + 
mass_per_sig_op: Some(p.mass_per_sig_op), + max_block_mass: Some(p.max_block_mass), + storage_mass_parameter: Some(p.storage_mass_parameter), + deflationary_phase_daa_score: Some(p.deflationary_phase_daa_score), + pre_deflationary_phase_base_subsidy: Some(p.pre_deflationary_phase_base_subsidy), + prior_coinbase_maturity: Some(p.prior_coinbase_maturity), + skip_proof_of_work: Some(p.skip_proof_of_work), + max_block_level: Some(p.max_block_level), + pruning_proof_m: Some(p.pruning_proof_m), + crescendo: Some(p.crescendo), + crescendo_activation: Some(p.crescendo_activation), + } + } +} + /// Consensus parameters. Contains settings and configurations which are consensus-sensitive. /// Changing one of these on a network node would exclude and prevent it from reaching consensus /// with the other unmodified nodes. @@ -439,6 +527,69 @@ impl Params { pub fn default_rpc_port(&self) -> u16 { self.net.default_rpc_port() } + + pub fn override_params(self, overrides: OverrideParams) -> Self { + Self { + dns_seeders: self.dns_seeders, + net: self.net, + genesis: self.genesis.clone(), + prior_ghostdag_k: overrides.prior_ghostdag_k.unwrap_or(self.prior_ghostdag_k), + + timestamp_deviation_tolerance: overrides.timestamp_deviation_tolerance.unwrap_or(self.timestamp_deviation_tolerance), + + prior_target_time_per_block: overrides.prior_target_time_per_block.unwrap_or(self.prior_target_time_per_block), + + max_difficulty_target: self.max_difficulty_target, + max_difficulty_target_f64: self.max_difficulty_target_f64, + + prior_difficulty_window_size: overrides.prior_difficulty_window_size.unwrap_or(self.prior_difficulty_window_size), + + min_difficulty_window_size: overrides.min_difficulty_window_size.unwrap_or(self.min_difficulty_window_size), + + prior_max_block_parents: overrides.prior_max_block_parents.unwrap_or(self.prior_max_block_parents), + + prior_mergeset_size_limit: overrides.prior_mergeset_size_limit.unwrap_or(self.prior_mergeset_size_limit), + + prior_merge_depth: overrides.prior_merge_depth.unwrap_or(self.prior_merge_depth), + prior_finality_depth: overrides.prior_finality_depth.unwrap_or(self.prior_finality_depth), + prior_pruning_depth: overrides.prior_pruning_depth.unwrap_or(self.prior_pruning_depth), + + coinbase_payload_script_public_key_max_len: overrides + .coinbase_payload_script_public_key_max_len + .unwrap_or(self.coinbase_payload_script_public_key_max_len), + + max_coinbase_payload_len: overrides.max_coinbase_payload_len.unwrap_or(self.max_coinbase_payload_len), + + prior_max_tx_inputs: overrides.prior_max_tx_inputs.unwrap_or(self.prior_max_tx_inputs), + prior_max_tx_outputs: overrides.prior_max_tx_outputs.unwrap_or(self.prior_max_tx_outputs), + prior_max_signature_script_len: overrides.prior_max_signature_script_len.unwrap_or(self.prior_max_signature_script_len), + prior_max_script_public_key_len: overrides.prior_max_script_public_key_len.unwrap_or(self.prior_max_script_public_key_len), + + mass_per_tx_byte: overrides.mass_per_tx_byte.unwrap_or(self.mass_per_tx_byte), + mass_per_script_pub_key_byte: overrides.mass_per_script_pub_key_byte.unwrap_or(self.mass_per_script_pub_key_byte), + mass_per_sig_op: overrides.mass_per_sig_op.unwrap_or(self.mass_per_sig_op), + max_block_mass: overrides.max_block_mass.unwrap_or(self.max_block_mass), + + storage_mass_parameter: overrides.storage_mass_parameter.unwrap_or(self.storage_mass_parameter), + + deflationary_phase_daa_score: overrides.deflationary_phase_daa_score.unwrap_or(self.deflationary_phase_daa_score), + + 
pre_deflationary_phase_base_subsidy: overrides + .pre_deflationary_phase_base_subsidy + .unwrap_or(self.pre_deflationary_phase_base_subsidy), + + prior_coinbase_maturity: overrides.prior_coinbase_maturity.unwrap_or(self.prior_coinbase_maturity), + + skip_proof_of_work: overrides.skip_proof_of_work.unwrap_or(self.skip_proof_of_work), + + max_block_level: overrides.max_block_level.unwrap_or(self.max_block_level), + + pruning_proof_m: overrides.pruning_proof_m.unwrap_or(self.pruning_proof_m), + + crescendo: overrides.crescendo.clone().unwrap_or(self.crescendo.clone()), + crescendo_activation: overrides.crescendo_activation.unwrap_or(self.crescendo_activation), + } + } } impl From for Params { diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index afe4bf11dc..6dba66ddb4 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -97,9 +97,6 @@ pub enum RuleError { #[error("coinbase blue score of {0} is not the expected value of {1}")] BadCoinbasePayloadBlueScore(u64, u64), - #[error("coinbase mass commitment field is not zero")] - CoinbaseNonZeroMassCommitment, - #[error("transaction in isolation validation failed for tx {0}: {1}")] TxInIsolationValidationFailed(TransactionId, TxRuleError), diff --git a/consensus/core/src/errors/consensus.rs b/consensus/core/src/errors/consensus.rs index 58c5ed35e9..1a2c8a3c76 100644 --- a/consensus/core/src/errors/consensus.rs +++ b/consensus/core/src/errors/consensus.rs @@ -1,6 +1,8 @@ use kaspa_hashes::Hash; use thiserror::Error; +use crate::{tx::TransactionIndexType, utxo::utxo_inquirer::UtxoInquirerError}; + use super::{difficulty::DifficultyError, sync::SyncManagerError, traversal::TraversalError}; #[derive(Error, Debug, Clone)] @@ -11,6 +13,12 @@ pub enum ConsensusError { #[error("cannot find header {0}")] HeaderNotFound(Hash), + #[error("trying to query {0} txs in block {1}, but the block only holds {2} txs")] + TransactionQueryTooLarge(usize, Hash, usize), + + #[error("index {0} out of max {1} in block {2} is out of bounds")] + TransactionIndexOutOfBounds(TransactionIndexType, usize, Hash), + #[error("block {0} is invalid")] InvalidBlock(Hash), @@ -35,6 +43,9 @@ pub enum ConsensusError { #[error("{0}")] General(&'static str), + #[error("utxo inquirer error: {0}")] + UtxoInquirerError(#[from] UtxoInquirerError), + #[error("{0}")] GeneralOwned(String), } diff --git a/consensus/core/src/errors/header.rs b/consensus/core/src/errors/header.rs new file mode 100644 index 0000000000..f3b458a23b --- /dev/null +++ b/consensus/core/src/errors/header.rs @@ -0,0 +1,9 @@ +use thiserror::Error; + +#[derive(Error, Debug, Clone)] +pub enum CompressedParentsError { + #[error("Parents by level exceeds maximum levels of 255")] + LevelsExceeded, +} + +pub type CompressedParentsResult = std::result::Result; diff --git a/consensus/core/src/errors/mod.rs b/consensus/core/src/errors/mod.rs index c65ea57c51..8347a76ee0 100644 --- a/consensus/core/src/errors/mod.rs +++ b/consensus/core/src/errors/mod.rs @@ -3,6 +3,7 @@ pub mod coinbase; pub mod config; pub mod consensus; pub mod difficulty; +pub mod header; pub mod pruning; pub mod sync; pub mod traversal; diff --git a/consensus/core/src/errors/pruning.rs b/consensus/core/src/errors/pruning.rs index 5c69eb0142..173ef8a0f6 100644 --- a/consensus/core/src/errors/pruning.rs +++ b/consensus/core/src/errors/pruning.rs @@ -39,8 +39,8 @@ pub enum PruningImportError { #[error("block {0} already appeared in the proof headers for level {1}")] 
PruningProofDuplicateHeaderAtLevel(Hash, BlockLevel), - #[error("got header-only trusted block {0} which is not in pruning point past according to available reachability")] - PruningPointPastMissingReachability(Hash), + #[error("trusted block {0} is in the anticone of the pruning point but does not have block body")] + PruningPointAnticoneMissingBody(Hash), #[error("new pruning point has an invalid transaction {0}: {1}")] NewPruningPointTxError(Hash, TxRuleError), @@ -80,6 +80,9 @@ pub enum PruningImportError { #[error("a past pruning point has not been pointed at")] UnpointedPruningPoint, + + #[error("got trusted block {0} in the future of the pruning point {1}")] + TrustedBlockInPruningPointFuture(Hash, Hash), } pub type PruningImportResult = std::result::Result; diff --git a/consensus/core/src/errors/tx.rs b/consensus/core/src/errors/tx.rs index f21409857f..5cefa7921d 100644 --- a/consensus/core/src/errors/tx.rs +++ b/consensus/core/src/errors/tx.rs @@ -15,9 +15,6 @@ pub enum TxRuleError { #[error("transaction has non zero gas value")] TxHasGas, - #[error("a non coinbase transaction has a payload")] - NonCoinbaseTxHasPayload, - #[error("transaction version {0} is unknown")] UnknownTxVersion(u16), @@ -45,6 +42,9 @@ pub enum TxRuleError { #[error("script public key of coinbase output #{0} is too long")] CoinbaseScriptPublicKeyTooLong(usize), + #[error("coinbase mass commitment field is not zero")] + CoinbaseNonZeroMassCommitment, + #[error( "transaction input #{0} tried to spend coinbase outpoint {1} with daa score of {2} while the merging block daa score is {3} and the coinbase maturity period of {4} hasn't passed yet" diff --git a/consensus/core/src/hashing/header.rs b/consensus/core/src/hashing/header.rs index 3ad90fa760..e531f0cd8e 100644 --- a/consensus/core/src/hashing/header.rs +++ b/consensus/core/src/hashing/header.rs @@ -6,10 +6,10 @@ use kaspa_hashes::{Hash, HasherBase}; #[inline] pub fn hash_override_nonce_time(header: &Header, nonce: u64, timestamp: u64) -> Hash { let mut hasher = kaspa_hashes::BlockHash::new(); - hasher.update(header.version.to_le_bytes()).write_len(header.parents_by_level.len()); // Write the number of parent levels + hasher.update(header.version.to_le_bytes()).write_len(header.parents_by_level.expanded_len()); // Write the number of parent levels // Write parents at each level - header.parents_by_level.iter().for_each(|level| { + header.parents_by_level.expanded_iter().for_each(|level| { hasher.write_var_array(level); }); @@ -43,7 +43,7 @@ mod tests { fn test_header_hashing() { let header = Header::new_finalized( 1, - vec![vec![1.into()]], + vec![vec![1.into()]].try_into().unwrap(), Default::default(), Default::default(), Default::default(), diff --git a/consensus/core/src/hashing/tx.rs b/consensus/core/src/hashing/tx.rs index f9cac0311a..b5d3b966fb 100644 --- a/consensus/core/src/hashing/tx.rs +++ b/consensus/core/src/hashing/tx.rs @@ -2,33 +2,47 @@ use super::HasherExtensions; use crate::tx::{Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput}; use kaspa_hashes::{Hash, Hasher}; -/// A bitmask defining which transaction fields we -/// want to encode and which to ignore. -type TxEncodingFlags = u8; - -pub const TX_ENCODING_FULL: TxEncodingFlags = 0; -pub const TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT: TxEncodingFlags = 1; +bitflags::bitflags! { + /// A bitmask defining which transaction fields we want to encode and which to ignore. 
+ #[derive(Debug, Clone, Copy, PartialEq, Eq)] + pub struct TxEncodingFlags: u8 { + const FULL = 0; + const EXCLUDE_SIGNATURE_SCRIPT = 1 << 0; + const EXCLUDE_MASS_COMMIT = 1 << 1; + } +} /// Returns the transaction hash. Note that this is different than the transaction ID. -pub fn hash(tx: &Transaction, include_mass_field: bool) -> Hash { +pub fn hash(tx: &Transaction) -> Hash { let mut hasher = kaspa_hashes::TransactionHash::new(); - write_transaction(&mut hasher, tx, TX_ENCODING_FULL, include_mass_field); + write_transaction(&mut hasher, tx, TxEncodingFlags::FULL); + hasher.finalize() +} + +/// Returns the transaction hash pre-crescendo (which excludes the mass commitment) +pub fn hash_pre_crescendo(tx: &Transaction) -> Hash { + let mut hasher = kaspa_hashes::TransactionHash::new(); + write_transaction(&mut hasher, tx, TxEncodingFlags::EXCLUDE_MASS_COMMIT); hasher.finalize() } /// Not intended for direct use by clients. Instead use `tx.id()` pub(crate) fn id(tx: &Transaction) -> TransactionId { // Encode the transaction, replace signature script with an empty array, skip - // sigop counts and mass and hash the result. + // sigop counts and mass commitment and hash the result. - let encoding_flags = if tx.is_coinbase() { TX_ENCODING_FULL } else { TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT }; + let encoding_flags = if tx.is_coinbase() { + TxEncodingFlags::FULL + } else { + TxEncodingFlags::EXCLUDE_SIGNATURE_SCRIPT | TxEncodingFlags::EXCLUDE_MASS_COMMIT + }; let mut hasher = kaspa_hashes::TransactionID::new(); - write_transaction(&mut hasher, tx, encoding_flags, false); + write_transaction(&mut hasher, tx, encoding_flags); hasher.finalize() } /// Write the transaction into the provided hasher according to the encoding flags -fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags: TxEncodingFlags, include_mass_field: bool) { +fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags: TxEncodingFlags) { hasher.update(tx.version.to_le_bytes()).write_len(tx.inputs.len()); for input in tx.inputs.iter() { // Write the tx input @@ -47,7 +61,7 @@ fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags Design principles (mostly related to the new mass commitment field; see KIP-0009): 1. The new mass field should not modify tx::id (since it is essentially a commitment by the miner re block space usage so there is no need to modify the id definition which will require wide-spread changes in ecosystem software). - 2. Coinbase tx hash and id should ideally remain equal + 2. Coinbase tx hash should ideally remain unchanged Solution: 1. Hash the mass field only for tx::hash @@ -57,13 +71,10 @@ fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags This way we have: - Unique commitment for tx::hash per any possible mass value (with only zero being a no-op) - tx::id remains unmodified - - Coinbase tx hash and id remain the same and equal + - Coinbase tx hash remains unchanged */ - // TODO (post HF): - // 1. Avoid passing a boolean - // 2. 
Use TxEncodingFlags to avoid including the mass for tx ID - if include_mass_field { + if !encoding_flags.contains(TxEncodingFlags::EXCLUDE_MASS_COMMIT) { let mass = tx.mass(); if mass > 0 { hasher.update(mass.to_le_bytes()); @@ -74,7 +85,7 @@ fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags #[inline(always)] fn write_input(hasher: &mut T, input: &TransactionInput, encoding_flags: TxEncodingFlags) { write_outpoint(hasher, &input.previous_outpoint); - if encoding_flags & TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT != TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT { + if !encoding_flags.contains(TxEncodingFlags::EXCLUDE_SIGNATURE_SCRIPT) { hasher.write_var_bytes(input.signature_script.as_slice()).update([input.sig_op_count]); } else { hasher.write_var_bytes(&[]); @@ -183,12 +194,7 @@ mod tests { for (i, test) in tests.iter().enumerate() { assert_eq!(test.tx.id(), Hash::from_str(test.expected_id).unwrap(), "transaction id failed for test {}", i + 1); - assert_eq!( - hash(&test.tx, false), - Hash::from_str(test.expected_hash).unwrap(), - "transaction hash failed for test {}", - i + 1 - ); + assert_eq!(hash(&test.tx), Hash::from_str(test.expected_hash).unwrap(), "transaction hash failed for test {}", i + 1); } // Avoid compiler warnings on the last clone diff --git a/consensus/core/src/header.rs b/consensus/core/src/header.rs index e53de44255..17443ab6e7 100644 --- a/consensus/core/src/header.rs +++ b/consensus/core/src/header.rs @@ -1,8 +1,107 @@ use crate::{hashing, BlueWorkType}; use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_hashes::Hash; -use kaspa_utils::mem_size::MemSizeEstimator; +use kaspa_utils::{ + iter::{IterExtensions, IterExtensionsRle}, + mem_size::MemSizeEstimator, +}; use serde::{Deserialize, Serialize}; +use std::mem::size_of; + +/// An efficient run-length encoding for the parent-by-level vector in the block header. +/// The i-th run `(cum_count, parents)` indicates that for all levels in the range `prev_cum_count..cum_count`, +/// the parents are `parents`. +/// +/// Example: `[(3, [A]), (5, [B])]` means levels 0-2 have parents `[A]`, +/// and levels 3-4 have parents `[B]`. +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +pub struct CompressedParents(Vec<(u8, Vec)>); + +impl CompressedParents { + pub fn expanded_len(&self) -> usize { + self.0.last().map(|(cum, _)| *cum as usize).unwrap_or(0) + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn get(&self, index: usize) -> Option<&[Hash]> { + if index >= self.expanded_len() { + return None; + } + if index == 0 { + // Fast path for the common case of getting the first level (direct parents) + return Some(&self.0[0].1); + } + // `partition_point` returns the index of the first element for which the predicate is false. + // The predicate `cum - 1 < index` checks if a run is before the desired `index`. + // The first run for which this is false is the one that contains our index. + let i = self.0.partition_point(|(cum, _)| (*cum as usize) - 1 < index); + Some(&self.0[i].1) + } + + pub fn expanded_iter(&self) -> impl Iterator { + self.0.iter().map(|(cum, v)| (*cum as usize, v.as_slice())).expand_rle() + } + + /// Adds a new level of parents. 
This extends the last run if parents_at_level + /// is identical to the last level, otherwise it starts a new run + pub fn push(&mut self, parents_at_level: Vec) { + match self.0.last_mut() { + Some((count, last_parents)) if *last_parents == parents_at_level => { + *count = count.checked_add(1).expect("exceeded max levels of 255"); + } + Some((count, _)) => { + let next_cum = count.checked_add(1).expect("exceeded max levels of 255"); + self.0.push((next_cum, parents_at_level)); + } + None => { + self.0.push((1, parents_at_level)); + } + } + } + + /// Sets the direct parents (level 0) to the given value, preserving all other levels. + /// + /// NOTE: inefficient implementation, should be used for testing purposes only. + pub fn set_direct_parents(&mut self, direct_parents: Vec) { + if self.0.is_empty() { + self.0.push((1, direct_parents)); + return; + } + let mut parents: Vec> = std::mem::take(self).into(); + parents[0] = direct_parents; + *self = parents.try_into().unwrap(); + } +} + +use crate::errors::header::CompressedParentsError; + +impl TryFrom>> for CompressedParents { + type Error = CompressedParentsError; + + fn try_from(parents: Vec>) -> Result { + if parents.len() > u8::MAX as usize { + return Err(CompressedParentsError::LevelsExceeded); + } + + // Casting count from usize to u8 is safe because of the check above + Ok(Self(parents.into_iter().rle_cumulative().map(|(count, level)| (count as u8, level)).collect())) + } +} + +impl From for Vec> { + fn from(value: CompressedParents) -> Self { + value.0.into_iter().map(|(cum, v)| (cum as usize, v)).expand_rle().collect() + } +} + +impl From<&CompressedParents> for Vec> { + fn from(value: &CompressedParents) -> Self { + value.expanded_iter().map(|x| x.to_vec()).collect() + } +} /// @category Consensus #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] @@ -11,7 +110,7 @@ pub struct Header { /// Cached hash pub hash: Hash, pub version: u16, - pub parents_by_level: Vec>, + pub parents_by_level: CompressedParents, pub hash_merkle_root: Hash, pub accepted_id_merkle_root: Hash, pub utxo_commitment: Hash, @@ -29,7 +128,7 @@ impl Header { #[allow(clippy::too_many_arguments)] pub fn new_finalized( version: u16, - parents_by_level: Vec>, + parents_by_level: CompressedParents, hash_merkle_root: Hash, accepted_id_merkle_root: Hash, utxo_commitment: Hash, @@ -66,10 +165,9 @@ impl Header { } pub fn direct_parents(&self) -> &[Hash] { - if self.parents_by_level.is_empty() { - &[] - } else { - &self.parents_by_level[0] + match self.parents_by_level.get(0) { + Some(parents) => parents, + None => &[], } } @@ -78,7 +176,7 @@ impl Header { Header { version: crate::constants::BLOCK_VERSION, hash, - parents_by_level: vec![parents], + parents_by_level: vec![parents].try_into().unwrap(), hash_merkle_root: Default::default(), accepted_id_merkle_root: Default::default(), utxo_commitment: Default::default(), @@ -101,7 +199,9 @@ impl AsRef
for Header { impl MemSizeEstimator for Header { fn estimate_mem_bytes(&self) -> usize { - size_of::() + self.parents_by_level.iter().map(|l| l.len()).sum::() * size_of::() + size_of::() + + self.parents_by_level.0.iter().map(|(_, l)| l.len()).sum::() * size_of::() + + self.parents_by_level.0.len() * size_of::<(u8, Vec)>() } } @@ -111,11 +211,29 @@ mod tests { use kaspa_math::Uint192; use serde_json::Value; + fn hash(val: u8) -> Hash { + Hash::from(val as u64) + } + + fn vec_from(slice: &[u8]) -> Vec { + slice.iter().map(|&v| hash(v)).collect() + } + + fn serialize_parents(parents: &[Vec]) -> Vec { + let compressed: CompressedParents = (parents.to_vec()).try_into().unwrap(); + bincode::serialize(&compressed).unwrap() + } + + fn deserialize_parents(bytes: &[u8]) -> bincode::Result>> { + let parents: CompressedParents = bincode::deserialize(bytes)?; + Ok(parents.into()) + } + #[test] fn test_header_ser() { let header = Header::new_finalized( 1, - vec![vec![1.into()]], + vec![vec![1.into()]].try_into().unwrap(), Default::default(), Default::default(), Default::default(), @@ -141,4 +259,111 @@ mod tests { let h = serde_json::from_str::
(&json).unwrap(); assert!(h.blue_score == header.blue_score && h.blue_work == header.blue_work); } + + #[test] + fn parents_vrle_round_trip_multiple_runs() { + let parents = vec![ + vec_from(&[1, 2, 3]), + vec_from(&[1, 2, 3]), + vec_from(&[1, 2, 3]), + vec_from(&[4, 5]), + vec_from(&[4, 5]), + vec_from(&[6]), + ]; + + let bytes = serialize_parents(&parents); + let decoded = deserialize_parents(&bytes).unwrap(); + assert_eq!(decoded, parents); + } + + #[test] + fn parents_vrle_round_trip_single_run() { + let repeated = vec_from(&[9, 8, 7]); + let parents = vec![repeated.clone(), repeated.clone(), repeated.clone()]; + + let bytes = serialize_parents(&parents); + let decoded = deserialize_parents(&bytes).unwrap(); + assert_eq!(decoded, parents); + } + + #[test] + fn parents_vrle_round_trip_empty() { + let bytes = serialize_parents(&[]); + let decoded = deserialize_parents(&bytes).unwrap(); + assert!(decoded.is_empty()); + } + + #[test] + fn compressed_parents_len_and_get() { + // Test with multiple runs of different lengths + let first = vec_from(&[1]); + let second = vec_from(&[2, 3]); + let third = vec_from(&[4]); + let parents = vec![first.clone(), first.clone(), second.clone(), second.clone(), third.clone()]; + let compressed = CompressedParents::try_from(parents.clone()).unwrap(); + + assert_eq!(compressed.expanded_len(), parents.len()); + assert!(!compressed.is_empty()); + + // Test `get` at various positions + assert_eq!(compressed.get(0), Some(first.as_slice()), "get first element"); + assert_eq!(compressed.get(1), Some(first.as_slice()), "get element in the middle of a run"); + assert_eq!(compressed.get(2), Some(second.as_slice()), "get first element of a new run"); + assert_eq!(compressed.get(3), Some(second.as_slice()), "get element in the middle of a new run"); + assert_eq!(compressed.get(4), Some(third.as_slice()), "get last element"); + assert_eq!(compressed.get(5), None, "get out of bounds (just over)"); + assert_eq!(compressed.get(10), None, "get out of bounds (far over)"); + + let collected: Vec<&[Hash]> = compressed.expanded_iter().collect(); + let expected: Vec<&[Hash]> = parents.iter().map(|v| v.as_slice()).collect(); + assert_eq!(collected, expected); + + // Test with an empty vec + let parents_empty: Vec> = vec![]; + let compressed_empty: CompressedParents = parents_empty.try_into().unwrap(); + assert_eq!(compressed_empty.expanded_len(), 0); + assert!(compressed_empty.is_empty()); + assert_eq!(compressed_empty.get(0), None); + + // Test with a single run + let parents_single_run = vec![first.clone(), first.clone(), first.clone()]; + let compressed_single_run: CompressedParents = parents_single_run.try_into().unwrap(); + assert_eq!(compressed_single_run.expanded_len(), 3); + assert_eq!(compressed_single_run.get(0), Some(first.as_slice())); + assert_eq!(compressed_single_run.get(1), Some(first.as_slice())); + assert_eq!(compressed_single_run.get(2), Some(first.as_slice())); + assert_eq!(compressed_single_run.get(3), None); + } + + #[test] + fn test_compressed_parents_push() { + let mut compressed = CompressedParents(Vec::new()); + let level1 = vec_from(&[1, 2]); + let level2 = vec_from(&[3, 4]); + + // 1. Push to empty + compressed.push(level1.clone()); + assert_eq!(compressed.expanded_len(), 1); + assert_eq!(compressed.0, vec![(1, level1.clone())]); + + // 2. Push same (extend run) + compressed.push(level1.clone()); + assert_eq!(compressed.expanded_len(), 2); + assert_eq!(compressed.0, vec![(2, level1.clone())]); + + // 3. 
Push different (new run) + compressed.push(level2.clone()); + assert_eq!(compressed.expanded_len(), 3); + assert_eq!(compressed.0, vec![(2, level1), (3, level2)]); + } + + #[test] + fn compressed_parents_binary_format_matches_runs() { + let parents = vec![vec_from(&[1, 2, 3]), vec_from(&[1, 2, 3]), vec_from(&[4])]; + let compressed: CompressedParents = parents.try_into().unwrap(); + + let encoded = bincode::serialize(&compressed).unwrap(); + let expected = bincode::serialize(&compressed.0).unwrap(); + assert_eq!(encoded, expected); + } } diff --git a/consensus/core/src/mass/mod.rs b/consensus/core/src/mass/mod.rs index 90099487c2..e349eb47b6 100644 --- a/consensus/core/src/mass/mod.rs +++ b/consensus/core/src/mass/mod.rs @@ -433,7 +433,7 @@ mod tests { */ for net in NetworkType::iter() { let params: Params = net.into(); - let max_spk_len = (params.max_script_public_key_len().upper_bound() as u64) + let max_spk_len = (params.max_script_public_key_len().after() as u64) .min(params.max_block_mass.div_ceil(params.mass_per_script_pub_key_byte)); let max_plurality = (UTXO_CONST_STORAGE + max_spk_len).div_ceil(UTXO_UNIT_SIZE); // see utxo_plurality let product = params.storage_mass_parameter.checked_mul(max_plurality).and_then(|x| x.checked_mul(max_plurality)); diff --git a/consensus/core/src/merkle.rs b/consensus/core/src/merkle.rs index 59c6ca7c4c..46b2ce6791 100644 --- a/consensus/core/src/merkle.rs +++ b/consensus/core/src/merkle.rs @@ -2,13 +2,17 @@ use crate::{hashing, tx::Transaction}; use kaspa_hashes::Hash; use kaspa_merkle::calc_merkle_root; -pub fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator, include_mass_field: bool) -> Hash { - calc_merkle_root(txs.map(|tx| hashing::tx::hash(tx, include_mass_field))) +pub fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator) -> Hash { + calc_merkle_root(txs.map(hashing::tx::hash)) +} + +pub fn calc_hash_merkle_root_pre_crescendo<'a>(txs: impl ExactSizeIterator) -> Hash { + calc_merkle_root(txs.map(hashing::tx::hash_pre_crescendo)) } #[cfg(test)] mod tests { - use crate::merkle::calc_hash_merkle_root; + use crate::merkle::{calc_hash_merkle_root, calc_hash_merkle_root_pre_crescendo}; use crate::{ subnets::{SUBNETWORK_ID_COINBASE, SUBNETWORK_ID_NATIVE}, tx::{scriptvec, ScriptPublicKey, Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput}, @@ -17,7 +21,7 @@ mod tests { #[test] fn merkle_root_test() { - let txs = vec![ + let txs = [ Transaction::new( 0, vec![], @@ -238,7 +242,27 @@ mod tests { ), ]; assert_eq!( - calc_hash_merkle_root(txs.iter(), false), + calc_hash_merkle_root(txs.iter()), + Hash::from_slice(&[ + 0x46, 0xec, 0xf4, 0x5b, 0xe3, 0xba, 0xca, 0x34, 0x9d, 0xfe, 0x8a, 0x78, 0xde, 0xaf, 0x05, 0x3b, 0x0a, 0xa6, 0xd5, + 0x38, 0x97, 0x4d, 0xa5, 0x0f, 0xd6, 0xef, 0xb4, 0xd2, 0x66, 0xbc, 0x8d, 0x21, + ]) + ); + + // Test a tx with storage mass commitment > 0 + txs[0].set_mass(7); + + assert_eq!( + calc_hash_merkle_root(txs.iter()), + Hash::from_slice(&[ + 0x75, 0x4a, 0x1, 0x59, 0xdc, 0x4b, 0x3d, 0xaa, 0x16, 0x95, 0x28, 0x4d, 0x96, 0xc8, 0x2a, 0xba, 0x27, 0x2a, 0x11, 0x43, + 0xe4, 0x2e, 0x60, 0x4, 0xaf, 0x2b, 0xaa, 0x1e, 0x3c, 0xed, 0x23, 0x7, + ]) + ); + + // Make sure that pre-crescendo hash is unaffected by the mass set + assert_eq!( + calc_hash_merkle_root_pre_crescendo(txs.iter()), Hash::from_slice(&[ 0x46, 0xec, 0xf4, 0x5b, 0xe3, 0xba, 0xca, 0x34, 0x9d, 0xfe, 0x8a, 0x78, 0xde, 0xaf, 0x05, 0x3b, 0x0a, 0xa6, 0xd5, 0x38, 0x97, 0x4d, 0xa5, 0x0f, 0xd6, 0xef, 0xb4, 0xd2, 0x66, 0xbc, 0x8d, 0x21, diff --git 
a/consensus/core/src/trusted.rs b/consensus/core/src/trusted.rs index 0e5bd97bfc..8ff0994900 100644 --- a/consensus/core/src/trusted.rs +++ b/consensus/core/src/trusted.rs @@ -17,6 +17,7 @@ pub struct ExternalGhostdagData { /// Represents an externally provided block with associated Ghostdag data which /// is only partially validated by the consensus layer. Note there is no actual trust /// but rather these blocks are indirectly validated through the PoW mined over them +#[derive(Clone)] pub struct TrustedBlock { pub block: Block, pub ghostdag: ExternalGhostdagData, diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index b542738f07..64f8286fe9 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -19,6 +19,7 @@ use serde::{Deserialize, Serialize}; use std::collections::HashSet; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering::SeqCst; +use std::sync::Arc; use std::{ fmt::Display, ops::Range, @@ -94,10 +95,6 @@ pub struct TransactionInput { #[serde(with = "serde_bytes")] pub signature_script: Vec, // TODO: Consider using SmallVec pub sequence: u64, - - // TODO: Since this field is used for calculating mass context free, and we already commit - // to the mass in a dedicated field (on the tx level), it follows that this field is no longer - // needed, and can be removed if we ever implement a v2 transaction pub sig_op_count: u8, } @@ -233,8 +230,8 @@ impl Transaction { self.id } - /// Set the storage mass commitment field of this transaction. This field is expected to be activated on mainnet - /// as part of the Crescendo hardfork. The field has no effect on tx ID so no need to finalize following this call. + /// Set the storage mass commitment field of this transaction. This field has been activated on mainnet as part + /// of the Crescendo hardfork. The field has no effect on tx ID so no need to finalize following this call. pub fn set_mass(&self, mass: u64) { self.mass.0.store(mass, SeqCst) } @@ -542,6 +539,18 @@ impl MutableTransaction { /// and can also be modified internally and signed etc. pub type SignableTransaction = MutableTransaction; +#[derive(Debug, Clone)] +pub enum TransactionType { + Transaction, + SignableTransaction, +} + +#[derive(Debug, Clone)] +pub enum TransactionQueryResult { + Transaction(Arc>), + SignableTransaction(Arc>), +} + #[cfg(test)] mod tests { use super::*; @@ -688,7 +697,7 @@ mod tests { let vec = (0..SCRIPT_VECTOR_SIZE as u8).collect::>(); let spk = ScriptPublicKey::from_vec(0xc0de, vec.clone()); let hex: String = serde_json::to_string(&spk).unwrap(); - assert_eq!("\"c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223\"", hex); + assert_eq!("\"c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122\"", hex); let spk = serde_json::from_str::(&hex).unwrap(); assert_eq!(spk.version, 0xc0de); assert_eq!(spk.script.as_slice(), vec.as_slice()); diff --git a/consensus/core/src/tx/script_public_key.rs b/consensus/core/src/tx/script_public_key.rs index 180cde4f20..92a2151ef0 100644 --- a/consensus/core/src/tx/script_public_key.rs +++ b/consensus/core/src/tx/script_public_key.rs @@ -19,7 +19,7 @@ use wasm_bindgen::prelude::*; use workflow_wasm::prelude::*; /// Size of the underlying script vector of a script. -pub const SCRIPT_VECTOR_SIZE: usize = 36; +pub const SCRIPT_VECTOR_SIZE: usize = 35; /// Used as the underlying type for script public key data, optimized for the common p2pk script size (34). 
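The `set_mass` note above can be made concrete: the mass commitment changes `tx::hash` but never `tx::id`, and the pre-crescendo hash ignores it entirely. A minimal sketch using the constructors and hash functions shown in this diff (an empty native transaction is enough to show the effect):

```rust
use kaspa_consensus_core::{hashing, subnets::SUBNETWORK_ID_NATIVE, tx::Transaction};

fn main() {
    // An empty, non-coinbase (native subnetwork) transaction
    let tx = Transaction::new(0, vec![], vec![], 0, SUBNETWORK_ID_NATIVE, 0, vec![]);
    let (id, hash) = (tx.id(), hashing::tx::hash(&tx));

    tx.set_mass(7); // interior mutability: no re-finalize needed, per the doc comment above

    assert_eq!(tx.id(), id); // the id never commits to the mass field
    assert_ne!(hashing::tx::hash(&tx), hash); // the hash commits to any non-zero mass
    assert_eq!(hashing::tx::hash_pre_crescendo(&tx), hash); // zero-mass hash equals the pre-crescendo hash
}
```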
pub type ScriptVec = SmallVec<[u8; SCRIPT_VECTOR_SIZE]>; @@ -415,7 +415,7 @@ mod tests { let vec = (0..SCRIPT_VECTOR_SIZE as u8).collect::>(); let spk = ScriptPublicKey::from_vec(0xc0de, vec.clone()); // 0xc0de == 49374, let hex: String = serde_json::to_string(&spk).unwrap(); - assert_eq!("\"c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223\"", hex); + assert_eq!("\"c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122\"", hex); let spk = serde_json::from_str::(&hex).unwrap(); assert_eq!(spk.version, 0xc0de); assert_eq!(spk.script.as_slice(), vec.as_slice()); @@ -453,7 +453,7 @@ mod tests { let version = 0xc0de; let vec: Vec = (0..SCRIPT_VECTOR_SIZE as u8).collect(); let spk = ScriptPublicKey::from_vec(version, vec.clone()); - let str = "c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223"; + let str = "c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122"; let js = to_value(&spk).unwrap(); assert_eq!(js.as_string().unwrap(), str); let script_hex = spk.script_as_hex(); diff --git a/consensus/core/src/utxo/utxo_inquirer.rs b/consensus/core/src/utxo/utxo_inquirer.rs index 3aa1000295..07dea8fbab 100644 --- a/consensus/core/src/utxo/utxo_inquirer.rs +++ b/consensus/core/src/utxo/utxo_inquirer.rs @@ -1,16 +1,18 @@ use kaspa_hashes::Hash; use thiserror::Error; +use crate::tx::{TransactionId, TransactionOutpoint}; + #[derive(Error, Debug, Clone)] pub enum UtxoInquirerError { - #[error("Transaction is already pruned")] - AlreadyPruned, #[error("Transaction return address is coinbase")] TxFromCoinbase, #[error("Transaction not found at given accepting daa score")] NoTxAtScore, #[error("Transaction was found but not standard")] NonStandard, + #[error("No transaction specified")] + TransactionNotFound, #[error("Did not find compact header for block hash {0} ")] MissingCompactHeaderForBlockHash(Hash), #[error("Did not find containing_acceptance for tx {0} ")] @@ -19,8 +21,6 @@ pub enum UtxoInquirerError { MissingBlockFromBlockTxStore(Hash), #[error("Did not find index {0} in transactions of block {1}")] MissingTransactionIndexOfBlock(usize, Hash), - #[error("Expected {0} to match {1} when checking block_transaction_store using array index of transaction")] - UnexpectedTransactionMismatch(Hash, Hash), #[error("Did not find a utxo diff for chain block {0} ")] MissingUtxoDiffForChainBlock(Hash), #[error("Transaction {0} acceptance data must also be in the same block in this case")] @@ -33,6 +33,22 @@ pub enum UtxoInquirerError { MissingHashAtIndex(u64), #[error("Did not find acceptance data for chain block {0}")] MissingAcceptanceDataForChainBlock(Hash), + #[error("Did not find utxo entry for outpoint {0}")] + MissingUtxoEntryForOutpoint(TransactionOutpoint), + #[error("Did not find queried transactions in acceptance data: {0:?}")] + MissingQueriedTransactions(Vec), #[error("Utxo entry is not filled")] UnfilledUtxoEntry, + #[error(transparent)] + UtxoInquirerFindTxsFromAcceptanceDataError(#[from] UtxoInquirerFindTxsFromAcceptanceDataError), } + +#[derive(Error, Debug, Clone)] +pub enum UtxoInquirerFindTxsFromAcceptanceDataError { + #[error("Tx ids filter is not allowed to be empty when not None.")] + TxIdsFilterIsEmptyError, + #[error("More than one tx id filter element is not supported yet.")] + TxIdsFilterNeedsLessOrEqualThanOneElementError, +} + +pub type UtxoInquirerResult = std::result::Result; diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index e387329c85..9d163f93ab 
100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -3,7 +3,7 @@ use super::utxo_set_override::{set_genesis_utxo_commitment_from_config, set_init use super::{ctl::Ctl, Consensus}; use crate::{model::stores::U64Key, pipeline::ProcessingCounters}; use itertools::Itertools; -use kaspa_consensus_core::{config::Config, mining_rules::MiningRules}; +use kaspa_consensus_core::{api::ConsensusApi, config::Config, mining_rules::MiningRules}; use kaspa_consensus_notify::root::ConsensusNotificationRoot; use kaspa_consensusmanager::{ConsensusFactory, ConsensusInstance, DynConsensusCtl, SessionLock}; use kaspa_core::{debug, time::unix_now, warn}; @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 4; +pub const LATEST_DB_VERSION: u32 = 5; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { @@ -366,6 +366,10 @@ impl ConsensusFactory for Factory { self.mining_rules.clone(), )); + // The default for the body_missing_anticone_set is an empty vector, which corresponds precisely to the state before a consensus commit + // But The default value for the pruning_utxoset_stable_flag is true, but a staging consensus does not have a utxo and hence the flag is dropped explicitly + consensus.set_pruning_utxoset_stable_flag(false); + (ConsensusInstance::new(session_lock, consensus.clone()), Arc::new(Ctl::new(self.management_store.clone(), db, consensus))) } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 0819da4062..b10e5426c7 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -21,12 +21,12 @@ use crate::{ headers_selected_tip::HeadersSelectedTipStoreReader, past_pruning_points::PastPruningPointsStoreReader, pruning::PruningStoreReader, - pruning_samples::{PruningSamplesStore, PruningSamplesStoreReader}, - reachability::ReachabilityStoreReader, relations::RelationsStoreReader, + selected_chain::SelectedChainStore, statuses::StatusesStoreReader, - tips::TipsStoreReader, + tips::{TipsStore, TipsStoreReader}, utxo_set::{UtxoSetStore, UtxoSetStoreReader}, + virtual_state::VirtualState, DB, }, }, @@ -44,7 +44,7 @@ use crate::{ }, }; use kaspa_consensus_core::{ - acceptance_data::AcceptanceData, + acceptance_data::{AcceptanceData, MergesetBlockAcceptanceData}, api::{ args::{TransactionValidationArgs, TransactionValidationBatchArgs}, stats::BlockCount, @@ -70,8 +70,10 @@ use kaspa_consensus_core::{ network::NetworkType, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, - tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, - utxo::utxo_inquirer::UtxoInquirerError, + tx::{ + MutableTransaction, Transaction, TransactionId, TransactionIndexType, TransactionOutpoint, TransactionQueryResult, + TransactionType, UtxoEntry, + }, BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher, }; use kaspa_consensus_notify::root::ConsensusNotificationRoot; @@ -82,22 +84,26 @@ use crossbeam_channel::{ use itertools::Itertools; use kaspa_consensusmanager::{SessionLock, SessionReadGuard}; -use kaspa_database::prelude::{StoreResultEmptyTuple, StoreResultExtensions}; +use kaspa_core::info; +use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_muhash::MuHash; use kaspa_txscript::caches::TxScriptCacheCounters; +use kaspa_utils::arc::ArcExtensions; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; 
+use rocksdb::WriteBatch; use std::{ + cmp, cmp::Reverse, - collections::{BinaryHeap, VecDeque}, + collections::{BinaryHeap, HashSet, VecDeque}, future::Future, iter::once, ops::Deref, - sync::{atomic::Ordering, Arc}, -}; -use std::{ - sync::atomic::AtomicBool, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, thread::{self, JoinHandle}, }; use tokio::sync::oneshot; @@ -106,8 +112,6 @@ use self::{services::ConsensusServices, storage::ConsensusStorage}; use crate::model::stores::selected_chain::SelectedChainStoreReader; -use std::cmp; - pub struct Consensus { // DB db: Arc, @@ -320,10 +324,7 @@ impl Consensus { fn run_database_upgrades(&self) { // Upgrade to initialize the new retention root field correctly self.retention_root_database_upgrade(); - - // TODO (post HF): remove this upgrade - // Database upgrade to include pruning samples - self.pruning_samples_database_upgrade(); + self.consensus_transitional_flags_upgrade(); } fn retention_root_database_upgrade(&self) { @@ -336,60 +337,25 @@ impl Consensus { pruning_point_store.set_retention_period_root(&mut batch, retention_checkpoint).unwrap(); } else { // For non-archival nodes the retention root was the pruning point - let pruning_point = pruning_point_store.get().unwrap().pruning_point; + let pruning_point = pruning_point_store.pruning_point().unwrap(); pruning_point_store.set_retention_period_root(&mut batch, pruning_point).unwrap(); } self.db.write(batch).unwrap(); } } - fn pruning_samples_database_upgrade(&self) { - // - // For the first time this version runs, make sure we populate pruning samples - // from pov for all qualified chain blocks in the pruning point future - // - - let sink = self.get_sink(); - if self.storage.pruning_samples_store.pruning_sample_from_pov(sink).unwrap_option().is_some() { - // Sink is populated so we assume the database is upgraded - return; + fn consensus_transitional_flags_upgrade(&self) { + // Write the defaults to the internal storage so they will remain in cache + // *For a new staging consensus these flags will be updated again explicitly* + let mut batch = rocksdb::WriteBatch::default(); + let mut pruning_meta_write = self.storage.pruning_meta_stores.write(); + if pruning_meta_write.is_anticone_fully_synced() { + pruning_meta_write.set_body_missing_anticone(&mut batch, vec![]).unwrap(); } - - // Populate past pruning points (including current one) - for (p1, p2) in (0..=self.pruning_point_store.read().get().unwrap().index) - .map(|index| self.past_pruning_points_store.get(index).unwrap()) - .tuple_windows() - { - // Set p[i] to point at p[i-1] - self.pruning_samples_store.insert(p2, p1).unwrap_or_exists(); - } - - let pruning_point = self.pruning_point(); - let reachability = self.reachability_store.read(); - - // We walk up via reachability tree children so that we only iterate blocks B s.t. pruning point ∈ chain(B) - let mut queue = VecDeque::::from_iter(reachability.get_children(pruning_point).unwrap().iter().copied()); - let mut processed = 0; - kaspa_core::info!("Upgrading database to include and populate the pruning samples store"); - while let Some(current) = queue.pop_front() { - if !self.get_block_status(current).is_some_and(|s| s == BlockStatus::StatusUTXOValid) { - // Skip branches of the tree which are not chain qualified. 
- // This is sufficient since we will only assume this field exists - // for such chain qualified blocks - continue; - } - queue.extend(reachability.get_children(current).unwrap().iter()); - - processed += 1; - - // Populate the data - let ghostdag_data = self.ghostdag_store.get_compact_data(current).unwrap(); - let pruning_sample_from_pov = - self.services.pruning_point_manager.expected_header_pruning_point_v2(ghostdag_data).pruning_sample; - self.pruning_samples_store.insert(current, pruning_sample_from_pov).unwrap_or_exists(); + if pruning_meta_write.pruning_utxoset_stable_flag() { + pruning_meta_write.set_pruning_utxoset_stable_flag(&mut batch, true).unwrap(); } - - kaspa_core::info!("Done upgrading database (populated {} entries)", processed); + self.db.write(batch).unwrap(); } pub fn run_processors(&self) -> Vec<JoinHandle<()>> { @@ -479,13 +445,125 @@ impl Consensus { fn pruning_point_compact_headers(&self) -> Vec<(Hash, CompactHeaderData)> { // PRUNE SAFETY: index is monotonic and past pruning point headers are expected permanently - let current_pp_info = self.pruning_point_store.read().get().unwrap(); - (0..current_pp_info.index) + let (pruning_point, pruning_index) = self.pruning_point_store.read().pruning_point_and_index().unwrap(); + (0..pruning_index) .map(|index| self.past_pruning_points_store.get(index).unwrap()) - .chain(once(current_pp_info.pruning_point)) + .chain(once(pruning_point)) .map(|hash| (hash, self.headers_store.get_compact_header_data(hash).unwrap())) .collect_vec() } + + /// See: intrusive_pruning_point_update implementation below for details + pub fn intrusive_pruning_point_store_writes( + &self, + new_pruning_point: Hash, + syncer_sink: Hash, + pruning_points_to_add: VecDeque<Hash>, + ) -> ConsensusResult<()> { + let mut batch = WriteBatch::default(); + let mut pruning_point_write = self.pruning_point_store.write(); + let old_pp_index = pruning_point_write.pruning_point_index().unwrap(); + let retention_period_root = pruning_point_write.retention_period_root().unwrap(); + + let new_pp_index = old_pp_index + pruning_points_to_add.len() as u64; + pruning_point_write.set_batch(&mut batch, new_pruning_point, new_pp_index).unwrap(); + for (i, &past_pp) in pruning_points_to_add.iter().rev().enumerate() { + self.past_pruning_points_store.insert_batch(&mut batch, old_pp_index + i as u64 + 1, past_pp).unwrap(); + } + + // For archival nodes, keep the retention root in place + if !self.config.is_archival { + let adjusted_retention_period_root = + self.pruning_processor.advance_retention_period_root(retention_period_root, new_pruning_point); + pruning_point_write.set_retention_period_root(&mut batch, adjusted_retention_period_root).unwrap(); + } + + // Update virtual state based on the new pruning point + // Updating of the utxoset is done separately as it requires downloading the new utxoset in its entirety.
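+ // All of the writes below accumulate into the single WriteBatch opened above: the pruning point move,
+ // the virtual state reset, the tips and selected chain reinitialization and the transitional flags are
+ // committed atomically by the one db.write() at the end of this function, so a crash at any point
+ // leaves the node either fully before or fully after the transition.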
+ let virtual_parents = vec![new_pruning_point]; + let virtual_state = Arc::new(VirtualState { + parents: virtual_parents.clone(), + ghostdag_data: self.services.ghostdag_manager.ghostdag(&virtual_parents), + ..VirtualState::default() + }); + self.virtual_stores.write().state.set_batch(&mut batch, virtual_state).unwrap(); + // Remove old body tips and insert pruning point as the current tip + self.body_tips_store.write().delete_all_tips(&mut batch).unwrap(); + self.body_tips_store.write().init_batch(&mut batch, &virtual_parents).unwrap(); + // Update selected_chain + self.selected_chain_store.write().init_with_pruning_point(&mut batch, new_pruning_point).unwrap(); + // It is important to set this flag to false together with writing the batch, in case the node crashes suddenly before syncing of the new utxoset starts + self.pruning_meta_stores.write().set_pruning_utxoset_stable_flag(&mut batch, false).unwrap(); + // Store the currently bodyless anticone from the POV of the syncer, for trusted body validation at a later stage. + let mut anticone = self.services.dag_traversal_manager.anticone(new_pruning_point, [syncer_sink].into_iter(), None)?; + // Add the pruning point itself which is also missing a body + anticone.push(new_pruning_point); + self.pruning_meta_stores.write().set_body_missing_anticone(&mut batch, anticone).unwrap(); + self.db.write(batch).unwrap(); + drop(pruning_point_write); + Ok(()) + } + + /// Verify that the new pruning point can be safely imported + /// and return all new pruning points on the path to it that need to be updated in consensus + fn get_and_verify_path_to_new_pruning_point(&self, new_pruning_point: Hash, syncer_sink: Hash) -> ConsensusResult<VecDeque<Hash>> { + // Let B.sp denote the selected parent of a block B, let f be the finality depth, and let p be the pruning depth. + // The new pruning point P can be "finalized" into consensus if: + // 1) P satisfies P.blue_score > N*f and P.sp.blue_score <= N*f + // where N is some integer (i.e. it is a valid pruning point based on score) + // *this condition is assumed to have already been checked externally and we do not repeat it here*. + + // 2) There are sufficient headers built on top of it, specifically, + // a header is validated whose blue_score is greater than P.blue_score + p: + let syncer_pp_bscore = self.get_header(new_pruning_point).unwrap().blue_score; + let syncer_virtual_bscore = self.get_header(syncer_sink).unwrap().blue_score; + // [Crescendo]: Remove after() + if syncer_virtual_bscore < syncer_pp_bscore + self.config.pruning_depth().after() { + return Err(ConsensusError::General("declared pruning point is not of sufficient depth")); + } + // 3) The new pruning point is on the selected chain leading to that header. + if !self.services.reachability_service.is_chain_ancestor_of(new_pruning_point, syncer_sink) { + return Err(ConsensusError::General("new pruning point is not in the past of syncer sink")); + } + info!("Setting {new_pruning_point} as the pruning point"); + // 4) The pruning points declared on headers on that path must be consistent with those already known by the node: + let pruning_point_read = self.pruning_point_store.read(); + let old_pruning_point = pruning_point_read.pruning_point().unwrap(); + + // Note that the function below also updates the pruning samples, + // and implicitly confirms any pruning point pointed at en route to virtual is a pruning sample. + // It is emphasized that updating pruning samples for individual blocks is not harmful + // even if the verification ultimately does not succeed.
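+ // Worked example for the filtering below (hypothetical sample points; the deque is ordered
+ // newest-first, as implied by the front/back usage): suppose the call returns [P7, P6, P5, P4, P3]
+ // with old pruning point P4 and new pruning point P6. Popping from the back until P4 is consumed
+ // leaves [P7, P6, P5]; popping from the front until P6 is at the front leaves [P6, P5], which is
+ // exactly the chain of new pruning points, newest-first, that intrusive_pruning_point_store_writes
+ // then persists above the old index.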
+ let mut pruning_points_to_add = + self.services.pruning_point_manager.pruning_points_on_path_to_syncer_sink(old_pruning_point, syncer_sink).map_err( + |e: PruningImportError| { + ConsensusError::GeneralOwned(format!("pruning points en route to syncer sink do not form a valid chain: {}", e)) + }, + )?; + // Next we filter the returned list so it contains only the pruning points that must be introduced to consensus + + // Remove the excess pruning points before the old pruning point + while let Some(past_pp) = pruning_points_to_add.pop_back() { + if past_pp == old_pruning_point { + break; + } + } + if pruning_points_to_add.is_empty() { + return Err(ConsensusError::General("old pruning point is inconsistent with synced headers")); + } + // Remove the excess pruning points beyond the new pruning_point + while let Some(&future_pp) = pruning_points_to_add.front() { + if future_pp == new_pruning_point { + break; + } + // Here we only pop_front after checking as we want the new pruning_point to stay in the list + pruning_points_to_add.pop_front(); + } + if pruning_points_to_add.is_empty() { + return Err(ConsensusError::General("new pruning point is inconsistent with synced headers")); + } + Ok(pruning_points_to_add) + } } impl ConsensusApi for Consensus { @@ -667,7 +745,7 @@ impl ConsensusApi for Consensus { /// Estimates the number of blocks and headers stored in the node database. /// /// This is an estimation based on the DAA score difference between the node's `retention root` and `virtual`'s DAA score, - /// as such, it does not include non-daa blocks, and does not include headers stored as part of the pruning proof. + /// as such, it does not include non-daa blocks, and does not include headers stored as part of the pruning proof. fn estimate_block_count(&self) -> BlockCount { // PRUNE SAFETY: retention root is always a current or past pruning point which its header is kept permanently let retention_period_root_score = self.headers_store.get_daa_score(self.get_retention_period_root()).unwrap(); @@ -771,11 +849,133 @@ impl ConsensusApi for Consensus { sample_headers } + fn get_transactions_by_accepting_daa_score( + &self, + accepting_daa_score: u64, + tx_ids: Option<Vec<TransactionId>>, + tx_type: TransactionType, + ) -> ConsensusResult<TransactionQueryResult> { + // We need consistency between the acceptance store and the block transaction store. + let _guard = self.pruning_lock.blocking_read(); + let accepting_block = self + .virtual_processor + .find_accepting_chain_block_hash_at_daa_score(accepting_daa_score, self.get_retention_period_root())?; + self.get_transactions_by_accepting_block(accepting_block, tx_ids, tx_type) + } - fn get_populated_transaction(&self, txid: Hash, accepting_block_daa_score: u64) -> Result<SignableTransaction, UtxoInquirerError> { - // We need consistency between the pruning_point_store, utxo_diffs_store, block_transactions_store, selected chain and headers store reads + fn get_transactions_by_block_acceptance_data( + &self, + accepting_block: Hash, + block_acceptance_data: MergesetBlockAcceptanceData, + tx_ids: Option<Vec<TransactionId>>, + tx_type: TransactionType, + ) -> ConsensusResult<TransactionQueryResult> { + // Need consistency between the acceptance store and the block transaction store.
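+ // Holding the pruning read lock for the duration of the query keeps the pruning processor from
+ // deleting acceptance data or block bodies while they are being read below.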
let _guard = self.pruning_lock.blocking_read(); - self.virtual_processor.get_populated_transaction(txid, accepting_block_daa_score, self.get_retention_period_root()) + + match tx_type { + TransactionType::Transaction => { + if let Some(tx_ids) = tx_ids { + let mut tx_ids_filter = HashSet::with_capacity(tx_ids.len()); + tx_ids_filter.extend(tx_ids); + + Ok(TransactionQueryResult::Transaction(Arc::new( + self.get_block_transactions( + block_acceptance_data.block_hash, + Some( + block_acceptance_data + .accepted_transactions + .into_iter() + .filter_map(|atx| { + if tx_ids_filter.contains(&atx.transaction_id) { + Some(atx.index_within_block) + } else { + None + } + }) + .collect(), + ), + )?, + ))) + } else { + Ok(TransactionQueryResult::Transaction(Arc::new(self.get_block_transactions( + block_acceptance_data.block_hash, + Some(block_acceptance_data.accepted_transactions.iter().map(|atx| atx.index_within_block).collect()), + )?))) + } + } + TransactionType::SignableTransaction => Ok(TransactionQueryResult::SignableTransaction(Arc::new( + self.virtual_processor.get_populated_transactions_by_block_acceptance_data( + tx_ids, + block_acceptance_data, + accepting_block, + )?, + ))), + } + } + + fn get_transactions_by_accepting_block( + &self, + accepting_block: Hash, + tx_ids: Option>, + tx_type: TransactionType, + ) -> ConsensusResult { + // need consistency between the acceptance store and the block transaction store, + let _guard = self.pruning_lock.blocking_read(); + + match tx_type { + TransactionType::Transaction => { + let accepting_block_mergeset_acceptance_data_iter = self + .acceptance_data_store + .get(accepting_block) + .map_err(|_| ConsensusError::MissingData(accepting_block))? + .unwrap_or_clone() + .into_iter(); + + if let Some(tx_ids) = tx_ids { + let mut tx_ids_filter = HashSet::with_capacity(tx_ids.len()); + tx_ids_filter.extend(tx_ids); + + Ok(TransactionQueryResult::Transaction(Arc::new( + accepting_block_mergeset_acceptance_data_iter + .flat_map(|mbad| { + self.get_block_transactions( + mbad.block_hash, + Some( + mbad.accepted_transactions + .into_iter() + .filter_map(|atx| { + if tx_ids_filter.contains(&atx.transaction_id) { + Some(atx.index_within_block) + } else { + None + } + }) + .collect(), + ), + ) + }) + .flatten() + .collect::>(), + ))) + } else { + Ok(TransactionQueryResult::Transaction(Arc::new( + accepting_block_mergeset_acceptance_data_iter + .flat_map(|mbad| { + self.get_block_transactions( + mbad.block_hash, + Some(mbad.accepted_transactions.iter().map(|atx| atx.index_within_block).collect()), + ) + }) + .flatten() + .collect::>(), + ))) + } + } + TransactionType::SignableTransaction => Ok(TransactionQueryResult::SignableTransaction(Arc::new( + self.virtual_processor.get_populated_transactions_by_accepting_block(tx_ids, accepting_block)?, + ))), + } } fn get_virtual_parents(&self) -> BlockHashSet { @@ -815,10 +1015,10 @@ impl ConsensusApi for Consensus { if self.pruning_point_store.read().pruning_point().unwrap() != expected_pruning_point { return Err(ConsensusError::UnexpectedPruningPoint); } - let pruning_utxoset_read = self.pruning_utxoset_stores.read(); - let iter = pruning_utxoset_read.utxo_set.seek_iterator(from_outpoint, chunk_size, skip_first); + let pruning_meta_read = self.pruning_meta_stores.read(); + let iter = pruning_meta_read.utxo_set.seek_iterator(from_outpoint, chunk_size, skip_first); let utxos = iter.map(|item| item.unwrap()).collect(); - drop(pruning_utxoset_read); + drop(pruning_meta_read); // We recheck the expected pruning point in case 
it was switched just before the utxo set read. // NOTE: we rely on order of operations by pruning processor. See extended comment therein. @@ -833,9 +1033,8 @@ impl ConsensusApi for Consensus { self.services.coinbase_manager.modify_coinbase_payload(payload, miner_data) } - fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash { - let storage_mass_activated = self.config.crescendo_activation.is_active(pov_daa_score); - calc_hash_merkle_root(txs.iter(), storage_mass_activated) + fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction]) -> Hash { + calc_hash_merkle_root(txs.iter()) } fn validate_pruning_proof( @@ -855,8 +1054,8 @@ impl ConsensusApi for Consensus { } fn append_imported_pruning_point_utxos(&self, utxoset_chunk: &[(TransactionOutpoint, UtxoEntry)], current_multiset: &mut MuHash) { - let mut pruning_utxoset_write = self.pruning_utxoset_stores.write(); - pruning_utxoset_write.utxo_set.write_many(utxoset_chunk).unwrap(); + let mut pruning_meta_write = self.pruning_meta_stores.write(); + pruning_meta_write.utxo_set.write_many(utxoset_chunk).unwrap(); // Parallelize processing using the context of an existing thread pool. let inner_multiset = self.virtual_processor.install(|| { @@ -875,16 +1074,16 @@ impl ConsensusApi for Consensus { fn validate_pruning_points(&self, syncer_virtual_selected_parent: Hash) -> ConsensusResult<()> { let hst = self.storage.headers_selected_tip_store.read().get().unwrap().hash; - let pp_info = self.pruning_point_store.read().get().unwrap(); - if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point, hst) { + let (synced_pruning_point, synced_pp_index) = self.pruning_point_store.read().pruning_point_and_index().unwrap(); + if !self.services.pruning_point_manager.is_valid_pruning_point(synced_pruning_point, hst) { return Err(ConsensusError::General("pruning point does not coincide with the synced header selected tip")); } - if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point, syncer_virtual_selected_parent) { + if !self.services.pruning_point_manager.is_valid_pruning_point(synced_pruning_point, syncer_virtual_selected_parent) { return Err(ConsensusError::General("pruning point does not coincide with the syncer's sink (virtual selected parent)")); } self.services .pruning_point_manager - .are_pruning_points_in_valid_chain(pp_info, syncer_virtual_selected_parent) + .are_pruning_points_in_valid_chain(synced_pruning_point, synced_pp_index, syncer_virtual_selected_parent) .map_err(|e| ConsensusError::GeneralOwned(format!("past pruning points do not form a valid chain: {}", e))) } @@ -898,7 +1097,7 @@ impl ConsensusApi for Consensus { // max_blocks has to be greater than the merge set size limit fn get_hashes_between(&self, low: Hash, high: Hash, max_blocks: usize) -> ConsensusResult<(Vec, Hash)> { let _guard = self.pruning_lock.blocking_read(); - assert!(max_blocks as u64 > self.config.mergeset_size_limit().upper_bound()); + assert!(max_blocks as u64 > self.config.mergeset_size_limit().after()); self.validate_block_exists(low)?; self.validate_block_exists(high)?; @@ -948,10 +1147,10 @@ impl ConsensusApi for Consensus { fn pruning_point_headers(&self) -> Vec> { // PRUNE SAFETY: index is monotonic and past pruning point headers are expected permanently - let current_pp_info = self.pruning_point_store.read().get().unwrap(); - (0..current_pp_info.index) + let (pruning_point, pruning_index) = self.pruning_point_store.read().pruning_point_and_index().unwrap(); + 
(0..pruning_index) .map(|index| self.past_pruning_points_store.get(index).unwrap()) - .chain(once(current_pp_info.pruning_point)) + .chain(once(pruning_point)) .map(|hash| self.headers_store.get_header(hash).unwrap()) .collect_vec() } @@ -976,6 +1175,44 @@ impl ConsensusApi for Consensus { }) } + fn get_block_transactions(&self, hash: Hash, indices: Option>) -> ConsensusResult> { + let transactions = self.block_transactions_store.get(hash).unwrap_option().ok_or(ConsensusError::BlockNotFound(hash))?; + let tx_len = transactions.len(); + + if let Some(indices) = indices { + if tx_len < indices.len() { + return Err(ConsensusError::TransactionQueryTooLarge(indices.len(), hash, transactions.len())); + } + + let res = transactions + .unwrap_or_clone() + .into_iter() + .enumerate() + .filter(|(index, _tx)| indices.contains(&(*index as TransactionIndexType))) + .map(|(_, tx)| tx) + .collect::>(); + + if res.len() != indices.len() { + Err(ConsensusError::TransactionIndexOutOfBounds(*indices.iter().max().unwrap(), tx_len, hash)) + } else { + Ok(res) + } + } else { + Ok(transactions.unwrap_or_clone()) + } + } + + fn get_block_body(&self, hash: Hash) -> ConsensusResult>> { + if match self.statuses_store.read().get(hash).unwrap_option() { + Some(status) => !status.has_block_body(), + None => true, + } { + return Err(ConsensusError::BlockNotFound(hash)); + } + + self.block_transactions_store.get(hash).unwrap_option().ok_or(ConsensusError::BlockNotFound(hash)) + } + fn get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult { let Some(status) = self.statuses_store.read().get(hash).unwrap_option().filter(|&status| status.has_block_header()) else { return Err(ConsensusError::HeaderNotFound(hash)); @@ -1061,45 +1298,22 @@ impl ConsensusApi for Consensus { self.validate_block_exists(high)?; Ok(self.services.sync_manager.get_missing_block_body_hashes(high)?) } - - fn pruning_point(&self) -> Hash { - self.pruning_point_store.read().pruning_point().unwrap() + /// Returns the set of blocks in the anticone of the current pruning point + /// which (may) lack a block body due to being in a transitional state + /// If not in a transitional state this list is supposed to be empty + fn get_body_missing_anticone(&self) -> Vec { + self.pruning_meta_stores.read().get_body_missing_anticone() } - fn get_daa_window(&self, hash: Hash) -> ConsensusResult> { - let _guard = self.pruning_lock.blocking_read(); - self.validate_block_exists(hash)?; - Ok(self - .services - .window_manager - .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::DifficultyWindow) - .unwrap() - .deref() - .iter() - .map(|block| block.0.hash) - .collect()) + fn clear_body_missing_anticone_set(&self) { + let mut pruning_meta_write = self.pruning_meta_stores.write(); + let mut batch = rocksdb::WriteBatch::default(); + pruning_meta_write.set_body_missing_anticone(&mut batch, vec![]).unwrap(); + self.db.write(batch).unwrap(); } - fn get_trusted_block_associated_ghostdag_data_block_hashes(&self, hash: Hash) -> ConsensusResult> { - let _guard = self.pruning_lock.blocking_read(); - self.validate_block_exists(hash)?; - - // In order to guarantee the chain height is at least k, we check that the pruning point is not genesis. - let pruning_point = self.pruning_point(); - if pruning_point == self.config.genesis.hash { - return Err(ConsensusError::UnexpectedPruningPoint); - } - - // [Crescendo]: get ghostdag k based on the pruning point's DAA score. 
The off-by-one of not going by selected parent - // DAA score is not important here as we simply increase K one block earlier which is more conservative (saving/sending more data) - let ghostdag_k = self.config.ghostdag_k().get(self.headers_store.get_daa_score(pruning_point).unwrap()); - - // Note: the method `get_ghostdag_chain_k_depth` might return a partial chain if data is missing. - // Ideally this node when synced would validate it got all of the associated data up to k blocks - // back and then we would be able to assert we actually got `k + 1` blocks, however we choose to - // simply ignore, since if the data was truly missing we wouldn't accept the staging consensus in - // the first place - Ok(self.services.pruning_proof_manager.get_ghostdag_chain_k_depth(hash, ghostdag_k)) + fn pruning_point(&self) -> Hash { + self.pruning_point_store.read().pruning_point().unwrap() } fn create_block_locator_from_pruning_point(&self, high: Hash, limit: usize) -> ConsensusResult> { @@ -1141,4 +1355,74 @@ impl ConsensusApi for Consensus { fn finality_point(&self) -> Hash { self.virtual_processor.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, self.pruning_point()) } + + /// The utxoset is an additive structure, + /// to make room for the gradual aggregation of a new utxoset, + /// first the old one must be cleared. + /// Likewise, clearing the old utxoset is also a gradual process. + /// The utxo stable flag guarantees that a full utxoset is never mistaken for + /// an incomplete or partially deleted one. + fn clear_pruning_utxo_set(&self) { + let mut pruning_meta_write = self.pruning_meta_stores.write(); + let mut batch = rocksdb::WriteBatch::default(); + // Currently under the conditions in which this function is called, this flag should already be false. + // We lower it down regardless as it is conceptually true to do so. + pruning_meta_write.set_pruning_utxoset_stable_flag(&mut batch, false).unwrap(); + self.db.write(batch).unwrap(); + pruning_meta_write.utxo_set.clear().unwrap(); + } + + fn verify_is_pruning_sample(&self, pruning_candidate: Hash) -> ConsensusResult<()> { + if pruning_candidate == self.config.genesis.hash { + return Ok(()); + } + let Ok(candidate_ghostdag_data) = self.get_ghostdag_data(pruning_candidate) else { + return Err(ConsensusError::General("pruning candidate missing ghostdag data")); + }; + let Ok(selected_parent_ghostdag_data) = self.get_ghostdag_data(candidate_ghostdag_data.selected_parent) else { + return Err(ConsensusError::General("pruning candidate selected parent missing ghostdag data")); + }; + self.services + .pruning_point_manager + .is_pruning_sample( + candidate_ghostdag_data.blue_score, + selected_parent_ghostdag_data.blue_score, + self.config.params.finality_depth().after(), + ) + .then_some(()) + .ok_or(ConsensusError::General("pruning candidate is not a pruning sample")) + } + + /// The usual flow consists of the pruning point naturally updating during pruning, and hence maintains consistency by default + /// During pruning catchup, we need to manually update the pruning point and + /// make sure that consensus looks "as if" it has just moved to a new pruning point. + fn intrusive_pruning_point_update(&self, new_pruning_point: Hash, syncer_sink: Hash) -> ConsensusResult<()> { + let pruning_points_to_add = self.get_and_verify_path_to_new_pruning_point(new_pruning_point, syncer_sink)?; + + // If all has gone well, we can finally update pruning point and other stores. 
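+ // Note the two-phase structure: the verification above performs at most benign pruning-sample
+ // updates, while every pruning point store mutation is deferred to the single batched write
+ // performed inside the call below.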
+ self.intrusive_pruning_point_store_writes(new_pruning_point, syncer_sink, pruning_points_to_add) + } + + fn set_pruning_utxoset_stable_flag(&self, val: bool) { + let mut pruning_meta_write = self.pruning_meta_stores.write(); + let mut batch = rocksdb::WriteBatch::default(); + + pruning_meta_write.set_pruning_utxoset_stable_flag(&mut batch, val).unwrap(); + self.db.write(batch).unwrap(); + } + + fn is_pruning_utxoset_stable(&self) -> bool { + let pruning_meta_read = self.pruning_meta_stores.read(); + pruning_meta_read.pruning_utxoset_stable_flag() + } + + fn is_pruning_point_anticone_fully_synced(&self) -> bool { + let pruning_meta_read = self.pruning_meta_stores.read(); + pruning_meta_read.is_anticone_fully_synced() + } + + fn is_consensus_in_transitional_ibd_state(&self) -> bool { + let pruning_meta_read = self.pruning_meta_stores.read(); + pruning_meta_read.is_in_transitional_ibd_state() + } } diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index d608938cd7..5e2b92fd8f 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -150,6 +150,7 @@ impl ConsensusServices { params.max_script_public_key_len(), params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity(), + params.ghostdag_k().after(), tx_script_cache_counters, mass_calculator.clone(), params.crescendo_activation, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index c943f233db..f6d7e88508 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -11,8 +11,8 @@ use crate::{ headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::DbPastPruningPointsStore, pruning::DbPruningStore, + pruning_meta::PruningMetaStores, pruning_samples::DbPruningSamplesStore, - pruning_utxoset::PruningUtxosetStores, reachability::{DbReachabilityStore, ReachabilityData}, relations::DbRelationsStore, selected_chain::DbSelectedChainStore, @@ -46,7 +46,7 @@ pub struct ConsensusStorage { pub pruning_point_store: Arc>, pub headers_selected_tip_store: Arc>, pub body_tips_store: Arc>, - pub pruning_utxoset_stores: Arc>, + pub pruning_meta_stores: Arc>, pub virtual_stores: Arc>, pub selected_chain_store: Arc>, @@ -83,9 +83,8 @@ impl ConsensusStorage { let perf_params = &config.perf; // Lower and upper bounds - // [Crescendo]: all usages of pruning upper bounds also bound by actual memory bytes, so we can safely use the larger values - let pruning_depth = params.pruning_depth().upper_bound() as usize; - let pruning_size_for_caches = pruning_depth + params.finality_depth().upper_bound() as usize; // Upper bound for any block/header related data + let pruning_depth = params.pruning_depth().after() as usize; + let pruning_size_for_caches = pruning_depth + params.finality_depth().after() as usize; // Upper bound for any block/header related data let level_lower_bound = 2 * params.pruning_proof_m as usize; // Number of items lower bound for level-related caches // Budgets in bytes. 
All byte budgets overall sum up to ~1GB of memory (which obviously takes more low level alloc space) @@ -211,9 +210,8 @@ impl ConsensusStorage { // Pruning let pruning_point_store = Arc::new(RwLock::new(DbPruningStore::new(db.clone()))); let past_pruning_points_store = Arc::new(DbPastPruningPointsStore::new(db.clone(), past_pruning_points_builder.build())); - let pruning_utxoset_stores = Arc::new(RwLock::new(PruningUtxosetStores::new(db.clone(), utxo_set_builder.build()))); + let pruning_meta_stores = Arc::new(RwLock::new(PruningMetaStores::new(db.clone(), utxo_set_builder.build()))); let pruning_samples_store = Arc::new(DbPruningSamplesStore::new(db.clone(), header_data_builder.build())); - // Txs let block_transactions_store = Arc::new(DbBlockTransactionsStore::new(db.clone(), transactions_builder.build())); let utxo_diffs_store = Arc::new(DbUtxoDiffsStore::new(db.clone(), utxo_diffs_builder.build())); @@ -249,7 +247,7 @@ impl ConsensusStorage { body_tips_store, headers_store, block_transactions_store, - pruning_utxoset_stores, + pruning_meta_stores, virtual_stores, selected_chain_store, acceptance_data_store, diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index 69ec7170c2..091b7440c1 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -25,8 +25,7 @@ use crate::{ model::{ services::reachability::MTReachabilityService, stores::{ - ghostdag::DbGhostdagStore, headers::HeaderStoreReader, pruning::PruningStoreReader, reachability::DbReachabilityStore, - virtual_state::VirtualStores, DB, + ghostdag::DbGhostdagStore, headers::HeaderStoreReader, reachability::DbReachabilityStore, virtual_state::VirtualStores, DB, }, }, params::Params, @@ -119,13 +118,10 @@ impl TestConsensus { } pub fn build_header_with_parents(&self, hash: Hash, parents: Vec) -> Header { - let mut header = header_from_precomputed_hash(hash, parents); + let mut header = header_from_precomputed_hash(hash, parents.clone()); + let parents_by_level = self.consensus.services.parents_manager.calc_block_parents(self.pruning_point(), &parents); + header.parents_by_level = parents_by_level; let ghostdag_data = self.consensus.services.ghostdag_manager.ghostdag(header.direct_parents()); - header.pruning_point = self - .consensus - .services - .pruning_point_manager - .expected_header_pruning_point_v1(ghostdag_data.to_compact(), self.consensus.pruning_point_store.read().get().unwrap()); let daa_window = self.consensus.services.window_manager.block_daa_window(&ghostdag_data).unwrap(); header.bits = self.consensus.services.window_manager.calculate_difficulty_bits(&ghostdag_data, &daa_window); header.daa_score = daa_window.daa_score; @@ -136,8 +132,12 @@ impl TestConsensus { header } - pub fn add_block_with_parents(&self, hash: Hash, parents: Vec) -> impl Future> { - self.validate_and_insert_block(self.build_block_with_parents(hash, parents).to_immutable()).virtual_state_task + pub fn add_header_only_block_with_parents( + &self, + hash: Hash, + parents: Vec, + ) -> impl Future> { + self.validate_and_insert_block(self.build_header_only_block_with_parents(hash, parents).to_immutable()).virtual_state_task } /// Adds a valid block with the given transactions and parents to the consensus. 
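For orientation, a minimal usage sketch of the renamed and added test helpers inside an async test (assumptions: the DEVNET_PARAMS ConfigBuilder setup used by the surrounding tests, arbitrary hash literals, and TestConsensus::shutdown for teardown):

    let config = ConfigBuilder::new(DEVNET_PARAMS).skip_proof_of_work().build();
    let consensus = TestConsensus::new(&config);
    let wait_handles = consensus.init();
    // Header-only blocks (formerly add_block_with_parents) extend the DAG without bodies
    consensus.add_header_only_block_with_parents(1.into(), vec![config.genesis.hash]).await.unwrap();
    // The new wrapper adds a UTXO-valid block carrying an empty transaction list on top of genesis
    consensus.add_empty_utxo_valid_block_with_parents(2.into(), vec![config.genesis.hash]).await.unwrap();
    consensus.shutdown(wait_handles);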
@@ -157,6 +157,14 @@ impl TestConsensus { .virtual_state_task } + pub fn add_empty_utxo_valid_block_with_parents( + &self, + hash: Hash, + parents: Vec, + ) -> impl Future> { + self.add_utxo_valid_block_with_parents(hash, parents, vec![]) + } + /// Builds a valid block with the given transactions, parents, and miner data. /// /// # Panics @@ -190,11 +198,11 @@ impl TestConsensus { let cb = Transaction::new(TX_VERSION, vec![], vec![], 0, SUBNETWORK_ID_COINBASE, 0, cb_payload); txs.insert(0, cb); - header.hash_merkle_root = calc_hash_merkle_root(txs.iter(), false); + header.hash_merkle_root = calc_hash_merkle_root(txs.iter()); MutableBlock::new(header, txs) } - pub fn build_block_with_parents(&self, hash: Hash, parents: Vec) -> MutableBlock { + pub fn build_header_only_block_with_parents(&self, hash: Hash, parents: Vec) -> MutableBlock { MutableBlock::from_header(self.build_header_with_parents(hash, parents)) } diff --git a/consensus/src/model/stores/headers.rs b/consensus/src/model/stores/headers.rs index 85668f6992..166b1e5de5 100644 --- a/consensus/src/model/stores/headers.rs +++ b/consensus/src/model/stores/headers.rs @@ -38,6 +38,54 @@ pub trait HeaderStore: HeaderStoreReader { fn delete(&self, hash: Hash) -> Result<(), StoreError>; } +/// A temporary struct for backward compatibility. This struct is used to deserialize old header data with +/// parents_by_level as Vec>. +#[derive(Clone, Debug, Deserialize)] +struct Header2 { + pub hash: Hash, + pub version: u16, + pub parents_by_level: Vec>, + pub hash_merkle_root: Hash, + pub accepted_id_merkle_root: Hash, + pub utxo_commitment: Hash, + pub timestamp: u64, + pub bits: u32, + pub nonce: u64, + pub daa_score: u64, + pub blue_work: kaspa_consensus_core::BlueWorkType, + pub blue_score: u64, + pub pruning_point: Hash, +} + +#[derive(Clone, Deserialize)] +struct HeaderWithBlockLevel2 { + header: Header2, + block_level: BlockLevel, +} +impl From for HeaderWithBlockLevel { + fn from(value: HeaderWithBlockLevel2) -> Self { + Self { + header: Header { + hash: value.header.hash, + version: value.header.version, + parents_by_level: value.header.parents_by_level.try_into().unwrap(), + hash_merkle_root: value.header.hash_merkle_root, + accepted_id_merkle_root: value.header.accepted_id_merkle_root, + utxo_commitment: value.header.utxo_commitment, + timestamp: value.header.timestamp, + bits: value.header.bits, + nonce: value.header.nonce, + daa_score: value.header.daa_score, + blue_work: value.header.blue_work, + blue_score: value.header.blue_score, + pruning_point: value.header.pruning_point, + } + .into(), + block_level: value.block_level, + } + } +} + #[derive(Clone, Copy, Serialize, Deserialize)] pub struct CompactHeaderData { pub daa_score: u64, @@ -60,6 +108,7 @@ pub struct DbHeadersStore { db: Arc, compact_headers_access: CachedDbAccess, headers_access: CachedDbAccess, + fallback_prefix: Vec, } impl DbHeadersStore { @@ -71,7 +120,8 @@ impl DbHeadersStore { compact_cache_policy, DatabaseStorePrefixes::HeadersCompact.into(), ), - headers_access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::Headers.into()), + headers_access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::CompressedHeaders.into()), + fallback_prefix: DatabaseStorePrefixes::Headers.into(), } } @@ -80,7 +130,7 @@ impl DbHeadersStore { } pub fn has(&self, hash: Hash) -> StoreResult { - self.headers_access.has(hash) + self.headers_access.has_with_fallback(self.fallback_prefix.as_ref(), hash) } pub fn insert_batch( @@ -90,7 +140,7 @@ impl DbHeadersStore { 
header: Arc<Header>
, block_level: BlockLevel, ) -> Result<(), StoreError> { - if self.headers_access.has(hash)? { + if self.has(hash)? { return Err(StoreError::HashAlreadyExists(hash)); } self.headers_access.write(BatchDbWriter::new(batch), hash, HeaderWithBlockLevel { header: header.clone(), block_level })?; @@ -134,11 +184,11 @@ impl HeaderStoreReader for DbHeadersStore { } fn get_header(&self, hash: Hash) -> Result, StoreError> { - Ok(self.headers_access.read(hash)?.header) + Ok(self.headers_access.read_with_fallback::(self.fallback_prefix.as_ref(), hash)?.header) } fn get_header_with_block_level(&self, hash: Hash) -> Result { - self.headers_access.read(hash) + self.headers_access.read_with_fallback::(self.fallback_prefix.as_ref(), hash) } fn get_compact_header_data(&self, hash: Hash) -> Result { @@ -151,7 +201,7 @@ impl HeaderStoreReader for DbHeadersStore { impl HeaderStore for DbHeadersStore { fn insert(&self, hash: Hash, header: Arc
, block_level: u8) -> Result<(), StoreError> { - if self.headers_access.has(hash)? { + if self.has(hash)? { return Err(StoreError::HashAlreadyExists(hash)); } if self.compact_headers_access.has(hash)? { diff --git a/consensus/src/model/stores/mod.rs b/consensus/src/model/stores/mod.rs index 02e2824a98..30c77dec22 100644 --- a/consensus/src/model/stores/mod.rs +++ b/consensus/src/model/stores/mod.rs @@ -9,8 +9,8 @@ pub mod headers; pub mod headers_selected_tip; pub mod past_pruning_points; pub mod pruning; +pub mod pruning_meta; pub mod pruning_samples; -pub mod pruning_utxoset; pub mod reachability; pub mod relations; pub mod selected_chain; diff --git a/consensus/src/model/stores/pruning.rs b/consensus/src/model/stores/pruning.rs index 14636bed64..8b58326cf7 100644 --- a/consensus/src/model/stores/pruning.rs +++ b/consensus/src/model/stores/pruning.rs @@ -5,38 +5,34 @@ use kaspa_database::prelude::DB; use kaspa_database::prelude::{BatchDbWriter, CachedDbItem, DirectDbWriter}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; +use kaspa_hashes::ZERO_HASH; use rocksdb::WriteBatch; use serde::{Deserialize, Serialize}; #[derive(Clone, Copy, Serialize, Deserialize)] -pub struct PruningPointInfo { - pub pruning_point: Hash, - pub candidate: Hash, - pub index: u64, +struct PruningPointInfo { + pruning_point: Hash, + _candidate: Hash, // Obsolete field. Kept only for avoiding the DB upgrade logic. TODO: remove all together + index: u64, } impl PruningPointInfo { - pub fn new(pruning_point: Hash, candidate: Hash, index: u64) -> Self { - Self { pruning_point, candidate, index } + pub fn new(pruning_point: Hash, index: u64) -> Self { + Self { pruning_point, _candidate: ZERO_HASH, index } } - pub fn from_genesis(genesis_hash: Hash) -> Self { - Self { pruning_point: genesis_hash, candidate: genesis_hash, index: 0 } - } - - pub fn decompose(self) -> (Hash, Hash, u64) { - (self.pruning_point, self.candidate, self.index) + pub fn decompose(self) -> (Hash, u64) { + (self.pruning_point, self.index) } } /// Reader API for `PruningStore`. pub trait PruningStoreReader { fn pruning_point(&self) -> StoreResult; - fn pruning_point_candidate(&self) -> StoreResult; fn pruning_point_index(&self) -> StoreResult; - /// Returns full pruning point info, including its index and the next pruning point candidate - fn get(&self) -> StoreResult; + /// Returns the pruning point and its index + fn pruning_point_and_index(&self) -> StoreResult<(Hash, u64)>; /// Represent the point after which data is fully held (i.e., history is consecutive from this point and up to virtual). /// This is usually a pruning point that is at or below the retention period requirement (and for archival @@ -52,7 +48,7 @@ pub trait PruningStoreReader { } pub trait PruningStore: PruningStoreReader { - fn set(&mut self, pruning_point: Hash, candidate: Hash, index: u64) -> StoreResult<()>; + fn set(&mut self, pruning_point: Hash, index: u64) -> StoreResult<()>; } /// A DB + cache implementation of `PruningStore` trait, with concurrent readers support. 
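Context for the obsolete `_candidate` field kept above: store items are persisted with a positional, non-self-describing serde encoding (bincode in the database layer, to the best of our reading), so removing a field would shift every byte that follows it in previously written values and require a DB migration. A minimal sketch of the idea, with toy u64 fields standing in for the real types:

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct InfoV1 { pruning_point: u64, candidate: u64, index: u64 }

    // Same on-disk layout as InfoV1: the field is renamed, not removed
    #[derive(Serialize, Deserialize)]
    struct InfoV2 { pruning_point: u64, _candidate: u64, index: u64 }

    fn main() {
        let old_bytes = bincode::serialize(&InfoV1 { pruning_point: 1, candidate: 2, index: 3 }).unwrap();
        // Old bytes decode cleanly as long as the obsolete slot stays in place;
        // dropping the field instead would misalign `index`
        let v2: InfoV2 = bincode::deserialize(&old_bytes).unwrap();
        assert_eq!((v2.pruning_point, v2.index), (1, 3));
    }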
@@ -78,8 +74,8 @@ impl DbPruningStore { Self::new(Arc::clone(&self.db)) } - pub fn set_batch(&mut self, batch: &mut WriteBatch, pruning_point: Hash, candidate: Hash, index: u64) -> StoreResult<()> { - self.access.write(BatchDbWriter::new(batch), &PruningPointInfo { pruning_point, candidate, index }) + pub fn set_batch(&mut self, batch: &mut WriteBatch, pruning_point: Hash, index: u64) -> StoreResult<()> { + self.access.write(BatchDbWriter::new(batch), &PruningPointInfo::new(pruning_point, index)) } pub fn set_retention_checkpoint(&mut self, batch: &mut WriteBatch, retention_checkpoint: Hash) -> StoreResult<()> { @@ -96,16 +92,12 @@ impl PruningStoreReader for DbPruningStore { Ok(self.access.read()?.pruning_point) } - fn pruning_point_candidate(&self) -> StoreResult { - Ok(self.access.read()?.candidate) - } - fn pruning_point_index(&self) -> StoreResult { Ok(self.access.read()?.index) } - fn get(&self) -> StoreResult { - self.access.read() + fn pruning_point_and_index(&self) -> StoreResult<(Hash, u64)> { + Ok(self.access.read()?.decompose()) } fn retention_checkpoint(&self) -> StoreResult { @@ -118,7 +110,7 @@ impl PruningStoreReader for DbPruningStore { } impl PruningStore for DbPruningStore { - fn set(&mut self, pruning_point: Hash, candidate: Hash, index: u64) -> StoreResult<()> { - self.access.write(DirectDbWriter::new(&self.db), &PruningPointInfo::new(pruning_point, candidate, index)) + fn set(&mut self, pruning_point: Hash, index: u64) -> StoreResult<()> { + self.access.write(DirectDbWriter::new(&self.db), &PruningPointInfo::new(pruning_point, index)) } } diff --git a/consensus/src/model/stores/pruning_meta.rs b/consensus/src/model/stores/pruning_meta.rs new file mode 100644 index 0000000000..1f33c5a8c0 --- /dev/null +++ b/consensus/src/model/stores/pruning_meta.rs @@ -0,0 +1,75 @@ +use std::sync::Arc; + +use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::StoreResult; +use kaspa_database::prelude::StoreResultExtensions; +use kaspa_database::prelude::DB; +use kaspa_database::prelude::{BatchDbWriter, CachedDbItem}; +use kaspa_database::registry::DatabaseStorePrefixes; +use kaspa_hashes::Hash; +use rocksdb::WriteBatch; + +use super::utxo_set::DbUtxoSetStore; + +/// Used in order to group stores related to the pruning point utxoset under a single lock +pub struct PruningMetaStores { + pub utxo_set: DbUtxoSetStore, + utxoset_position_access: CachedDbItem, + utxoset_stable_flag_access: CachedDbItem, + body_missing_anticone_blocks: CachedDbItem>, +} + +impl PruningMetaStores { + pub fn new(db: Arc, utxoset_cache_policy: CachePolicy) -> Self { + Self { + utxo_set: DbUtxoSetStore::new(db.clone(), utxoset_cache_policy, DatabaseStorePrefixes::PruningUtxoset.into()), + utxoset_position_access: CachedDbItem::new(db.clone(), DatabaseStorePrefixes::PruningUtxosetPosition.into()), + utxoset_stable_flag_access: CachedDbItem::new(db.clone(), DatabaseStorePrefixes::PruningUtxosetSyncFlag.into()), + body_missing_anticone_blocks: CachedDbItem::new(db.clone(), DatabaseStorePrefixes::BodyMissingAnticone.into()), + } + } + + /// Represents the exact point of the current pruning point utxoset. 
Used in order to safely + /// progress the pruning point utxoset in batches and to allow recovery if the process crashes + /// during the pruning point utxoset movement + pub fn utxoset_position(&self) -> StoreResult { + self.utxoset_position_access.read() + } + + pub fn set_utxoset_position(&mut self, batch: &mut WriteBatch, pruning_utxoset_position: Hash) -> StoreResult<()> { + self.utxoset_position_access.write(BatchDbWriter::new(batch), &pruning_utxoset_position) + } + + /// Flip the sync flag in the same batch as your other writes + pub fn set_pruning_utxoset_stable_flag(&mut self, batch: &mut WriteBatch, stable: bool) -> StoreResult<()> { + self.utxoset_stable_flag_access.write(BatchDbWriter::new(batch), &stable) + } + + /// Read the flag; default to true if missing - this is important because a node upgrading should have this value true + /// as all non staging consensuses had a stable utxoset previously + pub fn pruning_utxoset_stable_flag(&self) -> bool { + self.utxoset_stable_flag_access.read().unwrap_option().unwrap_or(true) + } + + /// Represents blocks in the anticone of the current pruning point which may lack a block body + /// These blocks need to be kept track of as they require trusted validation, + /// so that downloading of further blocks on top of them could resume + pub fn set_body_missing_anticone(&mut self, batch: &mut WriteBatch, body_missing_anticone: Vec) -> StoreResult<()> { + self.body_missing_anticone_blocks.write(BatchDbWriter::new(batch), &body_missing_anticone) + } + + /// Default to empty if missing - this is important because a node upgrading should have this value empty + /// since all non staging consensuses had no missing body anticone previously + pub fn get_body_missing_anticone(&self) -> Vec { + self.body_missing_anticone_blocks.read().unwrap_option().unwrap_or(vec![]) + } + + // check if there are any body missing blocks remaining in the anticone of the current pruning point + pub fn is_anticone_fully_synced(&self) -> bool { + self.get_body_missing_anticone().is_empty() + } + + pub fn is_in_transitional_ibd_state(&self) -> bool { + !self.is_anticone_fully_synced() || !self.pruning_utxoset_stable_flag() + } +} diff --git a/consensus/src/model/stores/pruning_utxoset.rs b/consensus/src/model/stores/pruning_utxoset.rs deleted file mode 100644 index 116134514e..0000000000 --- a/consensus/src/model/stores/pruning_utxoset.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::sync::Arc; - -use kaspa_database::prelude::CachePolicy; -use kaspa_database::prelude::StoreResult; -use kaspa_database::prelude::DB; -use kaspa_database::prelude::{BatchDbWriter, CachedDbItem}; -use kaspa_database::registry::DatabaseStorePrefixes; -use kaspa_hashes::Hash; -use rocksdb::WriteBatch; - -use super::utxo_set::DbUtxoSetStore; - -/// Used in order to group stores related to the pruning point utxoset under a single lock -pub struct PruningUtxosetStores { - pub utxo_set: DbUtxoSetStore, - utxoset_position_access: CachedDbItem, -} - -impl PruningUtxosetStores { - pub fn new(db: Arc, utxoset_cache_policy: CachePolicy) -> Self { - Self { - utxo_set: DbUtxoSetStore::new(db.clone(), utxoset_cache_policy, DatabaseStorePrefixes::PruningUtxoset.into()), - utxoset_position_access: CachedDbItem::new(db, DatabaseStorePrefixes::PruningUtxosetPosition.into()), - } - } - - /// Represents the exact point of the current pruning point utxoset. 
Used it order to safely - /// progress the pruning point utxoset in batches and to allow recovery if the process crashes - /// during the pruning point utxoset movement - pub fn utxoset_position(&self) -> StoreResult { - self.utxoset_position_access.read() - } - - pub fn set_utxoset_position(&mut self, batch: &mut WriteBatch, pruning_utxoset_position: Hash) -> StoreResult<()> { - self.utxoset_position_access.write(BatchDbWriter::new(batch), &pruning_utxoset_position) - } -} diff --git a/consensus/src/model/stores/selected_chain.rs b/consensus/src/model/stores/selected_chain.rs index 82ba8a25d5..271a6f613a 100644 --- a/consensus/src/model/stores/selected_chain.rs +++ b/consensus/src/model/stores/selected_chain.rs @@ -116,6 +116,10 @@ impl SelectedChainStore for DbSelectedChainStore { } fn init_with_pruning_point(&mut self, batch: &mut WriteBatch, block: Hash) -> StoreResult<()> { + // remove potential leftover chain + let _ = self.access_index_by_hash.delete_all(BatchDbWriter::new(batch)); + let _ = self.access_hash_by_index.delete_all(BatchDbWriter::new(batch)); + self.access_index_by_hash.write(BatchDbWriter::new(batch), block, 0)?; self.access_hash_by_index.write(BatchDbWriter::new(batch), 0.into(), block)?; self.access_highest_index.write(BatchDbWriter::new(batch), &0).unwrap(); diff --git a/consensus/src/model/stores/tips.rs b/consensus/src/model/stores/tips.rs index 06dd13b0cf..f00eaeca1a 100644 --- a/consensus/src/model/stores/tips.rs +++ b/consensus/src/model/stores/tips.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use itertools::Itertools; use kaspa_consensus_core::BlockHashSet; use kaspa_consensus_core::BlockHasher; use kaspa_database::prelude::CachedDbSetItem; @@ -38,6 +39,7 @@ pub trait TipsStore: TipsStoreReader { self.prune_tips_with_writer(BatchDbWriter::new(batch), pruned_tips) } fn prune_tips_with_writer(&mut self, writer: impl DbWriter, pruned_tips: &[Hash]) -> StoreResult<()>; + fn delete_all_tips(&mut self, writer: &mut WriteBatch) -> StoreResult<()>; } /// A DB + cache implementation of `TipsStore` trait @@ -94,6 +96,11 @@ impl TipsStore for DbTipsStore { self.access.update(writer, &[], pruned_tips)?; Ok(()) } + fn delete_all_tips(&mut self, writer: &mut WriteBatch) -> StoreResult<()> { + let tips = self.get()?.read().iter().copied().collect_vec(); + self.access.update(BatchDbWriter::new(writer), &[], &tips)?; + Ok(()) + } } #[cfg(test)] diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 44ed9f453d..b1263f4566 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -1,7 +1,7 @@ use super::BlockBodyProcessor; use crate::{ errors::{BlockProcessResult, RuleError}, - model::stores::{ghostdag::GhostdagStoreReader, headers::HeaderStoreReader, statuses::StatusesStoreReader}, + model::stores::statuses::StatusesStoreReader, processes::{ transaction_validator::{ tx_validation_in_header_context::{LockTimeArg, LockTimeType}, @@ -10,7 +10,7 @@ use crate::{ window::WindowManager, }, }; -use kaspa_consensus_core::{block::Block, errors::tx::TxRuleError}; +use kaspa_consensus_core::block::Block; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use once_cell::unsync::Lazy; @@ -19,7 +19,6 @@ use std::sync::Arc; impl BlockBodyProcessor { pub fn validate_body_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { self.check_parent_bodies_exist(block)?; - 
self.check_coinbase_outputs_limit(block)?; self.check_coinbase_blue_score_and_subsidy(block)?; self.check_block_transactions_in_context(block) } @@ -35,7 +34,7 @@ impl BlockBodyProcessor { // We only evaluate the pmt calculation when actually needed LockTimeType::Time => LockTimeArg::MedianTime((*lazy_pmt_res).clone()?), }; - if let Err(e) = self.transaction_validator.validate_tx_in_header_context(tx, block.header.daa_score, lock_time_arg) { + if let Err(e) = self.transaction_validator.validate_tx_in_header_context(tx, lock_time_arg) { return Err(RuleError::TxInContextFailed(tx.id(), e)); }; } @@ -61,32 +60,6 @@ impl BlockBodyProcessor { Ok(()) } - fn check_coinbase_outputs_limit(&self, block: &Block) -> BlockProcessResult<()> { - // [Crescendo]: coinbase_outputs_limit depends on ghostdag k and thus depends on fork activation - // which makes it header contextual. - // - // TODO (post HF): move this check back to transaction in isolation validation - - // [Crescendo]: Ghostdag k activation is decided based on selected parent DAA score - // so we follow the same methodology for coinbase output limit (which is driven from the - // actual bound on the number of blue blocks in the mergeset). - // - // Note that body validation in context is not called for trusted blocks, so we can safely assume - // the selected parent exists and its daa score is accessible - let selected_parent = self.ghostdag_store.get_selected_parent(block.hash()).unwrap(); - let selected_parent_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); - let coinbase_outputs_limit = self.ghostdag_k.get(selected_parent_daa_score) as u64 + 2; - - let tx = &block.transactions[0]; - if tx.outputs.len() as u64 > coinbase_outputs_limit { - return Err(RuleError::TxInIsolationValidationFailed( - tx.id(), - TxRuleError::CoinbaseTooManyOutputs(tx.outputs.len(), coinbase_outputs_limit), - )); - } - Ok(()) - } - fn check_coinbase_blue_score_and_subsidy(self: &Arc, block: &Block) -> BlockProcessResult<()> { match self.coinbase_manager.deserialize_coinbase_payload(&block.transactions[0].payload) { Ok(data) => { @@ -121,17 +94,13 @@ mod tests { }; use kaspa_consensus_core::{ api::ConsensusApi, - merkle::calc_hash_merkle_root as calc_hash_merkle_root_with_options, + merkle::calc_hash_merkle_root, subnets::SUBNETWORK_ID_NATIVE, tx::{Transaction, TransactionInput, TransactionOutpoint}, }; use kaspa_core::assert_match; use kaspa_hashes::Hash; - fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator) -> Hash { - calc_hash_merkle_root_with_options(txs, false) - } - #[tokio::test] async fn validate_body_in_context_test() { let config = ConfigBuilder::new(DEVNET_PARAMS) @@ -142,7 +111,7 @@ mod tests { let wait_handles = consensus.init(); let body_processor = consensus.block_body_processor(); - consensus.add_block_with_parents(1.into(), vec![config.genesis.hash]).await.unwrap(); + consensus.add_header_only_block_with_parents(1.into(), vec![config.genesis.hash]).await.unwrap(); { let block = consensus.build_block_with_parents_and_transactions(2.into(), vec![1.into()], vec![]); diff --git a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs index 2afd80421b..bd1032c467 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs @@ -11,14 +11,11 @@ use kaspa_consensus_core::{ impl BlockBodyProcessor { pub fn validate_body_in_isolation(self: &Arc, 
block: &Block) -> BlockProcessResult { - let crescendo_activated = self.crescendo_activation.is_active(block.header.daa_score); - Self::check_has_transactions(block)?; - Self::check_hash_merkle_root(block, crescendo_activated)?; + Self::check_hash_merkle_root(block)?; Self::check_only_one_coinbase(block)?; self.check_transactions_in_isolation(block)?; - self.check_coinbase_has_zero_mass(block, crescendo_activated)?; - let mass = self.check_block_mass(block, crescendo_activated)?; + let mass = self.check_block_mass(block)?; self.check_duplicate_transactions(block)?; self.check_block_double_spends(block)?; self.check_no_chained_transactions(block)?; @@ -34,8 +31,8 @@ impl BlockBodyProcessor { Ok(()) } - fn check_hash_merkle_root(block: &Block, crescendo_activated: bool) -> BlockProcessResult<()> { - let calculated = calc_hash_merkle_root(block.transactions.iter(), crescendo_activated); + fn check_hash_merkle_root(block: &Block) -> BlockProcessResult<()> { + let calculated = calc_hash_merkle_root(block.transactions.iter()); if calculated != block.header.hash_merkle_root { return Err(RuleError::BadMerkleRoot(block.header.hash_merkle_root, calculated)); } @@ -63,56 +60,36 @@ impl BlockBodyProcessor { Ok(()) } - fn check_coinbase_has_zero_mass(&self, block: &Block, crescendo_activated: bool) -> BlockProcessResult<()> { - // TODO (post HF): move to check_coinbase_in_isolation - if crescendo_activated && block.transactions[0].mass() > 0 { - return Err(RuleError::CoinbaseNonZeroMassCommitment); - } - Ok(()) - } - - fn check_block_mass(self: &Arc, block: &Block, crescendo_activated: bool) -> BlockProcessResult { - if crescendo_activated { - let mut total_compute_mass: u64 = 0; - let mut total_transient_mass: u64 = 0; - let mut total_storage_mass: u64 = 0; - for tx in block.transactions.iter() { - // Calculate the non-contextual masses - let NonContextualMasses { compute_mass, transient_mass } = self.mass_calculator.calc_non_contextual_masses(tx); - - // Read the storage mass commitment. This value cannot be computed here w/o UTXO context - // so we use the commitment. Later on, when the transaction is verified in context, we use - // the context to calculate the expected storage mass and verify it matches this commitment - let storage_mass_commitment = tx.mass(); - - // Sum over the various masses separately - total_compute_mass = total_compute_mass.saturating_add(compute_mass); - total_transient_mass = total_transient_mass.saturating_add(transient_mass); - total_storage_mass = total_storage_mass.saturating_add(storage_mass_commitment); - - // Verify all limits - if total_compute_mass > self.max_block_mass { - return Err(RuleError::ExceedsComputeMassLimit(total_compute_mass, self.max_block_mass)); - } - if total_transient_mass > self.max_block_mass { - return Err(RuleError::ExceedsTransientMassLimit(total_transient_mass, self.max_block_mass)); - } - if total_storage_mass > self.max_block_mass { - return Err(RuleError::ExceedsStorageMassLimit(total_storage_mass, self.max_block_mass)); - } + fn check_block_mass(self: &Arc, block: &Block) -> BlockProcessResult { + let mut total_compute_mass: u64 = 0; + let mut total_transient_mass: u64 = 0; + let mut total_storage_mass: u64 = 0; + for tx in block.transactions.iter() { + // Calculate the non-contextual masses + let NonContextualMasses { compute_mass, transient_mass } = self.mass_calculator.calc_non_contextual_masses(tx); + + // Read the storage mass commitment. This value cannot be computed here w/o UTXO context + // so we use the commitment. 
Later on, when the transaction is verified in context, we use + // the context to calculate the expected storage mass and verify it matches this commitment + let storage_mass_commitment = tx.mass(); + + // Sum over the various masses separately + total_compute_mass = total_compute_mass.saturating_add(compute_mass); + total_transient_mass = total_transient_mass.saturating_add(transient_mass); + total_storage_mass = total_storage_mass.saturating_add(storage_mass_commitment); + + // Verify all limits + if total_compute_mass > self.max_block_mass { + return Err(RuleError::ExceedsComputeMassLimit(total_compute_mass, self.max_block_mass)); + } + if total_transient_mass > self.max_block_mass { + return Err(RuleError::ExceedsTransientMassLimit(total_transient_mass, self.max_block_mass)); } - Ok((NonContextualMasses::new(total_compute_mass, total_transient_mass), ContextualMasses::new(total_storage_mass))) - } else { - let mut total_mass: u64 = 0; - for tx in block.transactions.iter() { - let compute_mass = self.mass_calculator.calc_non_contextual_masses(tx).compute_mass; - total_mass = total_mass.saturating_add(compute_mass); - if total_mass > self.max_block_mass { - return Err(RuleError::ExceedsComputeMassLimit(total_mass, self.max_block_mass)); - } + if total_storage_mass > self.max_block_mass { + return Err(RuleError::ExceedsStorageMassLimit(total_storage_mass, self.max_block_mass)); } - Ok((NonContextualMasses::new(total_mass, 0), ContextualMasses::new(0))) } + Ok((NonContextualMasses::new(total_compute_mass, total_transient_mass), ContextualMasses::new(total_storage_mass))) } fn check_block_double_spends(self: &Arc, block: &Block) -> BlockProcessResult<()> { @@ -165,17 +142,13 @@ mod tests { api::{BlockValidationFutures, ConsensusApi}, block::MutableBlock, header::Header, - merkle::calc_hash_merkle_root as calc_hash_merkle_root_with_options, + merkle::calc_hash_merkle_root, subnets::{SUBNETWORK_ID_COINBASE, SUBNETWORK_ID_NATIVE}, tx::{scriptvec, ScriptPublicKey, Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput}, }; use kaspa_core::assert_match; use kaspa_hashes::Hash; - fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator) -> Hash { - calc_hash_merkle_root_with_options(txs, false) - } - #[test] fn validate_body_in_isolation_test() { let consensus = TestConsensus::new(&Config::new(MAINNET_PARAMS)); @@ -194,7 +167,9 @@ mod tests { 0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b, 0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87, 0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52, 0x6a, 0xce, 0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00, ]), - ]], + ]] + .try_into() + .unwrap(), Hash::from_slice(&[ 0x46, 0xec, 0xf4, 0x5b, 0xe3, 0xba, 0xca, 0x34, 0x9d, 0xfe, 0x8a, 0x78, 0xde, 0xaf, 0x05, 0x3b, 0x0a, 0xa6, 0xd5, 0x38, 0x97, 0x4d, 0xa5, 0x0f, 0xd6, 0xef, 0xb4, 0xd2, 0x66, 0xbc, 0x8d, 0x21, @@ -510,7 +485,7 @@ mod tests { ); let mut block = consensus.build_block_with_parents_and_transactions(1.into(), vec![config.genesis.hash], vec![]); - block.header.parents_by_level[0][0] = 0.into(); + block.header.parents_by_level.set_direct_parents(vec![0.into()]); assert_match!( consensus.validate_and_insert_block(block.clone().to_immutable()).virtual_state_task.await, diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 4229ea263c..4a8336dd17 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -28,7 +28,7 @@ use kaspa_consensus_core::{ 
blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, config::{ genesis::GenesisBlock, - params::{ForkActivation, ForkedParam, Params}, + params::{ForkedParam, Params}, }, mass::{Mass, MassCalculator, MassOps}, tx::Transaction, @@ -87,9 +87,6 @@ pub struct BlockBodyProcessor { // Counters counters: Arc, - - /// Storage mass hardfork DAA score - pub(crate) crescendo_activation: ForkActivation, } impl BlockBodyProcessor { @@ -133,7 +130,6 @@ impl BlockBodyProcessor { task_manager: BlockTaskDependencyManager::new(), notification_root, counters, - crescendo_activation: params.crescendo_activation, } } diff --git a/consensus/src/pipeline/header_processor/post_pow_validation.rs b/consensus/src/pipeline/header_processor/post_pow_validation.rs index c3feb9aa92..6ae936711c 100644 --- a/consensus/src/pipeline/header_processor/post_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/post_pow_validation.rs @@ -13,7 +13,6 @@ impl HeaderProcessor { self.check_median_timestamp(ctx, header)?; self.check_mergeset_size_limit(ctx)?; self.check_bounded_merge_depth(ctx)?; - self.check_pruning_point(ctx, header)?; self.check_indirect_parents(ctx, header) } @@ -30,7 +29,7 @@ impl HeaderProcessor { pub fn check_mergeset_size_limit(&self, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { let mergeset_size = ctx.ghostdag_data().mergeset_size() as u64; - let mergeset_size_limit = self.mergeset_size_limit.get(ctx.selected_parent_daa_score()); + let mergeset_size_limit = self.mergeset_size_limit.after(); if mergeset_size > mergeset_size_limit { return Err(RuleError::MergeSetTooBig(mergeset_size, mergeset_size_limit)); } @@ -54,50 +53,33 @@ impl HeaderProcessor { } pub fn check_indirect_parents(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { - let expected_block_parents = self.parents_manager.calc_block_parents(ctx.pruning_point(), header.direct_parents()); - let crescendo_activated = self.crescendo_activation.is_active(ctx.selected_parent_daa_score()); - if header.parents_by_level.len() != expected_block_parents.len() - || !expected_block_parents.iter().enumerate().all(|(block_level, expected_level_parents)| { - let header_level_parents = &header.parents_by_level[block_level]; - if header_level_parents.len() != expected_level_parents.len() { - return false; - } - // Optimistic path where both arrays are identical also in terms of order - if header_level_parents == expected_level_parents { - return true; - } - if crescendo_activated { + let expected_block_parents = self.parents_manager.calc_block_parents(ctx.pruning_point, header.direct_parents()); + if header.parents_by_level.expanded_len() != expected_block_parents.expanded_len() + || !expected_block_parents.expanded_iter().zip(header.parents_by_level.expanded_iter()).all( + |(expected_level_parents, header_level_parents)| { + if header_level_parents.len() != expected_level_parents.len() { + return false; + } + // Optimistic path where both arrays are identical also in terms of order + if header_level_parents == expected_level_parents { + return true; + } HashSet::<&Hash>::from_iter(header_level_parents) == HashSet::<&Hash>::from_iter(expected_level_parents) - } else { - let expected_set = HashSet::<&Hash>::from_iter(expected_level_parents); - header_level_parents.iter().all(|header_parent| expected_set.contains(header_parent)) - } - }) + }, + ) { return Err(RuleError::UnexpectedIndirectParents( - TwoDimVecDisplay(expected_block_parents), - TwoDimVecDisplay(header.parents_by_level.clone()), 
+ TwoDimVecDisplay(expected_block_parents.into()), + TwoDimVecDisplay((&header.parents_by_level).into()), )); }; Ok(()) } - pub fn check_pruning_point(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { - // [Crescendo]: changing expected pruning point check from header validity to chain qualification - if !self.crescendo_activation.is_active(ctx.selected_parent_daa_score()) { - let expected = - self.pruning_point_manager.expected_header_pruning_point_v1(ctx.ghostdag_data().to_compact(), ctx.pruning_info); - if expected != header.pruning_point { - return Err(RuleError::WrongHeaderPruningPoint(expected, header.pruning_point)); - } - } - Ok(()) - } - pub fn check_bounded_merge_depth(&self, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { let ghostdag_data = ctx.ghostdag_data(); - let merge_depth_root = self.depth_manager.calc_merge_depth_root(ghostdag_data, ctx.pruning_point()); - let finality_point = self.depth_manager.calc_finality_point(ghostdag_data, ctx.pruning_point()); + let merge_depth_root = self.depth_manager.calc_merge_depth_root(ghostdag_data, ctx.pruning_point); + let finality_point = self.depth_manager.calc_finality_point(ghostdag_data, ctx.pruning_point); let mut kosherizing_blues: Option> = None; for red in ghostdag_data.mergeset_reds.iter().copied() { diff --git a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs index e153c18274..76303c2ac7 100644 --- a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs @@ -17,7 +17,7 @@ impl HeaderProcessor { pub(super) fn validate_header_in_isolation(&self, header: &Header) -> BlockProcessResult { self.check_header_version(header)?; self.check_block_timestamp_in_isolation(header)?; - self.check_parents_limit_upper_bound(header)?; + self.check_parents_limit(header)?; Self::check_parents_not_origin(header)?; self.check_pow_and_calc_block_level(header) } @@ -44,16 +44,14 @@ impl HeaderProcessor { Ok(()) } - fn check_parents_limit_upper_bound(&self, header: &Header) -> BlockProcessResult<()> { + fn check_parents_limit(&self, header: &Header) -> BlockProcessResult<()> { if header.direct_parents().is_empty() { return Err(RuleError::NoParents); } - // [Crescendo]: moved the tight parents limit check to pre_pow_validation since it requires selected parent DAA score info - // which is available only post ghostdag. We keep this upper bound check here since this method is applied to trusted blocks - // as well. 
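For orientation, the `.get(selected_parent_daa_score)`, `.after()`, and `.upper_bound()` accessors exchanged throughout these hunks assume a fork-activation parameter along the following lines; this is a minimal sketch under assumed definitions, not the actual kaspa-consensus-core implementation:

// Minimal sketch of the ForkedParam pattern: a pre-fork and a post-fork value,
// selected by a DAA-score activation. Names mirror the call sites above, but
// the definition itself is illustrative.
pub struct ForkActivation(u64);

impl ForkActivation {
    pub fn is_active(&self, daa_score: u64) -> bool {
        daa_score >= self.0
    }
}

pub struct ForkedParam<T: Copy> {
    before: T,
    after: T,
    activation: ForkActivation,
}

impl<T: Copy> ForkedParam<T> {
    /// The value as a function of the (selected parent) DAA score.
    pub fn get(&self, daa_score: u64) -> T {
        if self.activation.is_active(daa_score) { self.after } else { self.before }
    }

    /// The post-activation value. Once the fork is permanently active, call sites
    /// can read this directly, which is exactly the simplification this diff applies.
    pub fn after(&self) -> T {
        self.after
    }
}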
- if header.direct_parents().len() > self.max_block_parents.upper_bound() as usize { - return Err(RuleError::TooManyParents(header.direct_parents().len(), self.max_block_parents.upper_bound() as usize)); + let max_block_parents = self.max_block_parents.after() as usize; + if header.direct_parents().len() > max_block_parents { + return Err(RuleError::TooManyParents(header.direct_parents().len(), max_block_parents)); } Ok(()) diff --git a/consensus/src/pipeline/header_processor/pre_pow_validation.rs b/consensus/src/pipeline/header_processor/pre_pow_validation.rs index e896e4f5a3..a6abd0bf67 100644 --- a/consensus/src/pipeline/header_processor/pre_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_pow_validation.rs @@ -6,34 +6,19 @@ use kaspa_consensus_core::header::Header; impl HeaderProcessor { pub(super) fn pre_pow_validation(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { - self.check_parents_limit(ctx, header)?; self.check_pruning_violation(ctx)?; self.check_difficulty_and_daa_score(ctx, header)?; Ok(()) } - // TODO (post HF): move back to pre_ghostdag_validation (substitute for check_parents_limit_upper_bound) - fn check_parents_limit(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { - if header.direct_parents().is_empty() { - return Err(RuleError::NoParents); - } - - let max_block_parents = self.max_block_parents.get(ctx.selected_parent_daa_score()) as usize; - if header.direct_parents().len() > max_block_parents { - return Err(RuleError::TooManyParents(header.direct_parents().len(), max_block_parents)); - } - - Ok(()) - } - fn check_pruning_violation(&self, ctx: &HeaderProcessingContext) -> BlockProcessResult<()> { let known_parents = ctx.direct_known_parents(); // We check that the new block is in the future of the pruning point by verifying that at least // one of its parents is in the pruning point future (or the pruning point itself). Otherwise, // the Prunality proof implies that the block can be discarded. 
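The pruning-violation rule in the comment above reduces to a single reachability predicate; the diff's actual check follows right below. A hedged sketch, where `is_dag_ancestor_of` is an assumed stand-in for the reachability service and is taken to be reflexive (so the pruning point counts as its own ancestor):

type Hash = u64; // illustrative stand-in for kaspa_hashes::Hash

/// A block violates pruning iff none of its known parents is the pruning point
/// itself or in the pruning point's future.
fn violates_pruning(
    is_dag_ancestor_of: impl Fn(Hash, Hash) -> bool, // assumed reflexive reachability oracle
    pruning_point: Hash,
    known_parents: &[Hash],
) -> bool {
    !known_parents.iter().any(|&parent| is_dag_ancestor_of(pruning_point, parent))
}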
- if !self.reachability_service.is_dag_ancestor_of_any(ctx.pruning_point(), &mut known_parents.iter().copied()) { - return Err(RuleError::PruningViolation(ctx.pruning_point())); + if !self.reachability_service.is_dag_ancestor_of_any(ctx.pruning_point, &mut known_parents.iter().copied()) { + return Err(RuleError::PruningViolation(ctx.pruning_point)); } Ok(()) } diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 8166754dbd..31fd3488cd 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -14,9 +14,9 @@ use crate::{ daa::DbDaaStore, depth::DbDepthStore, ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, - headers::{DbHeadersStore, HeaderStoreReader}, + headers::DbHeadersStore, headers_selected_tip::{DbHeadersSelectedTipStore, HeadersSelectedTipStoreReader}, - pruning::{DbPruningStore, PruningPointInfo, PruningStoreReader}, + pruning::{DbPruningStore, PruningStoreReader}, reachability::{DbReachabilityStore, StagingReachabilityStore}, relations::{DbRelationsStore, RelationsStoreReader}, statuses::{DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader}, @@ -32,10 +32,7 @@ use itertools::Itertools; use kaspa_consensus_core::{ blockhash::{BlockHashes, ORIGIN}, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - config::{ - genesis::GenesisBlock, - params::{ForkActivation, ForkedParam}, - }, + config::{genesis::GenesisBlock, params::ForkedParam}, header::Header, BlockHashSet, BlockLevel, }; @@ -53,13 +50,12 @@ use super::super::ProcessingCounters; pub struct HeaderProcessingContext { pub hash: Hash, pub header: Arc
<Header>, - pub pruning_info: PruningPointInfo, + pub pruning_point: Hash, pub block_level: BlockLevel, pub known_parents: Vec<BlockHashes>, // Staging data pub ghostdag_data: Option<Arc<GhostdagData>>, - pub selected_parent_daa_score: Option<u64>, // [Crescendo] pub block_window_for_difficulty: Option<Arc<BlockWindowHeap>>, pub block_window_for_past_median_time: Option<Arc<BlockWindowHeap>>, pub mergeset_non_daa: Option<BlockHashSet>, @@ -72,17 +68,16 @@ impl HeaderProcessingContext { hash: Hash, header: Arc<Header>
, block_level: BlockLevel, - pruning_info: PruningPointInfo, + pruning_point: Hash, known_parents: Vec, ) -> Self { Self { hash, header, block_level, - pruning_info, + pruning_point, known_parents, ghostdag_data: None, - selected_parent_daa_score: None, block_window_for_difficulty: None, mergeset_non_daa: None, block_window_for_past_median_time: None, @@ -96,20 +91,11 @@ impl HeaderProcessingContext { &self.known_parents[0] } - /// Returns the pruning point at the time this header began processing - pub fn pruning_point(&self) -> Hash { - self.pruning_info.pruning_point - } - /// Returns the primary (level 0) GHOSTDAG data of this header. - /// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context + /// NOTE: expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { self.ghostdag_data.as_ref().unwrap() } - - pub fn selected_parent_daa_score(&self) -> u64 { - self.selected_parent_daa_score.unwrap() - } } pub struct HeaderProcessor { @@ -127,7 +113,6 @@ pub struct HeaderProcessor { pub(super) mergeset_size_limit: ForkedParam, pub(super) skip_proof_of_work: bool, pub(super) max_block_level: BlockLevel, - pub(super) crescendo_activation: ForkActivation, // DB db: Arc, @@ -214,7 +199,6 @@ impl HeaderProcessor { mergeset_size_limit: params.mergeset_size_limit(), skip_proof_of_work: params.skip_proof_of_work, max_block_level: params.max_block_level, - crescendo_activation: params.crescendo_activation, } } @@ -307,8 +291,6 @@ impl HeaderProcessor { self.validate_parent_relations(header)?; let mut ctx = self.build_processing_context(header, block_level); self.ghostdag(&mut ctx); - // [Crescendo]: persist the selected parent DAA score to be used for activation checks - ctx.selected_parent_daa_score = Some(self.headers_store.get_daa_score(ctx.ghostdag_data().selected_parent).unwrap()); self.pre_pow_validation(&mut ctx, header)?; if let Err(e) = self.post_pow_validation(&mut ctx, header) { self.statuses_store.write().set(ctx.hash, StatusInvalid).unwrap(); @@ -330,7 +312,7 @@ impl HeaderProcessor { header.hash, header.clone(), block_level, - self.pruning_point_store.read().get().unwrap(), + self.pruning_point_store.read().pruning_point().unwrap(), self.collect_known_parents(header, block_level), ) } @@ -368,7 +350,6 @@ impl HeaderProcessor { fn commit_header(&self, ctx: HeaderProcessingContext, header: &Header) { let ghostdag_data = ctx.ghostdag_data.as_ref().unwrap(); - let pp = ctx.pruning_point(); // Create a DB batch writer let mut batch = WriteBatch::default(); @@ -407,7 +388,7 @@ impl HeaderProcessor { let mut hst_write = self.headers_selected_tip_store.write(); let prev_hst = hst_write.get().unwrap(); if SortableBlock::new(ctx.hash, header.blue_work) > prev_hst - && reachability::is_chain_ancestor_of(&staging, pp, ctx.hash).unwrap() + && reachability::is_chain_ancestor_of(&staging, ctx.pruning_point, ctx.hash).unwrap() { // Hint reachability about the new tip. 
reachability::hint_virtual_selected_parent(&mut staging, ctx.hash).unwrap(); @@ -491,7 +472,7 @@ impl HeaderProcessor { self.genesis.hash, genesis_header.clone(), self.max_block_level, - PruningPointInfo::from_genesis(self.genesis.hash), + self.genesis.hash, (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data())); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 4d34119d06..fdf7ca3d20 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -11,7 +11,7 @@ use crate::{ ghostdag::{CompactGhostdagData, GhostdagStoreReader}, headers::HeaderStoreReader, past_pruning_points::PastPruningPointsStoreReader, - pruning::{PruningStore, PruningStoreReader}, + pruning::PruningStoreReader, pruning_samples::PruningSamplesStoreReader, reachability::{DbReachabilityStore, ReachabilityStoreReader, StagingReachabilityStore}, relations::StagingRelationsStore, @@ -19,6 +19,7 @@ use crate::{ statuses::StatusesStoreReader, tips::{TipsStore, TipsStoreReader}, utxo_diffs::UtxoDiffsStoreReader, + virtual_state::VirtualStateStoreReader, }, }, processes::{pruning_proof::PruningProofManager, reachability::inquirer as reachability, relations}, @@ -117,27 +118,37 @@ impl PruningProcessor { } pub fn worker(self: &Arc<Self>) { - let Ok(PruningProcessingMessage::Process { sink_ghostdag_data }) = self.receiver.recv() else { - return; - }; - // On start-up, check if any pruning workflows require recovery. We wait for the first processing message to arrive // in order to make sure the node is already connected and receiving blocks before we start background recovery operations - self.recover_pruning_workflows_if_needed(); - self.advance_pruning_point_and_candidate_if_possible(sink_ghostdag_data); - + let mut recovered = false; while let Ok(PruningProcessingMessage::Process { sink_ghostdag_data }) = self.receiver.recv() { + if !recovered { + if !self.recover_pruning_workflows_if_needed() { + // Recovery could fail for several reasons: + // (a) Consensus has exited while recovery was underway + // (b) Consensus is in a transitional state + // (c) Consensus is no longer in a transitional state per-se but has yet to catch up on sufficient block data + // For (a), the best course of action is to exit the loop + // For (b)+(c), it is to attempt recovery again + // Continuing the loop satisfies both, since if consensus exited, the next iteration of the loop will exit as well + continue; + } + recovered = true; + } self.advance_pruning_point_and_candidate_if_possible(sink_ghostdag_data); } } - fn recover_pruning_workflows_if_needed(&self) { + fn recover_pruning_workflows_if_needed(&self) -> bool { + // Returns true if recovery was completed successfully or was not needed let pruning_point_read = self.pruning_point_store.read(); let pruning_point = pruning_point_read.pruning_point().unwrap(); let retention_checkpoint = pruning_point_read.retention_checkpoint().unwrap(); let retention_period_root = pruning_point_read.retention_period_root().unwrap(); - let pruning_utxoset_position = self.pruning_utxoset_stores.read().utxoset_position().unwrap(); + let pruning_meta_read = self.pruning_meta_stores.read(); + let pruning_utxoset_position = pruning_meta_read.utxoset_position().unwrap(); drop(pruning_point_read); + drop(pruning_meta_read); debug!( "[PRUNING PROCESSOR] recovery check: current pruning
point: {}, retention checkpoint: {:?}, pruning utxoset position: {:?}", @@ -149,10 +160,29 @@ impl PruningProcessor { info!("Recovering pruning utxo-set from {} to the pruning point {}", pruning_utxoset_position, pruning_point); if !self.advance_pruning_utxoset(pruning_utxoset_position, pruning_point) { info!("Interrupted while advancing the pruning point UTXO set: Process is exiting"); - return; + return false; } } + // The following two checks are implicitly performed in advance_pruning_utxoset, and hence can theoretically + // be skipped if that function was called. As these checks are cheap, we perform them regardless + // so as not to complicate the logic. + + // If the latest pruning point is the result of an IBD catchup, it is guaranteed that the headers selected tip + // is pruning_depth on top of it, but crucially it is not guaranteed that *virtual* is of sufficient depth above it. + // Internally, the pruning process performs this check for virtual and fails otherwise, + // so pruning is held until virtual has advanced enough. + if !self.confirm_pruning_depth_below_virtual(pruning_point) { + return false; + } + let pruning_meta_read = self.pruning_meta_stores.read(); + + // Don't prune if in a transitional IBD state. + if pruning_meta_read.is_in_transitional_ibd_state() { + return false; + } + drop(pruning_meta_read); trace!( "retention_checkpoint: {:?} | retention_period_root: {} | pruning_point: {}", retention_checkpoint, @@ -165,16 +195,13 @@ if retention_checkpoint != retention_period_root { self.prune(pruning_point, retention_period_root); } + true } fn advance_pruning_point_and_candidate_if_possible(&self, sink_ghostdag_data: CompactGhostdagData) { let pruning_point_read = self.pruning_point_store.upgradable_read(); - let current_pruning_info = pruning_point_read.get().unwrap(); - let (new_pruning_points, new_candidate) = self.pruning_point_manager.next_pruning_points( - sink_ghostdag_data, - current_pruning_info.candidate, - current_pruning_info.pruning_point, - ); + let (current_pruning_point, current_index) = pruning_point_read.pruning_point_and_index().unwrap(); + let new_pruning_points = self.pruning_point_manager.next_pruning_points(sink_ghostdag_data, current_pruning_point); if let Some(new_pruning_point) = new_pruning_points.last().copied() { let retention_period_root = pruning_point_read.retention_period_root().unwrap(); @@ -183,10 +210,10 @@ let mut batch = WriteBatch::default(); let mut pruning_point_write = RwLockUpgradableReadGuard::upgrade(pruning_point_read); for (i, past_pp) in new_pruning_points.iter().copied().enumerate() { - self.past_pruning_points_store.insert_batch(&mut batch, current_pruning_info.index + i as u64 + 1, past_pp).unwrap(); + self.past_pruning_points_store.insert_batch(&mut batch, current_index + i as u64 + 1, past_pp).unwrap(); } - let new_pp_index = current_pruning_info.index + new_pruning_points.len() as u64; - pruning_point_write.set_batch(&mut batch, new_pruning_point, new_candidate, new_pp_index).unwrap(); + let new_pp_index = current_index + new_pruning_points.len() as u64; + pruning_point_write.set_batch(&mut batch, new_pruning_point, new_pp_index).unwrap(); // For archival nodes, keep the retention root in place let adjusted_retention_period_root = if self.config.is_archival { @@ -203,10 +230,10 @@ trace!("New Pruning Point: {} | New Retention Period Root: {}", new_pruning_point, adjusted_retention_period_root); // Inform the user - info!("Periodic
pruning point movement: advancing from {} to {}", current_pruning_info.pruning_point, new_pruning_point); + info!("Periodic pruning point movement: advancing from {} to {}", current_pruning_point, new_pruning_point); // Advance the pruning point utxoset to the state of the new pruning point using chain-block UTXO diffs - if !self.advance_pruning_utxoset(current_pruning_info.pruning_point, new_pruning_point) { + if !self.advance_pruning_utxoset(current_pruning_point, new_pruning_point) { info!("Interrupted while advancing the pruning point UTXO set: Process is exiting"); return; } @@ -214,25 +241,38 @@ // Finally, prune data in the new pruning point past self.prune(new_pruning_point, adjusted_retention_period_root); - } else if new_candidate != current_pruning_info.candidate { - let mut pruning_point_write = RwLockUpgradableReadGuard::upgrade(pruning_point_read); - pruning_point_write.set(current_pruning_info.pruning_point, new_candidate, current_pruning_info.index).unwrap(); } } fn advance_pruning_utxoset(&self, utxoset_position: Hash, new_pruning_point: Hash) -> bool { - let mut pruning_utxoset_write = self.pruning_utxoset_stores.write(); + // If the latest pruning point is the result of an IBD catchup, it is guaranteed that the headers selected tip + // is pruning_depth on top of it, but crucially it is not guaranteed that *virtual* is of sufficient depth above it. + // Internally, the pruning process performs this check for virtual and fails otherwise, + // so pruning is held until virtual has advanced enough. + if !self.confirm_pruning_depth_below_virtual(new_pruning_point) { + return false; + } + for chain_block in self.reachability_service.forward_chain_iterator(utxoset_position, new_pruning_point, true).skip(1) { if self.is_consensus_exiting.load(Ordering::Relaxed) { return false; } + // Halt pruning if an unstable IBD state was initiated in the midst of it + let pruning_meta_read = self.pruning_meta_stores.upgradable_read(); + + if pruning_meta_read.is_in_transitional_ibd_state() { + return false; + } + let mut pruning_meta_write = RwLockUpgradableReadGuard::upgrade(pruning_meta_read); + let utxo_diff = self.utxo_diffs_store.get(chain_block).expect("chain blocks have utxo state"); let mut batch = WriteBatch::default(); - pruning_utxoset_write.utxo_set.write_diff_batch(&mut batch, utxo_diff.as_ref()).unwrap(); - pruning_utxoset_write.set_utxoset_position(&mut batch, chain_block).unwrap(); + pruning_meta_write.utxo_set.write_diff_batch(&mut batch, utxo_diff.as_ref()).unwrap(); + pruning_meta_write.set_utxoset_position(&mut batch, chain_block).unwrap(); self.db.write(batch).unwrap(); + drop(pruning_meta_write); } - drop(pruning_utxoset_write); if self.config.enable_sanity_checks { info!("Performing a sanity check that the new UTXO set has the expected UTXO commitment"); @@ -245,8 +285,8 @@ info!("Verifying the new pruning point UTXO commitment (sanity test)"); let commitment = self.headers_store.get_header(pruning_point).unwrap().utxo_commitment; let mut multiset = MuHash::new(); - let pruning_utxoset_read = self.pruning_utxoset_stores.read(); - for (outpoint, entry) in pruning_utxoset_read.utxo_set.iterator().map(|r| r.unwrap()) { + let pruning_meta_read = self.pruning_meta_stores.read(); + for (outpoint, entry) in pruning_meta_read.utxo_set.iterator().map(|r| r.unwrap()) { multiset.add_utxo(&outpoint, &entry); } assert_eq!(multiset.finalize(), commitment, "Updated pruning point utxo set does not match the header utxo
commitment"); @@ -469,6 +509,12 @@ impl PruningProcessor { self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); } } + // while we keep headers for keep relation blocks regardless, + // some of those relations blocks may accidentally have a pruning sample stored, + // delete those samples unless the block is a pruning block itself + if !keep_headers.contains(¤t) { + self.pruning_samples_store.delete_batch(&mut batch, current).unwrap(); + } } else { // Count only blocks which get fully pruned including DAG relations counter += 1; @@ -558,7 +604,7 @@ impl PruningProcessor { /// doing any pruning. Pruning point must be the new pruning point this node is advancing to. /// /// The returned retention_period_root is guaranteed to be in past(pruning_point) or the pruning point itself. - fn advance_retention_period_root(&self, retention_period_root: Hash, pruning_point: Hash) -> Hash { + pub fn advance_retention_period_root(&self, retention_period_root: Hash, pruning_point: Hash) -> Hash { match self.config.retention_period_days { // If the retention period wasn't set, immediately default to the pruning point. None => pruning_point, @@ -610,11 +656,18 @@ impl PruningProcessor { } fn past_pruning_points(&self) -> BlockHashSet { - (0..self.pruning_point_store.read().get().unwrap().index) + (0..self.pruning_point_store.read().pruning_point_index().unwrap()) .map(|index| self.past_pruning_points_store.get(index).unwrap()) .collect() } + fn confirm_pruning_depth_below_virtual(&self, pruning_point: Hash) -> bool { + let virtual_state = self.virtual_stores.read().state.get().unwrap(); + let pp_bs = self.headers_store.get_blue_score(pruning_point).unwrap(); + let pp_daa = self.headers_store.get_daa_score(pruning_point).unwrap(); + virtual_state.ghostdag_data.blue_score >= pp_bs + self.config.params.pruning_depth().get(pp_daa) + } + fn assert_proof_rebuilding(&self, ref_proof: Arc, new_pruning_point: Hash) { info!("Rebuilding the pruning proof after pruning data (sanity test)"); let proof_hashes = ref_proof.iter().flatten().map(|h| h.hash).collect::>(); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index c9177d6860..fe383d74d3 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -23,8 +23,8 @@ use crate::{ headers::{DbHeadersStore, HeaderStoreReader}, past_pruning_points::DbPastPruningPointsStore, pruning::{DbPruningStore, PruningStoreReader}, + pruning_meta::PruningMetaStores, pruning_samples::DbPruningSamplesStore, - pruning_utxoset::PruningUtxosetStores, reachability::DbReachabilityStore, relations::{DbRelationsStore, RelationsStoreReader}, selected_chain::{DbSelectedChainStore, SelectedChainStore}, @@ -141,7 +141,7 @@ pub struct VirtualStateProcessor { pub(super) utxo_multisets_store: Arc, pub(super) acceptance_data_store: Arc, pub(super) virtual_stores: Arc>, - pub(super) pruning_utxoset_stores: Arc>, + pub(super) pruning_meta_stores: Arc>, /// The "last known good" virtual state. 
To be used by any logic which does not want to wait /// for a possible virtual state write to complete but can rather settle with the last known state @@ -223,7 +223,7 @@ impl VirtualStateProcessor { utxo_multisets_store: storage.utxo_multisets_store.clone(), acceptance_data_store: storage.acceptance_data_store.clone(), virtual_stores: storage.virtual_stores.clone(), - pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(), + pruning_meta_stores: storage.pruning_meta_stores.clone(), lkg_virtual_state: storage.lkg_virtual_state.clone(), block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), @@ -709,9 +709,8 @@ impl VirtualStateProcessor { // we might touch such data prior to validating the bounded merge rule. All in all, this function is short // enough so we avoid making further optimizations let _prune_guard = self.pruning_lock.blocking_read(); - let selected_parent_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); - let max_block_parents = self.max_block_parents.get(selected_parent_daa_score) as usize; - let mergeset_size_limit = self.mergeset_size_limit.get(selected_parent_daa_score); + let max_block_parents = self.max_block_parents.after() as usize; + let mergeset_size_limit = self.mergeset_size_limit.after(); let max_candidates = self.max_virtual_parent_candidates(max_block_parents); // Prioritize half the blocks with highest blue work and pick the rest randomly to ensure diversity between nodes @@ -1054,9 +1053,9 @@ impl VirtualStateProcessor { // [`calc_block_parents`] can use deep blocks below the pruning point for this calculation, so we // need to hold the pruning lock. let _prune_guard = self.pruning_lock.blocking_read(); - let pruning_info = self.pruning_point_store.read().get().unwrap(); + let pruning_point = self.pruning_point_store.read().pruning_point().unwrap(); let header_pruning_point = - self.pruning_point_manager.expected_header_pruning_point_v2(virtual_state.ghostdag_data.to_compact()).pruning_point; + self.pruning_point_manager.expected_header_pruning_point(virtual_state.ghostdag_data.to_compact()).pruning_point; let coinbase = self .coinbase_manager .expected_coinbase_transaction( @@ -1069,11 +1068,8 @@ impl VirtualStateProcessor { .unwrap(); txs.insert(0, coinbase.tx); let version = BLOCK_VERSION; - let parents_by_level = self.parents_manager.calc_block_parents(pruning_info.pruning_point, &virtual_state.parents); - - // Hash according to hardfork activation - let storage_mass_activated = self.crescendo_activation.is_active(virtual_state.daa_score); - let hash_merkle_root = calc_hash_merkle_root(txs.iter(), storage_mass_activated); + let parents_by_level = self.parents_manager.calc_block_parents(pruning_point, &virtual_state.parents); + let hash_merkle_root = calc_hash_merkle_root(txs.iter()); let accepted_id_merkle_root = self.calc_accepted_id_merkle_root( virtual_state.daa_score, @@ -1116,16 +1112,16 @@ impl VirtualStateProcessor { let pruning_point_read = self.pruning_point_store.upgradable_read(); if pruning_point_read.pruning_point().unwrap_option().is_none() { let mut pruning_point_write = RwLockUpgradableReadGuard::upgrade(pruning_point_read); - let mut pruning_utxoset_write = self.pruning_utxoset_stores.write(); + let mut pruning_meta_write = self.pruning_meta_stores.write(); let mut batch = WriteBatch::default(); self.past_pruning_points_store.insert_batch(&mut batch, 0, self.genesis.hash).unwrap_or_exists(); - pruning_point_write.set_batch(&mut batch, self.genesis.hash, self.genesis.hash, 
0).unwrap(); + pruning_point_write.set_batch(&mut batch, self.genesis.hash, 0).unwrap(); pruning_point_write.set_retention_checkpoint(&mut batch, self.genesis.hash).unwrap(); pruning_point_write.set_retention_period_root(&mut batch, self.genesis.hash).unwrap(); - pruning_utxoset_write.set_utxoset_position(&mut batch, self.genesis.hash).unwrap(); + pruning_meta_write.set_utxoset_position(&mut batch, self.genesis.hash).unwrap(); self.db.write(batch).unwrap(); drop(pruning_point_write); - drop(pruning_utxoset_write); + drop(pruning_meta_write); } } @@ -1170,19 +1166,19 @@ impl VirtualStateProcessor { { // Set the pruning point utxoset position to the new point we just verified let mut batch = WriteBatch::default(); - let mut pruning_utxoset_write = self.pruning_utxoset_stores.write(); - pruning_utxoset_write.set_utxoset_position(&mut batch, new_pruning_point).unwrap(); + let mut pruning_meta_write = self.pruning_meta_stores.write(); + pruning_meta_write.set_utxoset_position(&mut batch, new_pruning_point).unwrap(); self.db.write(batch).unwrap(); - drop(pruning_utxoset_write); + drop(pruning_meta_write); } { // Copy the pruning-point UTXO set into virtual's UTXO set - let pruning_utxoset_read = self.pruning_utxoset_stores.read(); + let pruning_meta_read = self.pruning_meta_stores.read(); let mut virtual_write = self.virtual_stores.write(); virtual_write.utxo_set.clear().unwrap(); - for chunk in &pruning_utxoset_read.utxo_set.iterator().map(|iter_result| iter_result.unwrap()).chunks(1000) { + for chunk in &pruning_meta_read.utxo_set.iterator().map(|iter_result| iter_result.unwrap()).chunks(1000) { virtual_write.utxo_set.write_from_iterator_without_cache(chunk).unwrap(); } } diff --git a/consensus/src/pipeline/virtual_processor/utxo_inquirer.rs b/consensus/src/pipeline/virtual_processor/utxo_inquirer.rs index 6ab6a463cb..ac74d32ade 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_inquirer.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_inquirer.rs @@ -1,80 +1,274 @@ -use std::{cmp, sync::Arc}; +use std::{cmp, collections::HashSet, sync::Arc}; use kaspa_consensus_core::{ - acceptance_data::AcceptanceData, - tx::{SignableTransaction, Transaction, UtxoEntry}, - utxo::{utxo_diff::ImmutableUtxoDiff, utxo_inquirer::UtxoInquirerError}, + acceptance_data::{AcceptanceData, MergesetBlockAcceptanceData}, + tx::{SignableTransaction, Transaction, TransactionId, TransactionIndexType, TransactionOutpoint, UtxoEntry}, + utxo::{ + utxo_diff::ImmutableUtxoDiff, + utxo_inquirer::{UtxoInquirerError, UtxoInquirerFindTxsFromAcceptanceDataError, UtxoInquirerResult}, + }, }; -use kaspa_core::{trace, warn}; +use kaspa_core::trace; use kaspa_hashes::Hash; -use crate::model::stores::{ - acceptance_data::AcceptanceDataStoreReader, block_transactions::BlockTransactionsStoreReader, headers::HeaderStoreReader, - selected_chain::SelectedChainStoreReader, utxo_diffs::UtxoDiffsStoreReader, +use crate::model::{ + services::reachability::ReachabilityService, + stores::{ + acceptance_data::AcceptanceDataStoreReader, block_transactions::BlockTransactionsStoreReader, headers::HeaderStoreReader, + selected_chain::SelectedChainStoreReader, utxo_diffs::UtxoDiffsStoreReader, utxo_set::UtxoSetStoreReader, + virtual_state::VirtualStateStoreReader, + }, }; use super::VirtualStateProcessor; +pub struct MergesetAcceptanceMetaData { + pub accepting_block_hash: Hash, + pub acceptance_data: Arc, + pub accepting_daa_score: u64, + pub mergeset_idx: usize, +} + impl VirtualStateProcessor { - /// Returns the fully populated 
transaction with the given txid which was accepted at the provided accepting_block_daa_score. - /// The argument `accepting_block_daa_score` is expected to be the DAA score of the accepting chain block of `txid`. - /// - /// *Assumed to be called under the pruning read lock.* - pub fn get_populated_transaction( + pub fn find_accepting_data( &self, - txid: Hash, - accepting_block_daa_score: u64, + block_hash: Hash, + retention_period_root_hash: Hash, + sink_hash: Hash, + ) -> UtxoInquirerResult<Option<MergesetAcceptanceMetaData>> { + // accepting block hash, daa score, acceptance data + // check if block is an ancestor of the sink block, i.e. we expect it to be accepted + if self.reachability_service.is_dag_ancestor_of(block_hash, sink_hash) { + // find the first "possible" accepting chain block + let ancestor = self.find_accepting_chain_block_hash_at_daa_score( + self.headers_store + .get_daa_score(block_hash) + .map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(block_hash))?, + retention_period_root_hash, + )?; + // iterate forward from the ancestor to the sink block, looking for the accepting block + for candidate in self.reachability_service.forward_chain_iterator(ancestor, sink_hash, true) { + let acceptance_data = self + .acceptance_data_store + .get(candidate) + .map_err(|_| UtxoInquirerError::MissingAcceptanceDataForChainBlock(candidate))?; + for (i, mbad) in acceptance_data.iter().enumerate() { + if mbad.block_hash == block_hash { + return Ok(Some(MergesetAcceptanceMetaData { + accepting_block_hash: candidate, + acceptance_data, + accepting_daa_score: self + .headers_store + .get_daa_score(candidate) + .map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(candidate))?, + mergeset_idx: i, + })); + } + } + } + } + Ok(None) + } + + pub fn populate_block_transactions( + &self, + block_hash: Hash, + txs: Vec<Transaction>, retention_period_root_hash: Hash, - ) -> Result<SignableTransaction, UtxoInquirerError> { - let retention_period_root_daa_score = self + ) -> UtxoInquirerResult<Vec<SignableTransaction>> { + let virtual_state_read = self.virtual_stores.read(); + let sink_hash = virtual_state_read.state.get().expect("expected virtual state").ghostdag_data.selected_parent; + let utxo_store = &virtual_state_read.utxo_set; + + let mut signable_transactions = Vec::with_capacity(txs.len()); + + if let Some(mergeset_meta_data) = self.find_accepting_data(block_hash, retention_period_root_hash, sink_hash)? { + // We have a mergeset acceptance, so we must factor in the acceptance data to populate the transactions + let utxo_diff = self + .utxo_diffs_store + .get(mergeset_meta_data.accepting_block_hash) + .map_err(|_| UtxoInquirerError::MissingUtxoDiffForChainBlock(mergeset_meta_data.accepting_block_hash))?; + for tx in txs.into_iter() { + let mut entries = Vec::with_capacity(tx.inputs.len()); + for input in tx.inputs.iter() { + if let Some(utxo) = utxo_diff.removed().get(&input.previous_outpoint) { + // first check: if it was accepted, i.e. removed in the diff + entries.push(utxo.clone()); + } else if let Some(utxo) = utxo_store.get(&input.previous_outpoint).ok().map(|arc| (*arc).clone()) { + // second check: if it was not accepted, it may be in the utxo set + entries.push(utxo); + } else { + // third check: if it was not accepted and not in the utxo set, it may have been created and spent in a parallel block.
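As an aside, a hedged summary of the three-step entry resolution implemented above; the closure-based lookups and the function name are illustrative, not the codebase's API, and the `resolve_missing_outpoint` call handling the third case continues right below:

use kaspa_consensus_core::tx::{TransactionOutpoint, UtxoEntry};

// Sketch of the resolution order assumed by populate_block_transactions.
fn resolve_utxo_entry(
    outpoint: &TransactionOutpoint,
    removed_in_accepting_diff: impl Fn(&TransactionOutpoint) -> Option<UtxoEntry>,
    live_in_virtual_utxo_set: impl Fn(&TransactionOutpoint) -> Option<UtxoEntry>,
    recovered_from_acceptance_data: impl Fn(&TransactionOutpoint) -> Option<UtxoEntry>,
) -> Option<UtxoEntry> {
    // 1. accepted: the spent entry appears in the accepting chain block's removed diff
    removed_in_accepting_diff(outpoint)
        // 2. not accepted: the entry may still be live in virtual's UTXO set
        .or_else(|| live_in_virtual_utxo_set(outpoint))
        // 3. created and spent in parallel: rebuild the entry from the creating
        //    transaction's outputs via the mergeset's acceptance data
        .or_else(|| recovered_from_acceptance_data(outpoint))
}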
+ entries.push(self.resolve_missing_outpoint( + &input.previous_outpoint, + &mergeset_meta_data.acceptance_data, + mergeset_meta_data.accepting_daa_score, + )?); + } + } + signable_transactions.push(SignableTransaction::with_entries(tx, entries)); + } + } else { + // We don't have a mergeset acceptance, so we use the utxo set solely to populate the transactions. + // we do not expect to find the outpoints anywhere else. + for tx in txs.into_iter() { + let mut entries = Vec::with_capacity(tx.inputs.len()); + for input in tx.inputs.iter() { + match utxo_store.get(&input.previous_outpoint) { + Ok(utxo) => entries.push((*utxo).clone()), + Err(_) => return Err(UtxoInquirerError::MissingUtxoEntryForOutpoint(input.previous_outpoint)), + } + } + signable_transactions.push(SignableTransaction::with_entries(tx, entries)); + } + } + + Ok(signable_transactions) + } + + fn resolve_missing_outpoint( + &self, + outpoint: &TransactionOutpoint, + acceptance_data: &AcceptanceData, + accepting_block_daa_score: u64, + ) -> UtxoInquirerResult { + // This handles this rare scenario: + // - UTXO0 is spent by TX1 and creates UTXO1 + // - UTXO1 is spent by TX2 and creates UTXO2 + // - A chain block happens to accept both of these + // In this case, removed_diff wouldn't contain the outpoint of the created-and-immediately-spent UTXO + // so we use the transaction (which also has acceptance data in this block) and look at its outputs + let other_tx = &self.find_txs_from_acceptance_data(Some(vec![outpoint.transaction_id]), acceptance_data)?[0]; + let output = &other_tx.outputs[outpoint.index as usize]; + let utxo_entry = + UtxoEntry::new(output.value, output.script_public_key.clone(), accepting_block_daa_score, other_tx.is_coinbase()); + Ok(utxo_entry) + } + + pub fn get_populated_transactions_by_block_acceptance_data( + &self, + tx_ids: Option>, + block_acceptance_data: MergesetBlockAcceptanceData, + accepting_block: Hash, + ) -> UtxoInquirerResult> { + let accepting_daa_score = self .headers_store - .get_daa_score(retention_period_root_hash) - .map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(retention_period_root_hash))?; + .get_daa_score(accepting_block) + .map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(accepting_block))?; + + let utxo_diff = self + .utxo_diffs_store + .get(accepting_block) + .map_err(|_| UtxoInquirerError::MissingUtxoDiffForChainBlock(accepting_block))?; + + let acceptance_data_for_this_block = vec![block_acceptance_data]; + + let txs = self.find_txs_from_acceptance_data(tx_ids, &acceptance_data_for_this_block)?; + + let mut populated_txs = Vec::::with_capacity(txs.len()); + + for tx in txs.iter() { + let mut entries = Vec::with_capacity(tx.inputs.len()); + for input in tx.inputs.iter() { + let filled_utxo = if let Some(utxo) = utxo_diff.removed().get(&input.previous_outpoint).cloned() { + Some(utxo) + } else if let Some(utxo) = populated_txs.iter().map(|ptx| &ptx.tx).chain(txs.iter()).find_map(|tx| { + if tx.id() == input.previous_outpoint.transaction_id { + let output = &tx.outputs[input.previous_outpoint.index as usize]; + Some(UtxoEntry::new(output.value, output.script_public_key.clone(), accepting_daa_score, tx.is_coinbase())) + } else { + None + } + }) { + Some(utxo) + } else { + // When trying to resolve the missing outpoint, the transaction data we need is going to come from the acceptance + // data of some other block that was merged by this chain block. 
We cannot use "acceptance_data_for_this_block" as that + // definitely cannot contain the missing outpoint. A single block cannot accept interdependent txs, therefore the dependency tx + // must have been included by a different block. + // So we need to acquire the full acceptance data here of all the blocks merged and accepted by this chain block + // and pass that down to resolve_missing_outpoint. + let full_acceptance_data = self + .acceptance_data_store + .get(accepting_block) + .map_err(|_| UtxoInquirerError::MissingAcceptanceDataForChainBlock(accepting_block))?; + Some(self.resolve_missing_outpoint(&input.previous_outpoint, &full_acceptance_data, accepting_daa_score)?) + }; - if accepting_block_daa_score < retention_period_root_daa_score { - // Early exit if target daa score is lower than that of pruning point's daa score: - return Err(UtxoInquirerError::AlreadyPruned); + entries.push(filled_utxo.ok_or(UtxoInquirerError::MissingUtxoEntryForOutpoint(input.previous_outpoint))?); + } + populated_txs.push(SignableTransaction::with_entries(tx.clone(), entries)); } - let (matching_chain_block_hash, acceptance_data) = - self.find_accepting_chain_block_hash_at_daa_score(accepting_block_daa_score, retention_period_root_hash)?; + Ok(populated_txs) + } + pub fn get_populated_transactions_by_accepting_block( + &self, + tx_ids: Option>, + accepting_block: Hash, + ) -> UtxoInquirerResult> { + let acceptance_data = self + .acceptance_data_store + .get(accepting_block) + .map_err(|_| UtxoInquirerError::MissingAcceptanceDataForChainBlock(accepting_block))?; + + let accepting_daa_score = self + .headers_store + .get_daa_score(accepting_block) + .map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(accepting_block))?; // Expected to never fail, since we found the acceptance data and therefore there must be matching diff let utxo_diff = self .utxo_diffs_store - .get(matching_chain_block_hash) - .map_err(|_| UtxoInquirerError::MissingUtxoDiffForChainBlock(matching_chain_block_hash))?; - - let tx = self.find_tx_from_acceptance_data(txid, &acceptance_data)?; - - let mut populated_tx = SignableTransaction::new(tx); - - let removed_diffs = utxo_diff.removed(); - - populated_tx.tx.inputs.iter().enumerate().for_each(|(index, input)| { - let filled_utxo = if let Some(utxo_entry) = removed_diffs.get(&input.previous_outpoint) { - Some(utxo_entry.clone().to_owned()) - } else { - // This handles this rare scenario: - // - UTXO0 is spent by TX1 and creates UTXO1 - // - UTXO1 is spent by TX2 and creates UTXO2 - // - A chain block happens to accept both of these - // In this case, removed_diff wouldn't contain the outpoint of the created-and-immediately-spent UTXO - // so we use the transaction (which also has acceptance data in this block) and look at its outputs - let other_txid = input.previous_outpoint.transaction_id; - let other_tx = self.find_tx_from_acceptance_data(other_txid, &acceptance_data).unwrap(); - let output = &other_tx.outputs[input.previous_outpoint.index as usize]; - let utxo_entry = - UtxoEntry::new(output.value, output.script_public_key.clone(), accepting_block_daa_score, other_tx.is_coinbase()); - Some(utxo_entry) - }; - - populated_tx.entries[index] = filled_utxo; - }); - - Ok(populated_tx) + .get(accepting_block) + .map_err(|_| UtxoInquirerError::MissingUtxoDiffForChainBlock(accepting_block))?; + + let txs = self.find_txs_from_acceptance_data(tx_ids, &acceptance_data)?; + + let mut populated_txs = Vec::::with_capacity(txs.len()); + + for tx in txs.iter() { + let mut entries = 
Vec::with_capacity(tx.inputs.len()); + for input in tx.inputs.iter() { + let filled_utxo = if let Some(utxo) = utxo_diff.removed().get(&input.previous_outpoint).cloned() { + Some(utxo) + } else if let Some(utxo) = populated_txs.iter().map(|ptx| &ptx.tx).chain(txs.iter()).find_map(|tx| { + if tx.id() == input.previous_outpoint.transaction_id { + let output = &tx.outputs[input.previous_outpoint.index as usize]; + Some(UtxoEntry::new(output.value, output.script_public_key.clone(), accepting_daa_score, tx.is_coinbase())) + } else { + None + } + }) { + Some(utxo) + } else { + Some(self.resolve_missing_outpoint(&input.previous_outpoint, &acceptance_data, accepting_daa_score)?) + }; + + entries.push(filled_utxo.ok_or(UtxoInquirerError::MissingUtxoEntryForOutpoint(input.previous_outpoint))?); + } + populated_txs.push(SignableTransaction::with_entries(tx.clone(), entries)); + } + + Ok(populated_txs) } + /// Returns the fully populated transactions with the given tx ids which were accepted at the provided accepting_block_daa_score. + /// The argument `accepting_block_daa_score` is expected to be the DAA score of the accepting chain block of `tx ids`. + /// + /// *Assumed to be called under the pruning read lock.* + /// + pub fn get_populated_transactions_by_accepting_daa_score( + &self, + tx_ids: Option>, + accepting_block_daa_score: u64, + retention_period_root_hash: Hash, + ) -> UtxoInquirerResult> { + let matching_chain_block_hash = + self.find_accepting_chain_block_hash_at_daa_score(accepting_block_daa_score, retention_period_root_hash)?; + + self.get_populated_transactions_by_accepting_block(tx_ids, matching_chain_block_hash) + } /// Find the accepting chain block hash at the given DAA score by binary searching /// through selected chain store using indexes. /// This method assumes that local caller have acquired the pruning read lock to guarantee @@ -82,11 +276,11 @@ impl VirtualStateProcessor { /// other stores outside). 
If no such lock is acquired, this method tries to find /// the accepting chain block hash on a best effort basis (may fail if parts of the data /// are pruned between two sequential calls) - fn find_accepting_chain_block_hash_at_daa_score( + pub fn find_accepting_chain_block_hash_at_daa_score( &self, target_daa_score: u64, retention_period_root_hash: Hash, - ) -> Result<(Hash, Arc), UtxoInquirerError> { + ) -> UtxoInquirerResult { let sc_read = self.selected_chain_store.read(); let retention_period_root_index = sc_read @@ -138,50 +332,153 @@ impl VirtualStateProcessor { } }; - let acceptance_data = self - .acceptance_data_store - .get(matching_chain_block_hash) - .map_err(|_| UtxoInquirerError::MissingAcceptanceDataForChainBlock(matching_chain_block_hash))?; - - Ok((matching_chain_block_hash, acceptance_data)) + Ok(matching_chain_block_hash) } /// Finds a transaction's containing block hash and index within block through /// the accepting block acceptance data - fn find_containing_block_and_index_from_acceptance_data( + fn find_containing_blocks_and_indices_from_acceptance_data( &self, - txid: Hash, + tx_ids: &[TransactionId], acceptance_data: &AcceptanceData, - ) -> Option<(Hash, usize)> { - acceptance_data.iter().find_map(|mbad| { - let tx_arr_index = - mbad.accepted_transactions.iter().find_map(|tx| (tx.transaction_id == txid).then_some(tx.index_within_block as usize)); - tx_arr_index.map(|index| (mbad.block_hash, index)) - }) - } + ) -> Vec<(Hash, Vec)> { + let tx_set = tx_ids.iter().collect::>(); + let mut collected = 0usize; - /// Finds a transaction through the accepting block acceptance data (and using indexed info therein for - /// finding the tx in the block transactions store) - fn find_tx_from_acceptance_data(&self, txid: Hash, acceptance_data: &AcceptanceData) -> Result { - let (containing_block, index) = self - .find_containing_block_and_index_from_acceptance_data(txid, acceptance_data) - .ok_or(UtxoInquirerError::MissingContainingAcceptanceForTx(txid))?; - - let tx = self - .block_transactions_store - .get(containing_block) - .map_err(|_| UtxoInquirerError::MissingBlockFromBlockTxStore(containing_block)) - .and_then(|block_txs| { - block_txs.get(index).cloned().ok_or(UtxoInquirerError::MissingTransactionIndexOfBlock(index, containing_block)) - })?; + let mut result = Vec::with_capacity(acceptance_data.len()); - if tx.id() != txid { - // Should never happen, but do a sanity check. This would mean something went wrong with storing block transactions. - // Sanity check is necessary to guarantee that this function will never give back a wrong address (err on the side of not found) - warn!("Expected {} to match {} when checking block_transaction_store using array index of transaction", tx.id(), txid); - return Err(UtxoInquirerError::UnexpectedTransactionMismatch(tx.id(), txid)); + 'outer: for mbad in acceptance_data.iter() { + for atx in mbad.accepted_transactions.iter() { + let mut indices = Vec::new(); + if tx_set.contains(&atx.transaction_id) { + indices.push(atx.index_within_block); + collected += 1; + if collected == tx_ids.len() { + result.push((mbad.block_hash, indices)); + break 'outer; + } + } + if !indices.is_empty() { + result.push((mbad.block_hash, indices)); + } + } } - Ok(tx) + result + } + + /// Finds transaction(s) through a provided accepting block acceptance data + /// + /// Arguments: + /// * `tx_ids`: an optional list of tx id(s) to resolve. When passing `None`, the accepted transaction ids + /// contained in `acceptance_data` is used as a filter. 
+ /// Finds transaction(s) through a provided accepting block acceptance data + /// + /// Arguments: + /// * `tx_ids`: an optional list of tx id(s) to resolve. When passing `None`, the accepted transaction ids + /// contained in `acceptance_data` are used as a filter. + /// This default behavior ensures that only the transactions accepted by this mergeset are resolved. + /// * `acceptance_data`: accepting block acceptance data + /// + /// Limitations: + /// * `tx_ids` currently only allows filtering by exactly one transaction, not multiple + fn find_txs_from_acceptance_data( + &self, + tx_ids: Option<Vec<TransactionId>>, + acceptance_data: &AcceptanceData, + ) -> UtxoInquirerResult<Vec<Transaction>> { + match tx_ids.as_deref() { + None => { + // no filter passed, default to all transactions accepted by the mergeset + let total_accepted: usize = acceptance_data.iter().map(|mbad| mbad.accepted_transactions.len()).sum(); + + // accepted transactions data of this mergeset + let mut all_txs = Vec::with_capacity(total_accepted); + + for mbad in acceptance_data { + let block_txs = self + .block_transactions_store + .get(mbad.block_hash) + .map_err(|_| UtxoInquirerError::MissingBlockFromBlockTxStore(mbad.block_hash))?; + + for accepted in &mbad.accepted_transactions { + let idx = accepted.index_within_block as usize; + + let tx = block_txs.get(idx).ok_or(UtxoInquirerError::MissingTransactionIndexOfBlock(idx, mbad.block_hash))?; + + all_txs.push(tx.clone()); + } + } + Ok(all_txs) + } + Some([]) => { + // empty filter -> error + Err(UtxoInquirerFindTxsFromAcceptanceDataError::TxIdsFilterIsEmptyError.into()) + } + Some([tx_id]) => { + // single element filter, optimize for this case specifically + let (containing_block, index) = acceptance_data + .iter() + .find_map(|mbad| { + let tx_arr_index = mbad + .accepted_transactions + .iter() + .find_map(|tx| (tx.transaction_id == *tx_id).then_some(tx.index_within_block as usize)); + tx_arr_index.map(|index| (mbad.block_hash, index)) + }) + .ok_or_else(|| UtxoInquirerError::MissingQueriedTransactions(vec![*tx_id]))?; + + let tx = self + .block_transactions_store + .get(containing_block) + .map_err(|_| UtxoInquirerError::MissingBlockFromBlockTxStore(containing_block)) + .and_then(|block_txs| { + block_txs.get(index).cloned().ok_or(UtxoInquirerError::MissingTransactionIndexOfBlock(index, containing_block)) + })?; + + Ok(vec![tx]) + } + Some(_more) => { + Err(UtxoInquirerFindTxsFromAcceptanceDataError::TxIdsFilterNeedsLessOrEqualThanOneElementError.into()) + // TODO: currently no calling site needs an arbitrary `tx_ids` filter with more than one element, + // but supporting it should be considered a future enhancement. + // The commented-out artifact implementation below is kept for the record as long as this is unimplemented. + /* + + let mut txs = HashMap::<TransactionId, Transaction>::new(); + for (containing_block, indices) in + self.find_containing_blocks_and_indices_from_acceptance_data(&tx_ids, acceptance_data) + { + let mut indice_iter = indices.iter(); + let mut target_index = (*indice_iter.next().unwrap()) as usize; + let cut_off_index = (*indices.last().unwrap()) as usize; + + txs.extend( + self.block_transactions_store + .get(containing_block) + .map_err(|_| UtxoInquirerError::MissingBlockFromBlockTxStore(containing_block))? + .unwrap_or_clone() + .into_iter() + .enumerate() + .take_while(|(i, _)| *i <= cut_off_index) + .filter_map(|(i, tx)| { + if i == target_index { + target_index = (*indice_iter.next().unwrap()) as usize; + Some((tx.id(), tx)) + } else { + None + } + }), + ); + } + + /* + if txs.len() < tx_ids.len() { + // The query includes txs which are not in the acceptance data, we consider this an error.
+ return Err(UtxoInquirerError::MissingQueriedTransactions( + tx_ids.iter().filter(|tx_id| !txs.contains_key(*tx_id)).copied().collect::<Vec<_>>(), + )); + }; + */ + + return Ok(tx_ids.iter().map(|tx_id| txs.remove(tx_id).expect("expected queried tx id")).collect::<Vec<_>>()) + */ + } + } } }
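To summarize the filter semantics just defined, the optional `tx_ids` argument has four shapes; a distilled sketch of the dispatch (types simplified to plain u64 ids):

/// Sketch: semantics of the optional tx id filter.
fn classify(filter: Option<&[u64]>) -> &'static str {
    match filter {
        None => "resolve every transaction accepted by the mergeset",
        Some([]) => "error: empty filter",
        Some([_single]) => "optimized single-transaction lookup",
        Some(_) => "error: multi-id filters are not supported yet",
    }
}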
diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 201a634746..13d790d75c 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -252,23 +252,9 @@ impl VirtualStateProcessor { header: &Header, ghostdag_data: CompactGhostdagData, ) -> BlockProcessResult<PruningPointReply> { - // [Crescendo]: changing expected pruning point check from header validity to chain qualification. - // Note that we activate here based on the selected parent DAA score thus complementing the deactivation - // in header processor which is based on selected parent DAA score as well. - - if self.crescendo_activation.is_within_range_from_activation(header.daa_score, 1000) { - self.crescendo_logger.report_activation(); - } - - let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); - // [Crescendo]: we need to save reply.pruning_sample to the database also prior to activation - let reply = self.pruning_point_manager.expected_header_pruning_point_v2(ghostdag_data); - if self.crescendo_activation.is_active(selected_parent_daa_score) { - if reply.pruning_point != header.pruning_point { - return Err(WrongHeaderPruningPoint(reply.pruning_point, header.pruning_point)); - } - } else { - assert_eq!(reply.pruning_point, header.pruning_point, "verified by header validation (v1 = v2 pre activation)"); + let reply = self.pruning_point_manager.expected_header_pruning_point(ghostdag_data); + if reply.pruning_point != header.pruning_point { + return Err(WrongHeaderPruningPoint(reply.pruning_point, header.pruning_point)); } Ok(reply) } @@ -288,11 +274,7 @@ impl VirtualStateProcessor { .expected_coinbase_transaction(daa_score, miner_data, ghostdag_data, mergeset_rewards, mergeset_non_daa) .unwrap() .tx; - // [Crescendo]: we can pass include_mass_field = false here since post activation coinbase mass field - // is guaranteed to be zero (see check_coinbase_has_zero_mass), so after the fork we will be able to - // safely remove the include_mass_field parameter. This is because internally include_mass_field = false - // and mass = 0 are treated the same. - if hashing::tx::hash(coinbase, false) != hashing::tx::hash(&expected_coinbase, false) { + if hashing::tx::hash(coinbase) != hashing::tx::hash(&expected_coinbase) { Err(BadCoinbaseTransaction) } else { Ok(()) diff --git a/consensus/src/processes/block_depth.rs b/consensus/src/processes/block_depth.rs index 2f6c47ee39..7b96ba5ff4 100644 --- a/consensus/src/processes/block_depth.rs +++ b/consensus/src/processes/block_depth.rs @@ -55,10 +55,9 @@ impl - BlockDepthType::MergeRoot => self.merge_depth.get(selected_parent_daa_score), - BlockDepthType::Finality => self.finality_depth.get(selected_parent_daa_score), + BlockDepthType::MergeRoot => self.merge_depth.after(), + BlockDepthType::Finality => self.finality_depth.after(), }; if ghostdag_data.blue_score < depth { return self.genesis_hash; diff --git a/consensus/src/processes/coinbase.rs b/consensus/src/processes/coinbase.rs index 63f656270a..a447533ba3 100644 --- a/consensus/src/processes/coinbase.rs +++ b/consensus/src/processes/coinbase.rs @@ -322,7 +322,7 @@ mod tests { let legacy_cbm = create_legacy_manager(); let pre_deflationary_rewards = legacy_cbm.pre_deflationary_phase_base_subsidy * legacy_cbm.deflationary_phase_daa_score; let total_rewards: u64 = pre_deflationary_rewards + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| x * SECONDS_PER_MONTH).sum::<u64>(); - let testnet_11_bps = SIMNET_PARAMS.bps().upper_bound(); + let testnet_11_bps = SIMNET_PARAMS.bps().after(); let total_high_bps_rewards_rounded_up: u64 = pre_deflationary_rewards + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| (x.div_ceil(testnet_11_bps) * testnet_11_bps) * SECONDS_PER_MONTH).sum::<u64>(); diff --git a/consensus/src/processes/parents_builder.rs b/consensus/src/processes/parents_builder.rs index 14df3fcecb..d7cf637c41 100644 --- a/consensus/src/processes/parents_builder.rs +++ b/consensus/src/processes/parents_builder.rs @@ -1,6 +1,10 @@ use indexmap::IndexSet; use itertools::Itertools; -use kaspa_consensus_core::{blockhash::ORIGIN, header::Header, BlockHashMap, BlockHasher, BlockLevel}; +use kaspa_consensus_core::{ + blockhash::ORIGIN, + header::{CompressedParents, Header}, + BlockHashMap, BlockHasher, BlockLevel, +}; use kaspa_hashes::Hash; use smallvec::{smallvec, SmallVec}; use std::sync::Arc; @@ -33,7 +37,7 @@ impl /// Calculates the parents for each level based on the direct parents. Expects the current /// global pruning point s.t.
at least one of the direct parents is in its inclusive future - pub fn calc_block_parents(&self, current_pruning_point: Hash, direct_parents: &[Hash]) -> Vec<Vec<Hash>> { + pub fn calc_block_parents(&self, current_pruning_point: Hash, direct_parents: &[Hash]) -> CompressedParents { let mut direct_parent_headers = direct_parents.iter().copied().map(|parent| self.headers_store.get_header_with_block_level(parent).unwrap()).collect_vec(); @@ -51,7 +55,7 @@ impl direct_parent_headers.swap(0, first_parent_in_future_of_pruning_point); let mut origin_children_headers = None; - let mut parents = Vec::with_capacity(self.max_block_level as usize); + let mut parents = CompressedParents::default(); for block_level in 0..=self.max_block_level { // Direct parents are guaranteed to be in one another's anticones so add them all to @@ -179,19 +183,15 @@ impl parents } - pub fn parents<'a>(&'a self, header: &'a Header) -> impl ExactSizeIterator<Item = &'a [Hash]> { - (0..=self.max_block_level).map(|level| self.parents_at_level(header, level)) - } - pub fn parents_at_level<'a>(&'a self, header: &'a Header, level: u8) -> &'a [Hash] { - if header.parents_by_level.is_empty() { - // If is genesis - &[] - } else if header.parents_by_level.len() > level as usize { - &header.parents_by_level[level as usize][..] - } else { - std::slice::from_ref(&self.genesis_hash) - } + header.parents_by_level.get(level as usize).unwrap_or_else(|| { + if header.parents_by_level.is_empty() { + // If is genesis + &[] + } else { + std::slice::from_ref(&self.genesis_hash) + } + }) } } @@ -315,7 +315,9 @@ mod tests { vec![1001.into()], vec![1001.into()], vec![1002.into()], - ], + ] + .try_into() + .unwrap(), hash_merkle_root: 1.into(), accepted_id_merkle_root: 1.into(), utxo_commitment: 1.into(), @@ -344,7 +346,9 @@ mod tests { vec![2001.into()], vec![2001.into()], vec![2001.into()], - ], + ] + .try_into() + .unwrap(), hash_merkle_root: 1.into(), accepted_id_merkle_root: 1.into(), utxo_commitment: 1.into(), @@ -373,7 +377,9 @@ mod tests { vec![2001.into()], vec![2001.into()], vec![2001.into()], - ], + ] + .try_into() + .unwrap(), hash_merkle_root: 1.into(), accepted_id_merkle_root: 1.into(), utxo_commitment: 1.into(), @@ -475,7 +481,7 @@ mod tests { header: Arc::new(Header { hash, version: 0, - parents_by_level: expected_parents, + parents_by_level: expected_parents.try_into().unwrap(), hash_merkle_root: 1.into(), accepted_id_merkle_root: 1.into(), utxo_commitment: 1.into(), @@ -501,7 +507,7 @@ mod tests { for test_block in test_blocks { let direct_parents = test_block.direct_parents.iter().map(|parent| Hash::from_u64_word(*parent)).collect_vec(); let parents = parents_manager.calc_block_parents(pruning_point, &direct_parents); - let actual_parents = parents.iter().map(|parents| BlockHashSet::from_iter(parents.iter().copied())).collect_vec(); + let actual_parents = parents.expanded_iter().map(|parents| BlockHashSet::from_iter(parents.iter().copied())).collect_vec(); let expected_parents = test_block .expected_parents .iter() @@ -537,7 +543,7 @@ mod tests { header: Arc::new(Header { hash: pruning_point, version: 0, - parents_by_level: vec![vec![1001.into(), 1002.into()], vec![1001.into(), 1002.into()]], + parents_by_level: vec![vec![1001.into(), 1002.into()], vec![1001.into(), 1002.into()]].try_into().unwrap(), hash_merkle_root: 1.into(), accepted_id_merkle_root: 1.into(), utxo_commitment: 1.into(), @@ -578,7 +584,7 @@ mod tests { header: Arc::new(Header { hash, version: 0, - parents_by_level: expected_parents, + parents_by_level:
expected_parents.try_into().unwrap(), hash_merkle_root: 1.into(), accepted_id_merkle_root: 1.into(), utxo_commitment: 1.into(), @@ -603,7 +609,7 @@ mod tests { for test_block in test_blocks { let direct_parents = test_block.direct_parents.iter().map(|parent| Hash::from_u64_word(*parent)).collect_vec(); let parents = parents_manager.calc_block_parents(pruning_point, &direct_parents); - let actual_parents = parents.iter().map(|parents| BlockHashSet::from_iter(parents.iter().copied())).collect_vec(); + let actual_parents = parents.expanded_iter().map(|parents| BlockHashSet::from_iter(parents.iter().copied())).collect_vec(); let expected_parents = test_block .expected_parents .iter() diff --git a/consensus/src/processes/pruning.rs b/consensus/src/processes/pruning.rs index 6e4af17893..6055001b80 100644 --- a/consensus/src/processes/pruning.rs +++ b/consensus/src/processes/pruning.rs @@ -1,6 +1,5 @@ use std::{collections::VecDeque, sync::Arc}; -use super::{reachability::ReachabilityResultExtensions, utils::CoinFlip}; use crate::model::{ services::reachability::{MTReachabilityService, ReachabilityService}, stores::{ @@ -8,7 +7,6 @@ use crate::model::{ headers::HeaderStoreReader, headers_selected_tip::HeadersSelectedTipStoreReader, past_pruning_points::PastPruningPointsStoreReader, - pruning::PruningPointInfo, pruning_samples::PruningSamplesStore, reachability::ReachabilityStoreReader, }, @@ -18,7 +16,6 @@ use kaspa_consensus_core::{ config::params::ForkedParam, errors::pruning::{PruningImportError, PruningImportResult}, }; -use kaspa_core::{info, log::CRESCENDO_KEYWORD}; use kaspa_database::prelude::StoreResultEmptyTuple; use kaspa_hashes::Hash; use parking_lot::RwLock; @@ -82,14 +79,7 @@ impl< header_selected_tip_store: Arc>, pruning_samples_store: Arc, ) -> Self { - // [Crescendo]: These conditions ensure that blue score points with the same finality score before - // the fork will remain with the same finality score post the fork. See below for the usage. - assert!(finality_depth.before() <= finality_depth.after()); - assert!(finality_depth.after() % finality_depth.before() == 0); - assert!(pruning_depth.before() <= pruning_depth.after()); - - let pruning_samples_steps = pruning_depth.before().div_ceil(finality_depth.before()); - assert_eq!(pruning_samples_steps, pruning_depth.after().div_ceil(finality_depth.after())); + let pruning_samples_steps = pruning_depth.after().div_ceil(finality_depth.after()); Self { pruning_depth, @@ -105,25 +95,23 @@ impl< } } - /// The new method for calculating the expected pruning point from some POV (header/virtual) using the new - /// pruning samples store. Except for edge cases during fork transition, this method is expected to retain - /// the exact semantics of current rules (v1). + /// The method for calculating the expected pruning point from some POV (header/virtual) using the + /// pruning samples store. /// /// Let B denote the current block (represented by `ghostdag_data`) /// Assumptions: - /// 1. Unlike v1 this method assumes that the current global pruning point is on B's chain, which + /// 1. This method assumes that the current global pruning point is on B's chain, which /// is why it should be called only for chain candidates / sink / virtual /// 2. 
All chain ancestors of B up to the pruning point are expected to have a /// `pruning_sample_from_pov` store entry - pub fn expected_header_pruning_point_v2(&self, ghostdag_data: CompactGhostdagData) -> PruningPointReply { + pub fn expected_header_pruning_point(&self, ghostdag_data: CompactGhostdagData) -> PruningPointReply { // // Note that past pruning samples are only assumed to have a header store entry and a pruning sample // store entry, so we only use these stores here (and specifically do not use the ghostdag store) // - let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); - let pruning_depth = self.pruning_depth.get(selected_parent_daa_score); - let finality_depth = self.finality_depth.get(selected_parent_daa_score); + let pruning_depth = self.pruning_depth.after(); + let finality_depth = self.finality_depth.after(); let selected_parent_blue_score = self.headers_store.get_blue_score(ghostdag_data.selected_parent).unwrap(); @@ -171,79 +159,32 @@ impl< PruningPointReply { pruning_sample, pruning_point } } - fn log_pruning_depth_post_activation( - &self, - ghostdag_data: CompactGhostdagData, - selected_parent_daa_score: u64, - pruning_point_blue_score: u64, - ) { - if self.pruning_depth.activation().is_active(selected_parent_daa_score) - && ghostdag_data.blue_score.saturating_sub(pruning_point_blue_score) < self.pruning_depth.after() - && CoinFlip::new(1.0 / 1000.0).flip() - { - info!(target: CRESCENDO_KEYWORD, - "[Crescendo] Pruning depth increasing post activation: {} (target: {})", - ghostdag_data.blue_score.saturating_sub(pruning_point_blue_score), - self.pruning_depth.after() - ); - } - }
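The pruning-sample machinery used above reduces to integer finality scores; a small self-contained illustration mirroring `finality_score` and `is_pruning_sample` from this file:

/// Floored division of blue score by finality depth: the sampling epoch a blue score belongs to.
fn finality_score(blue_score: u64, finality_depth: u64) -> u64 {
    blue_score / finality_depth
}

/// A block is a pruning sample iff it crosses an epoch boundary relative to its chain ancestor.
fn is_pruning_sample(self_blue_score: u64, ancestor_blue_score: u64, finality_depth: u64) -> bool {
    finality_score(ancestor_blue_score, finality_depth) < finality_score(self_blue_score, finality_depth)
}

// E.g. with finality_depth = 100: ancestor at blue score 199 and block at 200 -> sample (1 < 2),
// while ancestor at 150 and block at 199 -> no sample (1 == 1).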
/// A block is a pruning sample *iff* its own finality score is larger than its pruning sample /// finality score or its selected parent finality score (or any block in between them). /// /// To see why we can compare to any such block, observe that by definition all blocks in the range /// `[pruning sample, selected parent]` must have the same finality score. - fn is_pruning_sample(&self, self_blue_score: u64, epoch_chain_ancestor_blue_score: u64, finality_depth: u64) -> bool { + pub fn is_pruning_sample(&self, self_blue_score: u64, epoch_chain_ancestor_blue_score: u64, finality_depth: u64) -> bool { self.finality_score(epoch_chain_ancestor_blue_score, finality_depth) < self.finality_score(self_blue_score, finality_depth) } - pub fn next_pruning_points( - &self, - sink_ghostdag: CompactGhostdagData, - current_candidate: Hash, - current_pruning_point: Hash, - ) -> (Vec<Hash>, Hash) { + pub fn next_pruning_points(&self, sink_ghostdag: CompactGhostdagData, current_pruning_point: Hash) -> Vec<Hash> { if sink_ghostdag.selected_parent.is_origin() { // This only happens when sink is genesis - return (vec![], current_candidate); - } - let selected_parent_daa_score = self.headers_store.get_daa_score(sink_ghostdag.selected_parent).unwrap(); - if self.pruning_depth.activation().is_active(selected_parent_daa_score) { - let v2 = self.next_pruning_points_v2(sink_ghostdag, selected_parent_daa_score, current_pruning_point); - // Keep the candidate valid also post activation just in case it's still used by v1 calls - let candidate = v2.last().copied().unwrap_or(current_candidate); - (v2, candidate) - } else { - let (v1, candidate) = self.next_pruning_points_v1(sink_ghostdag, current_candidate, current_pruning_point); - // [Crescendo]: sanity check that v2 logic pre activation is equivalent to v1 - let v2 = self.next_pruning_points_v2(sink_ghostdag, selected_parent_daa_score, current_pruning_point); - assert_eq!(v1, v2, "v1 = v2 pre activation"); - (v1, candidate) + return vec![]; } - } - fn next_pruning_points_v2( - &self, - sink_ghostdag: CompactGhostdagData, - selected_parent_daa_score: u64, - current_pruning_point: Hash, - ) -> Vec<Hash> { let current_pruning_point_blue_score = self.headers_store.get_blue_score(current_pruning_point).unwrap(); - // Sanity check #1: global pruning point depth from sink >= min(P, P') - if current_pruning_point_blue_score + self.pruning_depth.lower_bound() > sink_ghostdag.blue_score { + // Sanity check #1: global pruning point depth from sink >= P + if current_pruning_point_blue_score + self.pruning_depth.after() > sink_ghostdag.blue_score { // During initial IBD the sink can be close to the global pruning point.
- // We use min(P, P') here and rely on sanity check #2 for post activation edge cases return vec![]; } - let sink_pruning_point = self.expected_header_pruning_point_v2(sink_ghostdag).pruning_point; + let sink_pruning_point = self.expected_header_pruning_point(sink_ghostdag).pruning_point; let sink_pruning_point_blue_score = self.headers_store.get_blue_score(sink_pruning_point).unwrap(); - // Log the current pruning depth if it has not reached P' yet - self.log_pruning_depth_post_activation(sink_ghostdag, selected_parent_daa_score, sink_pruning_point_blue_score); - // Sanity check #2: if the sink pruning point is lower or equal to current, there is no need to search if sink_pruning_point_blue_score <= current_pruning_point_blue_score { return vec![]; @@ -261,208 +202,12 @@ impl< deque.into() } - fn next_pruning_points_v1( - &self, - ghostdag_data: CompactGhostdagData, - current_candidate: Hash, - current_pruning_point: Hash, - ) -> (Vec<Hash>, Hash) { - let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); - let pruning_depth = self.pruning_depth.get(selected_parent_daa_score); - let finality_depth = self.finality_depth.get(selected_parent_daa_score); - self.next_pruning_points_v1_inner(ghostdag_data, current_candidate, current_pruning_point, pruning_depth, finality_depth) - } - - /// Returns the next pruning points and an updated pruning point candidate given the current - /// pruning point (P), a current candidate (C) and a target block B (represented by GD data). - /// - /// The pruning point candidate C is a moving block which usually has pruning depth from sink but - /// its finality score is still equal to P. It serves as an optimal starting point for searching - /// up rather than restarting the search from P each time. - /// - /// Assumptions: P ∈ chain(C), C ∈ chain(B), P and C have the same finality score - /// - /// Returns: new pruning points ordered from bottom up and an updated candidate - fn next_pruning_points_v1_inner( - &self, - ghostdag_data: CompactGhostdagData, - current_candidate: Hash, - current_pruning_point: Hash, - pruning_depth: u64, - finality_depth: u64, - ) -> (Vec<Hash>, Hash) { - // If the pruning point is more out of date than that, an IBD with headers proof is needed anyway. - let mut new_pruning_points = Vec::with_capacity((pruning_depth / finality_depth) as usize); - let mut latest_pruning_point_bs = self.ghostdag_store.get_blue_score(current_pruning_point).unwrap(); - - if latest_pruning_point_bs + pruning_depth > ghostdag_data.blue_score { - // The pruning point is not in depth of self.pruning_depth, so there's - // no point in checking if it is required to update it. This can happen - // because virtual is not immediately updated during IBD, so the pruning point - // might be in depth less than self.pruning_depth. - return (vec![], current_candidate); - } - - let mut new_candidate = current_candidate; - - /* - [Crescendo] - - Notation: - P = pruning point - C = candidate - F0 = the finality depth before the fork - F1 = the finality depth after the fork - - Property 1: F0 <= F1 AND F1 % F0 == 0 (validated in Self::new) - - Remark 1: if P,C had the same finality score with regard to F0, they have the same finality score also with regard to F1 - - Proof by picture (based on Property 1): - F0: [ 0 ] [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] ... [ 9 ] ... - F1: [ 0 ] [ 1 ] ...
- - (each row divides the blue score space into finality score buckets with F0 or F1 numbers in each bucket correspondingly) - - This means we can safely begin the search from C even in the few moments post the fork (i.e., there's no fear of needing to "pull" C back) - - Note that overall this search is guaranteed to provide the desired monotonicity described in KIP-14: - https://github.com/kaspanet/kips/blob/master/kip-0014.md#pruning-point-adjustment - */ - for selected_child in self.reachability_service.forward_chain_iterator(current_candidate, ghostdag_data.selected_parent, true) - { - let selected_child_bs = self.ghostdag_store.get_blue_score(selected_child).unwrap(); - - if ghostdag_data.blue_score - selected_child_bs < pruning_depth { - break; - } - - new_candidate = selected_child; - let new_candidate_bs = selected_child_bs; - - if self.finality_score(new_candidate_bs, finality_depth) > self.finality_score(latest_pruning_point_bs, finality_depth) { - new_pruning_points.push(new_candidate); - latest_pruning_point_bs = new_candidate_bs; - } - } - - (new_pruning_points, new_candidate) - } - /// Returns the floored integer division of blue score by finality depth. /// The returned number represents the sampling epoch this blue score point belongs to. fn finality_score(&self, blue_score: u64, finality_depth: u64) -> u64 { blue_score / finality_depth } - fn expected_header_pruning_point_v1_inner( - &self, - ghostdag_data: CompactGhostdagData, - current_candidate: Hash, - current_pruning_point: Hash, - pruning_depth: u64, - finality_depth: u64, - ) -> Hash { - self.next_pruning_points_v1_inner(ghostdag_data, current_candidate, current_pruning_point, pruning_depth, finality_depth) - .0 - .last() - .copied() - .unwrap_or(current_pruning_point) - } - - pub fn expected_header_pruning_point_v1(&self, ghostdag_data: CompactGhostdagData, pruning_info: PruningPointInfo) -> Hash { - if ghostdag_data.selected_parent == self.genesis_hash { - return self.genesis_hash; - } - - let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); - let pruning_depth = self.pruning_depth.get(selected_parent_daa_score); - let finality_depth = self.finality_depth.get(selected_parent_daa_score); - - let (current_pruning_point, current_candidate, current_pruning_point_index) = pruning_info.decompose(); - - let sp_pp = self.headers_store.get_header(ghostdag_data.selected_parent).unwrap().pruning_point; - let sp_pp_blue_score = self.headers_store.get_blue_score(sp_pp).unwrap(); - - // If the block doesn't have the pruning in its selected chain we know for sure that it can't trigger a pruning point - // change (we check the selected parent to take care of the case where the block is the virtual which doesn't have reachability data). - let has_pruning_point_in_its_selected_chain = - self.reachability_service.is_chain_ancestor_of(current_pruning_point, ghostdag_data.selected_parent); - - // Note: the pruning point from the POV of the current block is the first block in its chain that is in depth of self.pruning_depth and - // its finality score is greater than the previous pruning point. This is why if the diff between finality_score(selected_parent.blue_score + 1) * finality_interval - // and the current block blue score is less than self.pruning_depth we can know for sure that this block didn't trigger a pruning point change.
- let min_required_blue_score_for_next_pruning_point = - (self.finality_score(sp_pp_blue_score, finality_depth) + 1) * finality_depth; - let next_or_current_pp = if has_pruning_point_in_its_selected_chain - && min_required_blue_score_for_next_pruning_point + pruning_depth <= ghostdag_data.blue_score - { - // If the selected parent pruning point is in the future of current global pruning point, then provide it as a suggestion - let sp_pp_in_global_pp_future = - self.reachability_service.is_dag_ancestor_of_result(current_pruning_point, sp_pp).unwrap_option().is_some_and(|b| b); - - /* - Notation: - P = global pruning point - C = global candidate - B = current block (can be virtual) - S = B's selected parent - R = S's pruning point - F = the finality depth - */ - - let (pp, cc) = if sp_pp_in_global_pp_future { - if self.reachability_service.is_chain_ancestor_of(sp_pp, current_candidate) { - // R ∈ future(P), R ∈ chain(C): use R as pruning point and C as candidate - // There are two cases: (i) C is not deep enough from B, R will be returned - // (ii) C is deep enough and the search will start from it, possibly finding a new pruning point for B - (sp_pp, current_candidate) - } else { - // R ∈ future(P), R ∉ chain(C): Use R as candidate as well. - // This might require a long walk up from R (bounded by F), however it is highly unlikely since it - // requires a ~pruning depth deep parallel chain - (sp_pp, sp_pp) - } - } else if self.reachability_service.is_chain_ancestor_of(current_candidate, ghostdag_data.selected_parent) { - // R ∉ future(P), P,C ∈ chain(B) - (current_pruning_point, current_candidate) - } else { - // R ∉ future(P), P ∈ chain(B), C ∉ chain(B) - (current_pruning_point, current_pruning_point) - }; - - self.expected_header_pruning_point_v1_inner(ghostdag_data, cc, pp, pruning_depth, finality_depth) - } else { - sp_pp - }; - - // [Crescendo]: shortly after fork activation, R is not guaranteed to comply with the new - // increased pruning depth, so we must manually verify not to go below it - if sp_pp_blue_score >= self.headers_store.get_blue_score(next_or_current_pp).unwrap() { - return sp_pp; - } - - if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, next_or_current_pp, pruning_depth) { - return next_or_current_pp; - } - - for i in (0..=current_pruning_point_index).rev() { - let past_pp = self.past_pruning_points_store.get(i).unwrap(); - - // [Crescendo]: shortly after fork activation, R is not guaranteed to comply with the new - // increased pruning depth, so we must manually verify not to go below it - if sp_pp_blue_score >= self.headers_store.get_blue_score(past_pp).unwrap() { - return sp_pp; - } - - if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, past_pp, pruning_depth) { - return past_pp; - } - } - - self.genesis_hash - } - fn is_pruning_point_in_pruning_depth(&self, pov_blue_score: u64, pruning_point: Hash, pruning_depth: u64) -> bool { let pp_bs = self.headers_store.get_blue_score(pruning_point).unwrap(); pov_blue_score >= pp_bs + pruning_depth @@ -481,11 +226,50 @@ impl< // new pruning depth is expected, so we use the DAA score of the pruning point itself as an indicator. 
// This means that in the first few days following the fork we err on the side of a shorter period which is // a weaker requirement - let pruning_depth = self.pruning_depth.get(self.headers_store.get_daa_score(pp_candidate).unwrap()); + let pruning_depth = self.pruning_depth.after(); self.is_pruning_point_in_pruning_depth(tip_bs, pp_candidate, pruning_depth) } - pub fn are_pruning_points_in_valid_chain(&self, pruning_info: PruningPointInfo, syncer_sink: Hash) -> PruningImportResult<()> { + // Returns the pruning points on the path, + // ordered from newest to oldest + pub fn pruning_points_on_path_to_syncer_sink( + &self, + pruning_point: Hash, + syncer_sink: Hash, + ) -> PruningImportResult<VecDeque<Hash>> { + let mut pps_on_path = VecDeque::new(); + for current in self.reachability_service.forward_chain_iterator(pruning_point, syncer_sink, true).skip(1) { + let current_header = self.headers_store.get_header(current).unwrap(); + // Post-crescendo: expected header pruning point is no longer part of header validity, but we want to make sure + // the syncer's virtual chain indeed coincides with the pruning point and past pruning points before downloading + // the UTXO set and resolving virtual. Hence we perform the check over this chain here. + let reply = self.expected_header_pruning_point(self.ghostdag_store.get_compact_data(current).unwrap()); + if reply.pruning_point != current_header.pruning_point { + return Err(PruningImportError::WrongHeaderPruningPoint(current_header.pruning_point, current)); + } + // Save so that following blocks can recursively use this value + self.pruning_samples_store.insert(current, reply.pruning_sample).unwrap_or_exists(); + // Going up the chain from the pruning point to the sink. The goal is to exit this loop with a queue [P(k),...,P(0), P(-1), P(-2), ..., P(-n)] + // where P(0) is the new pruning point, P(-1) is the point before it and P(-n) is the pruning point of P(0). That is, + // ceiling(P/F) = n (where n is usually 3). + // k is the number of future pruning points on path to virtual beyond the new, currently synced pruning point + // + // Let C be the current block's pruning point. Push to the front of the queue if: + // 1. the queue is empty + // 2. the front of the queue is different than C + if pps_on_path.front().is_none_or(|&h| h != current_header.pruning_point) { + pps_on_path.push_front(current_header.pruning_point); + } + } + Ok(pps_on_path) + }
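The queue shape matters for the validation that follows: walking bottom-up, each block's header pruning point is pushed to the front only when it changes, so consecutive duplicates collapse and the newest point ends up at the front. A toy illustration (u64 standing in for Hash; requires `Option::is_none_or`, Rust 1.82+, which this codebase already uses):

use std::collections::VecDeque;

/// Sketch: collapse consecutive header pruning points into a newest-first queue.
fn collect_pps(header_pps_bottom_up: &[u64]) -> VecDeque<u64> {
    let mut queue = VecDeque::new();
    for &pp in header_pps_bottom_up {
        // push to the front only when it differs from the current front
        if queue.front().is_none_or(|&h| h != pp) {
            queue.push_front(pp);
        }
    }
    queue
}

// collect_pps(&[7, 7, 8, 8, 9]) yields [9, 8, 7] (front to back).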
+ + pub fn are_pruning_points_in_valid_chain( + &self, + synced_pruning_point: Hash, + synced_pp_index: u64, + syncer_sink: Hash, + ) -> PruningImportResult<()> { // We want to validate that the past pruning points form a chain to genesis. Since // each pruning point's header doesn't point to the previous pruning point, but to // the pruning point from its POV, we can't just traverse from one pruning point to @@ -493,42 +277,27 @@ impl< // we rely on the fact that each pruning point is pointed by another known block or // pruning point. // So in the first stage we go over the selected chain and add to the queue of expected - // pruning points all the pruning points from the POV of some chain block. In the second - // stage we go over the past pruning points from recent to older, check that it's the head + // pruning points all the pruning points from the POV of some chain block, and update pruning samples. + // In the second stage we go over the past pruning points from recent to older, check that each one is the head // of the queue (by popping the queue), and add its header pruning point to the queue since // we expect to see it later on the list. // The first stage is important because the most recent pruning point is pointing to a few // pruning points before, so the first few pruning points on the list won't be pointed by // any other pruning point in the list, so we are compelled to check if it's referenced by // the selected chain. - let mut expected_pps_queue = VecDeque::new(); - for current in self.reachability_service.forward_chain_iterator(pruning_info.pruning_point, syncer_sink, true).skip(1) { - let current_header = self.headers_store.get_header(current).unwrap(); - // Post-crescendo: expected header pruning point is no longer part of header validity, but we want to make sure - // the syncer's virtual chain indeed coincides with the pruning point and past pruning points before downloading - // the UTXO set and resolving virtual. Hence we perform the check over this chain here. - let reply = self.expected_header_pruning_point_v2(self.ghostdag_store.get_compact_data(current).unwrap()); - if reply.pruning_point != current_header.pruning_point { - return Err(PruningImportError::WrongHeaderPruningPoint(current_header.pruning_point, current)); - } - // Save so that following blocks can recursively use this value - self.pruning_samples_store.insert(current, reply.pruning_sample).unwrap_or_exists(); - /* - Going up the chain from the pruning point to the sink. The goal is to exit this loop with a queue [P(0), P(-1), P(-2), ..., P(-n)] - where P(0) is the current pruning point, P(-1) is the point before it and P(-n) is the pruning point of P(0). That is, - ceiling(P/F) = n (where n is usually 3). - - Let C be the current block's pruning point. Push to the front of the queue if: - 1. the queue is empty; OR - 2. the front of the queue is different than C; AND - 3.
the front of the queue is different than P(0) (if it is P(0), we already filled the queue with what we need) */ - if expected_pps_queue.front().is_none_or(|&h| h != current_header.pruning_point && h != pruning_info.pruning_point) { - expected_pps_queue.push_front(current_header.pruning_point); + let mut expected_pps_queue = self.pruning_points_on_path_to_syncer_sink(synced_pruning_point, syncer_sink)?; + // remove excess pruning points beyond the pruning_point + while let Some(&future_pp) = expected_pps_queue.front() { + if future_pp == synced_pruning_point { + break; } + expected_pps_queue.pop_front(); + } + if expected_pps_queue.is_empty() { + return Err(PruningImportError::MissingPointedPruningPoint); } - for idx in (0..=pruning_info.index).rev() { + for idx in (0..=synced_pp_index).rev() { let pp = self.past_pruning_points_store.get(idx).unwrap(); let pp_header = self.headers_store.get_header(pp).unwrap(); let Some(expected_pp) = expected_pps_queue.pop_front() else { diff --git a/consensus/src/processes/pruning_proof/apply.rs b/consensus/src/processes/pruning_proof/apply.rs index 427cfefc51..5487f586ac 100644 --- a/consensus/src/processes/pruning_proof/apply.rs +++ b/consensus/src/processes/pruning_proof/apply.rs @@ -41,7 +41,9 @@ use crate::{ use super::PruningProofManager; impl PruningProofManager { - pub fn apply_proof(&self, mut proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> { + pub fn apply_proof(&self, proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> { + // Following validation of a pruning proof, various consensus stores must be updated + let pruning_point_header = proof[0].last().unwrap().clone(); let pruning_point = pruning_point_header.hash; @@ -50,7 +52,10 @@ impl PruningProofManager { .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash))) .collect_vec(); + let mut expanded_proof = proof; let mut trusted_gd_map: BlockHashMap<GhostdagData> = BlockHashMap::new(); + // This loop expands the proof with the headers of the trusted set + // and creates a hash-to-ghostdag-data map of the trusted set for tb in trusted_set.iter() { trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into()); let tb_block_level = calc_block_level(&tb.block.header, self.max_block_level); @@ -60,29 +65,39 @@ impl PruningProofManager { if proof_sets[current_proof_level as usize].contains(&tb.block.hash()) { return; } - - proof[current_proof_level as usize].push(tb.block.header.clone()); + // otherwise, add this block to the proof data + expanded_proof[current_proof_level as usize].push(tb.block.header.clone()); }); } - - proof.iter_mut().for_each(|level_proof| { + // topologically sort every level in the proof + expanded_proof.iter_mut().for_each(|level_proof| { level_proof.sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); }); - self.populate_reachability_and_headers(&proof); + self.populate_reachability_and_headers(&expanded_proof); + // sanity check { let reachability_read = self.reachability_store.read(); for tb in trusted_set.iter() { - // Header-only trusted blocks are expected to be in pruning point past + // A trusted block not in the past of the pruning point is in its anticone and thus must have a body if tb.block.is_header_only() && !reachability_read.is_dag_ancestor_of(tb.block.hash(), pruning_point) { - return Err(PruningImportError::PruningPointPastMissingReachability(tb.block.hash())); + return Err(PruningImportError::PruningPointAnticoneMissingBody(tb.block.hash())); + } + + // Trusted
blocks are expected to be in the pruning point anti-future. + if tb.block.hash() != pruning_point && reachability_read.is_dag_ancestor_of(pruning_point, tb.block.hash()) { + return Err(PruningImportError::TrustedBlockInPruningPointFuture(tb.block.hash(), pruning_point)); + } } } - - for (level, headers) in proof.iter().enumerate() { + // Populate ghostdag_store and relation store (on a per level basis) for every block in the proof + for (level, headers) in expanded_proof.iter().enumerate() { trace!("Applying level {} from the pruning point proof", level); + // We are only interested in those level ancestors that belong to the pruning proof at that level, + // so other level parents are filtered out. + // Since each level is topologically sorted, we can construct the level ancestors + // on the fly rather than constructing them ahead of time let mut level_ancestors: HashSet<Hash> = HashSet::new(); level_ancestors.insert(ORIGIN); @@ -98,7 +113,6 @@ impl PruningProofManager { ); self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); - if level == 0 { let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() @@ -121,6 +135,8 @@ impl PruningProofManager { } } + // Update virtual state based on the proof-derived pruning point. + // Updating the utxoset is done separately, as it requires downloading the new utxoset in its entirety. let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), diff --git a/consensus/src/processes/pruning_proof/build.rs b/consensus/src/processes/pruning_proof/build.rs index cdb2997b78..44679a223d 100644 --- a/consensus/src/processes/pruning_proof/build.rs +++ b/consensus/src/processes/pruning_proof/build.rs @@ -178,7 +178,9 @@ impl PruningProofManager { pp_header: &HeaderWithBlockLevel, temp_db: Arc<DB>, ) -> (Vec<Arc<DbGhostdagStore>>, Vec<Hash>, Vec<Hash>) { - let current_dag_level = self.find_current_dag_level(&pp_header.header); + // TODO: Uncomment line and send as argument to find_sufficiently_deep_level_root + // once full fix to minimize proof sizes comes + // let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec<Option<Arc<DbGhostdagStore>>> = vec![None; self.max_block_level as usize + 1]; let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; let mut root_by_level = vec![None; self.max_block_level as usize + 1]; @@ -195,7 +197,7 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone()) + .find_sufficiently_deep_level_root(pp_header, level, required_block, temp_db.clone()) .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); root_by_level[level_usize] = Some(root); @@ -217,11 +219,10 @@ impl PruningProofManager { /// 2. block at depth m at the next level ∈ Future(root) /// /// Returns: the filled ghostdag store from root to tip, the selected tip and the root - fn find_sufficient_root( + fn find_sufficiently_deep_level_root( &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, - _current_dag_level: BlockLevel, required_block: Option<Hash>, temp_db: Arc<DB>, ) -> PruningProofManagerInternalResult<(Arc<DbGhostdagStore>, Hash, Hash)> { @@ -237,7 +238,7 @@ impl PruningProofManager { // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from.
// We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level - // TODO: uncomment when the full fix to minimize proof sizes comes. + // TODO: uncomment when the full fix to minimize proof sizes comes, and add current_dag_level as an argument // let mut required_base_level_depth = self.estimated_blue_depth_at_level_0( // level, // required_level_depth + 100, // We take a safety margin @@ -407,9 +408,9 @@ impl PruningProofManager { let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); pp_header .parents_by_level - .iter() + .expanded_iter() .enumerate() - .skip(1) + .skip(1) // skip checking direct parents .find_map(|(level, parents)| { if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents { None } else { @@ -465,6 +466,7 @@ impl PruningProofManager { while current_header.blue_score + base_depth >= high_header_score { if current_header.direct_parents().is_empty() { + // Reached genesis break; } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 9a83bc29c0..92b1b79bbd 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -46,6 +46,7 @@ use crate::{ headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, pruning::{DbPruningStore, PruningStoreReader}, + pruning_meta::PruningMetaStores, pruning_samples::{DbPruningSamplesStore, PruningSamplesStore}, reachability::DbReachabilityStore, relations::{DbRelationsStore, RelationsStoreReader}, @@ -115,6 +116,7 @@ pub struct PruningProofManager { depth_store: Arc<DbDepthStore>, selected_chain_store: Arc<RwLock<DbSelectedChainStore>>, pruning_samples_store: Arc<DbPruningSamplesStore>, + pruning_meta_stores: Arc<RwLock<PruningMetaStores>>, ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, @@ -159,6 +161,7 @@ impl PruningProofManager { ghostdag_store: storage.ghostdag_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), + pruning_meta_stores: storage.pruning_meta_stores.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), virtual_stores: storage.virtual_stores.clone(), body_tips_store: storage.body_tips_store.clone(), @@ -223,7 +226,7 @@ impl PruningProofManager { let mut pruning_point_write = self.pruning_point_store.write(); let mut batch = WriteBatch::default(); - pruning_point_write.set_batch(&mut batch, new_pruning_point, new_pruning_point, (pruning_points.len() - 1) as u64).unwrap(); + pruning_point_write.set_batch(&mut batch, new_pruning_point, (pruning_points.len() - 1) as u64).unwrap(); pruning_point_write.set_retention_checkpoint(&mut batch, new_pruning_point).unwrap(); pruning_point_write.set_retention_period_root(&mut batch, new_pruning_point).unwrap(); self.db.write(batch).unwrap(); @@ -302,9 +305,7 @@ impl PruningProofManager { let mut daa_window_blocks = BlockHashMap::new(); let mut ghostdag_blocks = BlockHashMap::new(); - // [Crescendo]: get ghostdag k based on the pruning point's DAA score.
The off-by-one of not going by selected parent - DAA score is not important here as we simply increase K one block earlier which is more conservative (saving/sending more data) - let ghostdag_k = self.ghostdag_k.get(self.headers_store.get_daa_score(pruning_point).unwrap()); + let ghostdag_k = self.ghostdag_k.after(); // PRUNE SAFETY: called either via consensus under the prune guard or by the pruning processor (hence no pruning in parallel) @@ -405,12 +406,8 @@ impl PruningProofManager { let virtual_state = self.virtual_stores.read().state.get().unwrap(); let pp_bs = self.headers_store.get_blue_score(pp).unwrap(); - // [Crescendo]: use pruning point DAA score for activation. This means that only after sufficient time - // post activation we will require the increased finalization depth - let pruning_point_daa_score = self.headers_store.get_daa_score(pp).unwrap(); - // The anticone is considered final only if the pruning point is at sufficient depth from virtual - if virtual_state.ghostdag_data.blue_score >= pp_bs + self.anticone_finalization_depth.get(pruning_point_daa_score) { + if virtual_state.ghostdag_data.blue_score >= pp_bs + self.anticone_finalization_depth.after() { let anticone = Arc::new(self.calculate_pruning_point_anticone_and_trusted_data(pp, virtual_state.parents.iter().copied())); cache_lock.replace(CachedPruningPointData { pruning_point: pp, data: anticone.clone() }); Ok(anticone) diff --git a/consensus/src/processes/pruning_proof/validate.rs b/consensus/src/processes/pruning_proof/validate.rs index f612188104..6043a52457 100644 --- a/consensus/src/processes/pruning_proof/validate.rs +++ b/consensus/src/processes/pruning_proof/validate.rs @@ -75,7 +75,7 @@ impl PruningProofManager { let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); - let current_pp = pruning_read.get().unwrap().pruning_point; + let current_pp = pruning_read.pruning_point().unwrap(); let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); // The accumulated blue work of current consensus from the pruning point onward @@ -176,9 +176,7 @@ impl PruningProofManager { return Err(PruningImportError::PruningProofNotEnoughHeaders); } - // [Crescendo]: decide on ghostdag K based on proof pruning point DAA score - let proof_pp_daa_score = proof[0].last().expect("checked if empty").daa_score; - let ghostdag_k = self.ghostdag_k.get(proof_pp_daa_score); + let ghostdag_k = self.ghostdag_k.after(); let headers_estimate = self.estimate_proof_unique_size(proof); @@ -270,6 +268,7 @@ impl PruningProofManager { headers_store.insert(header.hash, header.clone(), header_level).unwrap_or_exists(); + // filter out parents that do not appear in the pruning proof: let parents = self .parents_manager .parents_at_level(header, level) @@ -352,7 +351,7 @@ impl PruningProofManager { proof_pp: Hash, proof_pp_header: &Header, ) -> PruningImportResult<()> { - // A proof selected tip of some level has to be the proof suggested prunint point itself if its level + // A proof selected tip of some level has to be the proof suggested pruning point itself if its level // is lower or equal to the pruning point level, or a parent of the pruning point on the relevant level // otherwise.
if level <= proof_pp_level { diff --git a/consensus/src/processes/sync/mod.rs b/consensus/src/processes/sync/mod.rs index 4b39216c01..48585abbf9 100644 --- a/consensus/src/processes/sync/mod.rs +++ b/consensus/src/processes/sync/mod.rs @@ -78,7 +78,7 @@ impl< /// because it returns blocks with MergeSet granularity, so if MergeSet > max_blocks, the function will return nothing which is undesired behavior. pub fn antipast_hashes_between(&self, low: Hash, high: Hash, max_blocks: Option<usize>) -> (Vec<Hash>, Hash) { let max_blocks = max_blocks.unwrap_or(usize::MAX); - assert!(max_blocks >= self.mergeset_size_limit.upper_bound() as usize); + assert!(max_blocks >= self.mergeset_size_limit.after() as usize); // If low is not in the chain of high - forward_chain_iterator will fail. // Therefore, we traverse down low's chain until we reach a block that is in @@ -123,7 +123,7 @@ impl< /// Returns a logarithmic amount of blocks sampled from the virtual selected chain between `low` and `high`. /// Expects both blocks to be on the virtual selected chain, otherwise an error is returned pub fn create_virtual_selected_chain_block_locator(&self, low: Option<Hash>, high: Option<Hash>) -> SyncManagerResult<Vec<Hash>> { - let low = low.unwrap_or_else(|| self.pruning_point_store.read().get().unwrap().pruning_point); + let low = low.unwrap_or_else(|| self.pruning_point_store.read().pruning_point().unwrap()); let sc_read = self.selected_chain_store.read(); let high = high.unwrap_or_else(|| sc_read.get_tip().unwrap().1); if low == high { diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index c58dfb45df..7cf04dc3f9 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -12,6 +12,7 @@ use kaspa_txscript::{ use kaspa_consensus_core::{ config::params::{ForkActivation, ForkedParam}, mass::MassCalculator, + KType, }; #[derive(Clone)] @@ -22,6 +23,7 @@ pub struct TransactionValidator { max_script_public_key_len: ForkedParam<usize>, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: ForkedParam<u64>, + ghostdag_k: KType, sig_cache: Cache<SigCacheKey, bool>, pub(crate) mass_calculator: MassCalculator, @@ -38,6 +40,7 @@ impl TransactionValidator { max_script_public_key_len: ForkedParam<usize>, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: ForkedParam<u64>, + ghostdag_k: KType, counters: Arc<TxScriptCacheCounters>, mass_calculator: MassCalculator, crescendo_activation: ForkActivation, @@ -49,6 +52,7 @@ impl TransactionValidator { max_script_public_key_len, coinbase_payload_script_public_key_max_len, coinbase_maturity, + ghostdag_k, sig_cache: Cache::with_counters(10_000, counters), mass_calculator, crescendo_activation, @@ -62,6 +66,7 @@ impl TransactionValidator { max_script_public_key_len: usize, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, + ghostdag_k: KType, counters: Arc<TxScriptCacheCounters>, ) -> Self { Self { @@ -71,6 +76,7 @@ impl TransactionValidator { max_script_public_key_len: ForkedParam::new_const(max_script_public_key_len), coinbase_payload_script_public_key_max_len, coinbase_maturity: ForkedParam::new_const(coinbase_maturity), + ghostdag_k, sig_cache: Cache::with_counters(10_000, counters), mass_calculator: MassCalculator::new(0, 0, 0, 0), crescendo_activation: ForkActivation::never(),
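Throughout this diff, fork-dependent parameters stop being sampled by DAA score (`.get(daa_score)`, `.upper_bound()`) and are read unconditionally as their post-activation values (`.after()`). A hedged sketch of the assumed `ForkedParam` surface (the real type lives in kaspa-consensus-core and may differ in detail):

/// Sketch (assumed shape): a consensus parameter with pre- and post-fork values.
struct ForkedParam<T: Copy> {
    before: T,
    after: T,
    activation_daa_score: u64,
}

impl<T: Copy> ForkedParam<T> {
    /// Value as of a given DAA score (the kind of call site removed in this diff).
    fn get(&self, daa_score: u64) -> T {
        if daa_score >= self.activation_daa_score { self.after } else { self.before }
    }
    /// Post-activation value (what the new call sites use unconditionally).
    fn after(&self) -> T {
        self.after
    }
}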
diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs index e080028f28..ed4bfb162c 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs @@ -30,7 +30,6 @@ impl TransactionValidator { ) -> TxResult<()> { self.validate_tx_in_header_context( tx, - ctx_daa_score, match Self::get_lock_time_type(tx) { LockTimeType::Finalized => LockTimeArg::Finalized, LockTimeType::DaaScore => LockTimeArg::DaaScore(ctx_daa_score), @@ -39,17 +38,7 @@ impl TransactionValidator { ) } - pub(crate) fn validate_tx_in_header_context( - &self, - tx: &Transaction, - ctx_daa_score: u64, - lock_time_arg: LockTimeArg, - ) -> TxResult<()> { - self.check_transaction_payload(tx, ctx_daa_score)?; - self.check_transaction_inputs_count_ctx(tx, ctx_daa_score)?; - self.check_transaction_outputs_count_ctx(tx, ctx_daa_score)?; - self.check_transaction_signature_scripts_ctx(tx, ctx_daa_score)?; - self.check_transaction_script_public_keys_ctx(tx, ctx_daa_score)?; + pub(crate) fn validate_tx_in_header_context(&self, tx: &Transaction, lock_time_arg: LockTimeArg) -> TxResult<()> { self.check_tx_is_finalized(tx, lock_time_arg) } @@ -91,66 +80,4 @@ impl TransactionValidator { Ok(()) } - - fn check_transaction_payload(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { - // TODO (post HF): move back to in isolation validation - if self.crescendo_activation.is_active(ctx_daa_score) { - Ok(()) - } else { - if !tx.is_coinbase() && !tx.payload.is_empty() { - return Err(TxRuleError::NonCoinbaseTxHasPayload); - } - Ok(()) - } - } - - fn check_transaction_outputs_count_ctx(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { - // TODO (post HF): move back to in isolation validation - if tx.is_coinbase() { - // We already check coinbase outputs count vs. Ghostdag K + 2 - return Ok(()); - } - if tx.outputs.len() > self.max_tx_outputs.get(ctx_daa_score) { - return Err(TxRuleError::TooManyOutputs(tx.outputs.len(), self.max_tx_inputs.get(ctx_daa_score))); - } - - Ok(()) - } - - fn check_transaction_inputs_count_ctx(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { - // TODO (post HF): move back to in isolation validation - if !tx.is_coinbase() && tx.inputs.is_empty() { - return Err(TxRuleError::NoTxInputs); - } - - if tx.inputs.len() > self.max_tx_inputs.get(ctx_daa_score) { - return Err(TxRuleError::TooManyInputs(tx.inputs.len(), self.max_tx_inputs.get(ctx_daa_score))); - } - - Ok(()) - } - - // The main purpose of this check is to avoid overflows when calculating transaction mass later. - fn check_transaction_signature_scripts_ctx(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { - // TODO (post HF): move back to in isolation validation - if let Some(i) = - tx.inputs.iter().position(|input| input.signature_script.len() > self.max_signature_script_len.get(ctx_daa_score)) - { - return Err(TxRuleError::TooBigSignatureScript(i, self.max_signature_script_len.get(ctx_daa_score))); - } - - Ok(()) - } - - // The main purpose of this check is to avoid overflows when calculating transaction mass later.
- fn check_transaction_script_public_keys_ctx(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { - // TODO (post HF): move back to in isolation validation - if let Some(i) = - tx.outputs.iter().position(|out| out.script_public_key.script().len() > self.max_script_public_key_len.get(ctx_daa_score)) - { - return Err(TxRuleError::TooBigScriptPublicKey(i, self.max_script_public_key_len.get(ctx_daa_score))); - } - - Ok(()) - } } diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index 7780dc5c5b..a09a4e69d6 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -43,15 +43,15 @@ impl TransactionValidator { return Err(TxRuleError::CoinbaseHasInputs(tx.inputs.len())); } - /* - [Crescendo]: moved this specific check to body_validation_in_context since it depends on fork activation - TODO (post HF): move back here + if tx.mass() > 0 { + return Err(TxRuleError::CoinbaseNonZeroMassCommitment); + } let outputs_limit = self.ghostdag_k as u64 + 2; if tx.outputs.len() as u64 > outputs_limit { return Err(TxRuleError::CoinbaseTooManyOutputs(tx.outputs.len(), outputs_limit)); } - */ + for (i, output) in tx.outputs.iter().enumerate() { if output.script_public_key.script().len() > self.coinbase_payload_script_public_key_max_len as usize { return Err(TxRuleError::CoinbaseScriptPublicKeyTooLong(i)); @@ -65,9 +65,8 @@ impl TransactionValidator { // We already check coinbase outputs count vs. Ghostdag K + 2 return Ok(()); } - // [Crescendo]: keep the check here over the upper limit. Add a tight check to in_header_context validation - if tx.outputs.len() > self.max_tx_outputs.upper_bound() { - return Err(TxRuleError::TooManyOutputs(tx.outputs.len(), self.max_tx_inputs.upper_bound())); + if tx.outputs.len() > self.max_tx_outputs.after() { + return Err(TxRuleError::TooManyOutputs(tx.outputs.len(), self.max_tx_outputs.after())); } Ok(()) @@ -78,9 +77,8 @@ impl TransactionValidator { return Err(TxRuleError::NoTxInputs); } - // [Crescendo]: keep the check here over the upper limit. Add a tight check to in_header_context validation - if tx.inputs.len() > self.max_tx_inputs.upper_bound() { - return Err(TxRuleError::TooManyInputs(tx.inputs.len(), self.max_tx_inputs.upper_bound())); + if tx.inputs.len() > self.max_tx_inputs.after() { + return Err(TxRuleError::TooManyInputs(tx.inputs.len(), self.max_tx_inputs.after())); } Ok(()) @@ -88,10 +86,8 @@ impl TransactionValidator { // The main purpose of this check is to avoid overflows when calculating transaction mass later. fn check_transaction_signature_scripts(&self, tx: &Transaction) -> TxResult<()> { - // [Crescendo]: keep the check here over the upper limit. Add a tight check to in_header_context validation - if let Some(i) = tx.inputs.iter().position(|input| input.signature_script.len() > self.max_signature_script_len.upper_bound()) - { - return Err(TxRuleError::TooBigSignatureScript(i, self.max_signature_script_len.upper_bound())); + if let Some(i) = tx.inputs.iter().position(|input| input.signature_script.len() > self.max_signature_script_len.after()) { + return Err(TxRuleError::TooBigSignatureScript(i, self.max_signature_script_len.after())); } Ok(())
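The coinbase rules restored above in isolation bound two things: the coinbase mass commitment (must be zero) and the outputs count (at most `ghostdag_k + 2`). A distilled sketch of just these bounds over plain counts (names hypothetical, error strings standing in for TxRuleError variants):

/// Sketch: the two in-isolation coinbase bounds.
fn validate_coinbase_shape(outputs_len: u64, mass_commitment: u64, ghostdag_k: u64) -> Result<(), String> {
    if mass_commitment > 0 {
        return Err("coinbase must commit to zero mass".into());
    }
    let outputs_limit = ghostdag_k + 2;
    if outputs_len > outputs_limit {
        return Err(format!("too many coinbase outputs: {outputs_len} > {outputs_limit}"));
    }
    Ok(())
}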
fn check_transaction_script_public_keys(&self, tx: &Transaction) -> TxResult<()> { - // [Crescendo]: keep the check here over the upper limit. Add a tight check to in_header_context validation if let Some(i) = - tx.outputs.iter().position(|out| out.script_public_key.script().len() > self.max_script_public_key_len.upper_bound()) + tx.outputs.iter().position(|out| out.script_public_key.script().len() > self.max_script_public_key_len.after()) { - return Err(TxRuleError::TooBigScriptPublicKey(i, self.max_script_public_key_len.upper_bound())); + return Err(TxRuleError::TooBigScriptPublicKey(i, self.max_script_public_key_len.after())); } Ok(()) @@ -194,6 +189,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs index 4eccdbd897..9f4c83fe69 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs @@ -39,7 +39,7 @@ impl TransactionValidator { flags: TxValidationFlags, mass_and_feerate_threshold: Option<(u64, f64)>, ) -> TxResult { - self.check_transaction_coinbase_maturity(tx, pov_daa_score, block_daa_score)?; + self.check_transaction_coinbase_maturity(tx, pov_daa_score)?; let total_in = self.check_transaction_input_amounts(tx)?; let total_out = Self::check_transaction_output_values(tx, total_in)?; let fee = total_in - total_out; @@ -55,10 +55,7 @@ impl TransactionValidator { match flags { TxValidationFlags::Full | TxValidationFlags::SkipMassCheck => { - if !self.crescendo_activation.is_active(block_daa_score) { - Self::check_sig_op_counts(tx)?; - } - self.check_scripts(tx, block_daa_score)?; + self.check_scripts(tx)?; } TxValidationFlags::SkipScriptChecks => {} } @@ -77,21 +74,18 @@ impl TransactionValidator { Ok(()) } - fn check_transaction_coinbase_maturity( - &self, - tx: &impl VerifiableTransaction, - pov_daa_score: u64, - block_daa_score: u64, - ) -> TxResult<()> { - if let Some((index, (input, entry))) = tx.populated_inputs().enumerate().find(|(_, (_, entry))| { - entry.is_coinbase && entry.block_daa_score + self.coinbase_maturity.get(block_daa_score) > pov_daa_score - }) { + fn check_transaction_coinbase_maturity(&self, tx: &impl VerifiableTransaction, pov_daa_score: u64) -> TxResult<()> { + if let Some((index, (input, entry))) = tx + .populated_inputs() + .enumerate() + .find(|(_, (_, entry))| entry.is_coinbase && entry.block_daa_score + self.coinbase_maturity.after() > pov_daa_score) + { return Err(TxRuleError::ImmatureCoinbaseSpend( index, input.previous_outpoint, entry.block_daa_score, pov_daa_score, - self.coinbase_maturity.get(block_daa_score), + self.coinbase_maturity.after(), )); } @@ -173,54 +167,34 @@ impl TransactionValidator { Ok(()) } - pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + Sync), block_daa_score: u64) -> TxResult<()> { - check_scripts( - &self.sig_cache, - tx, - self.crescendo_activation.is_active(block_daa_score), - self.crescendo_activation.is_active(block_daa_score), - ) + pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { + check_scripts(&self.sig_cache, tx) } } -pub fn check_scripts( - sig_cache: &Cache, - tx: &(impl VerifiableTransaction + Sync), - kip10_enabled: bool, - 
runtime_sig_op_counting: bool, -) -> TxResult<()> { +pub fn check_scripts(sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { if tx.inputs().len() > CHECK_SCRIPTS_PARALLELISM_THRESHOLD { - check_scripts_par_iter(sig_cache, tx, kip10_enabled, runtime_sig_op_counting) + check_scripts_par_iter(sig_cache, tx) } else { - check_scripts_sequential(sig_cache, tx, kip10_enabled, runtime_sig_op_counting) + check_scripts_sequential(sig_cache, tx) } } -pub fn check_scripts_sequential( - sig_cache: &Cache, - tx: &impl VerifiableTransaction, - kip10_enabled: bool, - runtime_sig_op_counting: bool, -) -> TxResult<()> { +pub fn check_scripts_sequential(sig_cache: &Cache, tx: &impl VerifiableTransaction) -> TxResult<()> { let reused_values = SigHashReusedValuesUnsync::new(); for (i, (input, entry)) in tx.populated_inputs().enumerate() { - TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache, kip10_enabled, runtime_sig_op_counting) + TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache) .execute() .map_err(|err| map_script_err(err, input))?; } Ok(()) } -pub fn check_scripts_par_iter( - sig_cache: &Cache, - tx: &(impl VerifiableTransaction + Sync), - kip10_enabled: bool, - runtime_sig_op_counting: bool, -) -> TxResult<()> { +pub fn check_scripts_par_iter(sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { let reused_values = SigHashReusedValuesSync::new(); (0..tx.inputs().len()).into_par_iter().try_for_each(|idx| { let (input, utxo) = tx.populated_input(idx); - TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache, kip10_enabled, runtime_sig_op_counting) + TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache) .execute() .map_err(|err| map_script_err(err, input)) }) @@ -230,10 +204,8 @@ pub fn check_scripts_par_iter_pool( sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync), pool: &ThreadPool, - kip10_enabled: bool, - runtime_sig_op_counting: bool, ) -> TxResult<()> { - pool.install(|| check_scripts_par_iter(sig_cache, tx, kip10_enabled, runtime_sig_op_counting)) + pool.install(|| check_scripts_par_iter(sig_cache, tx)) } fn map_script_err(script_err: TxScriptError, input: &TransactionInput) -> TxRuleError { @@ -282,6 +254,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); @@ -327,13 +300,13 @@ mod tests { }], ); - tv.check_scripts(&populated_tx, u64::MAX).expect("Signature check failed"); + tv.check_scripts(&populated_tx).expect("Signature check failed"); // Test a tx with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); // Duplicated sigs should fail due to wrong sighash assert_eq!( - tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)) ); } @@ -350,6 +323,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); @@ -396,11 +370,11 @@ mod tests { }], ); - assert!(tv.check_scripts(&populated_tx, u64::MAX).is_err(), "Expecting signature check to fail"); + assert!(tv.check_scripts(&populated_tx).is_err(), "Expecting 
signature check to fail"); // Test a tx with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); - tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX).expect_err("Expecting signature check to fail"); + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)).expect_err("Expecting signature check to fail"); // Verify we are correctly testing the parallelism case (applied here as sanity for all tests) assert!( @@ -422,6 +396,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); @@ -468,13 +443,13 @@ mod tests { is_coinbase: false, }], ); - tv.check_scripts(&populated_tx, u64::MAX).expect("Signature check failed"); + tv.check_scripts(&populated_tx).expect("Signature check failed"); // Test a tx with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); // Duplicated sigs should fail due to wrong sighash assert_eq!( - tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) ); } @@ -491,6 +466,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); @@ -538,12 +514,12 @@ mod tests { }], ); - assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); // Test a tx with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); assert_eq!( - tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) ); } @@ -560,6 +536,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); @@ -607,12 +584,12 @@ mod tests { }], ); - assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); // Test a tx with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); assert_eq!( - tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) ); } @@ -629,6 +606,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); @@ -676,12 +654,12 @@ mod tests { }], ); - assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse))); + assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse))); // Test a tx 
with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); assert_eq!( - tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)) ); } @@ -697,6 +675,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); @@ -736,15 +715,12 @@ mod tests { }], ); - assert_eq!( - tv.check_scripts(&populated_tx, u64::MAX), - Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly)) - ); + assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly))); // Test a tx with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); assert_eq!( - tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly)) ); } @@ -759,6 +735,7 @@ mod tests { params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, params.prior_coinbase_maturity, + params.ghostdag_k().after(), Default::default(), ); @@ -824,7 +801,7 @@ mod tests { let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, &secret_key.secret_bytes()).unwrap(); let signed_tx = sign(MutableTransaction::with_entries(unsigned_tx, entries), schnorr_key); let populated_tx = signed_tx.as_verifiable(); - assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Ok(())); + assert_eq!(tv.check_scripts(&populated_tx), Ok(())); assert_eq!(TransactionValidator::check_sig_op_counts(&populated_tx), Ok(())); } } diff --git a/consensus/src/test_helpers.rs b/consensus/src/test_helpers.rs index b3867f145e..9a0666786a 100644 --- a/consensus/src/test_helpers.rs +++ b/consensus/src/test_helpers.rs @@ -94,7 +94,7 @@ pub fn generate_random_block( pub fn generate_random_header(rng: &mut SmallRng, parent_amount: usize) -> Header { Header::new_finalized( rng.gen(), - vec![generate_random_hashes(rng, parent_amount)], + vec![generate_random_hashes(rng, parent_amount)].try_into().unwrap(), generate_random_hash(rng), generate_random_hash(rng), generate_random_hash(rng), diff --git a/crypto/hashes/Cargo.toml b/crypto/hashes/Cargo.toml index ef36bc1b50..68396d434a 100644 --- a/crypto/hashes/Cargo.toml +++ b/crypto/hashes/Cargo.toml @@ -10,6 +10,7 @@ license.workspace = true repository.workspace = true [features] +default = ["keccak?/asm"] no-asm = ["keccak"] [dependencies] @@ -25,7 +26,7 @@ sha2.workspace = true wasm-bindgen.workspace = true workflow-wasm.workspace = true -[target.'cfg(any(target_os = "windows", not(target_arch = "x86_64")))'.dependencies] +[target.'cfg(not(target_arch = "x86_64"))'.dependencies] keccak.workspace = true [dev-dependencies] diff --git a/crypto/hashes/build.rs b/crypto/hashes/build.rs index 608ef3efe4..c4080e56d8 100644 --- a/crypto/hashes/build.rs +++ b/crypto/hashes/build.rs @@ -1,16 +1,23 @@ use std::env; fn main() -> Result<(), Box> { - println!("cargo:rerun-if-changed=src/keccakf1600_x86-64.s"); - println!("cargo:rerun-if-changed=src/keccakf1600_x86-64-osx.s"); + println!("cargo:rerun-if-changed=src/asm"); let target_arch = 
env::var("CARGO_CFG_TARGET_ARCH").unwrap(); let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap(); - if target_arch == "x86_64" && target_os != "windows" && target_os != "macos" { - cc::Build::new().flag("-c").file("src/keccakf1600_x86-64.s").compile("libkeccak.a"); - } - if target_arch == "x86_64" && target_os == "macos" { - cc::Build::new().flag("-c").file("src/keccakf1600_x86-64-osx.s").compile("libkeccak.a"); + let target_env = env::var("CARGO_CFG_TARGET_ENV").unwrap_or_default(); + + if target_arch == "x86_64" { + let mut builder = cc::Build::new(); + builder.flag("-c"); + match target_os.as_str() { + "macos" => builder.file("src/asm/keccakf1600_x86-64-osx.s"), + "linux" => builder.file("src/asm/keccakf1600_x86-64-elf.s"), + "windows" if target_env == "gnu" => builder.file("src/asm/keccakf1600_x86-64-mingw64.s"), + "windows" if target_env == "msvc" => builder.file("src/asm/keccakf1600_x86-64-msvc.asm"), + _ => unimplemented!("Unsupported OS"), + }; + builder.compile("libkeccak.a"); } Ok(()) } diff --git a/crypto/hashes/src/keccakf1600_x86-64.s b/crypto/hashes/src/asm/keccakf1600_x86-64-elf.s similarity index 67% rename from crypto/hashes/src/keccakf1600_x86-64.s rename to crypto/hashes/src/asm/keccakf1600_x86-64-elf.s index 7addce36ba..74c6b35132 100644 --- a/crypto/hashes/src/keccakf1600_x86-64.s +++ b/crypto/hashes/src/asm/keccakf1600_x86-64-elf.s @@ -294,6 +294,7 @@ KeccakF1600: subq $200,%rsp .cfi_adjust_cfa_offset 200 + notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) @@ -314,30 +315,190 @@ KeccakF1600: notq 60(%rdi) leaq -100(%rdi),%rdi - addq $200,%rsp -.cfi_adjust_cfa_offset -200 - - popq %r15 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r15 - popq %r14 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r14 - popq %r13 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r13 - popq %r12 -.cfi_adjust_cfa_offset -8 + leaq 248(%rsp),%r11 +.cfi_def_cfa %r11,8 + movq -48(%r11),%r15 + movq -40(%r11),%r14 + movq -32(%r11),%r13 + movq -24(%r11),%r12 + movq -16(%r11),%rbp + movq -8(%r11),%rbx + leaq (%r11),%rsp .cfi_restore %r12 - popq %rbp -.cfi_adjust_cfa_offset -8 +.cfi_restore %r13 +.cfi_restore %r14 +.cfi_restore %r15 .cfi_restore %rbp - popq %rbx -.cfi_adjust_cfa_offset -8 .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc .size KeccakF1600,.-KeccakF1600 +.globl SHA3_absorb +.type SHA3_absorb,@function +.align 32 +SHA3_absorb: +.cfi_startproc + .byte 0xf3,0x0f,0x1e,0xfa + + + pushq %rbx +.cfi_adjust_cfa_offset 8 +.cfi_offset %rbx,-16 + pushq %rbp +.cfi_adjust_cfa_offset 8 +.cfi_offset %rbp,-24 + pushq %r12 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r12,-32 + pushq %r13 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r13,-40 + pushq %r14 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r14,-48 + pushq %r15 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r15,-56 + + leaq 100(%rdi),%rdi + subq $232,%rsp +.cfi_adjust_cfa_offset 232 + + + movq %rsi,%r9 + leaq 100(%rsp),%rsi + + notq -92(%rdi) + notq -84(%rdi) + notq -36(%rdi) + notq -4(%rdi) + notq 36(%rdi) + notq 60(%rdi) + leaq iotas(%rip),%r15 + + movq %rcx,216-100(%rsi) + +.Loop_absorb: + cmpq %rcx,%rdx + jc .Ldone_absorb + + shrq $3,%rcx + leaq -100(%rdi),%r8 + +.Lblock_absorb: + movq (%r9),%rax + leaq 8(%r9),%r9 + xorq (%r8),%rax + leaq 8(%r8),%r8 + subq $8,%rdx + movq %rax,-8(%r8) + subq $1,%rcx + jnz .Lblock_absorb + + movq %r9,200-100(%rsi) + movq %rdx,208-100(%rsi) + call __KeccakF1600 + movq 200-100(%rsi),%r9 + movq 208-100(%rsi),%rdx + movq 216-100(%rsi),%rcx + jmp .Loop_absorb + +.align 32 +.Ldone_absorb: + movq %rdx,%rax + + notq -92(%rdi) + notq -84(%rdi) + notq -36(%rdi) + 
notq -4(%rdi) + notq 36(%rdi) + notq 60(%rdi) + + leaq 280(%rsp),%r11 +.cfi_def_cfa %r11,8 + movq -48(%r11),%r15 + movq -40(%r11),%r14 + movq -32(%r11),%r13 + movq -24(%r11),%r12 + movq -16(%r11),%rbp + movq -8(%r11),%rbx + leaq (%r11),%rsp +.cfi_restore %r12 +.cfi_restore %r13 +.cfi_restore %r14 +.cfi_restore %r15 +.cfi_restore %rbp +.cfi_restore %rbx + .byte 0xf3,0xc3 +.cfi_endproc +.size SHA3_absorb,.-SHA3_absorb +.globl SHA3_squeeze +.type SHA3_squeeze,@function +.align 32 +SHA3_squeeze: +.cfi_startproc + .byte 0xf3,0x0f,0x1e,0xfa + + + pushq %r12 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r12,-16 + pushq %r13 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r13,-24 + pushq %r14 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r14,-32 + subq $32,%rsp +.cfi_adjust_cfa_offset 32 + + + shrq $3,%rcx + movq %rdi,%r8 + movq %rsi,%r12 + movq %rdx,%r13 + movq %rcx,%r14 + jmp .Loop_squeeze + +.align 32 +.Loop_squeeze: + cmpq $8,%r13 + jb .Ltail_squeeze + + movq (%r8),%rax + leaq 8(%r8),%r8 + movq %rax,(%r12) + leaq 8(%r12),%r12 + subq $8,%r13 + jz .Ldone_squeeze + + subq $1,%rcx + jnz .Loop_squeeze + + movq %rdi,%rcx + call KeccakF1600 + movq %rdi,%r8 + movq %r14,%rcx + jmp .Loop_squeeze + +.Ltail_squeeze: + movq %r8,%rsi + movq %r12,%rdi + movq %r13,%rcx +.byte 0xf3,0xa4 + +.Ldone_squeeze: + movq 32(%rsp),%r14 + movq 40(%rsp),%r13 + movq 48(%rsp),%r12 + addq $56,%rsp +.cfi_adjust_cfa_offset -56 +.cfi_restore %r12 +.cfi_restore %r13 +.cfi_restore %r14 + .byte 0xf3,0xc3 +.cfi_endproc +.size SHA3_squeeze,.-SHA3_squeeze .align 256 .quad 0,0,0,0,0,0,0,0 .type iotas,@object diff --git a/crypto/hashes/src/asm/keccakf1600_x86-64-mingw64.s b/crypto/hashes/src/asm/keccakf1600_x86-64-mingw64.s new file mode 100644 index 0000000000..d23269424b --- /dev/null +++ b/crypto/hashes/src/asm/keccakf1600_x86-64-mingw64.s @@ -0,0 +1,650 @@ +# Source: https://github.com/dot-asm/cryptogams/blob/master/x86_64/keccak1600-x86_64.pl + +.text + +.def __KeccakF1600; .scl 3; .type 32; .endef +.p2align 5 +__KeccakF1600: + .byte 0xf3,0x0f,0x1e,0xfa + + movq 60(%rdi),%rax + movq 68(%rdi),%rbx + movq 76(%rdi),%rcx + movq 84(%rdi),%rdx + movq 92(%rdi),%rbp + jmp .Loop + +.p2align 5 +.Loop: + movq -100(%rdi),%r8 + movq -52(%rdi),%r9 + movq -4(%rdi),%r10 + movq 44(%rdi),%r11 + + xorq -84(%rdi),%rcx + xorq -76(%rdi),%rdx + xorq %r8,%rax + xorq -92(%rdi),%rbx + xorq -44(%rdi),%rcx + xorq -60(%rdi),%rax + movq %rbp,%r12 + xorq -68(%rdi),%rbp + + xorq %r10,%rcx + xorq -20(%rdi),%rax + xorq -36(%rdi),%rdx + xorq %r9,%rbx + xorq -28(%rdi),%rbp + + xorq 36(%rdi),%rcx + xorq 20(%rdi),%rax + xorq 4(%rdi),%rdx + xorq -12(%rdi),%rbx + xorq 12(%rdi),%rbp + + movq %rcx,%r13 + rolq $1,%rcx + xorq %rax,%rcx + xorq %r11,%rdx + + rolq $1,%rax + xorq %rdx,%rax + xorq 28(%rdi),%rbx + + rolq $1,%rdx + xorq %rbx,%rdx + xorq 52(%rdi),%rbp + + rolq $1,%rbx + xorq %rbp,%rbx + + rolq $1,%rbp + xorq %r13,%rbp + xorq %rcx,%r9 + xorq %rdx,%r10 + rolq $44,%r9 + xorq %rbp,%r11 + xorq %rax,%r12 + rolq $43,%r10 + xorq %rbx,%r8 + movq %r9,%r13 + rolq $21,%r11 + orq %r10,%r9 + xorq %r8,%r9 + rolq $14,%r12 + + xorq (%r15),%r9 + leaq 8(%r15),%r15 + + movq %r12,%r14 + andq %r11,%r12 + movq %r9,-100(%rsi) + xorq %r10,%r12 + notq %r10 + movq %r12,-84(%rsi) + + orq %r11,%r10 + movq 76(%rdi),%r12 + xorq %r13,%r10 + movq %r10,-92(%rsi) + + andq %r8,%r13 + movq -28(%rdi),%r9 + xorq %r14,%r13 + movq -20(%rdi),%r10 + movq %r13,-68(%rsi) + + orq %r8,%r14 + movq -76(%rdi),%r8 + xorq %r11,%r14 + movq 28(%rdi),%r11 + movq %r14,-76(%rsi) + + + xorq %rbp,%r8 + xorq %rdx,%r12 + rolq $28,%r8 + xorq 
%rcx,%r11 + xorq %rax,%r9 + rolq $61,%r12 + rolq $45,%r11 + xorq %rbx,%r10 + rolq $20,%r9 + movq %r8,%r13 + orq %r12,%r8 + rolq $3,%r10 + + xorq %r11,%r8 + movq %r8,-36(%rsi) + + movq %r9,%r14 + andq %r13,%r9 + movq -92(%rdi),%r8 + xorq %r12,%r9 + notq %r12 + movq %r9,-28(%rsi) + + orq %r11,%r12 + movq -44(%rdi),%r9 + xorq %r10,%r12 + movq %r12,-44(%rsi) + + andq %r10,%r11 + movq 60(%rdi),%r12 + xorq %r14,%r11 + movq %r11,-52(%rsi) + + orq %r10,%r14 + movq 4(%rdi),%r10 + xorq %r13,%r14 + movq 52(%rdi),%r11 + movq %r14,-60(%rsi) + + + xorq %rbp,%r10 + xorq %rax,%r11 + rolq $25,%r10 + xorq %rdx,%r9 + rolq $8,%r11 + xorq %rbx,%r12 + rolq $6,%r9 + xorq %rcx,%r8 + rolq $18,%r12 + movq %r10,%r13 + andq %r11,%r10 + rolq $1,%r8 + + notq %r11 + xorq %r9,%r10 + movq %r10,-12(%rsi) + + movq %r12,%r14 + andq %r11,%r12 + movq -12(%rdi),%r10 + xorq %r13,%r12 + movq %r12,-4(%rsi) + + orq %r9,%r13 + movq 84(%rdi),%r12 + xorq %r8,%r13 + movq %r13,-20(%rsi) + + andq %r8,%r9 + xorq %r14,%r9 + movq %r9,12(%rsi) + + orq %r8,%r14 + movq -60(%rdi),%r9 + xorq %r11,%r14 + movq 36(%rdi),%r11 + movq %r14,4(%rsi) + + + movq -68(%rdi),%r8 + + xorq %rcx,%r10 + xorq %rdx,%r11 + rolq $10,%r10 + xorq %rbx,%r9 + rolq $15,%r11 + xorq %rbp,%r12 + rolq $36,%r9 + xorq %rax,%r8 + rolq $56,%r12 + movq %r10,%r13 + orq %r11,%r10 + rolq $27,%r8 + + notq %r11 + xorq %r9,%r10 + movq %r10,28(%rsi) + + movq %r12,%r14 + orq %r11,%r12 + xorq %r13,%r12 + movq %r12,36(%rsi) + + andq %r9,%r13 + xorq %r8,%r13 + movq %r13,20(%rsi) + + orq %r8,%r9 + xorq %r14,%r9 + movq %r9,52(%rsi) + + andq %r14,%r8 + xorq %r11,%r8 + movq %r8,44(%rsi) + + + xorq -84(%rdi),%rdx + xorq -36(%rdi),%rbp + rolq $62,%rdx + xorq 68(%rdi),%rcx + rolq $55,%rbp + xorq 12(%rdi),%rax + rolq $2,%rcx + xorq 20(%rdi),%rbx + xchgq %rsi,%rdi + rolq $39,%rax + rolq $41,%rbx + movq %rdx,%r13 + andq %rbp,%rdx + notq %rbp + xorq %rcx,%rdx + movq %rdx,92(%rdi) + + movq %rax,%r14 + andq %rbp,%rax + xorq %r13,%rax + movq %rax,60(%rdi) + + orq %rcx,%r13 + xorq %rbx,%r13 + movq %r13,84(%rdi) + + andq %rbx,%rcx + xorq %r14,%rcx + movq %rcx,76(%rdi) + + orq %r14,%rbx + xorq %rbp,%rbx + movq %rbx,68(%rdi) + + movq %rdx,%rbp + movq %r13,%rdx + + testq $255,%r15 + jnz .Loop + + leaq -192(%r15),%r15 + .byte 0xf3,0xc3 + + +.globl KeccakF1600 +.def KeccakF1600; .scl 2; .type 32; .endef +.p2align 5 +KeccakF1600: + .byte 0xf3,0x0f,0x1e,0xfa + movq %rdi,8(%rsp) + movq %rsi,16(%rsp) + movq %rsp,%r11 +.LSEH_begin_KeccakF1600: + + + movq %rcx,%rdi + pushq %rbx + + pushq %rbp + + pushq %r12 + + pushq %r13 + + pushq %r14 + + pushq %r15 + + + leaq 100(%rdi),%rdi + subq $200,%rsp + +.LSEH_body_KeccakF1600: + + + notq -92(%rdi) + notq -84(%rdi) + notq -36(%rdi) + notq -4(%rdi) + notq 36(%rdi) + notq 60(%rdi) + + leaq iotas(%rip),%r15 + leaq 100(%rsp),%rsi + + call __KeccakF1600 + + notq -92(%rdi) + notq -84(%rdi) + notq -36(%rdi) + notq -4(%rdi) + notq 36(%rdi) + notq 60(%rdi) + leaq -100(%rdi),%rdi + + leaq 248(%rsp),%r11 + + movq -48(%r11),%r15 + movq -40(%r11),%r14 + movq -32(%r11),%r13 + movq -24(%r11),%r12 + movq -16(%r11),%rbp + movq -8(%r11),%rbx + leaq (%r11),%rsp +.LSEH_epilogue_KeccakF1600: + mov 8(%r11),%rdi + mov 16(%r11),%rsi + + .byte 0xf3,0xc3 + +.LSEH_end_KeccakF1600: +.globl SHA3_absorb +.def SHA3_absorb; .scl 2; .type 32; .endef +.p2align 5 +SHA3_absorb: + .byte 0xf3,0x0f,0x1e,0xfa + movq %rdi,8(%rsp) + movq %rsi,16(%rsp) + movq %rsp,%r11 +.LSEH_begin_SHA3_absorb: + + + movq %rcx,%rdi + movq %rdx,%rsi + movq %r8,%rdx + movq %r9,%rcx + pushq %rbx + + pushq %rbp + + pushq %r12 + + pushq 
%r13 + + pushq %r14 + + pushq %r15 + + + leaq 100(%rdi),%rdi + subq $232,%rsp + +.LSEH_body_SHA3_absorb: + + + movq %rsi,%r9 + leaq 100(%rsp),%rsi + + notq -92(%rdi) + notq -84(%rdi) + notq -36(%rdi) + notq -4(%rdi) + notq 36(%rdi) + notq 60(%rdi) + leaq iotas(%rip),%r15 + + movq %rcx,216-100(%rsi) + +.Loop_absorb: + cmpq %rcx,%rdx + jc .Ldone_absorb + + shrq $3,%rcx + leaq -100(%rdi),%r8 + +.Lblock_absorb: + movq (%r9),%rax + leaq 8(%r9),%r9 + xorq (%r8),%rax + leaq 8(%r8),%r8 + subq $8,%rdx + movq %rax,-8(%r8) + subq $1,%rcx + jnz .Lblock_absorb + + movq %r9,200-100(%rsi) + movq %rdx,208-100(%rsi) + call __KeccakF1600 + movq 200-100(%rsi),%r9 + movq 208-100(%rsi),%rdx + movq 216-100(%rsi),%rcx + jmp .Loop_absorb + +.p2align 5 +.Ldone_absorb: + movq %rdx,%rax + + notq -92(%rdi) + notq -84(%rdi) + notq -36(%rdi) + notq -4(%rdi) + notq 36(%rdi) + notq 60(%rdi) + + leaq 280(%rsp),%r11 + + movq -48(%r11),%r15 + movq -40(%r11),%r14 + movq -32(%r11),%r13 + movq -24(%r11),%r12 + movq -16(%r11),%rbp + movq -8(%r11),%rbx + leaq (%r11),%rsp +.LSEH_epilogue_SHA3_absorb: + mov 8(%r11),%rdi + mov 16(%r11),%rsi + + .byte 0xf3,0xc3 + +.LSEH_end_SHA3_absorb: +.globl SHA3_squeeze +.def SHA3_squeeze; .scl 2; .type 32; .endef +.p2align 5 +SHA3_squeeze: + .byte 0xf3,0x0f,0x1e,0xfa + movq %rdi,8(%rsp) + movq %rsi,16(%rsp) + movq %rsp,%r11 +.LSEH_begin_SHA3_squeeze: + + + movq %rcx,%rdi + movq %rdx,%rsi + movq %r8,%rdx + movq %r9,%rcx + pushq %r12 + + pushq %r13 + + pushq %r14 + + subq $32,%rsp + +.LSEH_body_SHA3_squeeze: + + + shrq $3,%rcx + movq %rdi,%r8 + movq %rsi,%r12 + movq %rdx,%r13 + movq %rcx,%r14 + jmp .Loop_squeeze + +.p2align 5 +.Loop_squeeze: + cmpq $8,%r13 + jb .Ltail_squeeze + + movq (%r8),%rax + leaq 8(%r8),%r8 + movq %rax,(%r12) + leaq 8(%r12),%r12 + subq $8,%r13 + jz .Ldone_squeeze + + subq $1,%rcx + jnz .Loop_squeeze + + movq %rdi,%rcx + call KeccakF1600 + movq %rdi,%r8 + movq %r14,%rcx + jmp .Loop_squeeze + +.Ltail_squeeze: + movq %r8,%rsi + movq %r12,%rdi + movq %r13,%rcx +.byte 0xf3,0xa4 + +.Ldone_squeeze: + movq 32(%rsp),%r14 + movq 40(%rsp),%r13 + movq 48(%rsp),%r12 + addq $56,%rsp + +.LSEH_epilogue_SHA3_squeeze: + mov 8(%rsp),%rdi + mov 16(%rsp),%rsi + + .byte 0xf3,0xc3 + +.LSEH_end_SHA3_squeeze: +.p2align 8 +.quad 0,0,0,0,0,0,0,0 + +iotas: +.quad 0x0000000000000001 +.quad 0x0000000000008082 +.quad 0x800000000000808a +.quad 0x8000000080008000 +.quad 0x000000000000808b +.quad 0x0000000080000001 +.quad 0x8000000080008081 +.quad 0x8000000000008009 +.quad 0x000000000000008a +.quad 0x0000000000000088 +.quad 0x0000000080008009 +.quad 0x000000008000000a +.quad 0x000000008000808b +.quad 0x800000000000008b +.quad 0x8000000000008089 +.quad 0x8000000000008003 +.quad 0x8000000000008002 +.quad 0x8000000000000080 +.quad 0x000000000000800a +.quad 0x800000008000000a +.quad 0x8000000080008081 +.quad 0x8000000000008080 +.quad 0x0000000080000001 +.quad 0x8000000080008008 + +.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 +.section .pdata +.p2align 2 +.rva .LSEH_begin_KeccakF1600 +.rva .LSEH_body_KeccakF1600 +.rva .LSEH_info_KeccakF1600_prologue + +.rva .LSEH_body_KeccakF1600 +.rva .LSEH_epilogue_KeccakF1600 +.rva .LSEH_info_KeccakF1600_body + +.rva .LSEH_epilogue_KeccakF1600 +.rva .LSEH_end_KeccakF1600 +.rva .LSEH_info_KeccakF1600_epilogue + +.rva .LSEH_begin_SHA3_absorb +.rva 
.LSEH_body_SHA3_absorb +.rva .LSEH_info_SHA3_absorb_prologue + +.rva .LSEH_body_SHA3_absorb +.rva .LSEH_epilogue_SHA3_absorb +.rva .LSEH_info_SHA3_absorb_body + +.rva .LSEH_epilogue_SHA3_absorb +.rva .LSEH_end_SHA3_absorb +.rva .LSEH_info_SHA3_absorb_epilogue + +.rva .LSEH_begin_SHA3_squeeze +.rva .LSEH_body_SHA3_squeeze +.rva .LSEH_info_SHA3_squeeze_prologue + +.rva .LSEH_body_SHA3_squeeze +.rva .LSEH_epilogue_SHA3_squeeze +.rva .LSEH_info_SHA3_squeeze_body + +.rva .LSEH_epilogue_SHA3_squeeze +.rva .LSEH_end_SHA3_squeeze +.rva .LSEH_info_SHA3_squeeze_epilogue + +.section .xdata +.p2align 3 +.LSEH_info_KeccakF1600_prologue: +.byte 1,0,5,0x0b +.byte 0,0x74,1,0 +.byte 0,0x64,2,0 +.byte 0,0xb3 +.byte 0,0 +.long 0,0 +.LSEH_info_KeccakF1600_body: +.byte 1,0,18,0 +.byte 0x00,0xf4,0x19,0x00 +.byte 0x00,0xe4,0x1a,0x00 +.byte 0x00,0xd4,0x1b,0x00 +.byte 0x00,0xc4,0x1c,0x00 +.byte 0x00,0x54,0x1d,0x00 +.byte 0x00,0x34,0x1e,0x00 +.byte 0x00,0x74,0x20,0x00 +.byte 0x00,0x64,0x21,0x00 +.byte 0x00,0x01,0x1f,0x00 +.byte 0x00,0x00,0x00,0x00 +.byte 0x00,0x00,0x00,0x00 +.LSEH_info_KeccakF1600_epilogue: +.byte 1,0,5,11 +.byte 0x00,0x74,0x01,0x00 +.byte 0x00,0x64,0x02,0x00 +.byte 0x00,0xb3 +.byte 0x00,0x00,0x00,0x00,0x00,0x00 +.byte 0x00,0x00,0x00,0x00 + +.LSEH_info_SHA3_absorb_prologue: +.byte 1,0,5,0x0b +.byte 0,0x74,1,0 +.byte 0,0x64,2,0 +.byte 0,0xb3 +.byte 0,0 +.long 0,0 +.LSEH_info_SHA3_absorb_body: +.byte 1,0,18,0 +.byte 0x00,0xf4,0x1d,0x00 +.byte 0x00,0xe4,0x1e,0x00 +.byte 0x00,0xd4,0x1f,0x00 +.byte 0x00,0xc4,0x20,0x00 +.byte 0x00,0x54,0x21,0x00 +.byte 0x00,0x34,0x22,0x00 +.byte 0x00,0x74,0x24,0x00 +.byte 0x00,0x64,0x25,0x00 +.byte 0x00,0x01,0x23,0x00 +.byte 0x00,0x00,0x00,0x00 +.byte 0x00,0x00,0x00,0x00 +.LSEH_info_SHA3_absorb_epilogue: +.byte 1,0,5,11 +.byte 0x00,0x74,0x01,0x00 +.byte 0x00,0x64,0x02,0x00 +.byte 0x00,0xb3 +.byte 0x00,0x00,0x00,0x00,0x00,0x00 +.byte 0x00,0x00,0x00,0x00 + +.LSEH_info_SHA3_squeeze_prologue: +.byte 1,0,5,0x0b +.byte 0,0x74,1,0 +.byte 0,0x64,2,0 +.byte 0,0xb3 +.byte 0,0 +.long 0,0 +.LSEH_info_SHA3_squeeze_body: +.byte 1,0,11,0 +.byte 0x00,0xe4,0x04,0x00 +.byte 0x00,0xd4,0x05,0x00 +.byte 0x00,0xc4,0x06,0x00 +.byte 0x00,0x74,0x08,0x00 +.byte 0x00,0x64,0x09,0x00 +.byte 0x00,0x62 +.byte 0x00,0x00,0x00,0x00,0x00,0x00 +.LSEH_info_SHA3_squeeze_epilogue: +.byte 1,0,4,0 +.byte 0x00,0x74,0x01,0x00 +.byte 0x00,0x64,0x02,0x00 +.byte 0x00,0x00,0x00,0x00 + diff --git a/crypto/hashes/src/asm/keccakf1600_x86-64-msvc.asm b/crypto/hashes/src/asm/keccakf1600_x86-64-msvc.asm new file mode 100644 index 0000000000..6860e31870 --- /dev/null +++ b/crypto/hashes/src/asm/keccakf1600_x86-64-msvc.asm @@ -0,0 +1,661 @@ +OPTION DOTNAME +.text$ SEGMENT ALIGN(256) 'CODE' + + +ALIGN 32 +__KeccakF1600 PROC PRIVATE + DB 243,15,30,250 + + mov rax,QWORD PTR[60+rdi] + mov rbx,QWORD PTR[68+rdi] + mov rcx,QWORD PTR[76+rdi] + mov rdx,QWORD PTR[84+rdi] + mov rbp,QWORD PTR[92+rdi] + jmp $L$oop + +ALIGN 32 +$L$oop:: + mov r8,QWORD PTR[((-100))+rdi] + mov r9,QWORD PTR[((-52))+rdi] + mov r10,QWORD PTR[((-4))+rdi] + mov r11,QWORD PTR[44+rdi] + + xor rcx,QWORD PTR[((-84))+rdi] + xor rdx,QWORD PTR[((-76))+rdi] + xor rax,r8 + xor rbx,QWORD PTR[((-92))+rdi] + xor rcx,QWORD PTR[((-44))+rdi] + xor rax,QWORD PTR[((-60))+rdi] + mov r12,rbp + xor rbp,QWORD PTR[((-68))+rdi] + + xor rcx,r10 + xor rax,QWORD PTR[((-20))+rdi] + xor rdx,QWORD PTR[((-36))+rdi] + xor rbx,r9 + xor rbp,QWORD PTR[((-28))+rdi] + + xor rcx,QWORD PTR[36+rdi] + xor rax,QWORD PTR[20+rdi] + xor rdx,QWORD PTR[4+rdi] + xor rbx,QWORD PTR[((-12))+rdi] + xor rbp,QWORD 
PTR[12+rdi] + + mov r13,rcx + rol rcx,1 + xor rcx,rax + xor rdx,r11 + + rol rax,1 + xor rax,rdx + xor rbx,QWORD PTR[28+rdi] + + rol rdx,1 + xor rdx,rbx + xor rbp,QWORD PTR[52+rdi] + + rol rbx,1 + xor rbx,rbp + + rol rbp,1 + xor rbp,r13 + xor r9,rcx + xor r10,rdx + rol r9,44 + xor r11,rbp + xor r12,rax + rol r10,43 + xor r8,rbx + mov r13,r9 + rol r11,21 + or r9,r10 + xor r9,r8 + rol r12,14 + + xor r9,QWORD PTR[r15] + lea r15,QWORD PTR[8+r15] + + mov r14,r12 + and r12,r11 + mov QWORD PTR[((-100))+rsi],r9 + xor r12,r10 + not r10 + mov QWORD PTR[((-84))+rsi],r12 + + or r10,r11 + mov r12,QWORD PTR[76+rdi] + xor r10,r13 + mov QWORD PTR[((-92))+rsi],r10 + + and r13,r8 + mov r9,QWORD PTR[((-28))+rdi] + xor r13,r14 + mov r10,QWORD PTR[((-20))+rdi] + mov QWORD PTR[((-68))+rsi],r13 + + or r14,r8 + mov r8,QWORD PTR[((-76))+rdi] + xor r14,r11 + mov r11,QWORD PTR[28+rdi] + mov QWORD PTR[((-76))+rsi],r14 + + + xor r8,rbp + xor r12,rdx + rol r8,28 + xor r11,rcx + xor r9,rax + rol r12,61 + rol r11,45 + xor r10,rbx + rol r9,20 + mov r13,r8 + or r8,r12 + rol r10,3 + + xor r8,r11 + mov QWORD PTR[((-36))+rsi],r8 + + mov r14,r9 + and r9,r13 + mov r8,QWORD PTR[((-92))+rdi] + xor r9,r12 + not r12 + mov QWORD PTR[((-28))+rsi],r9 + + or r12,r11 + mov r9,QWORD PTR[((-44))+rdi] + xor r12,r10 + mov QWORD PTR[((-44))+rsi],r12 + + and r11,r10 + mov r12,QWORD PTR[60+rdi] + xor r11,r14 + mov QWORD PTR[((-52))+rsi],r11 + + or r14,r10 + mov r10,QWORD PTR[4+rdi] + xor r14,r13 + mov r11,QWORD PTR[52+rdi] + mov QWORD PTR[((-60))+rsi],r14 + + + xor r10,rbp + xor r11,rax + rol r10,25 + xor r9,rdx + rol r11,8 + xor r12,rbx + rol r9,6 + xor r8,rcx + rol r12,18 + mov r13,r10 + and r10,r11 + rol r8,1 + + not r11 + xor r10,r9 + mov QWORD PTR[((-12))+rsi],r10 + + mov r14,r12 + and r12,r11 + mov r10,QWORD PTR[((-12))+rdi] + xor r12,r13 + mov QWORD PTR[((-4))+rsi],r12 + + or r13,r9 + mov r12,QWORD PTR[84+rdi] + xor r13,r8 + mov QWORD PTR[((-20))+rsi],r13 + + and r9,r8 + xor r9,r14 + mov QWORD PTR[12+rsi],r9 + + or r14,r8 + mov r9,QWORD PTR[((-60))+rdi] + xor r14,r11 + mov r11,QWORD PTR[36+rdi] + mov QWORD PTR[4+rsi],r14 + + + mov r8,QWORD PTR[((-68))+rdi] + + xor r10,rcx + xor r11,rdx + rol r10,10 + xor r9,rbx + rol r11,15 + xor r12,rbp + rol r9,36 + xor r8,rax + rol r12,56 + mov r13,r10 + or r10,r11 + rol r8,27 + + not r11 + xor r10,r9 + mov QWORD PTR[28+rsi],r10 + + mov r14,r12 + or r12,r11 + xor r12,r13 + mov QWORD PTR[36+rsi],r12 + + and r13,r9 + xor r13,r8 + mov QWORD PTR[20+rsi],r13 + + or r9,r8 + xor r9,r14 + mov QWORD PTR[52+rsi],r9 + + and r8,r14 + xor r8,r11 + mov QWORD PTR[44+rsi],r8 + + + xor rdx,QWORD PTR[((-84))+rdi] + xor rbp,QWORD PTR[((-36))+rdi] + rol rdx,62 + xor rcx,QWORD PTR[68+rdi] + rol rbp,55 + xor rax,QWORD PTR[12+rdi] + rol rcx,2 + xor rbx,QWORD PTR[20+rdi] + xchg rdi,rsi + rol rax,39 + rol rbx,41 + mov r13,rdx + and rdx,rbp + not rbp + xor rdx,rcx + mov QWORD PTR[92+rdi],rdx + + mov r14,rax + and rax,rbp + xor rax,r13 + mov QWORD PTR[60+rdi],rax + + or r13,rcx + xor r13,rbx + mov QWORD PTR[84+rdi],r13 + + and rcx,rbx + xor rcx,r14 + mov QWORD PTR[76+rdi],rcx + + or rbx,r14 + xor rbx,rbp + mov QWORD PTR[68+rdi],rbx + + mov rbp,rdx + mov rdx,r13 + + test r15,255 + jnz $L$oop + + lea r15,QWORD PTR[((-192))+r15] + DB 0F3h,0C3h ;repret +__KeccakF1600 ENDP + +PUBLIC KeccakF1600 + +ALIGN 32 +KeccakF1600 PROC PUBLIC + DB 243,15,30,250 + mov QWORD PTR[8+rsp],rdi ;WIN64 prologue + mov QWORD PTR[16+rsp],rsi + mov r11,rsp +$L$SEH_begin_KeccakF1600:: + + + mov rdi,rcx + push rbx + + push rbp + + push r12 + + push r13 + + 
push r14 + + push r15 + + + lea rdi,QWORD PTR[100+rdi] + sub rsp,200 + +$L$SEH_body_KeccakF1600:: + + + not QWORD PTR[((-92))+rdi] + not QWORD PTR[((-84))+rdi] + not QWORD PTR[((-36))+rdi] + not QWORD PTR[((-4))+rdi] + not QWORD PTR[36+rdi] + not QWORD PTR[60+rdi] + + lea r15,QWORD PTR[iotas] + lea rsi,QWORD PTR[100+rsp] + + call __KeccakF1600 + + not QWORD PTR[((-92))+rdi] + not QWORD PTR[((-84))+rdi] + not QWORD PTR[((-36))+rdi] + not QWORD PTR[((-4))+rdi] + not QWORD PTR[36+rdi] + not QWORD PTR[60+rdi] + lea rdi,QWORD PTR[((-100))+rdi] + + lea r11,QWORD PTR[248+rsp] + + mov r15,QWORD PTR[((-48))+r11] + mov r14,QWORD PTR[((-40))+r11] + mov r13,QWORD PTR[((-32))+r11] + mov r12,QWORD PTR[((-24))+r11] + mov rbp,QWORD PTR[((-16))+r11] + mov rbx,QWORD PTR[((-8))+r11] + lea rsp,QWORD PTR[r11] +$L$SEH_epilogue_KeccakF1600:: + mov rdi,QWORD PTR[8+r11] ;WIN64 epilogue + mov rsi,QWORD PTR[16+r11] + + DB 0F3h,0C3h ;repret + +$L$SEH_end_KeccakF1600:: +KeccakF1600 ENDP +PUBLIC SHA3_absorb + +ALIGN 32 +SHA3_absorb PROC PUBLIC + DB 243,15,30,250 + mov QWORD PTR[8+rsp],rdi ;WIN64 prologue + mov QWORD PTR[16+rsp],rsi + mov r11,rsp +$L$SEH_begin_SHA3_absorb:: + + + mov rdi,rcx + mov rsi,rdx + mov rdx,r8 + mov rcx,r9 + push rbx + + push rbp + + push r12 + + push r13 + + push r14 + + push r15 + + + lea rdi,QWORD PTR[100+rdi] + sub rsp,232 + +$L$SEH_body_SHA3_absorb:: + + + mov r9,rsi + lea rsi,QWORD PTR[100+rsp] + + not QWORD PTR[((-92))+rdi] + not QWORD PTR[((-84))+rdi] + not QWORD PTR[((-36))+rdi] + not QWORD PTR[((-4))+rdi] + not QWORD PTR[36+rdi] + not QWORD PTR[60+rdi] + lea r15,QWORD PTR[iotas] + + mov QWORD PTR[((216-100))+rsi],rcx + +$L$oop_absorb:: + cmp rdx,rcx + jc $L$done_absorb + + shr rcx,3 + lea r8,QWORD PTR[((-100))+rdi] + +$L$block_absorb:: + mov rax,QWORD PTR[r9] + lea r9,QWORD PTR[8+r9] + xor rax,QWORD PTR[r8] + lea r8,QWORD PTR[8+r8] + sub rdx,8 + mov QWORD PTR[((-8))+r8],rax + sub rcx,1 + jnz $L$block_absorb + + mov QWORD PTR[((200-100))+rsi],r9 + mov QWORD PTR[((208-100))+rsi],rdx + call __KeccakF1600 + mov r9,QWORD PTR[((200-100))+rsi] + mov rdx,QWORD PTR[((208-100))+rsi] + mov rcx,QWORD PTR[((216-100))+rsi] + jmp $L$oop_absorb + +ALIGN 32 +$L$done_absorb:: + mov rax,rdx + + not QWORD PTR[((-92))+rdi] + not QWORD PTR[((-84))+rdi] + not QWORD PTR[((-36))+rdi] + not QWORD PTR[((-4))+rdi] + not QWORD PTR[36+rdi] + not QWORD PTR[60+rdi] + + lea r11,QWORD PTR[280+rsp] + + mov r15,QWORD PTR[((-48))+r11] + mov r14,QWORD PTR[((-40))+r11] + mov r13,QWORD PTR[((-32))+r11] + mov r12,QWORD PTR[((-24))+r11] + mov rbp,QWORD PTR[((-16))+r11] + mov rbx,QWORD PTR[((-8))+r11] + lea rsp,QWORD PTR[r11] +$L$SEH_epilogue_SHA3_absorb:: + mov rdi,QWORD PTR[8+r11] ;WIN64 epilogue + mov rsi,QWORD PTR[16+r11] + + DB 0F3h,0C3h ;repret + +$L$SEH_end_SHA3_absorb:: +SHA3_absorb ENDP +PUBLIC SHA3_squeeze + +ALIGN 32 +SHA3_squeeze PROC PUBLIC + DB 243,15,30,250 + mov QWORD PTR[8+rsp],rdi ;WIN64 prologue + mov QWORD PTR[16+rsp],rsi + mov r11,rsp +$L$SEH_begin_SHA3_squeeze:: + + + mov rdi,rcx + mov rsi,rdx + mov rdx,r8 + mov rcx,r9 + push r12 + + push r13 + + push r14 + + sub rsp,32 + +$L$SEH_body_SHA3_squeeze:: + + + shr rcx,3 + mov r8,rdi + mov r12,rsi + mov r13,rdx + mov r14,rcx + jmp $L$oop_squeeze + +ALIGN 32 +$L$oop_squeeze:: + cmp r13,8 + jb $L$tail_squeeze + + mov rax,QWORD PTR[r8] + lea r8,QWORD PTR[8+r8] + mov QWORD PTR[r12],rax + lea r12,QWORD PTR[8+r12] + sub r13,8 + jz $L$done_squeeze + + sub rcx,1 + jnz $L$oop_squeeze + + mov rcx,rdi + call KeccakF1600 + mov r8,rdi + mov rcx,r14 + jmp $L$oop_squeeze + 
+$L$tail_squeeze:: + mov rsi,r8 + mov rdi,r12 + mov rcx,r13 +DB 0f3h,0a4h + +$L$done_squeeze:: + mov r14,QWORD PTR[32+rsp] + mov r13,QWORD PTR[40+rsp] + mov r12,QWORD PTR[48+rsp] + add rsp,56 + +$L$SEH_epilogue_SHA3_squeeze:: + mov rdi,QWORD PTR[8+rsp] ;WIN64 epilogue + mov rsi,QWORD PTR[16+rsp] + + DB 0F3h,0C3h ;repret + +$L$SEH_end_SHA3_squeeze:: +SHA3_squeeze ENDP +ALIGN 256 + DQ 0,0,0,0,0,0,0,0 + +iotas:: + DQ 00000000000000001h + DQ 00000000000008082h + DQ 0800000000000808ah + DQ 08000000080008000h + DQ 0000000000000808bh + DQ 00000000080000001h + DQ 08000000080008081h + DQ 08000000000008009h + DQ 0000000000000008ah + DQ 00000000000000088h + DQ 00000000080008009h + DQ 0000000008000000ah + DQ 0000000008000808bh + DQ 0800000000000008bh + DQ 08000000000008089h + DQ 08000000000008003h + DQ 08000000000008002h + DQ 08000000000000080h + DQ 0000000000000800ah + DQ 0800000008000000ah + DQ 08000000080008081h + DQ 08000000000008080h + DQ 00000000080000001h + DQ 08000000080008008h + +DB 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111 +DB 114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102 +DB 111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84 +DB 79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64 +DB 111,112,101,110,115,115,108,46,111,114,103,62,0 +.text$ ENDS +.pdata SEGMENT READONLY ALIGN(4) +ALIGN 4 + DD imagerel $L$SEH_begin_KeccakF1600 + DD imagerel $L$SEH_body_KeccakF1600 + DD imagerel $L$SEH_info_KeccakF1600_prologue + + DD imagerel $L$SEH_body_KeccakF1600 + DD imagerel $L$SEH_epilogue_KeccakF1600 + DD imagerel $L$SEH_info_KeccakF1600_body + + DD imagerel $L$SEH_epilogue_KeccakF1600 + DD imagerel $L$SEH_end_KeccakF1600 + DD imagerel $L$SEH_info_KeccakF1600_epilogue + + DD imagerel $L$SEH_begin_SHA3_absorb + DD imagerel $L$SEH_body_SHA3_absorb + DD imagerel $L$SEH_info_SHA3_absorb_prologue + + DD imagerel $L$SEH_body_SHA3_absorb + DD imagerel $L$SEH_epilogue_SHA3_absorb + DD imagerel $L$SEH_info_SHA3_absorb_body + + DD imagerel $L$SEH_epilogue_SHA3_absorb + DD imagerel $L$SEH_end_SHA3_absorb + DD imagerel $L$SEH_info_SHA3_absorb_epilogue + + DD imagerel $L$SEH_begin_SHA3_squeeze + DD imagerel $L$SEH_body_SHA3_squeeze + DD imagerel $L$SEH_info_SHA3_squeeze_prologue + + DD imagerel $L$SEH_body_SHA3_squeeze + DD imagerel $L$SEH_epilogue_SHA3_squeeze + DD imagerel $L$SEH_info_SHA3_squeeze_body + + DD imagerel $L$SEH_epilogue_SHA3_squeeze + DD imagerel $L$SEH_end_SHA3_squeeze + DD imagerel $L$SEH_info_SHA3_squeeze_epilogue + +.pdata ENDS +.xdata SEGMENT READONLY ALIGN(8) +ALIGN 8 +$L$SEH_info_KeccakF1600_prologue:: +DB 1,0,5,00bh +DB 0,074h,1,0 +DB 0,064h,2,0 +DB 0,0b3h +DB 0,0 + DD 0,0 +$L$SEH_info_KeccakF1600_body:: +DB 1,0,18,0 +DB 000h,0f4h,019h,000h +DB 000h,0e4h,01ah,000h +DB 000h,0d4h,01bh,000h +DB 000h,0c4h,01ch,000h +DB 000h,054h,01dh,000h +DB 000h,034h,01eh,000h +DB 000h,074h,020h,000h +DB 000h,064h,021h,000h +DB 000h,001h,01fh,000h +DB 000h,000h,000h,000h +DB 000h,000h,000h,000h +$L$SEH_info_KeccakF1600_epilogue:: +DB 1,0,5,11 +DB 000h,074h,001h,000h +DB 000h,064h,002h,000h +DB 000h,0b3h +DB 000h,000h,000h,000h,000h,000h +DB 000h,000h,000h,000h + +$L$SEH_info_SHA3_absorb_prologue:: +DB 1,0,5,00bh +DB 0,074h,1,0 +DB 0,064h,2,0 +DB 0,0b3h +DB 0,0 + DD 0,0 +$L$SEH_info_SHA3_absorb_body:: +DB 1,0,18,0 +DB 000h,0f4h,01dh,000h +DB 000h,0e4h,01eh,000h +DB 000h,0d4h,01fh,000h +DB 000h,0c4h,020h,000h +DB 000h,054h,021h,000h +DB 000h,034h,022h,000h +DB 000h,074h,024h,000h +DB 000h,064h,025h,000h +DB 000h,001h,023h,000h +DB 000h,000h,000h,000h +DB 000h,000h,000h,000h 
+$L$SEH_info_SHA3_absorb_epilogue:: +DB 1,0,5,11 +DB 000h,074h,001h,000h +DB 000h,064h,002h,000h +DB 000h,0b3h +DB 000h,000h,000h,000h,000h,000h +DB 000h,000h,000h,000h + +$L$SEH_info_SHA3_squeeze_prologue:: +DB 1,0,5,00bh +DB 0,074h,1,0 +DB 0,064h,2,0 +DB 0,0b3h +DB 0,0 + DD 0,0 +$L$SEH_info_SHA3_squeeze_body:: +DB 1,0,11,0 +DB 000h,0e4h,004h,000h +DB 000h,0d4h,005h,000h +DB 000h,0c4h,006h,000h +DB 000h,074h,008h,000h +DB 000h,064h,009h,000h +DB 000h,062h +DB 000h,000h,000h,000h,000h,000h +$L$SEH_info_SHA3_squeeze_epilogue:: +DB 1,0,4,0 +DB 000h,074h,001h,000h +DB 000h,064h,002h,000h +DB 000h,000h,000h,000h + + +.xdata ENDS +END diff --git a/crypto/hashes/src/keccakf1600_x86-64-osx.s b/crypto/hashes/src/asm/keccakf1600_x86-64-osx.s similarity index 67% rename from crypto/hashes/src/keccakf1600_x86-64-osx.s rename to crypto/hashes/src/asm/keccakf1600_x86-64-osx.s index 5d9cf039ca..ddb62361f6 100644 --- a/crypto/hashes/src/keccakf1600_x86-64-osx.s +++ b/crypto/hashes/src/asm/keccakf1600_x86-64-osx.s @@ -294,6 +294,7 @@ _KeccakF1600: subq $200,%rsp .cfi_adjust_cfa_offset 200 + notq -92(%rdi) notq -84(%rdi) notq -36(%rdi) @@ -314,30 +315,190 @@ _KeccakF1600: notq 60(%rdi) leaq -100(%rdi),%rdi - addq $200,%rsp -.cfi_adjust_cfa_offset -200 - - popq %r15 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r15 - popq %r14 -.cfi_adjust_cfa_offset -8 -.cfi_restore %r14 - popq %r13 -.cfi_adjust_cfa_offset -8 + leaq 248(%rsp),%r11 +.cfi_def_cfa %r11,8 + movq -48(%r11),%r15 + movq -40(%r11),%r14 + movq -32(%r11),%r13 + movq -24(%r11),%r12 + movq -16(%r11),%rbp + movq -8(%r11),%rbx + leaq (%r11),%rsp +.cfi_restore %r12 .cfi_restore %r13 - popq %r12 -.cfi_adjust_cfa_offset -8 +.cfi_restore %r14 +.cfi_restore %r15 +.cfi_restore %rbp +.cfi_restore %rbx + .byte 0xf3,0xc3 +.cfi_endproc + +.globl _SHA3_absorb + +.p2align 5 +_SHA3_absorb: +.cfi_startproc + .byte 0xf3,0x0f,0x1e,0xfa + + + pushq %rbx +.cfi_adjust_cfa_offset 8 +.cfi_offset %rbx,-16 + pushq %rbp +.cfi_adjust_cfa_offset 8 +.cfi_offset %rbp,-24 + pushq %r12 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r12,-32 + pushq %r13 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r13,-40 + pushq %r14 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r14,-48 + pushq %r15 +.cfi_adjust_cfa_offset 8 +.cfi_offset %r15,-56 + + leaq 100(%rdi),%rdi + subq $232,%rsp +.cfi_adjust_cfa_offset 232 + + + movq %rsi,%r9 + leaq 100(%rsp),%rsi + + notq -92(%rdi) + notq -84(%rdi) + notq -36(%rdi) + notq -4(%rdi) + notq 36(%rdi) + notq 60(%rdi) + leaq iotas(%rip),%r15 + + movq %rcx,216-100(%rsi) + +L$oop_absorb: + cmpq %rcx,%rdx + jc L$done_absorb + + shrq $3,%rcx + leaq -100(%rdi),%r8 + +L$block_absorb: + movq (%r9),%rax + leaq 8(%r9),%r9 + xorq (%r8),%rax + leaq 8(%r8),%r8 + subq $8,%rdx + movq %rax,-8(%r8) + subq $1,%rcx + jnz L$block_absorb + + movq %r9,200-100(%rsi) + movq %rdx,208-100(%rsi) + call __KeccakF1600 + movq 200-100(%rsi),%r9 + movq 208-100(%rsi),%rdx + movq 216-100(%rsi),%rcx + jmp L$oop_absorb + +.p2align 5 +L$done_absorb: + movq %rdx,%rax + + notq -92(%rdi) + notq -84(%rdi) + notq -36(%rdi) + notq -4(%rdi) + notq 36(%rdi) + notq 60(%rdi) + + leaq 280(%rsp),%r11 +.cfi_def_cfa %r11,8 + movq -48(%r11),%r15 + movq -40(%r11),%r14 + movq -32(%r11),%r13 + movq -24(%r11),%r12 + movq -16(%r11),%rbp + movq -8(%r11),%rbx + leaq (%r11),%rsp .cfi_restore %r12 - popq %rbp -.cfi_adjust_cfa_offset -8 +.cfi_restore %r13 +.cfi_restore %r14 +.cfi_restore %r15 .cfi_restore %rbp - popq %rbx -.cfi_adjust_cfa_offset -8 .cfi_restore %rbx .byte 0xf3,0xc3 .cfi_endproc +.globl _SHA3_squeeze + +.p2align 5 
+_SHA3_squeeze:
+.cfi_startproc
+	.byte	0xf3,0x0f,0x1e,0xfa
+
+
+	pushq	%r12
+.cfi_adjust_cfa_offset	8
+.cfi_offset	%r12,-16
+	pushq	%r13
+.cfi_adjust_cfa_offset	8
+.cfi_offset	%r13,-24
+	pushq	%r14
+.cfi_adjust_cfa_offset	8
+.cfi_offset	%r14,-32
+	subq	$32,%rsp
+.cfi_adjust_cfa_offset	32
+
+
+	shrq	$3,%rcx
+	movq	%rdi,%r8
+	movq	%rsi,%r12
+	movq	%rdx,%r13
+	movq	%rcx,%r14
+	jmp	L$oop_squeeze
+
+.p2align	5
+L$oop_squeeze:
+	cmpq	$8,%r13
+	jb	L$tail_squeeze
+
+	movq	(%r8),%rax
+	leaq	8(%r8),%r8
+	movq	%rax,(%r12)
+	leaq	8(%r12),%r12
+	subq	$8,%r13
+	jz	L$done_squeeze
+
+	subq	$1,%rcx
+	jnz	L$oop_squeeze
+
+	movq	%rdi,%rcx
+	call	_KeccakF1600
+	movq	%rdi,%r8
+	movq	%r14,%rcx
+	jmp	L$oop_squeeze
+
+L$tail_squeeze:
+	movq	%r8,%rsi
+	movq	%r12,%rdi
+	movq	%r13,%rcx
+.byte	0xf3,0xa4
+
+L$done_squeeze:
+	movq	32(%rsp),%r14
+	movq	40(%rsp),%r13
+	movq	48(%rsp),%r12
+	addq	$56,%rsp
+.cfi_adjust_cfa_offset	-56
+.cfi_restore	%r12
+.cfi_restore	%r13
+.cfi_restore	%r14
+	.byte	0xf3,0xc3
+.cfi_endproc
+
 .p2align	8
 .quad	0,0,0,0,0,0,0,0
diff --git a/crypto/hashes/src/pow_hashers.rs b/crypto/hashes/src/pow_hashers.rs
index 49c801219d..1465d56076 100644
--- a/crypto/hashes/src/pow_hashers.rs
+++ b/crypto/hashes/src/pow_hashers.rs
@@ -60,13 +60,13 @@ impl KHeavyHash {
     }
 
 mod keccak256 {
-    #[cfg(any(not(target_arch = "x86_64"), feature = "no-asm", target_os = "windows"))]
+    #[cfg(any(not(target_arch = "x86_64"), feature = "no-asm"))]
     #[inline(always)]
     pub(super) fn f1600(state: &mut [u64; 25]) {
         keccak::f1600(state);
     }
 
-    #[cfg(all(target_arch = "x86_64", not(feature = "no-asm"), not(target_os = "windows")))]
+    #[cfg(all(target_arch = "x86_64", not(feature = "no-asm")))]
    #[inline(always)]
     pub(super) fn f1600(state: &mut [u64; 25]) {
         extern "C" {
diff --git a/crypto/txscript/examples/kip-10.rs b/crypto/txscript/examples/kip-10.rs
index e6c887340d..f1e80fd147 100644
--- a/crypto/txscript/examples/kip-10.rs
+++ b/crypto/txscript/examples/kip-10.rs
@@ -126,8 +126,7 @@ fn threshold_scenario() -> ScriptBuilderResult<()> {
         }
 
         let tx = tx.as_verifiable();
-        let mut vm =
-            TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false);
+        let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache);
         assert_eq!(vm.execute(), Ok(()));
         println!("[STANDARD] Owner branch execution successful");
     }
@@ -137,8 +136,7 @@ fn threshold_scenario() -> ScriptBuilderResult<()> {
         println!("[STANDARD] Checking borrower branch");
         tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain();
         let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]);
-        let mut vm =
-
TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); println!("[STANDARD] Borrower branch execution successful"); } @@ -149,8 +147,7 @@ fn threshold_scenario() -> ScriptBuilderResult<()> { // Less than threshold tx.outputs[0].value -= 1; let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(EvalFalse)); println!("[STANDARD] Borrower branch with threshold not reached failed as expected"); } @@ -298,8 +295,7 @@ fn threshold_scenario_limited_one_time() -> ScriptBuilderResult<()> { } let tx = tx.as_verifiable(); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); println!("[ONE-TIME] Owner branch execution successful"); } @@ -309,8 +305,7 @@ fn threshold_scenario_limited_one_time() -> ScriptBuilderResult<()> { println!("[ONE-TIME] Checking borrower branch"); tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain(); let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); println!("[ONE-TIME] Borrower branch execution successful"); } @@ -321,8 +316,7 @@ fn threshold_scenario_limited_one_time() -> ScriptBuilderResult<()> { // Less than threshold tx.outputs[0].value -= 1; let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(EvalFalse)); println!("[ONE-TIME] Borrower branch with threshold not reached failed as expected"); } @@ -344,16 +338,8 @@ fn threshold_scenario_limited_one_time() -> ScriptBuilderResult<()> { wrong_tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain(); let wrong_tx = PopulatedTransaction::new(&wrong_tx, vec![utxo_entry.clone()]); - let mut vm = TxScriptEngine::from_transaction_input( - &wrong_tx, - &wrong_tx.tx.inputs[0], - 0, - &utxo_entry, - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&wrong_tx, &wrong_tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(VerifyError)); println!("[ONE-TIME] Borrower branch with output going to wrong address failed as expected"); } @@ -462,8 +448,7 @@ fn threshold_scenario_limited_2_times() -> ScriptBuilderResult<()> { } let tx = tx.as_verifiable(); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, 
&tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); println!("[TWO-TIMES] Owner branch execution successful"); } @@ -473,8 +458,7 @@ fn threshold_scenario_limited_2_times() -> ScriptBuilderResult<()> { println!("[TWO-TIMES] Checking borrower branch (first borrowing)"); tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&two_times_script)?.drain(); let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); println!("[TWO-TIMES] Borrower branch (first borrowing) execution successful"); } @@ -485,8 +469,7 @@ fn threshold_scenario_limited_2_times() -> ScriptBuilderResult<()> { // Less than threshold tx.outputs[0].value -= 1; let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(EvalFalse)); println!("[TWO-TIMES] Borrower branch with threshold not reached failed as expected"); } @@ -508,16 +491,8 @@ fn threshold_scenario_limited_2_times() -> ScriptBuilderResult<()> { wrong_tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&two_times_script)?.drain(); let wrong_tx = PopulatedTransaction::new(&wrong_tx, vec![utxo_entry.clone()]); - let mut vm = TxScriptEngine::from_transaction_input( - &wrong_tx, - &wrong_tx.tx.inputs[0], - 0, - &utxo_entry, - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&wrong_tx, &wrong_tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(VerifyError)); println!("[TWO-TIMES] Borrower branch with output going to wrong address failed as expected"); } @@ -628,8 +603,7 @@ fn shared_secret_scenario() -> ScriptBuilderResult<()> { } let tx = tx.as_verifiable(); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); println!("[SHARED-SECRET] Owner branch execution successful"); } @@ -647,8 +621,7 @@ fn shared_secret_scenario() -> ScriptBuilderResult<()> { } let tx = tx.as_verifiable(); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); println!("[SHARED-SECRET] Borrower branch with correct shared secret execution successful"); } @@ -666,8 +639,7 @@ fn shared_secret_scenario() -> ScriptBuilderResult<()> { } let tx = tx.as_verifiable(); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true, false); + let mut vm = 
TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache);
assert_eq!(vm.execute(), Err(VerifyError));
println!("[SHARED-SECRET] Borrower branch with incorrect secret failed as expected");
}
diff --git a/crypto/txscript/src/data_stack.rs b/crypto/txscript/src/data_stack.rs
index 898550fec2..9c5b2d1116 100644
--- a/crypto/txscript/src/data_stack.rs
+++ b/crypto/txscript/src/data_stack.rs
@@ -155,23 +155,14 @@ fn deserialize_i64(v: &[u8]) -> Result<i64, TxScriptError> {
}
}
-// TODO: Rename to DefaultSizedEncodeInt when KIP-10 is activated
-pub type Kip10I64 = SizedEncodeInt<8>;
-
impl OpcodeData<i64> for Vec<u8> {
#[inline]
fn deserialize(&self) -> Result<i64, TxScriptError> {
- // TODO: Change LEN to 8 once KIP-10 is activated
- OpcodeData::<SizedEncodeInt<4>>::deserialize(self).map(i64::from)
+ OpcodeData::<SizedEncodeInt<8>>::deserialize(self).map(i64::from)
}
#[inline]
fn serialize(from: &i64) -> Result<Vec<u8>, SerializationError> {
- // Note that serialization and deserialization use different LEN.
- // This is because prior to KIP-10, only deserialization size was limited.
- // It's safe to use 8 here because i32 arithmetic operations (which were the
- // only ones that were supported prior to KIP-10) can't get to i64::MIN
- // (the only i64 value that requires more than 8 bytes to serialize).
OpcodeData::<SizedEncodeInt<8>>::serialize(&(*from).into())
}
}
@@ -343,7 +334,7 @@ impl DataStack for Stack {
#[cfg(test)]
mod tests {
- use super::{Kip10I64, OpcodeData};
+ use super::OpcodeData;
use crate::data_stack::SizedEncodeInt;
use kaspa_txscript_errors::{SerializationError, TxScriptError};
@@ -449,100 +440,6 @@ mod tests {
TestCase::<i64> { serialized: hex::decode("00008080").expect("failed parsing hex"), result: Ok(-8388608) },
TestCase::<i64> { serialized: hex::decode("ffffff7f").expect("failed parsing hex"), result: Ok(2147483647) },
TestCase::<i64> { serialized: hex::decode("ffffffff").expect("failed parsing hex"), result: Ok(-2147483647) },
- /*
- TestCase::{serialized: hex::decode("ffffffffffffff7f").expect("failed parsing hex"), num_len: 8, result: Ok(9223372036854775807)},
- TestCase::{serialized: hex::decode("ffffffffffffffff").expect("failed parsing hex"), num_len: 8, result: Ok(-9223372036854775807)},*/
- // Minimally encoded values that are out of range for data that
- // is interpreted as script numbers with the minimal encoding
- // flag set. Should error and return 0.
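// For orientation: script numbers are minimally-encoded little-endian
// sign-and-magnitude values, with the sign carried in the high bit of the
// final byte. A magnitude whose top byte already has that bit set therefore
// needs one extra byte, which is why (in the cases below) 2147483648 =
// 0x80000000 serializes to the five bytes [0x00, 0x00, 0x00, 0x80, 0x00],
// while -2147483648 differs only in its final byte (0x80 instead of 0x00).
// Under the old 4-byte deserialization limit every such 5-to-8-byte value
// failed with NumberTooBig; with the 8-byte default above they parse, so the
// error cases below are deleted and reappear as Ok cases in kip10_tests.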
- TestCase::<i64> {
- serialized: hex::decode("0000008000").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [0, 0, 0, 80, 0] is 5 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("0000008080").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [0, 0, 0, 80, 80] is 5 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("0000009000").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [0, 0, 0, 90, 0] is 5 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("0000009080").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [0, 0, 0, 90, 80] is 5 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("ffffffff00").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [ff, ff, ff, ff, 0] is 5 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("ffffffff80").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [ff, ff, ff, ff, 80] is 5 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("0000000001").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [0, 0, 0, 0, 1] is 5 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("0000000081").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [0, 0, 0, 0, 81] is 5 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("ffffffffffff00").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [ff, ff, ff, ff, ff, ff, 0] is 7 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("ffffffffffff80").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [ff, ff, ff, ff, ff, ff, 80] is 7 bytes which exceeds the max allowed of 4".to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("ffffffffffffff00").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [ff, ff, ff, ff, ff, ff, ff, 0] is 8 bytes which exceeds the max allowed of 4"
- .to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("ffffffffffffff80").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [ff, ff, ff, ff, ff, ff, ff, 80] is 8 bytes which exceeds the max allowed of 4"
- .to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("ffffffffffffff7f").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [ff, ff, ff, ff, ff, ff, ff, 7f] is 8 bytes which exceeds the max allowed of 4"
- .to_string(),
- )),
- },
- TestCase::<i64> {
- serialized: hex::decode("ffffffffffffffff").expect("failed parsing hex"),
- result: Err(TxScriptError::NumberTooBig(
- "numeric value encoded as [ff, ff, ff, ff, ff, ff, ff, ff] is 8 bytes which exceeds the max allowed of 4"
- .to_string(),
- )),
- },
// Non-minimally encoded, but otherwise valid values with
// minimal encoding flag. Should error and return 0.
TestCase::<i64> {
@@ -602,66 +499,39 @@ mod tests {
// Values above 8 bytes should always return error
];
let kip10_tests = vec![
- TestCase::<Kip10I64> {
- serialized: hex::decode("0000008000").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(2147483648i64)),
- },
- TestCase::<Kip10I64> {
- serialized: hex::decode("0000008080").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(-2147483648i64)),
- },
- TestCase::<Kip10I64> {
- serialized: hex::decode("0000009000").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(2415919104i64)),
- },
- TestCase::<Kip10I64> {
- serialized: hex::decode("0000009080").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(-2415919104i64)),
- },
- TestCase::<Kip10I64> {
- serialized: hex::decode("ffffffff00").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(4294967295i64)),
- },
- TestCase::<Kip10I64> {
- serialized: hex::decode("ffffffff80").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(-4294967295i64)),
- },
- TestCase::<Kip10I64> {
- serialized: hex::decode("0000000001").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(4294967296i64)),
- },
- TestCase::<Kip10I64> {
- serialized: hex::decode("0000000081").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(-4294967296i64)),
- },
- TestCase::<Kip10I64> {
- serialized: hex::decode("ffffffffffff00").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(281474976710655i64)),
- },
- TestCase::<Kip10I64> {
+ TestCase::<i64> { serialized: hex::decode("0000008000").expect("failed parsing hex"), result: Ok(2147483648i64) },
+ TestCase::<i64> { serialized: hex::decode("0000008080").expect("failed parsing hex"), result: Ok(-2147483648i64) },
+ TestCase::<i64> { serialized: hex::decode("0000009000").expect("failed parsing hex"), result: Ok(2415919104i64) },
+ TestCase::<i64> { serialized: hex::decode("0000009080").expect("failed parsing hex"), result: Ok(-2415919104i64) },
+ TestCase::<i64> { serialized: hex::decode("ffffffff00").expect("failed parsing hex"), result: Ok(4294967295i64) },
+ TestCase::<i64> { serialized: hex::decode("ffffffff80").expect("failed parsing hex"), result: Ok(-4294967295i64) },
+ TestCase::<i64> { serialized: hex::decode("0000000001").expect("failed parsing hex"), result: Ok(4294967296i64) },
+ TestCase::<i64> { serialized: hex::decode("0000000081").expect("failed parsing hex"), result: Ok(-4294967296i64) },
+ TestCase::<i64> { serialized: hex::decode("ffffffffffff00").expect("failed parsing hex"), result: Ok(281474976710655i64) },
+ TestCase::<i64> {
serialized: hex::decode("ffffffffffff80").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(-281474976710655i64)),
+ result: Ok(-281474976710655i64),
},
- TestCase::<Kip10I64> {
+ TestCase::<i64> {
serialized: hex::decode("ffffffffffffff00").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(72057594037927935i64)),
+ result: Ok(72057594037927935i64),
},
- TestCase::<Kip10I64> {
+ TestCase::<i64> {
serialized: hex::decode("ffffffffffffff80").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(-72057594037927935i64)),
+ result: Ok(-72057594037927935i64),
},
- TestCase::<Kip10I64> {
+ TestCase::<i64> {
serialized: hex::decode("ffffffffffffff7f").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(9223372036854775807i64)),
+ result: Ok(9223372036854775807i64),
},
- TestCase::<Kip10I64> {
+ TestCase::<i64> {
serialized: hex::decode("ffffffffffffffff").expect("failed parsing hex"),
- result: Ok(Kip10I64::from(-9223372036854775807i64)),
+ result: Ok(-9223372036854775807i64),
},
// Minimally encoded values that are out of range for data that
// is interpreted as script numbers with the minimal encoding
// flag set. Should error and return 0.
- TestCase::<Kip10I64> {
+ TestCase::<i64> {
serialized: hex::decode("000000000000008080").expect("failed parsing hex"),
result: Err(TxScriptError::NumberTooBig(
"numeric value encoded as [0, 0, 0, 0, 0, 0, 0, 80, 80] is 9 bytes which exceeds the max allowed of 8".to_string(),
diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs
index 0ce563c8de..d19100940b 100644
--- a/crypto/txscript/src/lib.rs
+++ b/crypto/txscript/src/lib.rs
@@ -32,7 +32,7 @@ use script_class::ScriptClass;
pub mod prelude {
pub use super::standard::*;
}
-use crate::runtime_sig_op_counter::{RuntimeSigOpCounter, SigOpConsumer};
+use crate::runtime_sig_op_counter::RuntimeSigOpCounter;
pub use standard::*;
pub const MAX_SCRIPT_PUBLIC_KEY_VERSION: u16 = 0;
@@ -90,8 +90,7 @@ pub struct TxScriptEngine<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> {
cond_stack: Vec<OpCond>, // Following if stacks, and whether it is running
num_ops: i32,
- kip10_enabled: bool,
- runtime_sig_op_counter: Option<RuntimeSigOpCounter>,
+ runtime_sig_op_counter: RuntimeSigOpCounter,
}
fn parse_script(
@@ -131,7 +130,7 @@
/// # Returns
/// * `Ok(u8)` - The exact number of signature operations executed
/// * `Err(TxScriptError)` - If script execution fails or input index is invalid
-pub fn get_sig_op_count<T: VerifiableTransaction>(tx: &T, input_idx: usize, kip10_enabled: bool) -> Result<u8, TxScriptError> {
+pub fn get_sig_op_count<T: VerifiableTransaction>(tx: &T, input_idx: usize) -> Result<u8, TxScriptError> {
let sig_cache = Cache::new(0);
let reused_values = SigHashReusedValuesUnsync::new();
let mut vm = TxScriptEngine::from_transaction_input(
@@ -141,11 +140,9 @@
tx.utxo(input_idx).ok_or_else(|| TxScriptError::InvalidInputIndex(input_idx as i32, tx.inputs().len()))?,
&reused_values,
&sig_cache,
- kip10_enabled,
- true,
);
vm.execute()?;
- Ok(vm.used_sig_ops().unwrap())
+ Ok(vm.used_sig_ops())
}
/// Calculates an upper bound of signature operations in a script without executing it.
@@ -225,7 +222,7 @@ pub fn is_unspendable(scr
}
impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<'a, T, Reused> {
- pub fn new(reused_values: &'a Reused, sig_cache: &'a Cache<SigCacheKey, bool>, kip10_enabled: bool) -> Self {
+ pub fn new(reused_values: &'a Reused, sig_cache: &'a Cache<SigCacheKey, bool>) -> Self {
Self {
dstack: vec![],
astack: vec![],
@@ -234,16 +231,13 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<'
sig_cache,
cond_stack: vec![],
num_ops: 0,
- kip10_enabled,
- runtime_sig_op_counter: None,
+ runtime_sig_op_counter: RuntimeSigOpCounter::new(u8::MAX),
}
}
- /// Returns the number of signature operations used in script execution if runtime sig op counting is enabled.
- ///
- /// Returns None if runtime signature operation counting is disabled.
- pub fn used_sig_ops(&self) -> Option<u8> {
- self.runtime_sig_op_counter.as_ref().map(|counter| counter.used_sig_ops())
+ /// Returns the number of signature operations used in script execution.
+ pub fn used_sig_ops(&self) -> u8 {
+ self.runtime_sig_op_counter.used_sig_ops()
}
/// Creates a new Script Engine for validating transaction input.
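// A caller-side sketch of the tightened API above (hypothetical variable
// names): exact sig-op counting no longer takes kip10/runtime flags, and the
// count comes back as a plain u8 rather than Option<u8>:
//
//     let used: u8 = get_sig_op_count(&verifiable_tx, 0)?;
//
// As shown above, this builds an engine with from_transaction_input, runs
// execute(), and reads back used_sig_ops().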
@@ -269,8 +263,6 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' utxo_entry: &'a UtxoEntry, reused_values: &'a Reused, sig_cache: &'a Cache, - kip10_enabled: bool, - runtime_sig_op_counting: bool, ) -> Self { let script_public_key = utxo_entry.script_public_key.script(); // The script_public_key in P2SH is just validating the hash on the OpMultiSig script @@ -285,17 +277,11 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' sig_cache, cond_stack: Default::default(), num_ops: 0, - kip10_enabled, - runtime_sig_op_counter: runtime_sig_op_counting.then_some(RuntimeSigOpCounter::new(input.sig_op_count)), + runtime_sig_op_counter: RuntimeSigOpCounter::new(input.sig_op_count), } } - pub fn from_script( - script: &'a [u8], - reused_values: &'a Reused, - sig_cache: &'a Cache, - kip10_enabled: bool, - ) -> Self { + pub fn from_script(script: &'a [u8], reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { Self { dstack: Default::default(), astack: Default::default(), @@ -304,9 +290,8 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' sig_cache, cond_stack: Default::default(), num_ops: 0, - kip10_enabled, // Runtime sig op counting is not needed for standalone scripts, only inputs have sig op count value - runtime_sig_op_counter: None, + runtime_sig_op_counter: RuntimeSigOpCounter::new(u8::MAX), } } @@ -699,21 +684,9 @@ mod tests { let utxo_entry = UtxoEntry::new(output.value, output.script_public_key.clone(), 0, tx.is_coinbase()); let populated_tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - [false, true].into_iter().for_each(|kip10_enabled| { - [false, true].into_iter().for_each(|runtime_sig_op_counting| { - let mut vm = TxScriptEngine::from_transaction_input( - &populated_tx, - &input, - 0, - &utxo_entry, - &reused_values, - &sig_cache, - kip10_enabled, - runtime_sig_op_counting, - ); - assert_eq!(vm.execute(), test.expected_result); - }); - }); + + let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &reused_values, &sig_cache); + assert_eq!(vm.execute(), test.expected_result); } } @@ -1271,10 +1244,9 @@ mod tests { // Execute script let tx = tx.as_verifiable(); - let mut vm = - TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, false, true); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache); - let result = vm.execute().map(|_| vm.used_sig_ops().unwrap()); + let result = vm.execute().map(|_| vm.used_sig_ops()); match (result, test.should_pass) { (Ok(count), true) => { @@ -1370,7 +1342,7 @@ mod bitcoind_tests { } impl JsonTestRow { - fn test_row(&self, kip10_enabled: bool, runtime_sig_op_counting: bool) -> Result<(), TestError> { + fn test_row(&self) -> Result<(), TestError> { // Parse test to objects let (sig_script, script_pub_key, expected_result) = match self.clone() { JsonTestRow::Test(sig_script, sig_pub_key, _, expected_result) => (sig_script, sig_pub_key, expected_result), @@ -1382,7 +1354,7 @@ mod bitcoind_tests { } }; - let result = Self::run_test(sig_script, script_pub_key, kip10_enabled, runtime_sig_op_counting); + let result = Self::run_test(sig_script, script_pub_key); match Self::result_name(result.clone()).contains(&expected_result.as_str()) { true => Ok(()), @@ -1390,12 +1362,7 @@ mod bitcoind_tests { } } - fn run_test( - sig_script: String, - script_pub_key: String, - kip10_enabled: bool, - 
runtime_sig_op_counting: bool,
- ) -> Result<(), UnifiedError> {
+ fn run_test(sig_script: String, script_pub_key: String) -> Result<(), UnifiedError> {
let script_sig = opcodes::parse_short_form(sig_script).map_err(UnifiedError::ScriptBuilderError)?;
let script_pub_key =
ScriptPublicKey::from_vec(0, opcodes::parse_short_form(script_pub_key).map_err(UnifiedError::ScriptBuilderError)?);
@@ -1415,8 +1382,6 @@ mod bitcoind_tests {
&populated_tx.entries[0],
&reused_values,
&sig_cache,
- kip10_enabled,
- runtime_sig_op_counting,
);
vm.execute().map_err(UnifiedError::TxScriptError)
}
@@ -1497,10 +1462,9 @@
#[test]
fn test_bitcoind_tests() {
- // Script test files are split into two versions to test behavior before and after KIP-10:
+ // Script tests now cover post-KIP-10 behavior only:
//
- // - script_tests.json: Tests basic script functionality with KIP-10 disabled (kip10_enabled=false)
- // - script_tests-kip10.json: Tests expanded functionality with KIP-10 enabled (kip10_enabled=true)
+ // - script_tests.json: Tests expanded functionality with KIP-10 enabled
//
// KIP-10 introduces two major changes:
//
@@ -1520,19 +1484,16 @@ mod bitcoind_tests {
// When KIP-10 is disabled (pre-activation), the new opcodes will return an InvalidOpcode error
// and arithmetic is limited to 4 bytes. When enabled, scripts gain full access to transaction
// data and 8-byte arithmetic capabilities.
- for runtime_sig_op_counting in [false, true] {
- for (file_name, kip10_enabled) in [("script_tests.json", false), ("script_tests-kip10.json", true)] {
- let file = File::open(Path::new(env!("CARGO_MANIFEST_DIR")).join("test-data").join(file_name))
- .expect("Could not find test file");
- let reader = BufReader::new(file);
-
- // Read the JSON contents of the file as an instance of `User`.
- let tests: Vec<JsonTestRow> = serde_json::from_reader(reader).expect("Failed Parsing {:?}");
- for row in tests {
- if let Err(error) = row.test_row(kip10_enabled, runtime_sig_op_counting) {
- panic!("Test: {:?} failed for {}: {:?}", row.clone(), file_name, error);
- }
- }
+ let file_name = "script_tests.json";
+ let file =
+ File::open(Path::new(env!("CARGO_MANIFEST_DIR")).join("test-data").join(file_name)).expect("Could not find test file");
+ let reader = BufReader::new(file);
+
+ // Read the JSON contents of the file as a list of test rows.
+ let tests: Vec<JsonTestRow> = serde_json::from_reader(reader).expect("Failed Parsing {:?}");
+ for row in tests {
+ if let Err(error) = row.test_row() {
+ panic!("Test: {:?} failed for {}: {:?}", row.clone(), file_name, error);
}
}
}
diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs
index fde1cfa95f..c970841532 100644
--- a/crypto/txscript/src/opcodes/mod.rs
+++ b/crypto/txscript/src/opcodes/mod.rs
@@ -2,7 +2,7 @@
mod macros;
use crate::{
- data_stack::{DataStack, Kip10I64, OpcodeData},
+ data_stack::{DataStack, OpcodeData},
ScriptSource, SpkEncoding, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD, MAX_TX_IN_SEQUENCE_NUM, NO_COST_OPCODE,
SEQUENCE_LOCK_TIME_DISABLED, SEQUENCE_LOCK_TIME_MASK,
};
@@ -216,27 +216,6 @@ fn push_number(
Ok(())
}
-/// This macro helps to avoid code duplication in numeric opcodes where the only difference
-/// between KIP10_ENABLED and disabled states is the numeric type used (Kip10I64 vs i64).
-/// KIP10I64 deserializator supports 8-byte integers
-// TODO: Remove this macro after KIP-10 activation.
-macro_rules!
numeric_op { - ($vm: expr, $pattern: pat, $count: expr, $block: expr) => { - if $vm.kip10_enabled { - let $pattern: [Kip10I64; $count] = $vm.dstack.pop_items()?; - let r = $block; - $vm.dstack.push_item(r)?; - Ok(()) - } else { - let $pattern: [i64; $count] = $vm.dstack.pop_items()?; - #[allow(clippy::useless_conversion)] - let r = $block; - $vm.dstack.push_item(r)?; - Ok(()) - } - }; -} - /* The following is the implementation and metadata of all opcodes. Each opcode has unique number (and template system makes it impossible to use two opcodes), length specification, @@ -589,38 +568,62 @@ opcode_list! { // Numeric related opcodes. opcode Op1Add<0x8b, 1>(self, vm) { - numeric_op!(vm, [value], 1, value.checked_add(1).ok_or_else(|| TxScriptError::NumberTooBig("Result of addition exceeds 64-bit signed integer range".to_string()))?) + let [value]: [i64; 1] = vm.dstack.pop_items()?; + let r = value.checked_add(1).ok_or_else(|| TxScriptError::NumberTooBig("Result of addition exceeds 64-bit signed integer range".to_string()))?; + vm.dstack.push_item(r)?; + Ok(()) } opcode Op1Sub<0x8c, 1>(self, vm) { - numeric_op!(vm, [value], 1, value.checked_sub(1).ok_or_else(|| TxScriptError::NumberTooBig("Result of subtraction exceeds 64-bit signed integer range".to_string()))?) + let [value]: [i64; 1] = vm.dstack.pop_items()?; + let r = value.checked_sub(1).ok_or_else(|| TxScriptError::NumberTooBig("Result of subtraction exceeds 64-bit signed integer range".to_string()))?; + vm.dstack.push_item(r)?; + Ok(()) } opcode Op2Mul<0x8d, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode Op2Div<0x8e, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode OpNegate<0x8f, 1>(self, vm) { - numeric_op!(vm, [value], 1, value.checked_neg().ok_or_else(|| TxScriptError::NumberTooBig("Negation result exceeds 64-bit signed integer range".to_string()))?) + let [value]: [i64; 1] = vm.dstack.pop_items()?; + let r = value.checked_neg().ok_or_else(|| TxScriptError::NumberTooBig("Negation result exceeds 64-bit signed integer range".to_string()))?; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpAbs<0x90, 1>(self, vm) { - numeric_op!(vm, [value], 1, value.checked_abs().ok_or_else(|| TxScriptError::NumberTooBig("Absolute value exceeds 64-bit signed integer range".to_string()))?) + let [ value ]: [i64; 1] = vm.dstack.pop_items()?; + let r = value.checked_abs().ok_or_else(|| TxScriptError::NumberTooBig("Absolute value exceeds 64-bit signed integer range".to_string()))?; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpNot<0x91, 1>(self, vm) { - numeric_op!(vm, [m], 1, (m == 0) as i64) + let [ m ]: [i64; 1] = vm.dstack.pop_items()?; + let r = (m == 0) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode Op0NotEqual<0x92, 1>(self, vm) { - numeric_op!(vm, [m], 1, (m != 0) as i64) + let [ m ]: [i64; 1] = vm.dstack.pop_items()?; + let r = (m != 0) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpAdd<0x93, 1>(self, vm) { - numeric_op!(vm, [a,b], 2, a.checked_add(b.into()).ok_or_else(|| TxScriptError::NumberTooBig("Sum exceeds 64-bit signed integer range".to_string()))?) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = a.checked_add(b).ok_or_else(|| TxScriptError::NumberTooBig("Sum exceeds 64-bit signed integer range".to_string()))?; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpSub<0x94, 1>(self, vm) { - numeric_op!(vm, [a,b], 2, a.checked_sub(b.into()).ok_or_else(|| TxScriptError::NumberTooBig("Difference exceeds 64-bit signed integer range".to_string()))?) 
+ let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = a.checked_sub(b).ok_or_else(|| TxScriptError::NumberTooBig("Difference exceeds 64-bit signed integer range".to_string()))?; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpMul<0x95, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) @@ -630,63 +633,88 @@ opcode_list! { opcode OpRShift<0x99, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode OpBoolAnd<0x9a, 1>(self, vm) { - numeric_op!(vm, [a,b], 2, ((a != 0) && (b != 0)) as i64) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = ((a != 0) && (b != 0)) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpBoolOr<0x9b, 1>(self, vm) { - numeric_op!(vm, [a,b], 2, ((a != 0) || (b != 0)) as i64) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = ((a != 0) || (b != 0)) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpNumEqual<0x9c, 1>(self, vm) { - numeric_op!(vm, [a,b], 2, (a == b) as i64) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = (a == b) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpNumEqualVerify<0x9d, 1>(self, vm) { - if vm.kip10_enabled { - let [a,b]: [Kip10I64; 2] = vm.dstack.pop_items()?; - match a == b { - true => Ok(()), - false => Err(TxScriptError::VerifyError) - } - } else { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - match a == b { - true => Ok(()), - false => Err(TxScriptError::VerifyError) - } + let [a,b]: [i64; 2] = vm.dstack.pop_items()?; + match a == b { + true => Ok(()), + false => Err(TxScriptError::VerifyError) } } opcode OpNumNotEqual<0x9e, 1>(self, vm) { - numeric_op!(vm, [a, b], 2, (a != b) as i64) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = (a != b) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpLessThan<0x9f, 1>(self, vm) { - numeric_op!(vm, [a, b], 2, (a < b) as i64) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = (a < b) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpGreaterThan<0xa0, 1>(self, vm) { - numeric_op!(vm, [a, b], 2, (a > b) as i64) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = (a > b) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpLessThanOrEqual<0xa1, 1>(self, vm) { - numeric_op!(vm, [a, b], 2, (a <= b) as i64) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = (a <= b) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpGreaterThanOrEqual<0xa2, 1>(self, vm) { - numeric_op!(vm, [a, b], 2, (a >= b) as i64) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = (a >= b) as i64; + vm.dstack.push_item(r)?; + Ok(()) } opcode OpMin<0xa3, 1>(self, vm) { - numeric_op!(vm, [a, b], 2, a.min(b)) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = a.min(b); + vm.dstack.push_item(r)?; + Ok(()) } opcode OpMax<0xa4, 1>(self, vm) { - numeric_op!(vm, [a, b], 2, a.max(b)) + let [ a, b ]: [i64; 2] = vm.dstack.pop_items()?; + let r = a.max(b); + vm.dstack.push_item(r)?; + Ok(()) } opcode OpWithin<0xa5, 1>(self, vm) { - numeric_op!(vm, [x,l,u], 3, (x >= l && x < u) as i64) + let [ x, l, u ]: [i64; 3] = vm.dstack.pop_items()?; + let r = (x >= l && x < u) as i64; + vm.dstack.push_item(r)?; + Ok(()) } // Undefined opcodes. @@ -881,27 +909,19 @@ opcode_list! 
{ // Transaction level opcodes (following Transaction struct field order) opcode OpTxVersion<0xb2, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) opcode OpTxInputCount<0xb3, 1>(self, vm) { - if vm.kip10_enabled { - match vm.script_source { - ScriptSource::TxInput{tx, ..} => { - push_number(tx.inputs().len() as i64, vm) - }, - _ => Err(TxScriptError::InvalidSource("OpInputCount only applies to transaction inputs".to_string())) - } - } else { - Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + push_number(tx.inputs().len() as i64, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputCount only applies to transaction inputs".to_string())) } } opcode OpTxOutputCount<0xb4, 1>(self, vm) { - if vm.kip10_enabled { - match vm.script_source { - ScriptSource::TxInput{tx, ..} => { - push_number(tx.outputs().len() as i64, vm) - }, - _ => Err(TxScriptError::InvalidSource("OpOutputCount only applies to transaction inputs".to_string())) - } - } else { - Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + push_number(tx.outputs().len() as i64, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpOutputCount only applies to transaction inputs".to_string())) } } opcode OpTxLockTime<0xb5, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) @@ -910,15 +930,11 @@ opcode_list! { opcode OpTxPayload<0xb8, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) // Input related opcodes (following TransactionInput struct field order) opcode OpTxInputIndex<0xb9, 1>(self, vm) { - if vm.kip10_enabled { - match vm.script_source { - ScriptSource::TxInput{idx, ..} => { - push_number(idx as i64, vm) - }, - _ => Err(TxScriptError::InvalidSource("OpInputIndex only applies to transaction inputs".to_string())) - } - } else { - Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + match vm.script_source { + ScriptSource::TxInput{idx, ..} => { + push_number(idx as i64, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputIndex only applies to transaction inputs".to_string())) } } opcode OpOutpointTxId<0xba, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) @@ -927,72 +943,56 @@ opcode_list! 
{ opcode OpTxInputSeq<0xbd, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) // UTXO related opcodes (following UtxoEntry struct field order) opcode OpTxInputAmount<0xbe, 1>(self, vm) { - if vm.kip10_enabled { - match vm.script_source { - ScriptSource::TxInput{tx, ..} => { - let [idx]: [i32; 1] = vm.dstack.pop_items()?; - let utxo = usize::try_from(idx).ok() - .and_then(|idx| tx.utxo(idx)) - .ok_or_else(|| TxScriptError::InvalidInputIndex(idx, tx.inputs().len()))?; - push_number(utxo.amount.try_into().map_err(|e: TryFromIntError| TxScriptError::NumberTooBig(e.to_string()))?, vm) - }, - _ => Err(TxScriptError::InvalidSource("OpInputAmount only applies to transaction inputs".to_string())) - } - } else { - Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let utxo = usize::try_from(idx).ok() + .and_then(|idx| tx.utxo(idx)) + .ok_or_else(|| TxScriptError::InvalidInputIndex(idx, tx.inputs().len()))?; + push_number(utxo.amount.try_into().map_err(|e: TryFromIntError| TxScriptError::NumberTooBig(e.to_string()))?, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputAmount only applies to transaction inputs".to_string())) } } opcode OpTxInputSpk<0xbf, 1>(self, vm) { - if vm.kip10_enabled { - match vm.script_source { - ScriptSource::TxInput{tx, ..} => { - let [idx]: [i32; 1] = vm.dstack.pop_items()?; - let utxo = usize::try_from(idx).ok() - .and_then(|idx| tx.utxo(idx)) - .ok_or_else(|| TxScriptError::InvalidInputIndex(idx, tx.inputs().len()))?; - vm.dstack.push(utxo.script_public_key.to_bytes()); - Ok(()) - }, - _ => Err(TxScriptError::InvalidSource("OpInputSpk only applies to transaction inputs".to_string())) - } - } else { - Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let utxo = usize::try_from(idx).ok() + .and_then(|idx| tx.utxo(idx)) + .ok_or_else(|| TxScriptError::InvalidInputIndex(idx, tx.inputs().len()))?; + vm.dstack.push(utxo.script_public_key.to_bytes()); + Ok(()) + }, + _ => Err(TxScriptError::InvalidSource("OpInputSpk only applies to transaction inputs".to_string())) } } opcode OpTxInputBlockDaaScore<0xc0, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) opcode OpTxInputIsCoinbase<0xc1, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) // Output related opcodes (following TransactionOutput struct field order) opcode OpTxOutputAmount<0xc2, 1>(self, vm) { - if vm.kip10_enabled { - match vm.script_source { - ScriptSource::TxInput{tx, ..} => { - let [idx]: [i32; 1] = vm.dstack.pop_items()?; - let output = usize::try_from(idx).ok() - .and_then(|idx| tx.outputs().get(idx)) - .ok_or_else(|| TxScriptError::InvalidOutputIndex(idx, tx.inputs().len()))?; - push_number(output.value.try_into().map_err(|e: TryFromIntError| TxScriptError::NumberTooBig(e.to_string()))?, vm) - }, - _ => Err(TxScriptError::InvalidSource("OpOutputAmount only applies to transaction inputs".to_string())) - } - } else { - Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let output = usize::try_from(idx).ok() + .and_then(|idx| tx.outputs().get(idx)) + .ok_or_else(|| TxScriptError::InvalidOutputIndex(idx, tx.inputs().len()))?; + push_number(output.value.try_into().map_err(|e: TryFromIntError| 
TxScriptError::NumberTooBig(e.to_string()))?, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpOutputAmount only applies to transaction inputs".to_string())) } } opcode OpTxOutputSpk<0xc3, 1>(self, vm) { - if vm.kip10_enabled { - match vm.script_source { - ScriptSource::TxInput{tx, ..} => { - let [idx]: [i32; 1] = vm.dstack.pop_items()?; - let output = usize::try_from(idx).ok() - .and_then(|idx| tx.outputs().get(idx)) - .ok_or_else(|| TxScriptError::InvalidOutputIndex(idx, tx.inputs().len()))?; - vm.dstack.push(output.script_public_key.to_bytes()); - Ok(()) - }, - _ => Err(TxScriptError::InvalidSource("OpOutputSpk only applies to transaction inputs".to_string())) - } - } else { - Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let output = usize::try_from(idx).ok() + .and_then(|idx| tx.outputs().get(idx)) + .ok_or_else(|| TxScriptError::InvalidOutputIndex(idx, tx.inputs().len()))?; + vm.dstack.push(output.script_public_key.to_bytes()); + Ok(()) + }, + _ => Err(TxScriptError::InvalidSource("OpOutputSpk only applies to transaction inputs".to_string())) } } // Undefined opcodes @@ -1102,12 +1102,10 @@ mod test { let cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); for TestCase { init, code, dstack } in tests { - [false, true].into_iter().for_each(|kip10_enabled| { - let mut vm = TxScriptEngine::new(&reused_values, &cache, kip10_enabled); - vm.dstack = init.clone(); - code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); - assert_eq!(*vm.dstack, dstack, "OpCode {} Pushed wrong value", code.value()); - }); + let mut vm = TxScriptEngine::new(&reused_values, &cache); + vm.dstack = init.clone(); + code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); + assert_eq!(*vm.dstack, dstack, "OpCode {} Pushed wrong value", code.value()); } } @@ -1115,18 +1113,16 @@ mod test { let cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); for ErrorTestCase { init, code, error } in tests { - [false, true].into_iter().for_each(|kip10_enabled| { - let mut vm = TxScriptEngine::new(&reused_values, &cache, kip10_enabled); - vm.dstack.clone_from(&init); - assert_eq!( - code.execute(&mut vm) - .expect_err(format!("Opcode {} should have errored (init: {:?})", code.value(), init.clone()).as_str()), - error, - "Opcode {} returned wrong error {:?}", - code.value(), - init - ); - }); + let mut vm = TxScriptEngine::new(&reused_values, &cache); + vm.dstack.clone_from(&init); + assert_eq!( + code.execute(&mut vm) + .expect_err(format!("Opcode {} should have errored (init: {:?})", code.value(), init.clone()).as_str()), + error, + "Opcode {} returned wrong error {:?}", + code.value(), + init + ); } } @@ -1152,7 +1148,7 @@ mod test { let cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); - let mut vm = TxScriptEngine::new(&reused_values, &cache, false); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -1186,7 +1182,7 @@ mod test { let cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); - let mut vm = TxScriptEngine::new(&reused_values, &cache, false); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -1259,7 +1255,7 @@ mod test { let cache = Cache::new(10_000); let reused_values = 
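// Taken together, the introspection opcodes above enable small covenants.
// A hypothetical sketch (builder calls as in kip-10.rs earlier in this diff;
// treat the exact method names as assumptions) that admits a spend only if
// output 0 carries at least the value of input 0:
//
//     ScriptBuilder::new()
//         .add_i64(0)?.add_op(OpTxInputAmount)?
//         .add_i64(0)?.add_op(OpTxOutputAmount)?
//         .add_op(OpLessThanOrEqual)?;
//
// i.e. push utxo[0].amount, push outputs[0].value, and leave a truthy result
// only when the former is <= the latter.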
SigHashReusedValuesUnsync::new(); - let mut vm = TxScriptEngine::new(&reused_values, &cache, false); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -2854,7 +2850,7 @@ mod test { ] { let mut tx = base_tx.clone(); tx.0.lock_time = tx_lock_time; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache, false, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache); vm.dstack = vec![lock_time.clone()]; match code.execute(&mut vm) { // Message is based on the should_fail values @@ -2896,7 +2892,7 @@ mod test { ] { let mut input = base_input.clone(); input.sequence = tx_sequence; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache, false, false); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache); vm.dstack = vec![sequence.clone()]; match code.execute(&mut vm) { // Message is based on the should_fail values @@ -3041,7 +3037,6 @@ mod test { #[derive(Debug)] struct TestGroup { name: &'static str, - kip10_enabled: bool, test_cases: Vec, } @@ -3089,21 +3084,16 @@ mod test { tx.utxo(current_idx).unwrap(), &reused_values, &sig_cache, - group.kip10_enabled, - false, ); // Check input index opcode first let op_input_idx = opcodes::OpTxInputIndex::empty().expect("Should accept empty"); - if !group.kip10_enabled { - assert!(matches!(op_input_idx.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_)))); - } else { - let mut expected = vm.dstack.clone(); - expected.push_item(current_idx as i64).unwrap(); - op_input_idx.execute(&mut vm).unwrap(); - assert_eq!(vm.dstack, expected); - vm.dstack.clear(); - } + + let mut expected = vm.dstack.clone(); + expected.push_item(current_idx as i64).unwrap(); + op_input_idx.execute(&mut vm).unwrap(); + assert_eq!(vm.dstack, expected); + vm.dstack.clear(); // Prepare opcodes let op_input_spk = opcodes::OpTxInputSpk::empty().expect("Should accept empty"); @@ -3158,35 +3148,8 @@ mod test { #[test] fn test_unary_introspection_ops() { let test_groups = vec![ - TestGroup { - name: "KIP-10 disabled", - kip10_enabled: false, - test_cases: vec![ - TestCase::Incorrect { - operation: Operation::InputSpk, - index: Some(0), - expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), - }, - TestCase::Incorrect { - operation: Operation::OutputSpk, - index: Some(0), - expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), - }, - TestCase::Incorrect { - operation: Operation::InputAmount, - index: Some(0), - expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), - }, - TestCase::Incorrect { - operation: Operation::OutputAmount, - index: Some(0), - expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), - }, - ], - }, TestGroup { name: "Valid input indices", - kip10_enabled: true, test_cases: vec![ TestCase::Successful { operation: Operation::InputSpk, @@ -3224,7 +3187,6 @@ mod test { }, TestGroup { name: "Valid output indices", - kip10_enabled: true, test_cases: vec![ TestCase::Successful { operation: Operation::OutputSpk, @@ -3262,7 +3224,6 @@ mod test { }, TestGroup { name: "Error cases", - kip10_enabled: true, test_cases: vec![ TestCase::Incorrect { operation: Operation::InputAmount, @@ -3349,56 +3310,37 @@ mod test { let sig_cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); - for 
runtime_sig_op_counting in [true, false] { - // Test with KIP-10 enabled and disabled - for kip10_enabled in [true, false] { - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], // Use first input - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - kip10_enabled, - runtime_sig_op_counting, - ); - - let op_input_count = opcodes::OpTxInputCount::empty().expect("Should accept empty"); - let op_output_count = opcodes::OpTxOutputCount::empty().expect("Should accept empty"); - - if kip10_enabled { - // Test input count - op_input_count.execute(&mut vm).unwrap(); - assert_eq!( - vm.dstack, - vec![ as OpcodeData>::serialize(&(input_count as i64)).unwrap()], - "Input count mismatch for {} inputs", - input_count - ); - vm.dstack.clear(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], // Use first input + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + ); - // Test output count - op_output_count.execute(&mut vm).unwrap(); - assert_eq!( - vm.dstack, - vec![ as OpcodeData>::serialize(&(output_count as i64)).unwrap()], - "Output count mismatch for {} outputs", - output_count - ); - vm.dstack.clear(); - } else { - // Test that operations fail when KIP-10 is disabled - assert!( - matches!(op_input_count.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_))), - "OpInputCount should fail when KIP-10 is disabled" - ); - assert!( - matches!(op_output_count.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_))), - "OpOutputCount should fail when KIP-10 is disabled" - ); - } - } - } + let op_input_count = opcodes::OpTxInputCount::empty().expect("Should accept empty"); + let op_output_count = opcodes::OpTxOutputCount::empty().expect("Should accept empty"); + + // Test input count + op_input_count.execute(&mut vm).unwrap(); + assert_eq!( + vm.dstack, + vec![ as OpcodeData>::serialize(&(input_count as i64)).unwrap()], + "Input count mismatch for {} inputs", + input_count + ); + vm.dstack.clear(); + + // Test output count + op_output_count.execute(&mut vm).unwrap(); + assert_eq!( + vm.dstack, + vec![ as OpcodeData>::serialize(&(output_count as i64)).unwrap()], + "Output count mismatch for {} outputs", + output_count + ); + vm.dstack.clear(); } } @@ -3434,16 +3376,8 @@ mod test { // Test success case { - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); } @@ -3459,16 +3393,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); } @@ -3500,16 +3426,8 @@ mod test { let mut tx = MutableTransaction::with_entries(tx, utxo_entries); tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + 
TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); } @@ -3527,16 +3445,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); } @@ -3557,16 +3467,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); // OpInputSpk should push input's SPK onto stack, making it non-empty assert_eq!(vm.execute(), Ok(())); @@ -3589,16 +3491,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); // Should succeed because the SPKs are different assert_eq!(vm.execute(), Ok(())); @@ -3622,16 +3516,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); // Should succeed because both SPKs are identical assert_eq!(vm.execute(), Ok(())); @@ -3668,16 +3554,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); } @@ -3695,16 +3573,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); } @@ -3729,16 +3599,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], 
- 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); } @@ -3754,16 +3616,8 @@ mod test { tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[1], - 1, - tx.utxo(1).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[1], 1, tx.utxo(1).unwrap(), &reused_values, &sig_cache); // Should fail because script expects index 0 but we're at index 1 assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); @@ -3801,16 +3655,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&input_count_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); } @@ -3821,16 +3667,8 @@ mod test { tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&output_count_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[1], - 1, - tx.utxo(1).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[1], 1, tx.utxo(1).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Ok(())); } @@ -3845,16 +3683,8 @@ mod test { tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&wrong_input_count_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[0], - 0, - tx.utxo(0).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); } @@ -3868,16 +3698,8 @@ mod test { tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&wrong_output_count_script).unwrap().drain(); let tx = tx.as_verifiable(); - let mut vm = TxScriptEngine::from_transaction_input( - &tx, - &tx.inputs()[1], - 1, - tx.utxo(1).unwrap(), - &reused_values, - &sig_cache, - true, - false, - ); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[1], 1, tx.utxo(1).unwrap(), &reused_values, &sig_cache); assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); } diff --git a/crypto/txscript/src/runtime_sig_op_counter.rs b/crypto/txscript/src/runtime_sig_op_counter.rs index 43f70dca0d..2e0bd84ed8 100644 --- a/crypto/txscript/src/runtime_sig_op_counter.rs +++ b/crypto/txscript/src/runtime_sig_op_counter.rs @@ -53,22 +53,3 @@ impl RuntimeSigOpCounter { self.sig_op_limit - self.sig_op_remaining } } - -pub trait SigOpConsumer { - fn consume_sig_op(&mut self) -> Result<(), TxScriptError>; -} - -impl SigOpConsumer for RuntimeSigOpCounter { - fn consume_sig_op(&mut self) -> Result<(), TxScriptError> { - RuntimeSigOpCounter::consume_sig_op(self) - } -} -impl SigOpConsumer for Option { - fn consume_sig_op(&mut self) -> Result<(), 
TxScriptError> { - if let Some(consumer) = self { - consumer.consume_sig_op() - } else { - Ok(()) - } - } -} diff --git a/crypto/txscript/src/script_builder.rs b/crypto/txscript/src/script_builder.rs index 1800045ce5..515105d1ee 100644 --- a/crypto/txscript/src/script_builder.rs +++ b/crypto/txscript/src/script_builder.rs @@ -1,7 +1,7 @@ use std::iter::once; use crate::{ - data_stack::{Kip10I64, OpcodeData}, + data_stack::OpcodeData, opcodes::{codes::*, OP_1_NEGATE_VAL, OP_DATA_MAX_VAL, OP_DATA_MIN_VAL, OP_SMALL_INT_MAX_VAL}, MAX_SCRIPTS_SIZE, MAX_SCRIPT_ELEMENT_SIZE, }; @@ -231,7 +231,7 @@ impl ScriptBuilder { return Ok(self); } - let bytes: Vec<_> = OpcodeData::<Kip10I64>::serialize(&val.into())?; + let bytes: Vec<_> = OpcodeData::<i64>::serialize(&val)?; self.add_data(&bytes) } @@ -291,7 +291,7 @@ mod tests { expected: Vec<u8>, } - let tests = vec![ + let tests = [ Test { name: "push OP_FALSE", opcodes: vec![OpFalse], expected: vec![OpFalse] }, Test { name: "push OP_TRUE", opcodes: vec![OpTrue], expected: vec![OpTrue] }, Test { name: "push OP_0", opcodes: vec![Op0], expected: vec![Op0] }, diff --git a/crypto/txscript/src/script_class.rs b/crypto/txscript/src/script_class.rs index ad61f30d89..85d96cab98 100644 --- a/crypto/txscript/src/script_class.rs +++ b/crypto/txscript/src/script_class.rs @@ -16,7 +16,7 @@ pub enum Error { } /// Standard classes of script payment in the blockDAG -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(PartialEq, Eq, Hash, Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[borsh(use_discriminant = true)] #[repr(u8)] pub enum ScriptClass { diff --git a/crypto/txscript/src/standard/multisig.rs b/crypto/txscript/src/standard/multisig.rs index 8133a6899c..550f7b2bf3 100644 --- a/crypto/txscript/src/standard/multisig.rs +++ b/crypto/txscript/src/standard/multisig.rs @@ -184,7 +184,7 @@ mod tests { let (input, entry) = tx.populated_inputs().next().unwrap(); let cache = Cache::new(10_000); - let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &reused_values, &cache, false, false); + let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &reused_values, &cache); assert_eq!(engine.execute().is_ok(), is_ok); } #[test] diff --git a/crypto/txscript/test-data/script_tests-kip10.json b/crypto/txscript/test-data/script_tests-kip10.json deleted file mode 100644 index 947c8810de..0000000000 --- a/crypto/txscript/test-data/script_tests-kip10.json +++ /dev/null @@ -1,5397 +0,0 @@ -[ - [ - "Format is: [[wit..., amount]?, scriptSig, scriptPubKey, flags, expected_scripterror, ... comments]" - ], - [ - "It is evaluated as if there was a crediting coinbase transaction with two 0" - ], - [ - "pushes as scriptSig, and one output of 0 satoshi and given scriptPubKey," - ], - [ - "followed by a spending transaction which spends this output as only input (and" - ], - [ - "correct prevout hash), using the given scriptSig. All nLockTimes are 0, all" - ], - [ - "nSequences are max." - ], - [ - "", - "DEPTH 0 EQUAL", - "", - "OK", - "Test the test: we should have an empty stack after scriptSig evaluation" - ], - [ - " ", - "DEPTH 0 EQUAL", - "", - "OK", - "and multiple spaces should not change that."
- ], - [ - " ", - "DEPTH 0 EQUAL", - "", - "OK" - ], - [ - " ", - "DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "1 2", - "2 EQUALVERIFY 1 EQUAL", - "", - "OK", - "Similarly whitespace around and between symbols" - ], - [ - "1 2", - "2 EQUALVERIFY 1 EQUAL", - "", - "OK" - ], - [ - " 1 2", - "2 EQUALVERIFY 1 EQUAL", - "", - "OK" - ], - [ - "1 2 ", - "2 EQUALVERIFY 1 EQUAL", - "", - "OK" - ], - [ - " 1 2 ", - "2 EQUALVERIFY 1 EQUAL", - "", - "OK" - ], - [ - "1", - "", - "", - "OK" - ], - [ - "0x02 0x01 0x00", - "", - "", - "OK", - "all bytes are significant, not only the last one" - ], - [ - "0x09 0x00000000 0x00000000 0x10", - "", - "", - "OK", - "equals zero when cast to Int64" - ], - [ - "0x01 0x11", - "17 EQUAL", - "", - "OK", - "push 1 byte" - ], - [ - "0x02 0x417a", - "'Az' EQUAL", - "", - "OK" - ], - [ - "0x4b 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", - "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", - "", - "OK", - "push 75 bytes" - ], - [ - "0x4c 0x4c 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", - "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", - "", - "OK", - "0x4c is OP_PUSHDATA1 (push 76 bytes)" - ], - [ - "0x4d 0x0001 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", - "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", - "", - "OK", - "0x4d is OP_PUSHDATA2" - ], - [ - "0x4f 1000", - "ADD 999 EQUAL", - "", - "OK" - ], - [ - "0", - "IF 0x50 ENDIF 1", - "", - "OK", - "0x50 is reserved (ok if not executed)" - ], - [ - "0x51", - "0x5f ADD 0x60 EQUAL", - "", - "OK", - "0x51 through 0x60 push 1 through 16 onto stack" - ], - [ - "1", - "NOP", - "", - "OK" - ], - [ - "0", - "IF VER ELSE 1 ENDIF", - "", - "OK", - "VER non-functional (ok if not executed)" - ], - [ - "0", - "IF RESERVED RESERVED1 RESERVED2 ELSE 1 ENDIF", - "", - "OK", - "RESERVED ok in un-executed IF" - ], - [ - "1", - "DUP IF ENDIF", - "", - "OK" - ], - [ - "1", - "IF 1 ENDIF", - "", - "OK" - ], - [ - "1", - "DUP IF ELSE ENDIF", - "", - "OK" - ], - [ - "1", - "IF 1 ELSE ENDIF", - "", - "OK" - ], - [ - "0", - "IF ELSE 1 ENDIF", - "", - "OK" - ], - [ - "1 1", - "IF IF 1 ELSE 0 ENDIF ENDIF", - "", - "OK" - ], - [ - "1 0", - "IF IF 1 ELSE 0 ENDIF ENDIF", - "", - "OK" - ], - [ - "1 1", - "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", - "", - "OK" - ], - [ - "0 0", - "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", - "", - "OK" - ], - [ - "1 0", - "NOTIF IF 1 ELSE 0 ENDIF ENDIF", - "", - "OK" - ], - [ - "1 1", - "NOTIF IF 1 ELSE 0 ENDIF ENDIF", - "", - "OK" - ], - [ - "1 0", - "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", - "", - "OK" - ], - [ - 
"0 1", - "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0 ELSE 1 ELSE 0 ENDIF", - "", - "OK", - "Multiple ELSE's are valid and executed inverts on each ELSE encountered" - ], - [ - "1", - "IF 1 ELSE 0 ELSE ENDIF", - "", - "OK" - ], - [ - "1", - "IF ELSE 0 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "1", - "IF 1 ELSE 0 ELSE 1 ENDIF ADD 2 EQUAL", - "", - "OK" - ], - [ - "'' 1", - "IF SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ENDIF 0x20 0x2c49a55fe0ca3e7a005420c19a527865df8f17e468d234f562ef238d4236a632 EQUAL", - "", - "OK" - ], - [ - "1", - "NOTIF 0 ELSE 1 ELSE 0 ENDIF", - "", - "OK", - "Multiple ELSE's are valid and execution inverts on each ELSE encountered" - ], - [ - "0", - "NOTIF 1 ELSE 0 ELSE ENDIF", - "", - "OK" - ], - [ - "0", - "NOTIF ELSE 0 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "NOTIF 1 ELSE 0 ELSE 1 ENDIF ADD 2 EQUAL", - "", - "OK" - ], - [ - "'' 0", - "NOTIF SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ENDIF 0x20 0x2c49a55fe0ca3e7a005420c19a527865df8f17e468d234f562ef238d4236a632 EQUAL", - "", - "OK" - ], - [ - "0", - "IF 1 IF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 1 IF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL", - "", - "OK", - "Nested ELSE ELSE" - ], - [ - "1", - "NOTIF 0 NOTIF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 0 NOTIF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL", - "", - "OK" - ], - [ - "0", - "IF RETURN ENDIF 1", - "", - "OK", - "RETURN only works if executed" - ], - [ - "1 1", - "VERIFY", - "", - "OK" - ], - [ - "1 0x05 0x01 0x00 0x00 0x00 0x00", - "VERIFY", - "", - "OK", - "values >4 bytes can be cast to boolean" - ], - [ - "0x01 0x80", - "VERIFY TRUE", - "", - "VERIFY", - "negative 0 is false" - ], - [ - "10 0 11", - "TOALTSTACK DROP FROMALTSTACK ADD 21 EQUAL", - "", - "OK" - ], - [ - "'gavin_was_here'", - "TOALTSTACK 11 FROMALTSTACK 'gavin_was_here' EQUALVERIFY 11 EQUAL", - "", - "OK" - ], - [ - "0", - "IFDUP DEPTH 1 EQUALVERIFY 0 EQUAL", - "", - "OK" - ], - [ - "1", - "IFDUP DEPTH 2 EQUALVERIFY 1 EQUALVERIFY 1 EQUAL", - "", - "OK" - ], - [ - "0x05 0x0100000000", - "IFDUP DEPTH 2 EQUALVERIFY 0x05 0x0100000000 EQUALVERIFY DROP TRUE", - "", - "OK", - "IFDUP dups non ints" - ], - [ - "0", - "DROP DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "0", - "DUP 1 ADD 1 EQUALVERIFY 0 EQUAL", - "", - "OK" - ], - [ - "0 1", - "NIP", - "", - "OK" - ], - [ - "1 0", - "OVER DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "0 PICK 20 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "1 PICK 21 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "2 PICK 22 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "0 ROLL 20 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "1 ROLL 21 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "2 
ROLL 22 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "ROT 22 EQUALVERIFY DROP DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "ROT DROP 20 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "22 21 20", - "ROT DROP DROP 21 EQUAL", - "", - "OK" - ], - [ - "22 21 20", - "ROT ROT 21 EQUAL 2DROP", - "", - "OK" - ], - [ - "22 21 20", - "ROT ROT ROT 20 EQUALVERIFY DROP DROP TRUE", - "", - "OK" - ], - [ - "25 24 23 22 21 20", - "2ROT 24 EQUALVERIFY DROP DROP DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "25 24 23 22 21 20", - "2ROT DROP 25 EQUALVERIFY DROP DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "25 24 23 22 21 20", - "2ROT 2DROP 20 EQUALVERIFY DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "25 24 23 22 21 20", - "2ROT 2DROP DROP 21 EQUALVERIFY 2DROP TRUE", - "", - "OK" - ], - [ - "25 24 23 22 21 20", - "2ROT 2DROP 2DROP 22 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "25 24 23 22 21 20", - "2ROT 2DROP 2DROP DROP 23 EQUALVERIFY TRUE", - "", - "OK" - ], - [ - "25 24 23 22 21 20", - "2ROT 2ROT 22 EQUALVERIFY 2DROP DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "25 24 23 22 21 20", - "2ROT 2ROT 2ROT 20 EQUALVERIFY DROP DROP DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "1 0", - "SWAP 1 EQUALVERIFY 0 EQUAL", - "", - "OK" - ], - [ - "0 1", - "TUCK DEPTH 3 EQUALVERIFY SWAP 2DROP", - "", - "OK" - ], - [ - "13 14", - "2DUP ROT EQUALVERIFY EQUAL", - "", - "OK" - ], - [ - "-1 0 1 2", - "3DUP DEPTH 7 EQUALVERIFY ADD ADD 3 EQUALVERIFY 2DROP 0 EQUALVERIFY", - "", - "OK" - ], - [ - "1 2 3 5", - "2OVER ADD ADD 8 EQUALVERIFY ADD ADD 6 EQUAL", - "", - "OK" - ], - [ - "1 3 5 7", - "2SWAP ADD 4 EQUALVERIFY ADD 12 EQUAL", - "", - "OK" - ], - [ - "0", - "SIZE 0 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "1", - "SIZE 1 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "127", - "SIZE 1 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "128", - "SIZE 2 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "32767", - "SIZE 2 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "32768", - "SIZE 3 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "8388607", - "SIZE 3 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "8388608", - "SIZE 4 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "2147483647", - "SIZE 4 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "2147483648", - "SIZE 5 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "549755813887", - "SIZE 5 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "549755813888", - "SIZE 6 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "9223372036854775807", - "SIZE 8 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-1", - "SIZE 1 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-127", - "SIZE 1 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-128", - "SIZE 2 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-32767", - "SIZE 2 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-32768", - "SIZE 3 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-8388607", - "SIZE 3 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-8388608", - "SIZE 4 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-2147483647", - "SIZE 4 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-2147483648", - "SIZE 5 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-549755813887", - "SIZE 5 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-549755813888", - "SIZE 6 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "-9223372036854775807", - "SIZE 8 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "'abcdefghijklmnopqrstuvwxyz'", - "SIZE 26 EQUALVERIFY DROP TRUE", - "", - "OK" - ], - [ - "42", - "SIZE 1 
EQUALVERIFY 42 EQUAL", - "", - "OK", - "SIZE does not consume argument" - ], - [ - "2 -2", - "ADD 0 EQUAL", - "", - "OK" - ], - [ - "2147483647 -2147483647", - "ADD 0 EQUAL", - "", - "OK" - ], - [ - "-1 -1", - "ADD -2 EQUAL", - "", - "OK" - ], - [ - "0 0", - "EQUAL", - "", - "OK" - ], - [ - "1 1", - "ADD 2 EQUAL", - "", - "OK" - ], - [ - "1", - "1ADD 2 EQUAL", - "", - "OK" - ], - [ - "111", - "1SUB 110 EQUAL", - "", - "OK" - ], - [ - "111 1", - "ADD 12 SUB 100 EQUAL", - "", - "OK" - ], - [ - "0", - "ABS 0 EQUAL", - "", - "OK" - ], - [ - "16", - "ABS 16 EQUAL", - "", - "OK" - ], - [ - "-16", - "ABS -16 NEGATE EQUAL", - "", - "OK" - ], - [ - "0", - "NOT NOP", - "", - "OK" - ], - [ - "1", - "NOT 0 EQUAL", - "", - "OK" - ], - [ - "11", - "NOT 0 EQUAL", - "", - "OK" - ], - [ - "0", - "0NOTEQUAL 0 EQUAL", - "", - "OK" - ], - [ - "1", - "0NOTEQUAL 1 EQUAL", - "", - "OK" - ], - [ - "111", - "0NOTEQUAL 1 EQUAL", - "", - "OK" - ], - [ - "-111", - "0NOTEQUAL 1 EQUAL", - "", - "OK" - ], - [ - "1 1", - "BOOLAND NOP", - "", - "OK" - ], - [ - "1 0", - "BOOLAND NOT", - "", - "OK" - ], - [ - "0 1", - "BOOLAND NOT", - "", - "OK" - ], - [ - "0 0", - "BOOLAND NOT", - "", - "OK" - ], - [ - "16 17", - "BOOLAND NOP", - "", - "OK" - ], - [ - "1 1", - "BOOLOR NOP", - "", - "OK" - ], - [ - "1 0", - "BOOLOR NOP", - "", - "OK" - ], - [ - "0 1", - "BOOLOR NOP", - "", - "OK" - ], - [ - "0 0", - "BOOLOR NOT", - "", - "OK" - ], - [ - "16 17", - "BOOLOR NOP", - "", - "OK" - ], - [ - "11 10 1", - "ADD NUMEQUAL", - "", - "OK" - ], - [ - "11 10 1", - "ADD NUMEQUALVERIFY 1", - "", - "OK" - ], - [ - "11 10 1", - "ADD NUMNOTEQUAL NOT", - "", - "OK" - ], - [ - "111 10 1", - "ADD NUMNOTEQUAL", - "", - "OK" - ], - [ - "11 10", - "LESSTHAN NOT", - "", - "OK" - ], - [ - "4 4", - "LESSTHAN NOT", - "", - "OK" - ], - [ - "10 11", - "LESSTHAN", - "", - "OK" - ], - [ - "-11 11", - "LESSTHAN", - "", - "OK" - ], - [ - "-11 -10", - "LESSTHAN", - "", - "OK" - ], - [ - "11 10", - "GREATERTHAN", - "", - "OK" - ], - [ - "4 4", - "GREATERTHAN NOT", - "", - "OK" - ], - [ - "10 11", - "GREATERTHAN NOT", - "", - "OK" - ], - [ - "-11 11", - "GREATERTHAN NOT", - "", - "OK" - ], - [ - "-11 -10", - "GREATERTHAN NOT", - "", - "OK" - ], - [ - "11 10", - "LESSTHANOREQUAL NOT", - "", - "OK" - ], - [ - "4 4", - "LESSTHANOREQUAL", - "", - "OK" - ], - [ - "10 11", - "LESSTHANOREQUAL", - "", - "OK" - ], - [ - "-11 11", - "LESSTHANOREQUAL", - "", - "OK" - ], - [ - "-11 -10", - "LESSTHANOREQUAL", - "", - "OK" - ], - [ - "11 10", - "GREATERTHANOREQUAL", - "", - "OK" - ], - [ - "4 4", - "GREATERTHANOREQUAL", - "", - "OK" - ], - [ - "10 11", - "GREATERTHANOREQUAL NOT", - "", - "OK" - ], - [ - "-11 11", - "GREATERTHANOREQUAL NOT", - "", - "OK" - ], - [ - "-11 -10", - "GREATERTHANOREQUAL NOT", - "", - "OK" - ], - [ - "1 0", - "MIN 0 NUMEQUAL", - "", - "OK" - ], - [ - "0 1", - "MIN 0 NUMEQUAL", - "", - "OK" - ], - [ - "-1 0", - "MIN -1 NUMEQUAL", - "", - "OK" - ], - [ - "0 -2147483647", - "MIN -2147483647 NUMEQUAL", - "", - "OK" - ], - [ - "2147483647 0", - "MAX 2147483647 NUMEQUAL", - "", - "OK" - ], - [ - "0 100", - "MAX 100 NUMEQUAL", - "", - "OK" - ], - [ - "-100 0", - "MAX 0 NUMEQUAL", - "", - "OK" - ], - [ - "0 -2147483647", - "MAX 0 NUMEQUAL", - "", - "OK" - ], - [ - "0 0 1", - "WITHIN", - "", - "OK" - ], - [ - "1 0 1", - "WITHIN NOT", - "", - "OK" - ], - [ - "0 -2147483647 2147483647", - "WITHIN", - "", - "OK" - ], - [ - "-1 -100 100", - "WITHIN", - "", - "OK" - ], - [ - "11 -100 100", - "WITHIN", - "", - "OK" - ], - [ - "-2147483647 -100 100", - "WITHIN NOT", 
- "", - "OK" - ], - [ - "2147483647 -100 100", - "WITHIN NOT", - "", - "OK" - ], - [ - "2147483647 2147483647", - "SUB 0 EQUAL", - "", - "OK" - ], - [ - "2147483647", - "DUP ADD 4294967294 EQUAL", - "", - "OK", - ">32 bit EQUAL is valid" - ], - [ - "2147483647", - "NEGATE DUP ADD -4294967294 EQUAL", - "", - "OK" - ], - [ - "''", - "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", - "", - "OK" - ], - [ - "'a'", - "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", - "", - "OK" - ], - [ - "'abcdefghijklmnopqrstuvwxyz'", - "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", - "", - "OK" - ], - [ - "''", - "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", - "", - "OK" - ], - [ - "'a'", - "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", - "", - "OK" - ], - [ - "'abcdefghijklmnopqrstuvwxyz'", - "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", - "", - "OK" - ], - [ - "''", - "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", - "", - "OK" - ], - [ - "'a'", - "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", - "", - "OK" - ], - [ - "'abcdefghijklmnopqrstuvwxyz'", - "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", - "", - "OK" - ], - [ - "''", - "NOP BLAKE2B 0x20 0x0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8 EQUAL", - "", - "OK" - ], - [ - "'a'", - "BLAKE2B NOP 0x20 0x8928aae63c84d87ea098564d1e03ad813f107add474e56aedd286349c0c03ea4 EQUAL", - "", - "OK" - ], - [ - "'abcdefghijklmnopqrstuvwxyz'", - "NOP BLAKE2B 0x20 0x117ad6b940f5e8292c007d9c7e7350cd33cf85b5887e8da71c7957830f536e7c EQUAL", - "", - "OK", - "The NOP is added so the script won't be interpreted as P2SH" - ], - [ - "'a'", - "NOP BLAKE2B 0x20 0x8928aae63c84d87ea098564d1e03ad813f107add474e56aedd286349c0c03ea4 EQUAL", - "", - "OK" - ], - [ - "0", - "IF 0xb2 ELSE 1 ENDIF", - "", - "OK", - "opcodes above OP_CHECKSEQUENCEVERIFY invalid if executed" - ], - [ - "0", - "IF 0xbd ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xbe ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xbf ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc0 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc1 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc2 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc3 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc4 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc5 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc6 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc7 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc8 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xc9 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xca ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xcb ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xcc ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xcd ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xce ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xcf ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd0 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd1 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd2 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd3 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd4 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd5 ELSE 1 ENDIF", - "", - 
"OK" - ], - [ - "0", - "IF 0xd6 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd7 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd8 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xd9 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xda ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xdb ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xdc ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xdd ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xde ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xdf ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe0 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe1 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe2 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe3 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe4 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe5 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe6 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe7 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe8 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xe9 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xea ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xeb ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xec ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xed ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xee ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xef ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf0 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf1 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf2 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf3 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf4 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf5 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf6 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf7 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf8 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xf9 ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xfa ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xfb ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xfc ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xfd ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xfe ELSE 1 ENDIF", - "", - "OK" - ], - [ - "0", - "IF 0xff ELSE 1 ENDIF", - "", - "OK" - ], - [ - "", - "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", - "", - "OK", - "520 byte push" - ], - [ - "1", - "0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", - "", - "OK", - "201 opcodes executed. 
0x61 is NOP" - ], - [ - "1 2 3 4 5", - "0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 0x6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d75", - "", - "OK", - "244 stack size (0x6f is 3DUP, 0x6d is 2DROP, and 0x75 is DROP)" - ], - [ - "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", - "'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP DROP 0x6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d 0x61616161", - "", - "OK", - "Max-size (10,000-byte), max-push(520 bytes), max-opcodes(201), max stack size(244 items). 0x6f is 3DUP, 0x61 is NOP, 0x6d is 2DROP" - ], - [ - "0", - "IF 0x5050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050 ENDIF 1", - "", - "OK", - ">201 opcodes, but RESERVED (0x50) doesn't count towards opcode limit." 
- ], - [ - "", - "1", - "", - "OK" - ], - [ - "127", - "0x01 0x7F EQUAL", - "", - "OK" - ], - [ - "128", - "0x02 0x8000 EQUAL", - "", - "OK", - "Leave room for the sign bit" - ], - [ - "32767", - "0x02 0xFF7F EQUAL", - "", - "OK" - ], - [ - "32768", - "0x03 0x008000 EQUAL", - "", - "OK" - ], - [ - "8388607", - "0x03 0xFFFF7F EQUAL", - "", - "OK" - ], - [ - "8388608", - "0x04 0x00008000 EQUAL", - "", - "OK" - ], - [ - "2147483647", - "0x04 0xFFFFFF7F EQUAL", - "", - "OK" - ], - [ - "2147483648", - "0x05 0x0000008000 EQUAL", - "", - "OK" - ], - [ - "549755813887", - "0x05 0xFFFFFFFF7F EQUAL", - "", - "OK" - ], - [ - "549755813888", - "0x06 0xFFFFFFFF7F EQUALVERIFY 2DROP TRUE", - "", - "OK" - ], - [ - "9223372036854775807", - "0x08 0xFFFFFFFFFFFFFF7F EQUAL", - "", - "OK" - ], - [ - "-2", - "0x01 0x82 EQUAL", - "", - "OK", - "Numbers are little-endian with the MSB being a sign bit" - ], - [ - "-127", - "0x01 0xFF EQUAL", - "", - "OK" - ], - [ - "-128", - "0x02 0x8080 EQUAL", - "", - "OK" - ], - [ - "-32767", - "0x02 0xFFFF EQUAL", - "", - "OK" - ], - [ - "-32768", - "0x03 0x008080 EQUAL", - "", - "OK" - ], - [ - "-8388607", - "0x03 0xFFFFFF EQUAL", - "", - "OK" - ], - [ - "-8388608", - "0x04 0x00008080 EQUAL", - "", - "OK" - ], - [ - "-2147483647", - "0x04 0xFFFFFFFF EQUAL", - "", - "OK" - ], - [ - "-2147483648", - "0x05 0x0000008080 EQUAL", - "", - "OK" - ], - [ - "-4294967295", - "0x05 0xFFFFFFFF80 EQUAL", - "", - "OK" - ], - [ - "-549755813887", - "0x05 0xFFFFFFFFFF EQUAL", - "", - "OK" - ], - [ - "-549755813888", - "0x06 0x000000008080 EQUAL", - "", - "OK" - ], - [ - "-9223372036854775807", - "0x08 0xFFFFFFFFFFFFFFFF EQUAL", - "", - "OK" - ], - [ - "2147483647", - "1ADD 2147483648 EQUAL", - "", - "OK", - "We can do math on 4-byte integers, and compare 5-byte ones" - ], - [ - "2147483647", - "1ADD DROP 1", - "", - "OK" - ], - [ - "-2147483647", - "1ADD DROP 1", - "", - "OK" - ], - [ - "1", - "0x02 0x0100 EQUAL NOT", - "", - "OK", - "Not the same byte array..." - ], - [ - "0", - "0x01 0x80 EQUAL NOT", - "", - "OK" - ], - [ - "", - "NOP 1", - "", - "OK", - "The following tests check the if(stack.size() < N) tests in each opcode" - ], - [ - "1", - "IF 1 ENDIF", - "", - "OK", - "They are here to catch copy-and-paste errors" - ], - [ - "0", - "NOTIF 1 ENDIF", - "", - "OK", - "Most of them are duplicated elsewhere," - ], - [ - "1", - "VERIFY 1", - "", - "OK", - "but, hey, more is always better, right?" 
- ], - [ - "0", - "TOALTSTACK 1", - "", - "OK" - ], - [ - "1", - "TOALTSTACK FROMALTSTACK", - "", - "OK" - ], - [ - "0 0", - "2DROP 1", - "", - "OK" - ], - [ - "0 1", - "2DUP VERIFY DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "0 0 1", - "3DUP VERIFY DROP DROP DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "0 1 0 0", - "2OVER VERIFY DROP DROP DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "0 1 0 0 0 0", - "2ROT VERIFY DROP DROP DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "0 1 0 0", - "2SWAP VERIFY DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "1", - "IFDUP VERIFY", - "", - "OK" - ], - [ - "", - "DEPTH 1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0", - "DROP 1", - "", - "OK" - ], - [ - "1", - "DUP VERIFY", - "", - "OK" - ], - [ - "0 1", - "NIP", - "", - "OK" - ], - [ - "1 0", - "OVER VERIFY DROP DROP TRUE", - "", - "OK" - ], - [ - "1 0 0 0 3", - "PICK VERIFY DROP DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "1 0", - "PICK VERIFY DROP TRUE", - "", - "OK" - ], - [ - "1 0 0 0 3", - "ROLL VERIFY DROP DROP DROP TRUE", - "", - "OK" - ], - [ - "1 0", - "ROLL", - "", - "OK" - ], - [ - "1 0 0", - "ROT VERIFY DROP DROP TRUE", - "", - "OK" - ], - [ - "1 0", - "SWAP VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0 1", - "TUCK VERIFY DROP DROP TRUE", - "", - "OK" - ], - [ - "1", - "SIZE VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0 0", - "EQUAL", - "", - "OK" - ], - [ - "0 0", - "EQUALVERIFY 1", - "", - "OK" - ], - [ - "0 0 1", - "EQUAL EQUAL", - "", - "OK", - "OP_0 and bools must have identical byte representations" - ], - [ - "0", - "1ADD", - "", - "OK" - ], - [ - "2", - "1SUB", - "", - "OK" - ], - [ - "-1", - "NEGATE", - "", - "OK" - ], - [ - "-1", - "ABS", - "", - "OK" - ], - [ - "0", - "NOT", - "", - "OK" - ], - [ - "-1", - "0NOTEQUAL", - "", - "OK" - ], - [ - "1 0", - "ADD", - "", - "OK" - ], - [ - "1 0", - "SUB", - "", - "OK" - ], - [ - "-1 -1", - "BOOLAND", - "", - "OK" - ], - [ - "-1 0", - "BOOLOR", - "", - "OK" - ], - [ - "0 0", - "NUMEQUAL", - "", - "OK" - ], - [ - "5 4", - "NUMEQUAL FALSE EQUAL", - "", - "OK" - ], - [ - "0 0", - "NUMEQUALVERIFY 1", - "", - "OK" - ], - [ - "-1 0", - "NUMNOTEQUAL", - "", - "OK" - ], - [ - "-1 0", - "LESSTHAN", - "", - "OK" - ], - [ - "1 0", - "GREATERTHAN", - "", - "OK" - ], - [ - "0 0", - "LESSTHANOREQUAL", - "", - "OK" - ], - [ - "0 0", - "GREATERTHANOREQUAL", - "", - "OK" - ], - [ - "-1 0", - "MIN", - "", - "OK" - ], - [ - "1 0", - "MAX", - "", - "OK" - ], - [ - "-1 -1 0", - "WITHIN", - "", - "OK" - ], - [ - "0", - "SHA256", - "", - "OK" - ], - [ - "0", - "BLAKE2B", - "", - "OK" - ], - [ - "", - "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK", - "CHECKMULTISIG is allowed to have zero keys and/or sigs" - ], - [ - "", - "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK", - "Zero sigs means no sigs are checked" - ], - [ - "", - "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK", - "CHECKMULTISIG is allowed to have zero keys and/or sigs" - ], - [ - "", - "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK", - "Zero sigs means no sigs are checked" - ], - [ - "", - "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 2 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK", - "Test from up to 20 pubkeys, all not checked" - ], - [ - "", - "0 'a' 'b' 'c' 3 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - 
], - [ - "", - "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 2 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 3 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 
CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "", - "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", - "", - "OK" - ], - [ - "1", - "0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 
CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY", - "", - "OK", - "nOpCount is incremented by the number of keys evaluated in addition to the usual one op per op. In this case we have zero keys, so we can execute 201 CHECKMULTISIGS" - ], - [ - "", - "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DROP DROP DROP DROP DROP DROP DROP TRUE", - "", - "OK", - "Even though there are no signatures being checked nOpCount is incremented by the number of keys." 
- ], - [ - "1", - "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", - "", - "OK" - ], - [ - "0x01 1", - "BLAKE2B 0x20 0xce57216285125006ec18197bd8184221cefa559bb0798410d99a5bba5b07cd1d EQUAL", - "", - "OK", - "Very basic P2SH" - ], - [ - "0x00", - "SIZE 0 EQUALVERIFY DROP TRUE", - "", - "OK", - "Basic OP_0 execution" - ], - [ - "Numeric pushes" - ], - [ - "-1", - "0x4f EQUAL", - "", - "OK", - "OP1_NEGATE pushes 0x81" - ], - [ - "1", - "0x51 EQUAL", - "", - "OK", - "OP_1 pushes 0x01" - ], - [ - "2", - "0x52 EQUAL", - "", - "OK", - "OP_2 pushes 0x02" - ], - [ - "3", - "0x53 EQUAL", - "", - "OK", - "OP_3 pushes 0x03" - ], - [ - "4", - "0x54 EQUAL", - "", - "OK", - "OP_4 pushes 0x04" - ], - [ - "5", - "0x55 EQUAL", - "", - "OK", - "OP_5 pushes 0x05" - ], - [ - "6", - "0x56 EQUAL", - "", - "OK", - "OP_6 pushes 0x06" - ], - [ - "7", - "0x57 EQUAL", - "", - "OK", - "OP_7 pushes 0x07" - ], - [ - "8", - "0x58 EQUAL", - "", - "OK", - "OP_8 pushes 0x08" - ], - [ - "9", - "0x59 EQUAL", - "", - "OK", - "OP_9 pushes 0x09" - ], - [ - "10", - "0x5a EQUAL", - "", - "OK", - "OP_10 pushes 0x0a" - ], - [ - "11", - "0x5b EQUAL", - "", - "OK", - "OP_11 pushes 0x0b" - ], - [ - "12", - "0x5c EQUAL", - "", - "OK", - "OP_12 pushes 0x0c" - ], - [ - "13", - "0x5d EQUAL", - "", - "OK", - "OP_13 pushes 0x0d" - ], - [ - "14", - "0x5e EQUAL", - "", - "OK", - "OP_14 pushes 0x0e" - ], - [ - "15", - "0x5f EQUAL", - "", - "OK", - "OP_15 pushes 0x0f" - ], - [ - "16", - "0x60 EQUAL", - "", - "OK", - "OP_16 pushes 0x10" - ], - [ - "Unevaluated non-minimal pushes are ignored" - ], - [ - "0", - "IF 0x4c 0x00 ENDIF 1 ", - "", - "OK", - "non-minimal PUSHDATA1 ignored" - ], - [ - "0", - "IF 0x4d 0x0000 ENDIF 1 ", - "", - "OK", - "non-minimal PUSHDATA2 ignored" - ], - [ - "0", - "IF 0x4c 0x00000000 ENDIF 1 ", - "", - "OK", - "non-minimal PUSHDATA4 ignored" - ], - [ - "0", - "IF 0x01 0x81 ENDIF 1 ", - "", - "OK", - "1NEGATE equiv" - ], - [ - "0", - "IF 0x01 0x01 ENDIF 1 ", - "", - "OK", - "OP_1 equiv" - ], - [ - "0", - "IF 0x01 0x02 ENDIF 1 ", - "", - "OK", - "OP_2 equiv" - ], - [ - "0", - "IF 0x01 0x03 ENDIF 1 ", - "", - "OK", - "OP_3 equiv" - ], - [ - "0", - "IF 0x01 0x04 ENDIF 1 ", - "", - "OK", - "OP_4 equiv" - ], - [ - "0", - "IF 0x01 0x05 ENDIF 1 ", - "", - "OK", - "OP_5 equiv" - ], - [ - "0", - "IF 0x01 0x06 ENDIF 1 ", - "", - "OK", - "OP_6 equiv" - ], - [ - "0", - "IF 0x01 0x07 ENDIF 1 ", - "", - "OK", - "OP_7 equiv" - ], - [ - "0", - "IF 0x01 0x08 ENDIF 1 ", - "", - "OK", - "OP_8 equiv" - ], - [ - "0", - "IF 0x01 0x09 ENDIF 1 ", - "", - "OK", - "OP_9 equiv" - ], - [ - "0", - "IF 0x01 0x0a ENDIF 1 
", - "", - "OK", - "OP_10 equiv" - ], - [ - "0", - "IF 0x01 0x0b ENDIF 1 ", - "", - "OK", - "OP_11 equiv" - ], - [ - "0", - "IF 0x01 0x0c ENDIF 1 ", - "", - "OK", - "OP_12 equiv" - ], - [ - "0", - "IF 0x01 0x0d ENDIF 1 ", - "", - "OK", - "OP_13 equiv" - ], - [ - "0", - "IF 0x01 0x0e ENDIF 1 ", - "", - "OK", - "OP_14 equiv" - ], - [ - "0", - "IF 0x01 0x0f ENDIF 1 ", - "", - "OK", - "OP_15 equiv" - ], - [ - "0", - "IF 0x01 0x10 ENDIF 1 ", - "", - "OK", - "OP_16 equiv" - ], - [ - "Numeric minimaldata rules are only applied when a stack item is numerically evaluated; the push itself is allowed" - ], - [ - "0x01 0x00", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x01 0x80", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0180", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0100", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0200", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0300", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0400", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0500", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0600", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0700", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0800", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0900", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0a00", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0b00", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0c00", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0d00", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0e00", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x0f00", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "0x02 0x1000", - "1 VERIFY DROP TRUE", - "", - "OK" - ], - [ - "While not really correctly DER encoded, the empty signature is allowed" - ], - [ - "to provide a compact way to provide a delibrately invalid signature." - ], - [ - "0", - "0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 CHECKSIG NOT", - "", - "OK" - ], - [ - "0", - "1 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 1 CHECKMULTISIG NOT", - "", - "OK" - ], - [ - "TRUE DATA_8 0x0000000000000080", - "CHECKSEQUENCEVERIFY", - "", - "OK", - "CSV passes if stack top bit 1 << 63 is set" - ], - [ - "", - "DEPTH", - "", - "EVAL_FALSE", - "Test the test: we should have an empty stack after scriptSig evaluation" - ], - [ - " ", - "DEPTH", - "", - "EVAL_FALSE", - "and multiple spaces should not change that." 
- ], - [ - " ", - "DEPTH", - "", - "EVAL_FALSE" - ], - [ - " ", - "DEPTH", - "", - "EVAL_FALSE" - ], - [ - "", - "", - "", - "EVAL_FALSE" - ], - [ - "", - "NOP", - "", - "EVAL_FALSE" - ], - [ - "", - "NOP DEPTH", - "", - "EVAL_FALSE" - ], - [ - "", - "DEPTH", - "", - "EVAL_FALSE" - ], - [ - "", - "NOP", - "", - "EVAL_FALSE" - ], - [ - "", - "NOP DEPTH", - "", - "EVAL_FALSE" - ], - [ - "0x4c01", - "0x01 NOP", - "", - "BAD_OPCODE", - "PUSHDATA1 with not enough bytes" - ], - [ - "0x4d0200ff", - "0x01 NOP", - "", - "BAD_OPCODE", - "PUSHDATA2 with not enough bytes" - ], - [ - "0x4e03000000ffff", - "0x01 NOP", - "", - "BAD_OPCODE", - "PUSHDATA4 with not enough bytes" - ], - [ - "1", - "IF 0x50 ENDIF 1", - "", - "BAD_OPCODE", - "0x50 is reserved" - ], - [ - "0x52", - "0x5f ADD 0x60 EQUAL", - "", - "EVAL_FALSE", - "0x51 through 0x60 push 1 through 16 onto stack" - ], - [ - "0", - "NOP", - "", - "EVAL_FALSE", - "" - ], - [ - "1", - "IF VER ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "VER non-functional" - ], - [ - "0", - "IF VERIF ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "VERIF illegal everywhere" - ], - [ - "0", - "IF ELSE 1 ELSE VERIF ENDIF", - "", - "BAD_OPCODE", - "VERIF illegal everywhere" - ], - [ - "0", - "IF VERNOTIF ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "VERNOTIF illegal everywhere" - ], - [ - "0", - "IF ELSE 1 ELSE VERNOTIF ENDIF", - "", - "BAD_OPCODE", - "VERNOTIF illegal everywhere" - ], - [ - "0", - "DUP IF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0", - "IF 1 ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0", - "DUP IF ELSE ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0", - "IF 1 ELSE ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0", - "NOTIF ELSE 1 ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0 1", - "IF IF 1 ELSE 0 ENDIF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0 0", - "IF IF 1 ELSE 0 ENDIF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "1 0", - "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0 1", - "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0 0", - "NOTIF IF 1 ELSE 0 ENDIF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0 1", - "NOTIF IF 1 ELSE 0 ENDIF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "1 1", - "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "0 0", - "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", - "", - "EVAL_FALSE" - ], - [ - "1", - "IF RETURN ELSE ELSE 1 ENDIF", - "", - "OP_RETURN", - "Multiple ELSEs" - ], - [ - "1", - "IF 1 ELSE ELSE RETURN ENDIF", - "", - "OP_RETURN" - ], - [ - "1", - "ENDIF", - "", - "UNBALANCED_CONDITIONAL", - "Malformed IF/ELSE/ENDIF sequence" - ], - [ - "1", - "ELSE ENDIF", - "", - "UNBALANCED_CONDITIONAL" - ], - [ - "1", - "ENDIF ELSE", - "", - "UNBALANCED_CONDITIONAL" - ], - [ - "1", - "ENDIF ELSE IF", - "", - "UNBALANCED_CONDITIONAL" - ], - [ - "1", - "IF ELSE ENDIF ELSE", - "", - "UNBALANCED_CONDITIONAL" - ], - [ - "1", - "IF ELSE ENDIF ELSE ENDIF", - "", - "UNBALANCED_CONDITIONAL" - ], - [ - "1", - "IF ENDIF ENDIF", - "", - "UNBALANCED_CONDITIONAL" - ], - [ - "1", - "IF ELSE ELSE ENDIF ENDIF", - "", - "UNBALANCED_CONDITIONAL" - ], - [ - "1", - "RETURN", - "", - "OP_RETURN" - ], - [ - "1", - "DUP IF RETURN ENDIF", - "", - "OP_RETURN" - ], - [ - "1", - "RETURN 'data'", - "", - "OP_RETURN", - "canonical prunable txout format" - ], - [ - "0", - "VERIFY 1", - "", - "VERIFY" - ], - [ - "1", - "VERIFY", - "", - "EVAL_FALSE" - ], - [ - "1", - "VERIFY 0", - "", - "EVAL_FALSE" - ], - [ - "", - "IFDUP DEPTH 0 EQUAL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - 
"DROP DEPTH 0 EQUAL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "DUP DEPTH 0 EQUAL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "DUP 1 ADD 2 EQUALVERIFY 0 EQUAL", - "", - "EVAL_FALSE" - ], - [ - "", - "NIP", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "1 NIP", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "1 0 NIP", - "", - "EVAL_FALSE" - ], - [ - "", - "OVER 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "OVER", - "", - "INVALID_STACK_OPERATION" - ], - [ - "19 20 21", - "PICK 19 EQUALVERIFY DEPTH 2 EQUAL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "0 PICK", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "-1 PICK", - "", - "INVALID_STACK_OPERATION" - ], - [ - "19 20 21", - "0 PICK 20 EQUALVERIFY DEPTH 3 EQUAL", - "", - "EQUALVERIFY" - ], - [ - "19 20 21", - "1 PICK 21 EQUALVERIFY DEPTH 3 EQUAL", - "", - "EQUALVERIFY" - ], - [ - "19 20 21", - "2 PICK 22 EQUALVERIFY DEPTH 3 EQUAL", - "", - "EQUALVERIFY" - ], - [ - "", - "0 ROLL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "-1 ROLL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "19 20 21", - "0 ROLL 20 EQUALVERIFY DEPTH 2 EQUAL", - "", - "EQUALVERIFY" - ], - [ - "19 20 21", - "1 ROLL 21 EQUALVERIFY DEPTH 2 EQUAL", - "", - "EQUALVERIFY" - ], - [ - "19 20 21", - "2 ROLL 22 EQUALVERIFY DEPTH 2 EQUAL", - "", - "EQUALVERIFY" - ], - [ - "", - "ROT 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "1 ROT 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "1 2 ROT 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "SWAP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "SWAP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "0 1", - "SWAP 1 EQUALVERIFY", - "", - "EQUALVERIFY" - ], - [ - "", - "TUCK 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "TUCK 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 0", - "TUCK DEPTH 3 EQUALVERIFY SWAP 2DROP", - "", - "EVAL_FALSE" - ], - [ - "", - "2DUP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "2DUP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "3DUP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "3DUP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 2", - "3DUP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "2OVER 1 VERIFY DROP DROP DROP DROP TRUE", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "2 3 2OVER 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "2SWAP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "2 3 2SWAP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "'a' 'b'", - "CAT", - "", - "DISABLED_OPCODE", - "CAT disabled" - ], - [ - "'a' 'b' 0", - "IF CAT ELSE 1 ENDIF", - "", - "DISABLED_OPCODE", - "CAT disabled" - ], - [ - "'abc' 1 1", - "SUBSTR", - "", - "DISABLED_OPCODE", - "SUBSTR disabled" - ], - [ - "'abc' 1 1 0", - "IF SUBSTR ELSE 1 ENDIF", - "", - "DISABLED_OPCODE", - "SUBSTR disabled" - ], - [ - "'abc' 2 0", - "IF LEFT ELSE 1 ENDIF", - "", - "DISABLED_OPCODE", - "LEFT disabled" - ], - [ - "'abc' 2 0", - "IF RIGHT ELSE 1 ENDIF", - "", - "DISABLED_OPCODE", - "RIGHT disabled" - ], - [ - "", - "SIZE 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "NOP", - "", - "EMPTY_STACK", - "Checks EMPTY_STACK error" - ], - [ - "'abc'", - "INVERT VERIFY TRUE", - "", - "DISABLED_OPCODE", - "INVERT disabled" - ], - [ - "1 2 0", - "IF AND ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "AND disabled" - ], - [ - "1 2 0", - "IF OR ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "OR disabled" - ], - [ - "1 2 0", - "IF XOR ELSE 1 ENDIF NOP", - "", - 
"DISABLED_OPCODE", - "XOR disabled" - ], - [ - "2 0", - "IF 2MUL ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "2MUL disabled" - ], - [ - "2 0", - "IF 2DIV ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "2DIV disabled" - ], - [ - "2 2 0", - "IF MUL ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "MUL disabled" - ], - [ - "2 2 0", - "IF DIV ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "DIV disabled" - ], - [ - "2 2 0", - "IF MOD ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "MOD disabled" - ], - [ - "2 2 0", - "IF LSHIFT ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "LSHIFT disabled" - ], - [ - "2 2 0", - "IF RSHIFT ELSE 1 ENDIF NOP", - "", - "DISABLED_OPCODE", - "RSHIFT disabled" - ], - [ - "", - "EQUAL NOT", - "", - "INVALID_STACK_OPERATION", - "EQUAL must error when there are no stack items" - ], - [ - "0", - "EQUAL NOT", - "", - "INVALID_STACK_OPERATION", - "EQUAL must error when there are not 2 stack items" - ], - [ - "0 1", - "EQUAL", - "", - "EVAL_FALSE" - ], - [ - "1 1", - "ADD 0 EQUAL", - "", - "EVAL_FALSE" - ], - [ - "11 1", - "ADD 12 SUB 11 EQUAL", - "", - "EVAL_FALSE" - ], - [ - "2147483648 0", - "ADD NOP", - "", - "OK", - "numbers up to 8 bytes are supported since kip10" - ], - [ - "-2147483648 0", - "ADD NOP", - "", - "OK", - "numbers up to 8 bytes are supported since kip10" - ], - [ - "-9223372036854775808 0", - "ADD NOP", - "", - "UNKNOWN_ERROR", - "" - ], - [ - "2147483647", - "DUP ADD 4294967294 NUMEQUAL", - "", - "OK", - "NUMEQUAL is in numeric range since kip10" - ], - [ - "'abcdef'", - "NOT 0 EQUAL", - "", - "OK", - "numbers up to 8 bytes are supported since kip10" - ], - [ - "'abcdefghi'", - "NOT 0 EQUAL", - "", - "UNKNOWN_ERROR", - "NOT is an arithmetic operand" - ], - [ - "2", - "DUP MUL 4 EQUAL", - "", - "DISABLED_OPCODE", - "disabled" - ], - [ - "2", - "DUP DIV 1 EQUAL", - "", - "DISABLED_OPCODE", - "disabled" - ], - [ - "2", - "2MUL 4 EQUAL", - "", - "DISABLED_OPCODE", - "disabled" - ], - [ - "2", - "2DIV 1 EQUAL", - "", - "DISABLED_OPCODE", - "disabled" - ], - [ - "7 3", - "MOD 1 EQUAL", - "", - "DISABLED_OPCODE", - "disabled" - ], - [ - "2 2", - "LSHIFT 8 EQUAL", - "", - "DISABLED_OPCODE", - "disabled" - ], - [ - "2 1", - "RSHIFT 1 EQUAL", - "", - "DISABLED_OPCODE", - "disabled" - ], - [ - "0x50", - "1", - "", - "BAD_OPCODE", - "opcode 0x50 is reserved" - ], - [ - "1", - "IF 0xb2 ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxVersion is reserved" - ], - [ - "1", - "IF 0xb3 ELSE 1 ENDIF", - "", - "OK", - "OpTxInputCount is enabled since kip10" - ], - [ - "1", - "IF 0xb4 ELSE 1 ENDIF", - "", - "OK", - "OpTxOutputCount is enabled since kip10" - ], - [ - "1", - "IF 0xb5 ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxLockTime is reserved" - ], - [ - "1", - "IF 0xb6 ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxSubnetId is reserved" - ], - [ - "1", - "IF 0xb7 ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxGas is reserved" - ], - [ - "1", - "IF 0xb8 ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxPayload is reserved" - ], - [ - "1", - "IF 0xb9 0 NUMEQUAL ELSE 1 ENDIF", - "", - "OK", - "OpTxInputIndex is enabled since kip10" - ], - [ - "1", - "IF 0xba ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpOutpointTxId is reserved" - ], - [ - "1", - "IF 0xbb ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpOutpointOutputIdx is reserved" - ], - [ - "1", - "IF 0xbc ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxInputScriptSig is reserved" - ], - [ - "1", - "IF 0xbd ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxInputSeq is reserved" - ], - [ - "0 1", - "IF 0xbe 0 NUMEQUAL ELSE 1 ENDIF", - "", - "OK", - 
"OpTxInputAmount is enabled since kip10" - ], - [ - "0 1", - "IF 0xbf ELSE 1 ENDIF", - "", - "OK", - "OpTxInputSpk is enabled since kip10" - ], - [ - "1", - "IF 0xc0 ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxInputBlockDaaScore is reserved" - ], - [ - "1", - "IF 0xc1 ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "OpTxInputIsCoinbase is reserved" - ], - [ - "0 1", - "IF 0xc2 0 NUMEQUAL ELSE 1 ENDIF", - "", - "OK", - "OpTxOutputAmount is enabled since kip10" - ], - [ - "0 1", - "IF 0xc3 0x02 0x0000 EQUAL ELSE 1 ENDIF", - "", - "OK", - "OpTxOutputSpk is enabled since kip10" - ], - [ - "1", - "IF 0xc4 ELSE 1 ENDIF", - "", - "BAD_OPCODE", - "opcodes above OpTxOutputSpk invalid if executed" - ], - [ - "1", - "IF 0xc5 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xc6 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xc7 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xc8 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xc9 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xca ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xcb ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xcc ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xcd ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xce ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xcf ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd0 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd1 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd2 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd3 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd4 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd5 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd6 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd7 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd8 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xd9 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xda ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xdb ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xdc ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xdd ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xde ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xdf ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe0 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe1 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe2 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe3 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe4 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe5 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe6 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe7 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe8 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xe9 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xea ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xeb ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xec ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xed ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xee ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xef ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf0 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf1 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf2 ELSE 
1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf3 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf4 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf5 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf6 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf7 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf8 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xf9 ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xfa ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xfb ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xfc ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xfd ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xfe ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "1", - "IF 0xff ELSE 1 ENDIF", - "", - "BAD_OPCODE" - ], - [ - "", - "SHA256", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "SHA256", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "SHA256", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "BLAKE2B", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "BLAKE2B", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", - "", - "PUSH_SIZE", - ">520 byte push" - ], - [ - "0", - "IF 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' ENDIF 1", - "", - "PUSH_SIZE", - ">520 byte push in non-executed IF branch" - ], - [ - "1", - "0x61616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", - "", - "OP_COUNT", - ">201 opcodes executed. 0x61 is NOP" - ], - [ - "0", - "IF 0x6161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161 ENDIF 1", - "", - "OP_COUNT", - ">201 opcodes including non-executed IF branch. 
0x61 is NOP" - ], - [ - "", - "1 2 3 4 5 6 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", - "", - "STACK_SIZE", - ">244 stack size (0x6f is 3DUP)" - ], - [ - "", - "1 TOALTSTACK 2 TOALTSTACK 3 4 5 6 7 8 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", - "", - "STACK_SIZE", - ">244 stack+altstack size" - ], - [ - "", - "0 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP 0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", - "", - "SCRIPT_SIZE", - "10,001-byte scriptPubKey" - ], - [ - "1", - "VER", - "", - "BAD_OPCODE", - "OP_VER is reserved" - ], - [ - "1", - "VERIF", - "", - "BAD_OPCODE", - "OP_VERIF is reserved" - ], - [ - "1", - "VERNOTIF", - "", - "BAD_OPCODE", - "OP_VERNOTIF is reserved" - ], - [ - "1", - "RESERVED", - "", - "BAD_OPCODE", - "OP_RESERVED is reserved" - ], - [ - "1", - "RESERVED1", - "", - "BAD_OPCODE", - "OP_RESERVED1 is reserved" - ], - [ - "1", - "RESERVED2", - "", - "BAD_OPCODE", - "OP_RESERVED2 is reserved" - ], - [ - "1", - "0xb2", - "", - "BAD_OPCODE", - "0xb2 == OP_CHECKSEQUENCEVERIFY + 1" - ], - [ - "2147483648", - "1ADD 2147483649 NUMEQUAL", - "", - "OK", - "We can do math on 5-byte integers since kip10" - ], - [ - "2147483648", - "NEGATE -2147483648 NUMEQUAL", - "", - "OK", - "We can do math on 5-byte integers since kip10" - ], - [ - "-2147483648", - "1ADD -2147483647 NUMEQUAL", - "", - "OK", - "We can do math on 5-byte integers since kip10" - ], - [ - "2147483647", - "DUP 1ADD 1SUB NUMEQUAL", - "", - "OK", - "We can do math on 5-byte integers since kip10" - ], - [ - "2147483648", - "1SUB 2147483647 NUMEQUAL", - "", - "OK", - "We can do math on 5-byte integers since kip10" - ], - [ - "2147483648 1", - "BOOLOR 1 EQUAL", - "", - "OK", - "We can do math on 5-byte integers since kip10" - ], - [ - "2147483648 1", - "BOOLAND 1 EQUAL", - "", - "OK", - "We can do math on 5-byte integers since kip10" - ], - [ - "-9223372036854775808", - "1ADD 1", - "", - "UNKNOWN_ERROR", - "We cannot do math on 9-byte integers" - ], - [ - "-9223372036854775808", - "NEGATE 1", - "", - "UNKNOWN_ERROR", - "We cannot do math on 9-byte integers" - ], - [ - "-9223372036854775808", - "1ADD 1", - "", - "UNKNOWN_ERROR", - "Because we use a sign bit, -9223372036854775808 is also 9 bytes" - ], - [ - "-9223372036854775808", - "1ADD 1SUB 1", - "", - "UNKNOWN_ERROR", - "We cannot do math on 9-byte integers, even if the result is 8-bytes" - ], - [ - "-9223372036854775808", - "1SUB 1", - "", - "UNKNOWN_ERROR", - "We cannot do math on 9-byte integers, even if the result is 8-bytes" - ], - 
[ - "-9223372036854775808 1", - "BOOLOR 1", - "", - "UNKNOWN_ERROR", - "We cannot do BOOLOR on 9-byte integers (but we can still do IF etc)" - ], - [ - "-9223372036854775808 1", - "BOOLAND 1", - "", - "UNKNOWN_ERROR", - "We cannot do BOOLAND on 9-byte integers" - ], - [ - "-9223372036854775807", - "1SUB", - "", - "UNKNOWN_ERROR", - "result of math operation can't exceed 8 bytes" - ], - [ - "1", - "1 ENDIF", - "", - "UNBALANCED_CONDITIONAL", - "ENDIF without IF" - ], - [ - "1", - "IF 1", - "", - "UNBALANCED_CONDITIONAL", - "IF without ENDIF" - ], - [ - "", - "IF 1 ENDIF", - "", - "UNBALANCED_CONDITIONAL", - "The following tests check the if(stack.size() < N) tests in each opcode" - ], - [ - "", - "NOTIF 1 ENDIF", - "", - "UNBALANCED_CONDITIONAL", - "They are here to catch copy-and-paste errors" - ], - [ - "", - "VERIFY 1", - "", - "INVALID_STACK_OPERATION", - "Most of them are duplicated elsewhere," - ], - [ - "", - "TOALTSTACK 1", - "", - "INVALID_STACK_OPERATION", - "but, hey, more is always better, right?" - ], - [ - "1", - "FROMALTSTACK", - "", - "INVALID_ALTSTACK_OPERATION" - ], - [ - "1", - "2DROP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "2DUP", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 1", - "3DUP", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 1 1", - "2OVER", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 1 1 1 1", - "2ROT", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 1 1", - "2SWAP", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "IFDUP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "DROP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "DUP 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "NIP", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "OVER", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 1 1 3", - "PICK", - "", - "INVALID_STACK_OPERATION" - ], - [ - "0", - "PICK 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 1 1 3", - "ROLL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "0", - "ROLL 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 1", - "ROT", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "SWAP", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "TUCK", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "SIZE 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "EQUAL 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "EQUALVERIFY 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "1ADD 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "1SUB 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "NEGATE 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "ABS 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "NOT 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "0NOTEQUAL 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "ADD", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "SUB", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "BOOLAND", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "BOOLOR", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "NUMEQUAL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "NUMEQUALVERIFY 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "NUMNOTEQUAL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "LESSTHAN", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "GREATERTHAN", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "LESSTHANOREQUAL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "GREATERTHANOREQUAL", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1", - "MIN", - "", - 
"INVALID_STACK_OPERATION" - ], - [ - "1", - "MAX", - "", - "INVALID_STACK_OPERATION" - ], - [ - "1 1", - "WITHIN", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "SHA256 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "BLAKE2B 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "", - "BLAKE2B 1", - "", - "INVALID_STACK_OPERATION" - ], - [ - "Increase CHECKSIG and CHECKMULTISIG negative test coverage" - ], - [ - "", - "CHECKSIG NOT", - "", - "INVALID_STACK_OPERATION", - "CHECKSIG must error when there are no stack items" - ], - [ - "0", - "CHECKSIG NOT", - "", - "INVALID_STACK_OPERATION", - "CHECKSIG must error when there are not 2 stack items" - ], - [ - "", - "CHECKMULTISIG NOT", - "", - "INVALID_STACK_OPERATION", - "CHECKMULTISIG must error when there are no stack items" - ], - [ - "", - "-1 CHECKMULTISIG NOT", - "", - "PUBKEY_COUNT", - "CHECKMULTISIG must error when the specified number of pubkeys is negative" - ], - [ - "", - "1 CHECKMULTISIG NOT", - "", - "INVALID_STACK_OPERATION", - "CHECKMULTISIG must error when there are not enough pubkeys on the stack" - ], - [ - "", - "-1 0 CHECKMULTISIG NOT", - "", - "SIG_COUNT", - "CHECKMULTISIG must error when the specified number of signatures is negative" - ], - [ - "", - "1 'pk1' 1 CHECKMULTISIG NOT", - "", - "INVALID_STACK_OPERATION", - "CHECKMULTISIG must error when there are not enough signatures on the stack" - ], - [ - "", - "0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 
0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG", - "", - "OP_COUNT", - "202 CHECKMULTISIGS, fails due to 201 op limit" - ], - [ - "", - "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG", - "", - "OP_COUNT", - "Fails due to 201 script operation limit" - ], - [ - "1", - "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 
'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", - "", - "OP_COUNT", - "" - ], - [ - "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21", - "21 CHECKMULTISIG 1", - "", - "PUBKEY_COUNT", - "nPubKeys > 20" - ], - [ - "0 'sig' 1 0", - "CHECKMULTISIG 1", - "", - "SIG_COUNT", - "nSigs > nPubKeys" - ], - [ - "NOP 0x01 1", - "BLAKE2B 0x20 0xda1745e9b549bd0bfa1a569971c77eba30cd5a4b EQUAL", - "", - "SIG_PUSHONLY", - "Tests for Script.IsPushOnly()" - ], - [ - "0 0x01 0x50", - "BLAKE2B 0x20 0xece424a6bb6ddf4db592c0faed60685047a361b1 EQUAL", - "", - "BAD_OPCODE", - "OP_RESERVED in P2SH should fail" - ], - [ - "0 0x01", - "VER BLAKE2B 0x20 0x0f4d7845db968f2a81b530b6f3c1d6246d4c7e01 EQUAL", - "", - "BAD_OPCODE", - "OP_VER in P2SH should fail" - ], - [ - "0x00", - "'00' EQUAL", - "", - "EVAL_FALSE", - "Basic OP_0 execution" - ], - [ - "MINIMALDATA enforcement for PUSHDATAs" - ], - [ - "0x4c 0x00", - "DROP 1", - "", - "MINIMALDATA", - "Empty vector minimally represented by OP_0" - ], - [ - "0x01 0x81", - "DROP 1", - "", - "MINIMALDATA", - "-1 minimally represented by OP_1NEGATE" - ], - [ - "0x01 0x01", - "DROP 1", - "", - "MINIMALDATA", - "1 to 16 minimally represented by OP_1 to OP_16" - ], - [ - "0x01 0x02", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x03", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x04", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x05", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x06", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x07", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x08", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x09", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x0a", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x0b", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x0c", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x0d", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x0e", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x0f", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x01 0x10", - "DROP 1", - "", - "MINIMALDATA" - ], - [ - "0x4c 0x48 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "DROP 1", - "", - "MINIMALDATA", - "PUSHDATA1 of 72 bytes minimally represented by direct push" - ], - [ - "0x4d 0xFF00 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "DROP 1", - "", - "MINIMALDATA", - "PUSHDATA2 of 255 bytes minimally represented by PUSHDATA1" - ], - [ - "0x4e 0x00010000 
0x11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", - "DROP 1", - "", - "MINIMALDATA", - "PUSHDATA4 of 256 bytes minimally represented by PUSHDATA2" - ], - [ - "MINIMALDATA enforcement for numeric arguments" - ], - [ - "0x01 0x00", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "numequals 0" - ], - [ - "0x02 0x0000", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "numequals 0" - ], - [ - "0x01 0x80", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "0x80 (negative zero) numequals 0" - ], - [ - "0x02 0x0080", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "numequals 0" - ], - [ - "0x02 0x0500", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "numequals 5" - ], - [ - "0x03 0x050000", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "numequals 5" - ], - [ - "0x02 0x0580", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "numequals -5" - ], - [ - "0x03 0x050080", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "numequals -5" - ], - [ - "0x03 0xff7f80", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "Minimal encoding is 0xffff" - ], - [ - "0x03 0xff7f00", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "Minimal encoding is 0xff7f" - ], - [ - "0x04 0xffff7f80", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "Minimal encoding is 0xffffff" - ], - [ - "0x04 0xffff7f00", - "NOT DROP 1", - "", - "UNKNOWN_ERROR", - "Minimal encoding is 0xffff7f" - ], - [ - "Test every numeric-accepting opcode for correct handling of the numeric minimal encoding rule" - ], - [ - "1 0x02 0x0000", - "PICK DROP", - "", - "UNKNOWN_ERROR" - ], - [ - "1 0x02 0x0000", - "ROLL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000", - "1ADD DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000", - "1SUB DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000", - "NEGATE DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000", - "ABS DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000", - "NOT DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000", - "0NOTEQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "ADD DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "ADD DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "SUB DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "SUB DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "BOOLAND DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "BOOLAND DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "BOOLOR DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "BOOLOR DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "NUMEQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 1", - "NUMEQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "NUMEQUALVERIFY 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "NUMEQUALVERIFY 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "NUMNOTEQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "NUMNOTEQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "LESSTHAN DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "LESSTHAN DROP 1", - "", - 
"UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "GREATERTHAN DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "GREATERTHAN DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "LESSTHANOREQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "LESSTHANOREQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "GREATERTHANOREQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "GREATERTHANOREQUAL DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "MIN DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "MIN DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "MAX DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "MAX DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0 0", - "WITHIN DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000 0", - "WITHIN DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0 0x02 0x0000", - "WITHIN DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "CHECKMULTISIG DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "CHECKMULTISIG DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0 1", - "CHECKMULTISIG DROP 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0 0x02 0x0000", - "CHECKMULTISIGVERIFY 1", - "", - "UNKNOWN_ERROR" - ], - [ - "0x02 0x0000 0", - "CHECKMULTISIGVERIFY 1", - "", - "UNKNOWN_ERROR" - ], - [ - "Check MINIMALIF" - ], - [ - "2", - "IF TRUE ELSE FALSE", - "", - "MINIMALIF" - ], - [ - "2", - "NOTIF TRUE ELSE FALSE", - "", - "MINIMALIF" - ], - [ - "Order of CHECKMULTISIG evaluation tests, inverted by swapping the order of" - ], - [ - "pubkeys/signatures so they fail due to the STRICTENC rules on validly encoded" - ], - [ - "signatures and pubkeys." - ], - [ - "0x41 0x833682d4f60cc916a22a2c263e658fa662c49badb1e2a8c6208987bf99b1abd740498371480069e7a7a6e7471bf78c27bd9a1fd04fb212a92017346250ac187b01 0x41 0xea4a8d20562a950f4695dc24804565482e9fa111704886179d0c348f2b8a15fe691a305cd599c59c131677146661d5b98cb935330989a85f33afc70d0a21add101", - "2 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 0 2 CHECKMULTISIG NOT", - "", - "PUBKEYFORMAT", - "2-of-2 CHECKMULTISIG NOT with the first pubkey invalid, and both signatures validly encoded." 
- ], - [ - "CHECKSEQUENCEVERIFY tests" - ], - [ - "", - "CHECKSEQUENCEVERIFY", - "", - "INVALID_STACK_OPERATION", - "CSV automatically fails on a empty stack" - ], - [ - "0", - "CHECKSEQUENCEVERIFY", - "", - "UNSATISFIED_LOCKTIME", - "CSV fails if stack top bit 1 << 31 is set and the tx version < 2" - ], - [ - "4294967296", - "CHECKSEQUENCEVERIFY", - "", - "UNSATISFIED_LOCKTIME", - "CSV fails if stack top bit 1 << 31 is not set, and tx version < 2" - ], - [ - "NULLFAIL should cover all signatures and signatures only" - ], - [ - "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0", - "0x01 0x14 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0x01 0x14 CHECKMULTISIG NOT", - "", - "OK", - "BIP66 and NULLFAIL-compliant" - ], - [ - "0x09 0x300602010102010101 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0", - "0x01 0x14 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0x01 0x14 CHECKMULTISIG NOT", - "", - "NULLFAIL", - "BIP66-compliant but not NULLFAIL-compliant 4" - ], - [ - "The End" - ] -] diff --git a/crypto/txscript/test-data/script_tests.json b/crypto/txscript/test-data/script_tests.json index 5bc5e7a160..947c8810de 100644 --- a/crypto/txscript/test-data/script_tests.json +++ b/crypto/txscript/test-data/script_tests.json @@ -3601,27 +3601,41 @@ "2147483648 0", "ADD NOP", "", - "UNKNOWN_ERROR", - "arithmetic operands must be in range [-2^31...2^31] " + "OK", + "numbers up to 8 bytes are supported since kip10" ], [ "-2147483648 0", "ADD NOP", "", + "OK", + "numbers up to 8 bytes are supported since kip10" + ], + [ + "-9223372036854775808 0", + "ADD NOP", + "", "UNKNOWN_ERROR", - "arithmetic operands must be in range [-2^31...2^31] " + "" ], [ "2147483647", "DUP ADD 4294967294 NUMEQUAL", "", - "UNKNOWN_ERROR", - "NUMEQUAL must be in numeric range" + "OK", + "NUMEQUAL is in numeric range since kip10" ], [ "'abcdef'", "NOT 0 EQUAL", "", + "OK", + "numbers up to 8 bytes are supported since kip10" + ], + [ + "'abcdefghi'", + "NOT 0 EQUAL", + "", "UNKNOWN_ERROR", "NOT is an arithmetic operand" ], @@ -3686,115 +3700,133 @@ "IF 0xb2 ELSE 1 ENDIF", "", "BAD_OPCODE", - "opcodes above OP_CHECKSEQUENCEVERIFY invalid if executed" + "OpTxVersion is reserved" ], [ "1", "IF 0xb3 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "OK", + "OpTxInputCount is enabled since kip10" ], [ "1", "IF 0xb4 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "OK", + "OpTxOutputCount is enabled since kip10" ], [ "1", "IF 0xb5 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpTxLockTime is reserved" ], [ "1", "IF 0xb6 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpTxSubnetId is reserved" ], [ "1", "IF 0xb7 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpTxGas is reserved" ], [ "1", "IF 0xb8 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpTxPayload is reserved" ], [ "1", - "IF 0xb9 ELSE 1 ENDIF", + "IF 0xb9 0 NUMEQUAL ELSE 1 ENDIF", "", - "BAD_OPCODE" + "OK", + "OpTxInputIndex is enabled since kip10" ], [ "1", "IF 0xba ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpOutpointTxId is reserved" ], [ "1", "IF 0xbb ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpOutpointOutputIdx is reserved" ], [ "1", "IF 0xbc ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpTxInputScriptSig is reserved" ], [ "1", "IF 0xbd ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpTxInputSeq is reserved" ], [ - "1", - "IF 0xbe ELSE 1 ENDIF", + "0 1", + "IF 0xbe 0 NUMEQUAL ELSE 1 ENDIF", "", - "BAD_OPCODE" + "OK", + "OpTxInputAmount is enabled since kip10" ], [ - "1", + "0 1", "IF 0xbf ELSE 1 ENDIF", "", - "BAD_OPCODE" + "OK", + "OpTxInputSpk is enabled since kip10" ], 
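The rewritten expectations above capture the KIP-10 numeric rules: script numbers are little-endian, sign-and-magnitude values that may now occupy up to 8 bytes (previously 4), so 5-byte operands such as 2147483648 parse, while 9-byte encodings such as -9223372036854775808 still fail, and minimal encoding remains enforced. A minimal sketch of such a decoder, assuming the usual Bitcoin-style rules (the function name and error strings are illustrative, not kaspa-txscript's actual API):

```rust
/// Illustrative decoder for script numbers: little-endian magnitude with the
/// sign carried in the top bit of the most significant byte. KIP-10 raises
/// `max_size` from 4 to 8.
fn deserialize_script_num(v: &[u8], max_size: usize) -> Result<i64, &'static str> {
    if v.len() > max_size {
        // e.g. the 9-byte encoding of -9223372036854775808 in the tests above
        return Err("number exceeds max size");
    }
    if let Some(&last) = v.last() {
        // Minimal-encoding rule: the most significant byte must carry data
        // beyond the sign bit, unless it exists only because the next byte
        // already uses its top bit.
        if last & 0x7f == 0 && (v.len() == 1 || v[v.len() - 2] & 0x80 == 0) {
            return Err("number is not minimally encoded");
        }
    }
    let mut result: i64 = 0;
    for (i, &b) in v.iter().enumerate() {
        if i == v.len() - 1 {
            result |= i64::from(b & 0x7f) << (8 * i); // strip the sign bit
            if b & 0x80 != 0 {
                result = -result;
            }
        } else {
            result |= i64::from(b) << (8 * i);
        }
    }
    Ok(result) // empty input decodes to 0
}

fn main() {
    // 2147483648 = 0x80000000 needs a 5th byte to keep the sign bit free:
    assert_eq!(deserialize_script_num(&[0x00, 0x00, 0x00, 0x80, 0x00], 8), Ok(2_147_483_648));
    // Anything longer than 8 bytes is rejected regardless of its value:
    assert!(deserialize_script_num(&[0u8; 9], 8).is_err());
}
```

With `max_size = 8` the representable range is +/-(2^63 - 1), which is why the `-9223372036854775807 1SUB` case still errors: its result, -2^63, would need a 9-byte sign-and-magnitude encoding.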
[ "1", "IF 0xc0 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpTxInputBlockDaaScore is reserved" ], [ "1", "IF 0xc1 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "OpTxInputIsCoinbase is reserved" ], [ - "1", - "IF 0xc2 ELSE 1 ENDIF", + "0 1", + "IF 0xc2 0 NUMEQUAL ELSE 1 ENDIF", "", - "BAD_OPCODE" + "OK", + "OpTxOutputAmount is enabled since kip10" ], [ - "1", - "IF 0xc3 ELSE 1 ENDIF", + "0 1", + "IF 0xc3 0x02 0x0000 EQUAL ELSE 1 ENDIF", "", - "BAD_OPCODE" + "OK", + "OpTxOutputSpk is enabled since kip10" ], [ "1", "IF 0xc4 ELSE 1 ENDIF", "", - "BAD_OPCODE" + "BAD_OPCODE", + "opcodes above OpTxOutputSpk invalid if executed" ], [ "1", @@ -4280,52 +4312,108 @@ ], [ "2147483648", + "1ADD 2147483649 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648", + "NEGATE -2147483648 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "-2147483648", + "1ADD -2147483647 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483647", + "DUP 1ADD 1SUB NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648", + "1SUB 2147483647 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648 1", + "BOOLOR 1 EQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648 1", + "BOOLAND 1 EQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "-9223372036854775808", "1ADD 1", "", "UNKNOWN_ERROR", - "We cannot do math on 5-byte integers" + "We cannot do math on 9-byte integers" ], [ - "2147483648", + "-9223372036854775808", "NEGATE 1", "", "UNKNOWN_ERROR", - "We cannot do math on 5-byte integers" + "We cannot do math on 9-byte integers" ], [ - "-2147483648", + "-9223372036854775808", "1ADD 1", "", "UNKNOWN_ERROR", - "Because we use a sign bit, -2147483648 is also 5 bytes" + "Because we use a sign bit, -9223372036854775808 is also 9 bytes" ], [ - "2147483647", + "-9223372036854775808", "1ADD 1SUB 1", "", "UNKNOWN_ERROR", - "We cannot do math on 5-byte integers, even if the result is 4-bytes" + "We cannot do math on 9-byte integers, even if the result is 8-bytes" ], [ - "2147483648", + "-9223372036854775808", "1SUB 1", "", "UNKNOWN_ERROR", - "We cannot do math on 5-byte integers, even if the result is 4-bytes" + "We cannot do math on 9-byte integers, even if the result is 8-bytes" ], [ - "2147483648 1", + "-9223372036854775808 1", "BOOLOR 1", "", "UNKNOWN_ERROR", - "We cannot do BOOLOR on 5-byte integers (but we can still do IF etc)" + "We cannot do BOOLOR on 9-byte integers (but we can still do IF etc)" ], [ - "2147483648 1", + "-9223372036854775808 1", "BOOLAND 1", "", "UNKNOWN_ERROR", - "We cannot do BOOLAND on 5-byte integers" + "We cannot do BOOLAND on 9-byte integers" + ], + [ + "-9223372036854775807", + "1SUB", + "", + "UNKNOWN_ERROR", + "result of math operation can't exceed 8 bytes" ], [ "1", diff --git a/database/src/access.rs b/database/src/access.rs index fad8ee1300..0efb5393b3 100644 --- a/database/src/access.rs +++ b/database/src/access.rs @@ -67,6 +67,51 @@ where } } + pub fn has_with_fallback(&self, fallback_prefix: &[u8], key: TKey) -> Result<bool> + where + TKey: Clone + AsRef<[u8]>, + { + if self.cache.contains_key(&key) { + Ok(true) + } else { + let db_key = DbKey::new(&self.prefix, key.clone()); + if self.db.get_pinned(&db_key)?.is_some() { + Ok(true) + } else { + let db_key = DbKey::new(fallback_prefix, key.clone()); +
Ok(self.db.get_pinned(&db_key)?.is_some())
+            }
+        }
+    }
+
+    pub fn read_with_fallback<TFallbackDeser>(&self, fallback_prefix: &[u8], key: TKey) -> Result<TData, StoreError>
+    where
+        TKey: Clone + AsRef<[u8]> + ToString,
+        TData: DeserializeOwned,
+        TFallbackDeser: DeserializeOwned + Into<TData>,
+    {
+        if let Some(data) = self.cache.get(&key) {
+            Ok(data)
+        } else {
+            let db_key = DbKey::new(&self.prefix, key.clone());
+            if let Some(slice) = self.db.get_pinned(&db_key)? {
+                let data: TData = bincode::deserialize(&slice)?;
+                self.cache.insert(key, data.clone());
+                Ok(data)
+            } else {
+                let db_key = DbKey::new(fallback_prefix, key.clone());
+                if let Some(slice) = self.db.get_pinned(&db_key)? {
+                    let data: TFallbackDeser = bincode::deserialize(&slice)?;
+                    let data: TData = data.into();
+                    self.cache.insert(key, data.clone());
+                    Ok(data)
+                } else {
+                    Err(StoreError::KeyNotFound(db_key))
+                }
+            }
+        }
+    }
+
     pub fn iterator(&self) -> impl Iterator<Item = Result<(Box<[u8]>, TData), Box<dyn Error>>> + '_
     where
         TKey: Clone + AsRef<[u8]>,
@@ -245,4 +290,47 @@ mod tests {
         db.write(batch).unwrap();
         assert_eq!(0, access.iterator().count());
     }
+
+    #[test]
+    fn test_read_with_fallback() {
+        let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
+        let primary_prefix = vec![1];
+        let fallback_prefix = vec![2];
+        let access = CachedDbAccess::<Hash, u64>::new(db.clone(), CachePolicy::Count(10), primary_prefix);
+        let fallback_access = CachedDbAccess::<Hash, u64>::new(db.clone(), CachePolicy::Count(10), fallback_prefix.clone());
+
+        let key: Hash = 1.into();
+        let value = 100;
+
+        // Write to fallback
+        fallback_access.write(DirectDbWriter::new(&db), key, value).unwrap();
+
+        // Read with fallback, should succeed
+        let result = access.read_with_fallback::<u64>(&fallback_prefix, key).unwrap();
+        assert_eq!(result, value);
+
+        // Key should now be in the primary cache
+        assert_eq!(access.read_from_cache(key).unwrap(), value);
+    }
+
+    #[test]
+    fn test_has_with_fallback() {
+        let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
+        let primary_prefix = vec![1];
+        let fallback_prefix = vec![2];
+        let access = CachedDbAccess::<Hash, u64>::new(db.clone(), CachePolicy::Count(10), primary_prefix);
+        let fallback_access = CachedDbAccess::<Hash, u64>::new(db.clone(), CachePolicy::Count(10), fallback_prefix.clone());
+
+        let key_in_fallback: Hash = 1.into();
+        let key_not_found: Hash = 2.into();
+
+        // Write to fallback
+        fallback_access.write(DirectDbWriter::new(&db), key_in_fallback, 100).unwrap();
+
+        // Check for key in fallback, should exist
+        assert!(access.has_with_fallback(&fallback_prefix, key_in_fallback).unwrap());
+
+        // Check for key that doesn't exist, should not be found
+        assert!(!access.has_with_fallback(&fallback_prefix, key_not_found).unwrap());
+    }
 }
diff --git a/database/src/registry.rs b/database/src/registry.rs
index 49c85f44d8..3583d55b2e 100644
--- a/database/src/registry.rs
+++ b/database/src/registry.rs
@@ -15,6 +15,7 @@ pub enum DatabaseStorePrefixes {
     Ghostdag = 5,
     GhostdagCompact = 6,
     HeadersSelectedTip = 7,
+    // Legacy headers store prefix.
CompressedHeaders is used instead Headers = 8, HeadersCompact = 9, PastPruningPoints = 10, @@ -42,12 +43,18 @@ pub enum DatabaseStorePrefixes { ReachabilityTreeChildren = 30, ReachabilityFutureCoveringSet = 31, + // Stores headers with run-length encoded parents + CompressedHeaders = 32, + // ---- Ghostdag Proof TempGhostdag = 40, TempGhostdagCompact = 41, // ---- Retention Period Root ---- RetentionPeriodRoot = 50, + // ---- pruning metadata ---- + PruningUtxosetSyncFlag = 60, + BodyMissingAnticone = 61, // ---- Metadata ---- MultiConsensusMetadata = 124, diff --git a/docker/Dockerfile.kaspa-wallet b/docker/Dockerfile.kaspa-wallet new file mode 100644 index 0000000000..09a01f8b46 --- /dev/null +++ b/docker/Dockerfile.kaspa-wallet @@ -0,0 +1,51 @@ +# ---------------------------------------- Chef image ------------------------------------------- +FROM rust:1.90-alpine AS chef +RUN apk --no-cache add \ + musl-dev \ + protobuf-dev \ + g++ \ + clang15-dev \ + linux-headers \ + wasm-pack \ + openssl-dev +RUN cargo install cargo-chef --locked +WORKDIR /app + +# ---------------------------------------- Planner image ---------------------------------------- +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +# ---------------------------------------- Builder image ---------------------------------------- +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json + +ENV RUSTFLAGS="-C target-feature=-crt-static" \ + CARGO_REGISTRIES_CRATES_IO_PROTOCOL="sparse" + +# Build dependencies - this is the caching Docker layer +RUN cargo chef cook --release --recipe-path recipe.json -p kaspa-wallet --bin kaspa-wallet + +COPY . . +RUN --mount=type=cache,id=cargo-registry,target=/usr/local/cargo/registry,sharing=locked \ + --mount=type=cache,id=cargo-git,target=/usr/local/cargo/git,sharing=locked \ + --mount=type=cache,id=cargo-target,target=/app/target,sharing=locked \ + cargo build --release --bin kaspa-wallet \ + && cp /app/target/release/kaspa-wallet /app/kaspa-wallet # <-- outside the mount, persists + + +# ---------------------------------------- Runtime image ---------------------------------------- +FROM alpine AS runtime +WORKDIR /app + +RUN apk --no-cache add \ + libgcc \ + libstdc++ \ + tini \ + ca-certificates + +COPY --from=builder /app/kaspa-wallet . + +USER root +ENTRYPOINT [ "/sbin/tini", "--" ] +CMD [ "/app/kaspa-wallet" ] diff --git a/docker/Dockerfile.kaspad b/docker/Dockerfile.kaspad new file mode 100644 index 0000000000..6f7c0e5e75 --- /dev/null +++ b/docker/Dockerfile.kaspad @@ -0,0 +1,51 @@ +# ---------------------------------------- Chef image ------------------------------------------- +FROM rust:1.90-alpine AS chef +RUN apk --no-cache add \ + musl-dev \ + protobuf-dev \ + g++ \ + clang15-dev \ + linux-headers \ + wasm-pack \ + openssl-dev +RUN cargo install cargo-chef --locked +WORKDIR /app + +# ---------------------------------------- Planner image ---------------------------------------- +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +# ---------------------------------------- Builder image ---------------------------------------- +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json + +ENV RUSTFLAGS="-C target-feature=-crt-static" \ + CARGO_REGISTRIES_CRATES_IO_PROTOCOL="sparse" + +# Build dependencies - this is the caching Docker layer +RUN cargo chef cook --release --recipe-path recipe.json -p kaspad --bin kaspad + +COPY . . 
+RUN --mount=type=cache,id=cargo-registry,target=/usr/local/cargo/registry,sharing=locked \ + --mount=type=cache,id=cargo-git,target=/usr/local/cargo/git,sharing=locked \ + --mount=type=cache,id=cargo-target,target=/app/target,sharing=locked \ + cargo build --release --bin kaspad \ + && cp /app/target/release/kaspad /app/kaspad # <-- outside the mount, persists + + +# ---------------------------------------- Runtime image ---------------------------------------- +FROM alpine AS runtime +WORKDIR /app + +RUN apk --no-cache add \ + libgcc \ + libstdc++ \ + tini \ + ca-certificates + +COPY --from=builder /app/kaspad . + +USER root +ENTRYPOINT [ "/sbin/tini", "--" ] +CMD [ "/app/kaspad" ] diff --git a/docker/Dockerfile.rothschild b/docker/Dockerfile.rothschild new file mode 100644 index 0000000000..e31f7492ac --- /dev/null +++ b/docker/Dockerfile.rothschild @@ -0,0 +1,51 @@ +# ---------------------------------------- Chef image ------------------------------------------- +FROM rust:1.90-alpine AS chef +RUN apk --no-cache add \ + musl-dev \ + protobuf-dev \ + g++ \ + clang15-dev \ + linux-headers \ + wasm-pack \ + openssl-dev +RUN cargo install cargo-chef --locked +WORKDIR /app + +# ---------------------------------------- Planner image ---------------------------------------- +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +# ---------------------------------------- Builder image ---------------------------------------- +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json + +ENV RUSTFLAGS="-C target-feature=-crt-static" \ + CARGO_REGISTRIES_CRATES_IO_PROTOCOL="sparse" + +# Build dependencies - this is the caching Docker layer +RUN cargo chef cook --release --recipe-path recipe.json -p rothschild --bin rothschild + +COPY . . +RUN --mount=type=cache,id=cargo-registry,target=/usr/local/cargo/registry,sharing=locked \ + --mount=type=cache,id=cargo-git,target=/usr/local/cargo/git,sharing=locked \ + --mount=type=cache,id=cargo-target,target=/app/target,sharing=locked \ + cargo build --release --bin rothschild \ + && cp /app/target/release/rothschild /app/rothschild # <-- outside the mount, persists + + +# ---------------------------------------- Runtime image ---------------------------------------- +FROM alpine AS runtime +WORKDIR /app + +RUN apk --no-cache add \ + libgcc \ + libstdc++ \ + tini \ + ca-certificates + +COPY --from=builder /app/rothschild . + +USER root +ENTRYPOINT [ "/sbin/tini", "--" ] +CMD [ "/app/rothschild" ] diff --git a/docker/Dockerfile.simpa b/docker/Dockerfile.simpa new file mode 100644 index 0000000000..61e8b63510 --- /dev/null +++ b/docker/Dockerfile.simpa @@ -0,0 +1,51 @@ +# ---------------------------------------- Chef image ------------------------------------------- +FROM rust:1.90-alpine AS chef +RUN apk --no-cache add \ + musl-dev \ + protobuf-dev \ + g++ \ + clang15-dev \ + linux-headers \ + wasm-pack \ + openssl-dev +RUN cargo install cargo-chef --locked +WORKDIR /app + +# ---------------------------------------- Planner image ---------------------------------------- +FROM chef AS planner +COPY . . 
+RUN cargo chef prepare --recipe-path recipe.json + +# ---------------------------------------- Builder image ---------------------------------------- +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json + +ENV RUSTFLAGS="-C target-feature=-crt-static" \ + CARGO_REGISTRIES_CRATES_IO_PROTOCOL="sparse" + +# Build dependencies - this is the caching Docker layer +RUN cargo chef cook --release --recipe-path recipe.json -p simpa --bin simpa + +COPY . . +RUN --mount=type=cache,id=cargo-registry,target=/usr/local/cargo/registry,sharing=locked \ + --mount=type=cache,id=cargo-git,target=/usr/local/cargo/git,sharing=locked \ + --mount=type=cache,id=cargo-target,target=/app/target,sharing=locked \ + cargo build --release --bin simpa \ + && cp /app/target/release/simpa /app/simpa # <-- outside the mount, persists + + +# ---------------------------------------- Runtime image ---------------------------------------- +FROM alpine AS runtime +WORKDIR /app + +RUN apk --no-cache add \ + libgcc \ + libstdc++ \ + tini \ + ca-certificates + +COPY --from=builder /app/simpa . + +USER root +ENTRYPOINT [ "/sbin/tini", "--" ] +CMD [ "/app/simpa" ] diff --git a/docs/override-params.md b/docs/override-params.md new file mode 100644 index 0000000000..1cd0d7ba76 --- /dev/null +++ b/docs/override-params.md @@ -0,0 +1,160 @@ +# Using `--override-params-file` + +The `--override-params-file` flag lets you run `kaspad` with a custom set of +consensus parameters loaded from a JSON file. This is primarily useful for +local development, testing alternative fork schedules, or simulating extreme +network conditions. **Overriding consensus parameters on mainnet is blocked +and will make the node exit at startup.** + +## Quick start + +1. Pick a non-mainnet network flag (for example `--testnet`, `--devnet`, or + `--simnet`). +2. Create a JSON file containing the parameters you want to override. Any + field you omit keeps its default value for the chosen network. +3. Launch `kaspad` with the flag: + + ```bash + kaspad --devnet --override-params-file /path/to/overrides.json + ``` + +If the file cannot be read or parsed, `kaspad` prints the error and exits. 
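+
+Since a malformed file aborts startup, it can be worth validating the JSON before
+launching the node. For example, with Python's built-in `json.tool` module
+(assuming `python3` is available on your machine):
+
+```bash
+python3 -m json.tool /path/to/overrides.json > /dev/null && echo "valid JSON"
+```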
+
+### Example override file
+
+```json
+{
+  "prior_ghostdag_k": 124,
+  "timestamp_deviation_tolerance": 600,
+  "prior_target_time_per_block": 1000,
+  "prior_difficulty_window_size": 2641,
+  "min_difficulty_window_size": 150,
+  "prior_max_block_parents": 81,
+  "prior_mergeset_size_limit": 1240,
+  "prior_merge_depth": 3600,
+  "prior_finality_depth": 86400,
+  "prior_pruning_depth": 185798,
+  "coinbase_payload_script_public_key_max_len": 150,
+  "max_coinbase_payload_len": 204,
+  "prior_max_tx_inputs": 1000000000,
+  "prior_max_tx_outputs": 1000000000,
+  "prior_max_signature_script_len": 1000000000,
+  "prior_max_script_public_key_len": 1000000000,
+  "mass_per_tx_byte": 1,
+  "mass_per_script_pub_key_byte": 10,
+  "mass_per_sig_op": 1000,
+  "max_block_mass": 500000,
+  "storage_mass_parameter": 10000,
+  "deflationary_phase_daa_score": 15519600,
+  "pre_deflationary_phase_base_subsidy": 50000000000,
+  "prior_coinbase_maturity": 100,
+  "skip_proof_of_work": true,
+  "max_block_level": 254,
+  "pruning_proof_m": 1000,
+  "crescendo": {
+    "past_median_time_sampled_window_size": 27,
+    "sampled_difficulty_window_size": 661,
+    "target_time_per_block": 100,
+    "ghostdag_k": 124,
+    "past_median_time_sample_rate": 10,
+    "difficulty_sample_rate": 2,
+    "max_block_parents": 16,
+    "mergeset_size_limit": 248,
+    "merge_depth": 36000,
+    "finality_depth": 432000,
+    "pruning_depth": 1080000,
+    "max_tx_inputs": 1000,
+    "max_tx_outputs": 1000,
+    "max_signature_script_len": 10000,
+    "max_script_public_key_len": 10000,
+    "coinbase_maturity": 200
+  },
+  "crescendo_activation": 0
+}
+```
+
+All high-level (non-nested) fields are optional; any field you omit keeps its default value for the selected network. The sub-fields of `crescendo`, however, are not backfilled from the network defaults: any sub-field left unspecified is set to 0 (a temporary behavior that will change once the Crescendo activation logic is cleaned up).
+
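+For a quick illustration of the "omitted fields keep their defaults" behavior, a
+hypothetical minimal file could override just two fields and leave everything
+else (including the whole `crescendo` object) at the network defaults:
+
+```json
+{
+  "skip_proof_of_work": true,
+  "prior_coinbase_maturity": 10
+}
+```
+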
+## Available parameters
+| Field | Description |
+|---------------------------------------------|------------------------------------------------|
+| prior_ghostdag_k | Pre-crescendo GHOSTDAG K parameter |
+| timestamp_deviation_tolerance | Timestamp deviation tolerance |
+| prior_target_time_per_block | Pre-crescendo target time per block |
+| prior_difficulty_window_size | Pre-crescendo difficulty window size |
+| min_difficulty_window_size | Minimum difficulty window size |
+| prior_max_block_parents | Pre-crescendo max block parents |
+| prior_mergeset_size_limit | Pre-crescendo mergeset size limit |
+| prior_merge_depth | Pre-crescendo merge depth |
+| prior_finality_depth | Pre-crescendo finality depth |
+| prior_pruning_depth | Pre-crescendo pruning depth |
+| coinbase_payload_script_public_key_max_len | Coinbase payload script public key max length |
+| max_coinbase_payload_len | Maximum coinbase payload length |
+| prior_max_tx_inputs | Pre-crescendo max transaction inputs |
+| prior_max_tx_outputs | Pre-crescendo max transaction outputs |
+| prior_max_signature_script_len | Pre-crescendo max signature script length |
+| prior_max_script_public_key_len | Pre-crescendo max script public key length |
+| mass_per_tx_byte | Mass per transaction byte |
+| mass_per_script_pub_key_byte | Mass per script public key byte |
+| mass_per_sig_op | Mass per signature operation |
+| max_block_mass | Maximum block mass |
+| storage_mass_parameter | Storage mass parameter |
+| deflationary_phase_daa_score | Deflationary phase DAA score |
+| pre_deflationary_phase_base_subsidy | Pre-deflationary phase base subsidy |
+| prior_coinbase_maturity | Pre-crescendo coinbase maturity |
+| skip_proof_of_work | Whether to skip proof of work checks |
+| max_block_level | Maximum block level |
+| pruning_proof_m | Pruning proof M parameter |
+| crescendo | Post-crescendo parameters |
+| crescendo_activation | DAA score at which Crescendo activates |
+
+**crescendo sub-fields:**
+
+| Field | Description |
+|-------------------------------------|------------------------------------------------|
+| past_median_time_sampled_window_size| Post-crescendo median time window size |
+| sampled_difficulty_window_size | Post-crescendo difficulty window size |
+| target_time_per_block | Post-crescendo target time per block |
+| ghostdag_k | Post-crescendo GHOSTDAG K |
+| past_median_time_sample_rate | Post-crescendo median time sample rate |
+| difficulty_sample_rate | Post-crescendo difficulty sample rate |
+| max_block_parents | Post-crescendo maximum block parents |
+| mergeset_size_limit | Post-crescendo mergeset size limit |
+| merge_depth | Post-crescendo merge depth |
+| finality_depth | Post-crescendo finality depth |
+| pruning_depth | Post-crescendo pruning depth |
+| max_tx_inputs | Post-crescendo maximum transaction inputs |
+| max_tx_outputs | Post-crescendo maximum transaction outputs |
+| max_signature_script_len | Post-crescendo maximum signature script length |
+| max_script_public_key_len | Post-crescendo maximum script public key length |
+| coinbase_maturity | Post-crescendo coinbase maturity |
+
+Refer to the source definition in
+`consensus/core/src/config/params.rs` for the full list of available fields and
+their meaning.
+
+## Use simpa params
+
+If you want to run `kaspad` on a database generated by simpa, you'll need simpa to also emit an override params file, so that `kaspad` runs with the same parameters.
+
+This can be done by passing the `--override-params-output` param, for example:
+```bash
+cargo run --release --bin simpa -- --override-params-output overrides.json -o=/path/to/simpa/database
+```
+
+And then launch kaspad with:
+```bash
+kaspad --simnet
+```
+
+and immediately close it. This will create the simnet datadir at `~/.rusty-kaspa/kaspa-simnet`.
+
+You can then replace it with the simpa database by running:
+```bash
+rm -rf ~/.rusty-kaspa/kaspa-simnet/datadir/consensus/consensus-001/
+mv /path/to/simpa/database ~/.rusty-kaspa/kaspa-simnet/datadir/consensus/consensus-001/
+```
+
+And finally launch kaspad with:
+```bash
+kaspad --simnet --override-params-file overrides.json
+```
\ No newline at end of file
diff --git a/indexes/utxoindex/src/index.rs b/indexes/utxoindex/src/index.rs
index f3aa48bac1..83777036e0 100644
--- a/indexes/utxoindex/src/index.rs
+++ b/indexes/utxoindex/src/index.rs
@@ -19,11 +19,11 @@ use std::{
     sync::{Arc, Weak},
 };
 
-const RESYNC_CHUNK_SIZE: usize = 2048; //Increased from 1k (used in go-kaspad), for quicker resets, while still having a low memory footprint.
+const RESYNC_CHUNK_SIZE: usize = 2048; // Increased from 1k (used in go-kaspad), for quicker resets, while still having a low memory footprint.
 
 /// UtxoIndex indexes `CompactUtxoEntryCollections` by [`ScriptPublicKey`](kaspa_consensus_core::tx::ScriptPublicKey),
 /// commits them to its own store, and emits changes.
-/// Note: The UtxoIndex struct by itself is not thread save, only correct usage of the supplied RwLock via `new` makes it so.
+/// Note: The UtxoIndex struct by itself is not thread safe, only correct usage of the supplied RwLock via `new` makes it so.
 /// please follow guidelines found in the comments under `utxoindex::core::api::UtxoIndexApi` for proper thread safety.
pub struct UtxoIndex { consensus_manager: Arc, diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 6d2c58b5b3..91373baf50 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -44,7 +44,7 @@ kaspa-wrpc-server.workspace = true async-channel.workspace = true cfg-if.workspace = true -clap.workspace = true +clap = { workspace = true, features = ["env"]} dhat = { workspace = true, optional = true } dirs.workspace = true futures-util.workspace = true @@ -59,6 +59,7 @@ tempfile.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } workflow-log.workspace = true +serde_json.workspace = true toml = "0.8.10" serde_with = "3.7.0" diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index 9a58bc6dbf..53fac0abe3 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -91,6 +91,8 @@ pub struct Args { pub disable_grpc: bool, pub ram_scale: f64, pub retention_period_days: Option, + + pub override_params_file: Option, } impl Default for Args { @@ -142,6 +144,7 @@ impl Default for Args { disable_grpc: false, ram_scale: 1.0, retention_period_days: None, + override_params_file: None, } } } @@ -201,14 +204,15 @@ pub fn cli() -> Command { let cmd = Command::new("kaspad") .about(format!("{} (rusty-kaspa) v{}", env!("CARGO_PKG_DESCRIPTION"), version())) .version(env!("CARGO_PKG_VERSION")) - .arg(arg!(-C --configfile "Path of config file.")) - .arg(arg!(-b --appdir "Directory to store data.")) - .arg(arg!(--logdir "Directory to log output.")) - .arg(arg!(--nologfiles "Disable logging to files.")) + .arg(arg!(-C --configfile "Path of config file.").env("KASPAD_CONFIGFILE")) + .arg(arg!(-b --appdir "Directory to store data.").env("KASPAD_APPDIR")) + .arg(arg!(--logdir "Directory to log output.").env("KASPAD_LOGDIR")) + .arg(arg!(--nologfiles "Disable logging to files.").env("KASPAD_NOLOGFILES")) .arg( Arg::new("async_threads") .short('t') .long("async-threads") + .env("KASPAD_ASYNC_THREADS") .value_name("async_threads") .require_equals(true) .value_parser(clap::value_parser!(usize)) @@ -218,6 +222,7 @@ pub fn cli() -> Command { Arg::new("log_level") .short('d') .long("loglevel") + .env("KASPAD_LOG_LEVEL") .value_name("LEVEL") .default_value("info") .require_equals(true) @@ -226,6 +231,7 @@ pub fn cli() -> Command { .arg( Arg::new("rpclisten") .long("rpclisten") + .env("KASPAD_RPCLISTEN") .value_name("IP[:PORT]") .num_args(0..=1) .require_equals(true) @@ -235,6 +241,7 @@ pub fn cli() -> Command { .arg( Arg::new("rpclisten-borsh") .long("rpclisten-borsh") + .env("KASPAD_RPCLISTEN_BORSH") .value_name("IP[:PORT]") .num_args(0..=1) .require_equals(true) @@ -246,6 +253,7 @@ pub fn cli() -> Command { .arg( Arg::new("rpclisten-json") .long("rpclisten-json") + .env("KASPAD_RPCLISTEN_JSON") .value_name("IP[:PORT]") .num_args(0..=1) .require_equals(true) @@ -253,10 +261,11 @@ pub fn cli() -> Command { .value_parser(clap::value_parser!(WrpcNetAddress)) .help("Interface:port to listen for wRPC JSON connections (default port: 18110, testnet: 18210)."), ) - .arg(arg!(--unsaferpc "Enable RPC commands which affect the state of the node")) + .arg(arg!(--unsaferpc "Enable RPC commands which affect the state of the node").env("KASPAD_UNSAFERPC")) .arg( Arg::new("connect-peers") .long("connect") + .env("KASPAD_CONNECTPEERS") .value_name("IP[:PORT]") .action(ArgAction::Append) .require_equals(true) @@ -266,6 +275,7 @@ pub fn cli() -> Command { .arg( Arg::new("add-peers") .long("addpeer") + .env("KASPAD_ADDPEERS") .value_name("IP[:PORT]") 
.action(ArgAction::Append) .require_equals(true) @@ -275,6 +285,7 @@ pub fn cli() -> Command { .arg( Arg::new("listen") .long("listen") + .env("KASPAD_LISTEN") .value_name("IP[:PORT]") .require_equals(true) .value_parser(clap::value_parser!(ContextualNetAddress)) @@ -283,6 +294,7 @@ pub fn cli() -> Command { .arg( Arg::new("outpeers") .long("outpeers") + .env("KASPAD_OUTPEERS") .value_name("outpeers") .require_equals(true) .value_parser(clap::value_parser!(usize)) @@ -290,7 +302,8 @@ pub fn cli() -> Command { ) .arg( Arg::new("maxinpeers") - .long("maxinpeers") + .long("maxinpeers") + .env("KASPAD_MAXINPEERS") .value_name("maxinpeers") .require_equals(true) .value_parser(clap::value_parser!(usize)) @@ -299,47 +312,52 @@ pub fn cli() -> Command { .arg( Arg::new("rpcmaxclients") .long("rpcmaxclients") + .env("KASPAD_RPCMAXCLIENTS") .value_name("rpcmaxclients") .require_equals(true) .value_parser(clap::value_parser!(usize)) .help("Max number of RPC clients for standard connections (default: 128)."), ) - .arg(arg!(--"reset-db" "Reset database before starting node. It's needed when switching between subnetworks.")) - .arg(arg!(--"enable-unsynced-mining" "Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)")) + .arg(arg!(--"reset-db" "Reset database before starting node. It's needed when switching between subnetworks.").env("KASPAD_RESET_DB")) + .arg(arg!(--"enable-unsynced-mining" "Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)").env("KASPAD_ENABLE_UNSYNCED_MINING")) .arg( Arg::new("enable-mainnet-mining") .long("enable-mainnet-mining") + .env("KASPAD_ENABLE_MAINNET_MINING") .action(ArgAction::SetTrue) .hide(true) .help("Allow mainnet mining (currently enabled by default while the flag is kept for backwards compatibility)"), ) - .arg(arg!(--utxoindex "Enable the UTXO index")) + .arg(arg!(--utxoindex "Enable the UTXO index").env("KASPAD_UTXOINDEX")) .arg( Arg::new("max-tracked-addresses") .long("max-tracked-addresses") + .env("KASPAD_MAX_TRACKED_ADDRESSES") .require_equals(true) .value_parser(clap::value_parser!(usize)) .help(format!("Max (preallocated) number of addresses being tracked for UTXO changed events (default: {}, maximum: {}). 
Setting to 0 prevents the preallocation and sets the maximum to {}, leading to 0 memory footprint as long as unused but to sub-optimal footprint if used.", 0, Tracker::MAX_ADDRESS_UPPER_BOUND, Tracker::DEFAULT_MAX_ADDRESSES)), ) - .arg(arg!(--testnet "Use the test network")) + .arg(arg!(--testnet "Use the test network").env("KASPAD_TESTNET")) .arg( Arg::new("netsuffix") .long("netsuffix") + .env("KASPAD_NETSUFFIX") .value_name("netsuffix") .require_equals(true) .value_parser(clap::value_parser!(u32)) .help("Testnet network suffix number"), ) - .arg(arg!(--devnet "Use the development test network")) - .arg(arg!(--simnet "Use the simulation test network")) - .arg(arg!(--archival "Run as an archival node: avoids deleting old block data when moving the pruning point (Warning: heavy disk usage)")) - .arg(arg!(--sanity "Enable various sanity checks which might be compute-intensive (mostly performed during pruning)")) - .arg(arg!(--yes "Answer yes to all interactive console questions")) + .arg(arg!(--devnet "Use the development test network").env("KASPAD_DEVNET")) + .arg(arg!(--simnet "Use the simulation test network").env("KASPAD_SIMNET")) + .arg(arg!(--archival "Run as an archival node: avoids deleting old block data when moving the pruning point (Warning: heavy disk usage)").env("KASPAD_ARCHIVAL")) + .arg(arg!(--sanity "Enable various sanity checks which might be compute-intensive (mostly performed during pruning)").env("KASPAD_SANITY")) + .arg(arg!(--yes "Answer yes to all interactive console questions").env("KASPAD_NONINTERACTIVE")) .arg( Arg::new("user_agent_comments") .long("uacomment") + .env("KASPAD_USER_AGENT_COMMENTS") .action(ArgAction::Append) .require_equals(true) .help("Comment to add to the user agent -- See BIP 14 for more information."), @@ -347,26 +365,29 @@ Setting to 0 prevents the preallocation and sets the maximum to {}, leading to 0 .arg( Arg::new("externalip") .long("externalip") + .env("KASPAD_EXTERNALIP") .value_name("externalip") .require_equals(true) .default_missing_value(None) .value_parser(clap::value_parser!(ContextualNetAddress)) .help("Add a socket address(ip:port) to the list of local addresses we claim to listen on to peers"), ) - .arg(arg!(--"perf-metrics" "Enable performance metrics: cpu, memory, disk io usage")) + .arg(arg!(--"perf-metrics" "Enable performance metrics: cpu, memory, disk io usage").env("KASPAD_PERF_METRICS")) .arg( Arg::new("perf-metrics-interval-sec") .long("perf-metrics-interval-sec") + .env("KASPAD_PERF_METRICS_INTERVAL_SEC") .require_equals(true) .value_parser(clap::value_parser!(u64)) .help("Interval in seconds for performance metrics collection."), ) - .arg(arg!(--"disable-upnp" "Disable upnp")) - .arg(arg!(--"nodnsseed" "Disable DNS seeding for peers")) - .arg(arg!(--"nogrpc" "Disable gRPC server")) + .arg(arg!(--"disable-upnp" "Disable upnp").env("KASPAD_DISABLE_UPNP")) + .arg(arg!(--"nodnsseed" "Disable DNS seeding for peers").env("KASPAD_NODNSSEED")) + .arg(arg!(--"nogrpc" "Disable gRPC server").env("KASPAD_NOGRPC")) .arg( Arg::new("ram-scale") .long("ram-scale") + .env("KASPAD_RAM_SCALE") .require_equals(true) .value_parser(clap::value_parser!(f64)) .help("Apply a scale factor to memory allocation bounds. Nodes with limited RAM (~4-8GB) should set this to ~0.3-0.5 respectively. 
Nodes with @@ -379,6 +400,13 @@ a large RAM (~64GB) can set this value to ~3.0-4.0 and gain superior performance .value_parser(clap::value_parser!(f64)) .help("The number of total days of data to keep.") ) + .arg( + Arg::new("override-params-file") + .long("override-params-file") + .require_equals(true) + .value_parser(clap::value_parser!(String)) + .help("Path to a JSON file containing override parameters.") + ) ; #[cfg(feature = "devnet-prealloc")] @@ -466,6 +494,7 @@ impl Args { prealloc_address: m.get_one::("prealloc-address").cloned(), #[cfg(feature = "devnet-prealloc")] prealloc_amount: arg_match_unwrap_or::(&m, "prealloc-amount", defaults.prealloc_amount), + override_params_file: m.get_one::("override-params-file").cloned(), }; if arg_match_unwrap_or::(&m, "enable-mainnet-mining", false) { diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 17043b7d41..49f9ac87cb 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -8,12 +8,9 @@ use kaspa_consensus_core::{ mining_rules::MiningRules, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; -use kaspa_core::{core::Core, debug, info, trace}; +use kaspa_core::{core::Core, debug, info}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; -use kaspa_database::{ - prelude::{CachePolicy, DbWriter, DirectDbWriter}, - registry::DatabaseStorePrefixes, -}; +use kaspa_database::prelude::CachePolicy; use kaspa_grpc_server::service::GrpcService; use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; use kaspa_p2p_lib::Hub; @@ -26,7 +23,11 @@ use kaspa_utils::sysinfo::SystemInfo; use kaspa_utils_tower::counters::TowerConnectionCounters; use kaspa_addressmanager::AddressManager; -use kaspa_consensus::{consensus::factory::Factory as ConsensusFactory, pipeline::ProcessingCounters}; +use kaspa_consensus::{ + consensus::factory::Factory as ConsensusFactory, + params::{OverrideParams, Params}, + pipeline::ProcessingCounters, +}; use kaspa_consensus::{ consensus::factory::MultiConsensusManagementStore, model::stores::headers::DbHeadersStore, pipeline::monitor::ConsensusMonitor, }; @@ -40,7 +41,6 @@ use kaspa_mining::{ }; use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; -use itertools::Itertools; use kaspa_perf_monitor::{builder::Builder as PerfMonitorBuilder, counters::CountersSnapshot}; use kaspa_utxoindex::{api::UtxoIndexProxy, UtxoIndex}; use kaspa_wrpc_server::service::{Options as WrpcServerOptions, WebSocketCounters as WrpcServerCounters, WrpcEncoding, WrpcService}; @@ -235,11 +235,31 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: exit(1); } + let params = { + let params: Params = network.into(); + match &args.override_params_file { + Some(path) => { + if network.is_mainnet() { + println!("Overriding params on mainnet is not allowed."); + exit(1); + } + + let file_content = fs::read_to_string(path).unwrap_or_else(|err| { + println!("Failed to read override params file '{}': {}", path, err); + exit(1); + }); + let override_params: OverrideParams = serde_json::from_str(&file_content).unwrap_or_else(|err| { + println!("Failed to parse override params file '{}': {}", path, err); + exit(1); + }); + params.override_params(override_params) + } + None => params, + } + }; + let config = Arc::new( - ConfigBuilder::new(network.into()) - .adjust_perf_params_to_consensus_params() - .apply_args(|config| args.apply_to_config(config)) - .build(), + 
ConfigBuilder::new(params).adjust_perf_params_to_consensus_params().apply_args(|config| args.apply_to_config(config)).build(), ); let app_dir = get_app_dir_from_args(args); @@ -356,91 +376,12 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm // TODO: Update this entire section to a more robust implementation that allows applying multiple upgrade strategies. // If I'm at version 3 and latest version is 7, I need to be able to upgrade to that version following the intermediate // steps without having to delete the DB - if version == 3 { - let active_consensus_dir_name = mcms.active_consensus_dir_name().unwrap(); - - match active_consensus_dir_name { - Some(current_consensus_db) => { - // Apply soft upgrade logic: delete GD data from higher levels - // and then update DB version to 4 - let consensus_db = kaspa_database::prelude::ConnBuilder::default() - .with_db_path(consensus_db_dir.clone().join(current_consensus_db)) - .with_files_limit(1) - .build() - .unwrap(); - info!("Scanning for deprecated records to cleanup"); - - let mut gd_record_count: u32 = 0; - let mut compact_record_count: u32 = 0; - - let start_level: u8 = 1; - let start_level_bytes = start_level.to_le_bytes(); - let ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); - let ghostdag_prefix = ghostdag_prefix_vec.as_slice(); - - // This section is used to count the records to be deleted. It's not used for the actual delete. - for result in consensus_db.iterator(rocksdb::IteratorMode::From(ghostdag_prefix, rocksdb::Direction::Forward)) { - let (key, _) = result.unwrap(); - if !key.starts_with(&[DatabaseStorePrefixes::Ghostdag.into()]) { - break; - } - - gd_record_count += 1; - } - - let compact_prefix_vec = DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); - let compact_prefix = compact_prefix_vec.as_slice(); - - for result in consensus_db.iterator(rocksdb::IteratorMode::From(compact_prefix, rocksdb::Direction::Forward)) { - let (key, _) = result.unwrap(); - if !key.starts_with(&[DatabaseStorePrefixes::GhostdagCompact.into()]) { - break; - } - - compact_record_count += 1; - } - - trace!("Number of Ghostdag records to cleanup: {}", gd_record_count); - trace!("Number of GhostdagCompact records to cleanup: {}", compact_record_count); - info!("Number of deprecated records to cleanup: {}", gd_record_count + compact_record_count); - - let msg = - "Node database currently at version 3. Upgrade process to version 4 needs to be applied. Continue? 
(y/n)"; - get_user_approval_or_exit(msg, args.yes); - - // Actual delete only happens after user consents to the upgrade: - let mut writer = DirectDbWriter::new(&consensus_db); - - let end_level: u8 = config.max_block_level + 1; - let end_level_bytes = end_level.to_le_bytes(); - - let start_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); - let end_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(end_level_bytes).collect_vec(); - - let start_compact_prefix_vec = - DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); - let end_compact_prefix_vec = - DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(end_level_bytes).collect_vec(); - - // Apply delete of range from level 1 to max (+1) for Ghostdag and GhostdagCompact: - writer.delete_range(start_ghostdag_prefix_vec.clone(), end_ghostdag_prefix_vec.clone()).unwrap(); - writer.delete_range(start_compact_prefix_vec.clone(), end_compact_prefix_vec.clone()).unwrap(); - - // Compact the deleted rangeto apply the delete immediately - consensus_db.compact_range(Some(start_ghostdag_prefix_vec.as_slice()), Some(end_ghostdag_prefix_vec.as_slice())); - consensus_db.compact_range(Some(start_compact_prefix_vec.as_slice()), Some(end_compact_prefix_vec.as_slice())); - - // Also update the version to one higher: - mcms.set_version(version + 1).unwrap(); - } - None => { - let msg = - "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; - get_user_approval_or_exit(msg, args.yes); - - is_db_reset_needed = true; - } - } + if version == 4 { + let msg = "NOTE: Node database is from an older version. Proceeding with the upgrade is instant and safe. +However, downgrading to an older node version later will require deleting the database. +Do you confirm? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + mcms.set_version(kaspa_consensus::consensus::factory::LATEST_DB_VERSION).unwrap(); } else { let msg = "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; @@ -567,13 +508,8 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm config.block_template_cache_lifetime, mining_counters.clone(), ))); - let mining_monitor = Arc::new(MiningMonitor::new( - mining_manager.clone(), - consensus_manager.clone(), - mining_counters, - tx_script_cache_counters.clone(), - tick_service.clone(), - )); + let mining_monitor = + Arc::new(MiningMonitor::new(mining_manager.clone(), mining_counters, tx_script_cache_counters.clone(), tick_service.clone())); let hub = Hub::new(); let mining_rule_engine = Arc::new(MiningRuleEngine::new( diff --git a/metrics/core/src/lib.rs b/metrics/core/src/lib.rs index 53519b0f0c..1772f2d7a2 100644 --- a/metrics/core/src/lib.rs +++ b/metrics/core/src/lib.rs @@ -20,7 +20,7 @@ use workflow_core::time::unixtime_as_millis_f64; use workflow_log::*; pub type MetricsSinkFn = - Arc Option>)>>> + 'static>>; + Arc Option>>>> + 'static>>; pub struct Metrics { task_ctl: DuplexChannel, diff --git a/mining/benches/bench.rs b/mining/benches/bench.rs index 16cfcc234f..5eb27cdc36 100644 --- a/mining/benches/bench.rs +++ b/mining/benches/bench.rs @@ -69,11 +69,11 @@ pub fn bench_compare_topological_index_fns(c: &mut Criterion) { let mut group = c.benchmark_group("compare fns"); group.bench_function("TopologicalIndex::topological_index", |b| { let dag = build_dag(); - b.iter(|| (black_box(dag.topological_index()))) + b.iter(|| black_box(dag.topological_index())) }); group.bench_function("TopologicalIndex::topological_index_dfs", |b| { let dag = build_dag(); - b.iter(|| (black_box(dag.topological_index_dfs()))) + b.iter(|| black_box(dag.topological_index_dfs())) }); group.finish(); } @@ -102,7 +102,7 @@ pub fn bench_mempool_sampling(c: &mut Criterion) { } let len = cap; - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().take(len).cloned() { frontier.insert(item).then_some(()).unwrap(); } @@ -183,7 +183,7 @@ pub fn bench_mempool_selectors(c: &mut Criterion) { } for len in [100, 300, 350, 500, 1000, 2000, 5000, 10_000, 100_000, 500_000, 1_000_000].into_iter().rev() { - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().take(len).cloned() { frontier.insert(item).then_some(()).unwrap(); } @@ -252,7 +252,7 @@ pub fn bench_inplace_sampling_worst_case(c: &mut Criterion) { map.insert(key.tx.id(), key); } - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().cloned() { frontier.insert(item).then_some(()).unwrap(); } diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index 6f0dbe6743..01360be8da 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -105,8 +105,7 @@ impl BlockTemplateBuilder { coinbase_tx.outputs.last_mut().unwrap().script_public_key = new_miner_data.script_public_key.clone(); } // Update the hash merkle root according to the modified transactions - block_template.block.header.hash_merkle_root = - consensus.calc_transaction_hash_merkle_root(&block_template.block.transactions, block_template.block.header.daa_score); + block_template.block.header.hash_merkle_root = consensus.calc_transaction_hash_merkle_root(&block_template.block.transactions); let new_timestamp = unix_now(); if new_timestamp > block_template.block.header.timestamp { // Only if new time stamp is later than current, update the header. 
Otherwise, diff --git a/mining/src/feerate/mod.rs b/mining/src/feerate/mod.rs index 5ef3579a56..633d6b3303 100644 --- a/mining/src/feerate/mod.rs +++ b/mining/src/feerate/mod.rs @@ -85,13 +85,15 @@ pub struct FeerateEstimator { /// other words, the inverse of the transaction inclusion rate. For instance, if the average transaction mass is 2500 grams, /// the block mass limit is 500,000 and the network has 10 BPS, then this number would be 1/2000 seconds. inclusion_interval: f64, + + target_time_per_block_seconds: f64, } impl FeerateEstimator { - pub fn new(total_weight: f64, inclusion_interval: f64) -> Self { + pub fn new(total_weight: f64, inclusion_interval: f64, target_time_per_block_seconds: f64) -> Self { assert!(total_weight >= 0.0); assert!((0f64..1f64).contains(&inclusion_interval)); - Self { total_weight, inclusion_interval } + Self { total_weight, inclusion_interval, target_time_per_block_seconds } } pub(crate) fn feerate_to_time(&self, feerate: f64) -> f64 { @@ -132,8 +134,8 @@ impl FeerateEstimator { pub fn calc_estimations(&self, minimum_standard_feerate: f64) -> FeerateEstimations { let min = minimum_standard_feerate; - // Choose `high` such that it provides sub-second waiting time - let high = self.time_to_feerate(1f64).max(min); + // Choose `high` such that the transaction is expected to be included in the next block. + let high = self.time_to_feerate(self.target_time_per_block_seconds).max(min); // Choose `low` feerate such that it provides sub-hour waiting time AND it covers (at least) the 0.25 quantile let low = self.time_to_feerate(3600f64).max(self.quantile(min, high, 0.25)); // Choose `normal` feerate such that it provides sub-minute waiting time AND it covers (at least) the 0.66 quantile between low and high. @@ -176,7 +178,8 @@ mod tests { #[test] fn test_feerate_estimations() { - let estimator = FeerateEstimator { total_weight: 1002283.659, inclusion_interval: 0.004f64 }; + let estimator = + FeerateEstimator { total_weight: 1002283.659, inclusion_interval: 0.004f64, target_time_per_block_seconds: 1.0 }; let estimations = estimator.calc_estimations(1.0); let buckets = estimations.ordered_buckets(); for (i, j) in buckets.into_iter().tuple_windows() { @@ -187,7 +190,7 @@ mod tests { #[test] fn test_min_feerate_estimations() { - let estimator = FeerateEstimator { total_weight: 0.00659, inclusion_interval: 0.004f64 }; + let estimator = FeerateEstimator { total_weight: 0.00659, inclusion_interval: 0.004f64, target_time_per_block_seconds: 1.0 }; let minimum_feerate = 0.755; let estimations = estimator.calc_estimations(minimum_feerate); println!("{estimations}"); @@ -201,7 +204,7 @@ mod tests { #[test] fn test_zero_values() { - let estimator = FeerateEstimator { total_weight: 0.0, inclusion_interval: 0.0 }; + let estimator = FeerateEstimator { total_weight: 0.0, inclusion_interval: 0.0, target_time_per_block_seconds: 1.0 }; let minimum_feerate = 0.755; let estimations = estimator.calc_estimations(minimum_feerate); let buckets = estimations.ordered_buckets(); @@ -210,7 +213,7 @@ mod tests { assert_eq!(0.0, bucket.estimated_seconds); } - let estimator = FeerateEstimator { total_weight: 0.0, inclusion_interval: 0.1 }; + let estimator = FeerateEstimator { total_weight: 0.0, inclusion_interval: 0.1, target_time_per_block_seconds: 1.0 }; let minimum_feerate = 0.755; let estimations = estimator.calc_estimations(minimum_feerate); let buckets = estimations.ordered_buckets(); @@ -219,7 +222,7 @@ mod tests { assert_eq!(estimator.inclusion_interval, bucket.estimated_seconds); } - let 
estimator = FeerateEstimator { total_weight: 0.1, inclusion_interval: 0.0 }; + let estimator = FeerateEstimator { total_weight: 0.1, inclusion_interval: 0.0, target_time_per_block_seconds: 1.0 }; let minimum_feerate = 0.755; let estimations = estimator.calc_estimations(minimum_feerate); let buckets = estimations.ordered_buckets(); diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 43cc982927..ecbb5df909 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -205,11 +205,8 @@ impl MiningManager { } /// Returns realtime feerate estimations based on internal mempool state - pub(crate) fn get_realtime_feerate_estimations(&self, virtual_daa_score: u64) -> FeerateEstimations { - let args = FeerateEstimatorArgs::new( - self.config.network_blocks_per_second.get(virtual_daa_score), - self.config.maximum_mass_per_block, - ); + pub(crate) fn get_realtime_feerate_estimations(&self) -> FeerateEstimations { + let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second.after(), self.config.maximum_mass_per_block); let estimator = self.mempool.read().build_feerate_estimator(args); estimator.calc_estimations(self.config.minimum_feerate()) } @@ -220,10 +217,7 @@ impl MiningManager { consensus: &dyn ConsensusApi, prefix: kaspa_addresses::Prefix, ) -> MiningManagerResult { - let args = FeerateEstimatorArgs::new( - self.config.network_blocks_per_second.get(consensus.get_virtual_daa_score()), - self.config.maximum_mass_per_block, - ); + let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second.after(), self.config.maximum_mass_per_block); let network_mass_per_second = args.network_mass_per_second(); let mempool_read = self.mempool.read(); let estimator = mempool_read.build_feerate_estimator(args); @@ -862,8 +856,8 @@ impl MiningManagerProxy { } /// Returns realtime feerate estimations based on internal mempool state - pub async fn get_realtime_feerate_estimations(self, virtual_daa_score: u64) -> FeerateEstimations { - spawn_blocking(move || self.inner.get_realtime_feerate_estimations(virtual_daa_score)).await.unwrap() + pub async fn get_realtime_feerate_estimations(self) -> FeerateEstimations { + spawn_blocking(move || self.inner.get_realtime_feerate_estimations()).await.unwrap() } /// Returns realtime feerate estimations based on internal mempool state with additional verbose data diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index 2f9ff8a25d..9ef1b60624 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -263,7 +263,7 @@ mod tests { want: u64, } - let tests = vec![ + let tests = [ Test { // Ensure combination of size and fee that are less than 1000 // produce a non-zero fee. 
diff --git a/mining/src/mempool/model/accepted_transactions.rs b/mining/src/mempool/model/accepted_transactions.rs index b22eddafb7..0c0d49606e 100644 --- a/mining/src/mempool/model/accepted_transactions.rs +++ b/mining/src/mempool/model/accepted_transactions.rs @@ -44,7 +44,7 @@ impl AcceptedTransactions { pub(crate) fn expire(&mut self, virtual_daa_score: u64) { let now = unix_now(); if virtual_daa_score - < self.last_expire_scan_daa_score + self.config.accepted_transaction_expire_scan_interval_daa_score.get(virtual_daa_score) + < self.last_expire_scan_daa_score + self.config.accepted_transaction_expire_scan_interval_daa_score.after() || now < self.last_expire_scan_time + self.config.accepted_transaction_expire_scan_interval_milliseconds { return; @@ -54,7 +54,7 @@ impl AcceptedTransactions { .transactions .iter() .filter_map(|(transaction_id, daa_score)| { - if virtual_daa_score > daa_score + self.config.accepted_transaction_expire_interval_daa_score.get(virtual_daa_score) { + if virtual_daa_score > daa_score + self.config.accepted_transaction_expire_interval_daa_score.after() { Some(*transaction_id) } else { None diff --git a/mining/src/mempool/model/frontier.rs b/mining/src/mempool/model/frontier.rs index 70ac215bad..b5d12ad0b4 100644 --- a/mining/src/mempool/model/frontier.rs +++ b/mining/src/mempool/model/frontier.rs @@ -43,11 +43,18 @@ pub struct Frontier { /// Tracks the average transaction mass throughout the mempool's lifespan using a decayed weighting mechanism average_transaction_mass: f64, + + target_time_per_block_seconds: f64, } -impl Default for Frontier { - fn default() -> Self { - Self { search_tree: Default::default(), total_mass: Default::default(), average_transaction_mass: INITIAL_AVG_MASS } +impl Frontier { + pub fn new(target_time_per_block_seconds: f64) -> Self { + Self { + search_tree: Default::default(), + total_mass: Default::default(), + average_transaction_mass: INITIAL_AVG_MASS, + target_time_per_block_seconds, + } } } @@ -229,7 +236,7 @@ impl Frontier { let bps = args.network_blocks_per_second as f64; let mut mass_per_block = args.maximum_mass_per_block as f64; let mut inclusion_interval = average_transaction_mass / (mass_per_block * bps); - let mut estimator = FeerateEstimator::new(self.total_weight(), inclusion_interval); + let mut estimator = FeerateEstimator::new(self.total_weight(), inclusion_interval, self.target_time_per_block_seconds); // Search for better estimators by possibly removing extremely high outliers let mut down_iter = self.search_tree.descending_iter().peekable(); @@ -250,7 +257,7 @@ impl Frontier { // Compute the weight up to, and excluding, current key (which translates to zero weight if peek() is none) let prefix_weight = down_iter.peek().map(|key| self.search_tree.prefix_weight(key)).unwrap_or_default(); - let pending_estimator = FeerateEstimator::new(prefix_weight, inclusion_interval); + let pending_estimator = FeerateEstimator::new(prefix_weight, inclusion_interval, self.target_time_per_block_seconds); // Test the pending estimator vs. 
the current one if pending_estimator.feerate_to_time(1.0) < estimator.feerate_to_time(1.0) { @@ -294,7 +301,7 @@ mod tests { map.insert(key.tx.id(), key); } - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().cloned() { frontier.insert(item).then_some(()).unwrap(); } @@ -314,7 +321,7 @@ mod tests { map.insert(key.tx.id(), key); } - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().cloned() { frontier.insert(item).then_some(()).unwrap(); } @@ -348,7 +355,7 @@ mod tests { } let len = cap / 2; - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().take(len).cloned() { frontier.insert(item).then_some(()).unwrap(); } @@ -402,7 +409,7 @@ mod tests { } for len in [0, 1, 10, 100, 200, 300, 500, 750, cap / 2, (cap * 2) / 3, (cap * 4) / 5, (cap * 5) / 6, cap] { - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().take(len).cloned() { frontier.insert(item).then_some(()).unwrap(); } @@ -444,7 +451,7 @@ mod tests { for len in [0, 1, 10, 100, 200, 300, 500, 750, cap / 2, (cap * 2) / 3, (cap * 4) / 5, (cap * 5) / 6, cap] { println!(); println!("Testing a frontier with {} txs...", len.min(cap)); - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().take(len).cloned() { frontier.insert(item).then_some(()).unwrap(); } @@ -477,7 +484,7 @@ mod tests { const HIGH_FEERATE: f64 = 1000.0; let cap = 20_000; - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for i in 0..cap as u64 { let (mass, fee) = if i < 200 { let mass = 1650; @@ -533,7 +540,7 @@ mod tests { // All lens make for less than block capacity (given the mass used) for len in [0, 1, 10, 100, 200, 250, 300] { - let mut frontier = Frontier::default(); + let mut frontier = Frontier::new(1.0); for item in map.values().take(len).cloned() { frontier.insert(item).then_some(()).unwrap(); } diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs index 851a0ed1f5..17ae0513cd 100644 --- a/mining/src/mempool/model/orphan_pool.rs +++ b/mining/src/mempool/model/orphan_pool.rs @@ -265,7 +265,7 @@ impl OrphanPool { } pub(crate) fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { - if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score.get(virtual_daa_score) { + if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score.after() { return Ok(()); } @@ -276,7 +276,7 @@ impl OrphanPool { .values() .filter_map(|x| { if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.orphan_expire_interval_daa_score.get(virtual_daa_score) + && virtual_daa_score > x.added_at_daa_score + self.config.orphan_expire_interval_daa_score.after() { Some(x.id()) } else { diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index c7a4f5a2b5..1b7d41d493 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -81,12 +81,14 @@ pub(crate) struct TransactionsPool { impl TransactionsPool { pub(crate) fn new(config: Arc) -> Self { + // [Crescendo] Delete `after()` after cleanup. 
+ let target_time_per_block = 1.0 / (config.network_blocks_per_second.after() as f64); Self { config, all_transactions: MempoolTransactionCollection::default(), parent_transactions: TransactionsEdges::default(), chained_transactions: TransactionsEdges::default(), - ready_transactions: Default::default(), + ready_transactions: Frontier::new(target_time_per_block), last_expire_scan_daa_score: 0, last_expire_scan_time: unix_now(), utxo_set: MempoolUtxoSet::new(), @@ -314,8 +316,7 @@ impl TransactionsPool { pub(crate) fn collect_expired_low_priority_transactions(&mut self, virtual_daa_score: u64) -> Vec { let now = unix_now(); - if virtual_daa_score - < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score.get(virtual_daa_score) + if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score.after() || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds { return vec![]; @@ -330,8 +331,7 @@ impl TransactionsPool { .values() .filter_map(|x| { if (x.priority == Priority::Low) - && virtual_daa_score - > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score.get(virtual_daa_score) + && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score.after() { Some(x.id()) } else { diff --git a/mining/src/model/topological_index.rs b/mining/src/model/topological_index.rs index db818724a8..15b31aeba4 100644 --- a/mining/src/model/topological_index.rs +++ b/mining/src/model/topological_index.rs @@ -224,7 +224,7 @@ mod tests { is_acyclic: bool, } - let tests = vec![ + let tests = [ Test { name: "a regular DAG", dag: build_dag(false), is_acyclic: true }, Test { name: "an invalid DAG with one cycle", dag: build_dag(true), is_acyclic: false }, ]; @@ -278,7 +278,7 @@ mod tests { expected_result: TopologicalIndexResult<()>, } - let tests = vec![ + let tests = [ Test { name: "topologically ordered index", index: vec!["shirt", "socks", "tie", "boxer", "pants", "belt", "jacket", "shoes"], diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index 7358853883..74449424c1 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -1,6 +1,5 @@ use super::MiningCounters; use crate::manager::MiningManagerProxy; -use kaspa_consensusmanager::ConsensusManager; use kaspa_core::{ debug, info, task::{ @@ -17,8 +16,6 @@ const MONITOR: &str = "mempool-monitor"; pub struct MiningMonitor { mining_manager: MiningManagerProxy, - consensus_manager: Arc, - // Counters counters: Arc, @@ -31,12 +28,11 @@ pub struct MiningMonitor { impl MiningMonitor { pub fn new( mining_manager: MiningManagerProxy, - consensus_manager: Arc, counters: Arc, tx_script_cache_counters: Arc, tick_service: Arc, ) -> MiningMonitor { - MiningMonitor { mining_manager, consensus_manager, counters, tx_script_cache_counters, tick_service } + MiningMonitor { mining_manager, counters, tx_script_cache_counters, tick_service } } pub async fn worker(self: &Arc) { @@ -70,11 +66,7 @@ impl MiningMonitor { delta.low_priority_tx_counts, delta.tx_accepted_counts, ); - let feerate_estimations = self - .mining_manager - .clone() - .get_realtime_feerate_estimations(self.consensus_manager.consensus().unguarded_session().get_virtual_daa_score()) - .await; + let feerate_estimations = self.mining_manager.clone().get_realtime_feerate_estimations().await; debug!("Realtime feerate estimations: {}", feerate_estimations); } if delta.tx_evicted_counts > 0 { diff --git 
a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index c0328504e4..f17174937f 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -12,7 +12,7 @@ use kaspa_consensus_core::{ coinbase::CoinbaseResult, tx::{TxResult, TxRuleError}, }, - header::Header, + header::{CompressedParents, Header}, mass::{transaction_estimated_serialized_size, ContextualMasses, NonContextualMasses}, merkle::calc_hash_merkle_root, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, UtxoEntry}, @@ -86,10 +86,10 @@ impl ConsensusApi for ConsensusMock { let coinbase = coinbase_manager.expected_coinbase_transaction(miner_data.clone()); txs.insert(0, coinbase.tx); let now = unix_now(); - let hash_merkle_root = self.calc_transaction_hash_merkle_root(&txs, 0); + let hash_merkle_root = self.calc_transaction_hash_merkle_root(&txs); let header = Header::new_finalized( BLOCK_VERSION, - vec![], + CompressedParents::default(), hash_merkle_root, ZERO_HASH, ZERO_HASH, @@ -176,7 +176,7 @@ impl ConsensusApi for ConsensusMock { Ok(coinbase_manager.modify_coinbase_payload(payload, miner_data)) } - fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], _pov_daa_score: u64) -> Hash { - calc_hash_merkle_root(txs.iter(), false) + fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction]) -> Hash { + calc_hash_merkle_root(txs.iter()) } } diff --git a/musl-toolchain/build.sh b/musl-toolchain/build.sh index 1e114a9c5b..aa235620bb 100755 --- a/musl-toolchain/build.sh +++ b/musl-toolchain/build.sh @@ -84,13 +84,4 @@ export CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=$CC rustup target add x86_64-unknown-linux-musl # Install missing dependencies -cargo fetch --target x86_64-unknown-linux-musl - -# Patch missing include in librocksdb-sys-0.16.0+8.10.0. Credit: @supertypo -FILE_PATH=$(find $HOME/.cargo/registry/src/ -path "*/librocksdb-sys-0.16.0+8.10.0/*/offpeak_time_info.h") - -if [ -n "$FILE_PATH" ]; then - sed -i '1i #include ' "$FILE_PATH" -else - echo "No such file for sed modification." 
-fi \ No newline at end of file +cargo fetch --target x86_64-unknown-linux-musl \ No newline at end of file diff --git a/notify/benches/bench.rs b/notify/benches/bench.rs index 3bb292169a..4184b0cba6 100644 --- a/notify/benches/bench.rs +++ b/notify/benches/bench.rs @@ -21,7 +21,7 @@ const ADDRESS_COUNT: usize = 1_000_000; pub fn bench_subscription_context(c: &mut Criterion) { c.bench_function("create_and_fill_context", |b| { let addresses = create_addresses(ADDRESS_COUNT); - b.iter(|| (black_box(create_and_fill_context(addresses.clone())))) + b.iter(|| black_box(create_and_fill_context(addresses.clone()))) }); } diff --git a/notify/src/error.rs b/notify/src/error.rs index 4ec239fac2..d9686c22ab 100644 --- a/notify/src/error.rs +++ b/notify/src/error.rs @@ -1,7 +1,7 @@ use async_channel::{RecvError, SendError, TrySendError}; use thiserror::Error; -pub type BoxedStdError = Box<(dyn std::error::Error + Sync + std::marker::Send + 'static)>; +pub type BoxedStdError = Box<dyn std::error::Error + Sync + Send + 'static>; #[derive(Clone, Debug, Error)] pub enum Error { diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 3a8e8fd61c..a2b9a0e112 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -1,24 +1,18 @@ -use crate::{ - flowcontext::{ - orphans::{OrphanBlocksPool, OrphanOutput}, - process_queue::ProcessQueue, - transactions::TransactionsSpread, - }, - v7, +use crate::flowcontext::{ + orphans::{OrphanBlocksPool, OrphanOutput}, + process_queue::ProcessQueue, + transactions::TransactionsSpread, }; -use crate::{v5, v6}; +use crate::{v7, v8}; use async_trait::async_trait; use futures::future::join_all; use kaspa_addressmanager::AddressManager; use kaspa_connectionmanager::ConnectionManager; +use kaspa_consensus_core::api::{BlockValidationFuture, BlockValidationFutures}; use kaspa_consensus_core::block::Block; use kaspa_consensus_core::config::Config; use kaspa_consensus_core::errors::block::RuleError; use kaspa_consensus_core::tx::{Transaction, TransactionId}; -use kaspa_consensus_core::{ - api::{BlockValidationFuture, BlockValidationFutures}, - network::NetworkType, -}; use kaspa_consensus_notify::{ notification::{Notification, PruningPointUtxoSetOverrideNotification}, root::ConsensusNotificationRoot, @@ -65,7 +59,7 @@ use tokio_stream::{wrappers::UnboundedReceiverStream, StreamExt}; use uuid::Uuid; /// The P2P protocol version. -const PROTOCOL_VERSION: u32 = 7; +const PROTOCOL_VERSION: u32 = 8; /// See `check_orphan_resolution_range` const BASELINE_ORPHAN_RESOLUTION_RANGE: u32 = 5; @@ -235,8 +229,7 @@ pub struct FlowContextInner { // Special sampling logger used only for high-bps networks where logs must be throttled block_event_logger: Option<BlockEventLogger>, - // Bps upper bound - bps_upper_bound: usize, + bps: usize, // Orphan parameters orphan_resolution_range: u32, @@ -319,13 +312,13 @@ impl FlowContext { hub: Hub, mining_rule_engine: Arc<MiningRuleEngine>, ) -> Self { - let bps_upper_bound = config.bps().upper_bound() as usize; - let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (bps_upper_bound as f64).log2().ceil() as u32; + let bps = config.bps().after() as usize; + let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (bps as f64).log2().ceil() as u32; // The maximum amount of orphans allowed in the orphans pool. This number approximates the average // number of orphans that can possibly exist, clamped by a constant upper bound.
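The arithmetic behind the orphan pool bound is easy to miss inside the hunk, so here is a small self-contained sketch of the same computation (a minimal sketch: BASELINE_ORPHAN_RESOLUTION_RANGE = 5 matches the constant in the diff, while the MAX_ORPHANS_UPPER_BOUND value is assumed purely for illustration):

```rust
// Sketch of the orphan-pool sizing performed in FlowContext construction.
const BASELINE_ORPHAN_RESOLUTION_RANGE: u32 = 5; // matches the diff
const MAX_ORPHANS_UPPER_BOUND: usize = 1024; // assumed value, for illustration only

fn orphan_pool_bounds(bps: usize, ghostdag_k: usize) -> (u32, usize) {
    // The resolution range grows logarithmically with the block rate.
    let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (bps as f64).log2().ceil() as u32;
    // Capacity approximates the average number of possible orphans, clamped from above.
    let max_orphans = (2u64.pow(orphan_resolution_range) as usize * ghostdag_k).min(MAX_ORPHANS_UPPER_BOUND);
    (orphan_resolution_range, max_orphans)
}

fn main() {
    // With 10 bps and k = 18: range = 5 + ceil(log2(10)) = 9, raw bound = 2^9 * 18 = 9216,
    // which is then clamped to MAX_ORPHANS_UPPER_BOUND.
    let (range, max_orphans) = orphan_pool_bounds(10, 18);
    println!("range = {range}, max_orphans = {max_orphans}");
}
```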
let max_orphans = - (2u64.pow(orphan_resolution_range) as usize * config.ghostdag_k().upper_bound() as usize).min(MAX_ORPHANS_UPPER_BOUND); + (2u64.pow(orphan_resolution_range) as usize * config.ghostdag_k().after() as usize).min(MAX_ORPHANS_UPPER_BOUND); Self { inner: Arc::new(FlowContextInner { node_id: Uuid::new_v4().into(), @@ -342,8 +335,8 @@ impl FlowContext { mining_manager, tick_service, notification_root, - block_event_logger: if bps_upper_bound > 1 { Some(BlockEventLogger::new(bps_upper_bound)) } else { None }, - bps_upper_bound, + block_event_logger: Some(BlockEventLogger::new(bps)), + bps, orphan_resolution_range, max_orphans, config, @@ -353,7 +346,7 @@ impl FlowContext { } pub fn block_invs_channel_size(&self) -> usize { - self.bps_upper_bound * Router::incoming_flow_baseline_channel_size() + self.bps * Router::incoming_flow_baseline_channel_size() } pub fn orphan_resolution_range(&self) -> u32 { @@ -512,36 +505,12 @@ impl FlowContext { // Broadcast as soon as the block has been validated and inserted into the DAG self.hub.broadcast(make_message!(Payload::InvRelayBlock, InvRelayBlockMessage { hash: Some(hash.into()) })).await; - let daa_score = block.header.daa_score; self.on_new_block(consensus, Default::default(), block, virtual_state_task).await; - self.log_new_block_event(BlockLogEvent::Submit(hash), daa_score); + self.log_block_event(BlockLogEvent::Submit(hash)); Ok(()) } - /// [Crescendo] temp crescendo countdown logging - pub(super) fn log_new_block_event(&self, event: BlockLogEvent, daa_score: u64) { - if self.config.bps().before() == 1 && !self.config.crescendo_activation.is_active(daa_score) { - if let Some(dist) = self.config.crescendo_activation.is_within_range_before_activation(daa_score, 3600) { - match event { - BlockLogEvent::Relay(hash) => info!("Accepted block {} via relay \t [Crescendo countdown: -{}]", hash, dist), - BlockLogEvent::Submit(hash) => { - info!("Accepted block {} via submit block \t [Crescendo countdown: -{}]", hash, dist) - } - _ => {} - } - } else { - match event { - BlockLogEvent::Relay(hash) => info!("Accepted block {} via relay", hash), - BlockLogEvent::Submit(hash) => info!("Accepted block {} via submit block", hash), - _ => {} - } - } - } else { - self.log_block_event(event); - } - } - pub fn log_block_event(&self, event: BlockLogEvent) { if let Some(logger) = self.block_event_logger.as_ref() { logger.log(event) @@ -782,20 +751,10 @@ impl ConnectionInitializer for FlowContext { debug!("protocol versions - self: {}, peer: {}", PROTOCOL_VERSION, peer_version.protocol_version); // Register all flows according to version - let connect_only_new_versions = self.config.net.network_type() != NetworkType::Testnet; - - let (flows, applied_protocol_version) = if connect_only_new_versions { - match peer_version.protocol_version { - v if v >= PROTOCOL_VERSION => (v7::register(self.clone(), router.clone()), PROTOCOL_VERSION), - v => return Err(ProtocolError::VersionMismatch(PROTOCOL_VERSION, v)), - } - } else { - match peer_version.protocol_version { - v if v >= PROTOCOL_VERSION => (v7::register(self.clone(), router.clone()), PROTOCOL_VERSION), - 6 => (v6::register(self.clone(), router.clone()), 6), - 5 => (v5::register(self.clone(), router.clone()), 5), - v => return Err(ProtocolError::VersionMismatch(PROTOCOL_VERSION, v)), - } + let (flows, applied_protocol_version) = match peer_version.protocol_version { + v if v >= PROTOCOL_VERSION => (v8::register(self.clone(), router.clone()), PROTOCOL_VERSION), + 7 => (v7::register(self.clone(), 
router.clone()), 7), + v => return Err(ProtocolError::VersionMismatch(PROTOCOL_VERSION, v)), }; // Build and register the peer properties diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/ibd/flow.rs similarity index 59% rename from protocol/flows/src/v5/ibd/flow.rs rename to protocol/flows/src/ibd/flow.rs index c2509f9e4e..0d84587d1e 100644 --- a/protocol/flows/src/v5/ibd/flow.rs +++ b/protocol/flows/src/ibd/flow.rs @@ -1,9 +1,7 @@ use crate::{ flow_context::FlowContext, - v5::{ - ibd::{HeadersChunkStream, TrustedEntryStream}, - Flow, - }, + flow_trait::Flow, + ibd::{negotiate::ChainNegotiationOutput, HeadersChunkStream, TrustedEntryStream}, }; use futures::future::{join_all, select, try_join_all, Either}; use itertools::Itertools; @@ -12,6 +10,8 @@ use kaspa_consensus_core::{ block::Block, header::Header, pruning::{PruningPointProof, PruningPointsList, PruningProofMetadata}, + trusted::TrustedBlock, + tx::Transaction, BlockHashSet, }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy, StagingConsensus}; @@ -21,9 +21,9 @@ use kaspa_muhash::MuHash; use kaspa_p2p_lib::{ common::ProtocolError, convert::model::trusted::TrustedDataPackage, - dequeue_with_timeout, make_message, + dequeue_with_timeout, make_message, make_request, pb::{ - kaspad_message::Payload, RequestAntipastMessage, RequestHeadersMessage, RequestIbdBlocksMessage, + kaspad_message::Payload, RequestAntipastMessage, RequestBlockBodiesMessage, RequestHeadersMessage, RequestIbdBlocksMessage, RequestPruningPointAndItsAnticoneMessage, RequestPruningPointProofMessage, RequestPruningPointUtxoSetMessage, }, IncomingRoute, Router, @@ -36,12 +36,14 @@ use std::{ use tokio::time::sleep; use super::{progress::ProgressReporter, HeadersChunk, PruningPointUtxosetChunkStream, IBD_BATCH_SIZE}; +type BlockBody = Vec<Transaction>; /// Flow for managing IBD - Initial Block Download pub struct IbdFlow { pub(super) ctx: FlowContext, pub(super) router: Arc<Router>, pub(super) incoming_route: IncomingRoute, + pub(super) body_only_ibd_permitted: bool, // Receives relay blocks from relay flow which are out of orphan resolution range and hence trigger IBD relay_receiver: JobReceiver<Block>, @@ -59,9 +61,9 @@ impl Flow for IbdFlow { } pub enum IbdType { - None, - Sync(Hash), + Sync, DownloadHeadersProof, + PruningCatchUp, } struct QueueChunkOutput { @@ -72,8 +74,14 @@ // TODO: define a peer banning strategy impl IbdFlow { - pub fn new(ctx: FlowContext, router: Arc<Router>, incoming_route: IncomingRoute, relay_receiver: JobReceiver<Block>) -> Self { - Self { ctx, router, incoming_route, relay_receiver } + pub fn new( + ctx: FlowContext, + router: Arc<Router>, + incoming_route: IncomingRoute, + relay_receiver: JobReceiver<Block>, + body_only_ibd_permitted: bool, + ) -> Self { + Self { ctx, router, incoming_route, relay_receiver, body_only_ibd_permitted } } async fn start_impl(&mut self) -> Result<(), ProtocolError> { @@ -98,17 +106,45 @@ let mut session = self.ctx.consensus().session().await; let negotiation_output = self.negotiate_missing_syncer_chain_segment(&session).await?; - let ibd_type = - self.determine_ibd_type(&session, &relay_block.header, negotiation_output.highest_known_syncer_chain_hash).await?; + let ibd_type = self + .determine_ibd_type( + &session, + &relay_block.header, + negotiation_output.highest_known_syncer_chain_hash, + negotiation_output.syncer_pruning_point, + ) + .await?; match ibd_type { - IbdType::None => { - return Err(ProtocolError::Other("peer has no known block and conditions for requesting headers proof are not 
met")) - } - IbdType::Sync(highest_known_syncer_chain_hash) => { + IbdType::Sync => { + let pruning_point = session.async_pruning_point().await; + + info!("syncing ahead from current pruning point"); + // Following IBD catchup a new pruning point is designated and finalized in consensus. Blocks from its anticone (including itself) + // have undergone normal header verification, but contain no body yet. Processing of new blocks in the pruning point's future cannot proceed + // since these blocks' parents are missing block data. + // Hence we explicitly process bodies of the currently body missing anticone blocks as trusted blocks + // Notice that this is degenerate following sync_with_headers_proof + // but not necessarily so after sync_headers - + // as it might sync following a previous pruning_catch_up that crashed before this stage concluded + if !session.async_is_pruning_point_anticone_fully_synced().await { + self.sync_missing_trusted_bodies(&session).await?; + } + if !session.async_is_pruning_utxoset_stable().await + // Utxo might not be available even if the pruning point block data is. + // Utxo must be synced before all so the node could function + { + info!( + "utxoset corresponding to the current pruning point is incomplete, attempting to download it from {}", + self.router + ); + + self.sync_new_utxo_set(&session, pruning_point).await?; + } + // Once utxo is valid, simply sync missing headers self.sync_headers( &session, negotiation_output.syncer_virtual_selected_parent, - highest_known_syncer_chain_hash, + negotiation_output.highest_known_syncer_chain_hash.unwrap(), &relay_block, ) .await?; @@ -121,19 +157,39 @@ impl IbdFlow { spawn_blocking(|| staging.commit()).await.unwrap(); info!( "Header download stage of IBD with headers proof completed successfully from {}. Committed staging consensus.", - self.router - ); - self.ctx.on_pruning_point_utxoset_override(); + self.router + ); + // This will reobtain the freshly committed staging consensus session = self.ctx.consensus().session().await; + // Next, sync a utxoset corresponding to the new pruning point from the syncer. + // Note that the new pruning point's anticone need not be downloaded separately as in other IBD types + // as it was just downloaded as part of the headers proof. 
+ self.sync_new_utxo_set(&session, negotiation_output.syncer_pruning_point).await?; } Err(e) => { - info!("IBD with headers proof from {} was unsuccessful ({})", self.router, e); + warn!("IBD with headers proof from {} was unsuccessful ({})", self.router, e); staging.cancel(); return Err(e); } } } + IbdType::PruningCatchUp => { + info!("catching up to new pruning point {}", negotiation_output.syncer_pruning_point); + match self.pruning_point_catchup(&session, &negotiation_output, &relay_block).await { + Ok(()) => { + info!("header stage of pruning catchup from peer {} completed", self.router); + self.sync_missing_trusted_bodies(&session).await?; + self.sync_new_utxo_set(&session, negotiation_output.syncer_pruning_point).await?; + // Note that pruning of old data will only occur once virtual has caught up sufficiently far + } + + Err(e) => { + warn!("IBD catchup from peer {} was unsuccessful ({})", self.router, e); + return Err(e); + } + } + } } // Sync missing bodies in the past of syncer sink (virtual selected parent) @@ -169,13 +225,47 @@ consensus: &ConsensusProxy, relay_header: &Header, highest_known_syncer_chain_hash: Option<Hash>, + syncer_pruning_point: Hash, ) -> Result<IbdType, ProtocolError> { if let Some(highest_known_syncer_chain_hash) = highest_known_syncer_chain_hash { let pruning_point = consensus.async_pruning_point().await; + let sink = consensus.async_get_sink().await; + info!("current sink is: {}", sink); + info!("current pruning point is: {}", pruning_point); if consensus.async_is_chain_ancestor_of(pruning_point, highest_known_syncer_chain_hash).await? { - // The node is only missing a segment in the future of its current pruning point, and the chains - // agree as well, so we perform a simple sync IBD and only download the missing data - return Ok(IbdType::Sync(highest_known_syncer_chain_hash)); + if syncer_pruning_point == pruning_point { + // The node is only missing a segment in the future of its current pruning point, and the chains + // agree as well, so we perform a simple sync IBD and only download the missing data + return Ok(IbdType::Sync); + } else { + consensus.async_verify_is_pruning_sample(syncer_pruning_point).await?; + // The node is missing a segment in the near future of its current pruning point, but the syncer is ahead + // and already pruned the current pruning point. + + if consensus.async_get_block_status(syncer_pruning_point).await.is_some_and(|b| b.has_block_body()) + && !consensus.async_is_consensus_in_transitional_ibd_state().await + { + // The data pruned by the syncer is already available from within the node (from relay or past IBD attempts) + // and the consensus is not in a transitional state requiring data on the previous pruning point, + // hence we can carry on syncing as normal. + return Ok(IbdType::Sync); + } else { + // Two options: + // 1: syncer_pruning_point is in the future, and there is a need to partially resync from syncer_pruning_point + // 2: syncer_pruning_point is in the past of the current pruning point, or is unknown, in which case the syncing node is flawed, + // and IBD should be stopped + + if consensus + .async_is_chain_ancestor_of(pruning_point, syncer_pruning_point) + .await + .map_err(|_| ProtocolError::Other("syncer pruning point is corrupted"))? 
+ { + return Ok(IbdType::PruningCatchUp); + } else { + return Err(ProtocolError::Other("syncer pruning point is outdated")); + } + } + } } // If the pruning point is not in the chain of `highest_known_syncer_chain_hash`, it @@ -185,24 +275,13 @@ impl IbdFlow { // // TODO (relaxed): consider performing additional actions on finality conflicts in addition // to disconnecting from the peer (e.g., banning, rpc notification) - return Ok(IbdType::None); + return Err(ProtocolError::Other("peer is in a finality conflict with the local pruning point")); } let hst_header = consensus.async_get_header(consensus.async_get_headers_selected_tip().await).await.unwrap(); - // [Crescendo]: use the post crescendo pruning depth depending on hst's DAA score. - // Having a shorter depth for this condition for the fork transition period (if hst is shortly before activation) - // is negligible since there are other conditions required for activating an headers proof IBD. The important - // thing is that we eventually adjust to the longer period. - let pruning_depth = self.ctx.config.pruning_depth().get(hst_header.daa_score); + let pruning_depth = self.ctx.config.pruning_depth().after(); if relay_header.blue_score >= hst_header.blue_score + pruning_depth && relay_header.blue_work > hst_header.blue_work { - // [Crescendo]: switch to the new *shorter* finality duration only after sufficient time has passed - // since activation (measured via the new *larger* finality depth). - // Note: these are not critical execution paths so such estimation heuristics are completely ok in this context. - let finality_duration_in_milliseconds = self - .ctx - .config - .finality_duration_in_milliseconds() - .get(hst_header.daa_score.saturating_sub(self.ctx.config.finality_depth().upper_bound())); + let finality_duration_in_milliseconds = self.ctx.config.finality_duration_in_milliseconds().after(); if unix_now() > consensus.async_creation_timestamp().await + finality_duration_in_milliseconds { let fp = consensus.async_finality_point().await; let fp_ts = consensus.async_get_header(fp).await?.timestamp; @@ -211,17 +290,48 @@ impl IbdFlow { // consensus has matured for long enough (and not recently synced). This is mostly a spam-protector // since subsequent checks identify these violations as well // TODO (relaxed): consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) - return Ok(IbdType::None); + return Err(ProtocolError::Other( + "peer has no known block but local consensus appears to be up to date, this is most likely a spam attempt", + )); } } // The relayed block has sufficient blue score and blue work over the current header selected tip Ok(IbdType::DownloadHeadersProof) } else { - Ok(IbdType::None) + Err(ProtocolError::Other("peer has no known block but conditions for requesting headers proof are not met")) } } + /// This function is triggered when the syncer's pruning point is higher + /// than ours and we already processed its header before. 
+ /// So we only need to sync more headers and set it as our new pruning point before proceeding with IBD + async fn pruning_point_catchup( + &mut self, + consensus: &ConsensusProxy, + negotiation_output: &ChainNegotiationOutput, + relay_block: &Block, + ) -> Result<(), ProtocolError> { + // Before attempting to update to the syncer's pruning point, sync to the latest headers of the syncer, + // to ensure that we will locally have sufficient headers on top of the syncer's pruning point + let syncer_pp = negotiation_output.syncer_pruning_point; + let syncer_sink = negotiation_output.syncer_virtual_selected_parent; + self.sync_headers(consensus, syncer_sink, negotiation_output.highest_known_syncer_chain_hash.unwrap(), relay_block).await?; + + // This function's main effect is to confirm the syncer's pruning point can be finalized into the consensus, and to update + // all the relevant stores + consensus.async_intrusive_pruning_point_update(syncer_pp, syncer_sink).await?; + + // A sanity check to confirm that following the intrusive addition of new pruning points, + // the latest pruning point still correctly agrees with the DAG data, + // and is the head of a pruning points "chain" leading all the way down to genesis + // TODO(relaxed): once the catchup functionality has sufficiently matured, consider only doing this test if sanity checks are enabled + info!("validating pruning points consistency"); + consensus.async_validate_pruning_points(syncer_sink).await.unwrap(); + info!("pruning points consistency validated"); + Ok(()) + } + async fn ibd_with_headers_proof( &mut self, staging: &StagingConsensus, @@ -236,7 +346,6 @@ self.sync_headers(&staging_session, syncer_virtual_selected_parent, pruning_point, relay_block).await?; staging_session.async_validate_pruning_points(syncer_virtual_selected_parent).await?; self.validate_staging_timestamps(&self.ctx.consensus().session().await, &staging_session).await?; - self.sync_pruning_point_utxoset(&staging_session, pruning_point).await?; Ok(()) } @@ -270,13 +379,12 @@ if proof_pruning_point == consensus.async_pruning_point().await { return Err(ProtocolError::Other("the proof pruning point is the same as the current pruning point")); } - drop(consensus); self.router .enqueue(make_message!(Payload::RequestPruningPointAndItsAnticone, RequestPruningPointAndItsAnticoneMessage {})) .await?; - + // First, all pruning points up to the last are sent let msg = dequeue_with_timeout!(self.incoming_route, Payload::PruningPoints)?; let pruning_points: PruningPointsList = msg.try_into()?; @@ -294,11 +402,18 @@ return Err(ProtocolError::Other("pruning points are violating finality")); } + // Trusted data is sent in two stages: + // The first, TrustedDataPackage, contains metadata: the daa_window + // block headers and ghostdag data, which are required to verify the pruning + // point and its anticone. 
+ // The latter, the trusted data entries, each represent a block (with DAA data) from the anticone of the pruning point + // (including the PP itself), alongside indices denoting the respective metadata headers or ghostdag data let msg = dequeue_with_timeout!(self.incoming_route, Payload::TrustedData)?; let pkg: TrustedDataPackage = msg.try_into()?; debug!("received trusted data with {} daa entries and {} ghostdag entries", pkg.daa_window.len(), pkg.ghostdag_window.len()); let mut entry_stream = TrustedEntryStream::new(&self.router, &mut self.incoming_route); + // The first entry of the trusted data is the pruning point itself. let Some(pruning_point_entry) = entry_stream.next().await? else { return Err(ProtocolError::Other("got `done` message before receiving the pruning point")); }; @@ -311,7 +426,8 @@ while let Some(entry) = entry_stream.next().await? { entries.push(entry); } - + // Create a topologically ordered vector of trusted blocks - the pruning point and its anticone, + // and their DAA window headers let mut trusted_set = pkg.build_trusted_subdag(entries)?; if self.ctx.config.enable_sanity_checks { @@ -374,6 +490,7 @@ // TODO (relaxed): queue and join in batches staging.validate_and_insert_trusted_block(tb).virtual_state_task.await?; } + staging.async_clear_body_missing_anticone_set().await; info!("Done processing trusted blocks"); Ok(proof_pruning_point) } @@ -444,6 +561,15 @@ Ok(()) } + async fn sync_new_utxo_set(&mut self, consensus: &ConsensusProxy, pruning_point: Hash) -> Result<(), ProtocolError> { + // A better solution could be to create a copy of the old utxo state for some sort of fallback rather than delete it. + consensus.async_clear_pruning_utxo_set().await; // This deletes the old pruning utxoset and also marks the pruning utxoset as invalidated + self.sync_pruning_point_utxoset(consensus, pruning_point).await?; + consensus.async_set_pruning_utxoset_stable().await; // Only if the function reaches this point will the utxoset be considered "final" + self.ctx.on_pruning_point_utxoset_override(); + Ok(()) + } + async fn sync_missing_relay_past_headers( &mut self, consensus: &ConsensusProxy, @@ -492,6 +618,8 @@ consensus: &ConsensusProxy, staging_consensus: &ConsensusProxy, ) -> Result<(), ProtocolError> { + // The purpose of this check is to prevent the potential abuse explained here: + // https://github.com/kaspanet/research/issues/3#issuecomment-895243792 let staging_hst = staging_consensus.async_get_header(staging_consensus.async_get_headers_selected_tip().await).await.unwrap(); let current_hst = consensus.async_get_header(consensus.async_get_headers_selected_tip().await).await.unwrap(); // If staging is behind current or within 10 minutes ahead of it, then something is wrong and we reject the IBD @@ -507,6 +635,7 @@ staging selected tip ({}) is too small or negative. Aborting IBD...", } async fn sync_pruning_point_utxoset(&mut self, consensus: &ConsensusProxy, pruning_point: Hash) -> Result<(), ProtocolError> { + info!("downloading the pruning point utxoset, this can take a little while"); self.router .enqueue(make_message!( Payload::RequestPruningPointUtxoSet, @@ -527,7 +656,96 @@ staging selected tip ({}) is too small or negative. 
Aborting IBD...", consensus.clone().spawn_blocking(move |c| c.import_pruning_point_utxo_set(pruning_point, multiset)).await?; Ok(()) } - + async fn sync_missing_trusted_bodies(&mut self, consensus: &ConsensusProxy) -> Result<(), ProtocolError> { + info!("downloading pruning point anticone missing block data"); + let diesembodied_hashes = consensus.async_get_body_missing_anticone().await; + if self.body_only_ibd_permitted { + self.sync_missing_trusted_bodies_no_headers(consensus, diesembodied_hashes).await? + } else { + self.sync_missing_trusted_bodies_full_blocks(consensus, diesembodied_hashes).await?; + } + consensus.async_clear_body_missing_anticone_set().await; + Ok(()) + } + async fn sync_missing_trusted_bodies_no_headers( + &mut self, + consensus: &ConsensusProxy, + diesembodied_hashes: Vec, + ) -> Result<(), ProtocolError> { + let iter = diesembodied_hashes.chunks(IBD_BATCH_SIZE); + for chunk in iter { + self.router + .enqueue(make_message!( + Payload::RequestBlockBodies, + RequestBlockBodiesMessage { hashes: chunk.iter().map(|h| h.into()).collect() } + )) + .await?; + let mut jobs = Vec::with_capacity(chunk.len()); + + for &hash in chunk.iter() { + let msg = dequeue_with_timeout!(self.incoming_route, Payload::BlockBody)?; + let blk_body: BlockBody = msg.try_into()?; + // TODO (relaxed): make header queries in a batch. + let blk_header = consensus.async_get_header(hash).await.map_err(|err| { + // Conceptually this indicates local inconsistency, since we received the expected hashes via a local + // get_missing_block_body_hashes call. However for now we fail gracefully and only disconnect from this peer. + ProtocolError::OtherOwned(format!("syncee inconsistency: missing block header for {}, err: {}", hash, err)) + })?; + if blk_body.is_empty() { + return Err(ProtocolError::OtherOwned(format!("sent empty block body for block {}", hash))); + } + let block = Block { header: blk_header, transactions: blk_body.into() }; + // TODO (relaxed): sending ghostdag data may be redundant, especially when the headers were already verified. + // Consider sending empty ghostdag data, simplifying a great deal. The result should be the same - + // a trusted task is sent, however the header is already verified, and hence only the block body will be verified. 
+ jobs.push( + consensus + .validate_and_insert_trusted_block(TrustedBlock::new(block, consensus.async_get_ghostdag_data(hash).await?)) + .virtual_state_task, + ); + } + try_join_all(jobs).await?; // TODO (relaxed): be more efficient with batching as done with block bodies in general + } + Ok(()) + } + async fn sync_missing_trusted_bodies_full_blocks( + &mut self, + consensus: &ConsensusProxy, + disembodied_hashes: Vec<Hash>, + ) -> Result<(), ProtocolError> { + let iter = disembodied_hashes.chunks(IBD_BATCH_SIZE); + for chunk in iter { + self.router + .enqueue(make_message!( + Payload::RequestIbdBlocks, + RequestIbdBlocksMessage { hashes: chunk.iter().map(|h| h.into()).collect() } + )) + .await?; + let mut jobs = Vec::with_capacity(chunk.len()); + + for &hash in chunk.iter() { + // TODO: change to BodyOnly requests when incorporated + let msg = dequeue_with_timeout!(self.incoming_route, Payload::IbdBlock)?; + let block: Block = msg.try_into()?; + if block.hash() != hash { + return Err(ProtocolError::OtherOwned(format!("expected block {} but got {}", hash, block.hash()))); + } + if block.is_header_only() { + return Err(ProtocolError::OtherOwned(format!("sent header-only block {} where a block with body was expected", block.hash()))); + } + // TODO (relaxed): sending ghostdag data may be redundant, especially when the headers were already verified. + // Consider sending empty ghostdag data, simplifying a great deal. The result should be the same - + // a trusted task is sent, however the header is already verified, and hence only the block body will be verified. + jobs.push( + consensus + .validate_and_insert_trusted_block(TrustedBlock::new(block, consensus.async_get_ghostdag_data(hash).await?)) + .virtual_state_task, + ); + } + try_join_all(jobs).await?; // TODO (relaxed): be more efficient with batching as done with block bodies in general + } + Ok(()) + } async fn sync_missing_block_bodies(&mut self, consensus: &ConsensusProxy, high: Hash) -> Result<(), ProtocolError> { // TODO (relaxed): query consensus in batches let sleep_task = sleep(Duration::from_secs(2)); @@ -583,6 +801,18 @@ staging selected tip ({}) is too small or negative. Aborting IBD...", &mut self, consensus: &ConsensusProxy, chunk: &[Hash], + ) -> Result<QueueChunkOutput, ProtocolError> { + if self.body_only_ibd_permitted { + self.queue_block_processing_chunk_body_only(consensus, chunk).await + } else { + self.queue_block_processing_chunk_full_block(consensus, chunk).await + } + } + + async fn queue_block_processing_chunk_full_block( + &mut self, + consensus: &ConsensusProxy, + chunk: &[Hash], + ) -> Result<QueueChunkOutput, ProtocolError> { let mut jobs = Vec::with_capacity(chunk.len()); let mut current_daa_score = 0; @@ -606,7 +836,41 @@ staging selected tip ({}) is too small or negative. 
Aborting IBD...", current_timestamp = block.header.timestamp; jobs.push(consensus.validate_and_insert_block(block).virtual_state_task); } + Ok(QueueChunkOutput { jobs, daa_score: current_daa_score, timestamp: current_timestamp }) + } + async fn queue_block_processing_chunk_body_only( + &mut self, + consensus: &ConsensusProxy, + chunk: &[Hash], + ) -> Result { + let mut jobs = Vec::with_capacity(chunk.len()); + let mut current_daa_score = 0; + let mut current_timestamp = 0; + self.router + .enqueue(make_request!( + Payload::RequestBlockBodies, + RequestBlockBodiesMessage { hashes: chunk.iter().map(|h| h.into()).collect() }, + self.incoming_route.id() + )) + .await?; + for &expected_hash in chunk { + let msg = dequeue_with_timeout!(self.incoming_route, Payload::BlockBody)?; + // TODO (relaxed): make header queries in a batch. + let blk_header = consensus.async_get_header(expected_hash).await.map_err(|err| { + // Conceptually this indicates local inconsistency, since we received the expected hashes via a local + // get_missing_block_body_hashes call. However for now we fail gracefully and only disconnect from this peer. + ProtocolError::OtherOwned(format!("syncee inconsistency: missing block header for {}, err: {}", expected_hash, err)) + })?; + let blk_body: BlockBody = msg.try_into()?; + if blk_body.is_empty() { + return Err(ProtocolError::OtherOwned(format!("sent empty block body for block {}", expected_hash))); + } + let block = Block { header: blk_header, transactions: blk_body.into() }; + current_daa_score = block.header.daa_score; + current_timestamp = block.header.timestamp; + jobs.push(consensus.validate_and_insert_block(block).virtual_state_task); + } Ok(QueueChunkOutput { jobs, daa_score: current_daa_score, timestamp: current_timestamp }) } } diff --git a/protocol/flows/src/v5/ibd/mod.rs b/protocol/flows/src/ibd/mod.rs similarity index 100% rename from protocol/flows/src/v5/ibd/mod.rs rename to protocol/flows/src/ibd/mod.rs diff --git a/protocol/flows/src/v5/ibd/negotiate.rs b/protocol/flows/src/ibd/negotiate.rs similarity index 97% rename from protocol/flows/src/v5/ibd/negotiate.rs rename to protocol/flows/src/ibd/negotiate.rs index 20963c14e7..1f16090f40 100644 --- a/protocol/flows/src/v5/ibd/negotiate.rs +++ b/protocol/flows/src/ibd/negotiate.rs @@ -17,6 +17,7 @@ pub struct ChainNegotiationOutput { // chain on block locator queries pub syncer_virtual_selected_parent: Hash, pub highest_known_syncer_chain_hash: Option, + pub syncer_pruning_point: Hash, } impl IbdFlow { @@ -36,6 +37,7 @@ impl IbdFlow { if locator_hashes.is_empty() { return Err(ProtocolError::Other("Expecting initial syncer chain block locator to contain at least one element")); } + let mut syncer_pruning_point = *locator_hashes.last().unwrap(); debug!( "IBD chain negotiation with peer {} started and received {} hashes ({}, {})", @@ -131,7 +133,7 @@ impl IbdFlow { self.router, negotiation_restart_counter ))); } - if negotiation_restart_counter > self.ctx.config.bps().upper_bound() { + if negotiation_restart_counter > self.ctx.config.bps().after() { // bps is just an intuitive threshold here warn!("IBD chain negotiation with syncer {} restarted {} times", self.router, negotiation_restart_counter); } else { @@ -157,11 +159,12 @@ impl IbdFlow { initial_locator_len = locator_hashes.len(); // Reset syncer's virtual selected parent syncer_virtual_selected_parent = locator_hashes[0]; + syncer_pruning_point = *locator_hashes.last().unwrap(); } } debug!("Found highest known syncer chain block {:?} from peer {}", 
highest_known_syncer_chain_hash, self.router); - Ok(ChainNegotiationOutput { syncer_virtual_selected_parent, highest_known_syncer_chain_hash }) + Ok(ChainNegotiationOutput { syncer_virtual_selected_parent, highest_known_syncer_chain_hash, syncer_pruning_point }) } async fn get_syncer_chain_block_locator( diff --git a/protocol/flows/src/v5/ibd/progress.rs b/protocol/flows/src/ibd/progress.rs similarity index 100% rename from protocol/flows/src/v5/ibd/progress.rs rename to protocol/flows/src/ibd/progress.rs diff --git a/protocol/flows/src/v5/ibd/streams.rs b/protocol/flows/src/ibd/streams.rs similarity index 100% rename from protocol/flows/src/v5/ibd/streams.rs rename to protocol/flows/src/ibd/streams.rs diff --git a/protocol/flows/src/lib.rs b/protocol/flows/src/lib.rs index 90bfef79d5..19a3325f0b 100644 --- a/protocol/flows/src/lib.rs +++ b/protocol/flows/src/lib.rs @@ -1,7 +1,7 @@ pub mod flow_context; pub mod flow_trait; pub mod flowcontext; +pub mod ibd; pub mod service; -pub mod v5; -pub mod v6; -pub use v6 as v7; +pub mod v7; +pub mod v8; diff --git a/protocol/flows/src/v5/request_pruning_point_and_anticone.rs b/protocol/flows/src/v5/request_pruning_point_and_anticone.rs deleted file mode 100644 index aa05ae7681..0000000000 --- a/protocol/flows/src/v5/request_pruning_point_and_anticone.rs +++ /dev/null @@ -1,117 +0,0 @@ -use std::sync::Arc; - -use itertools::Itertools; -use kaspa_consensus_core::BlockHashMap; -use kaspa_p2p_lib::{ - common::ProtocolError, - dequeue, dequeue_with_request_id, make_response, - pb::{ - self, kaspad_message::Payload, BlockWithTrustedDataV4Message, DoneBlocksWithTrustedDataMessage, PruningPointsMessage, - TrustedDataMessage, - }, - IncomingRoute, Router, -}; -use log::debug; - -use crate::{flow_context::FlowContext, flow_trait::Flow, v5::ibd::IBD_BATCH_SIZE}; - -pub struct PruningPointAndItsAnticoneRequestsFlow { - ctx: FlowContext, - router: Arc, - incoming_route: IncomingRoute, -} - -#[async_trait::async_trait] -impl Flow for PruningPointAndItsAnticoneRequestsFlow { - fn router(&self) -> Option> { - Some(self.router.clone()) - } - - async fn start(&mut self) -> Result<(), ProtocolError> { - self.start_impl().await - } -} - -impl PruningPointAndItsAnticoneRequestsFlow { - pub fn new(ctx: FlowContext, router: Arc, incoming_route: IncomingRoute) -> Self { - Self { ctx, router, incoming_route } - } - - async fn start_impl(&mut self) -> Result<(), ProtocolError> { - loop { - let (_, request_id) = dequeue_with_request_id!(self.incoming_route, Payload::RequestPruningPointAndItsAnticone)?; - debug!("Got request for pruning point and its anticone"); - - let consensus = self.ctx.consensus(); - let mut session = consensus.session().await; - - let pp_headers = session.async_pruning_point_headers().await; - self.router - .enqueue(make_response!( - Payload::PruningPoints, - PruningPointsMessage { headers: pp_headers.into_iter().map(|header| ::from(&*header)).collect() }, - request_id - )) - .await?; - - let trusted_data = session.async_get_pruning_point_anticone_and_trusted_data().await?; - let pp_anticone = &trusted_data.anticone; - let daa_window = &trusted_data.daa_window_blocks; - let ghostdag_data = &trusted_data.ghostdag_blocks; - self.router - .enqueue(make_response!( - Payload::TrustedData, - TrustedDataMessage { - daa_window: daa_window.iter().map(|daa_block| daa_block.into()).collect_vec(), - ghostdag_data: ghostdag_data.iter().map(|gd| gd.into()).collect_vec() - }, - request_id - )) - .await?; - - let daa_window_hash_to_index = - 
BlockHashMap::from_iter(daa_window.iter().enumerate().map(|(i, trusted_header)| (trusted_header.header.hash, i))); - let ghostdag_data_hash_to_index = - BlockHashMap::from_iter(ghostdag_data.iter().enumerate().map(|(i, trusted_gd)| (trusted_gd.hash, i))); - - for hashes in pp_anticone.chunks(IBD_BATCH_SIZE) { - for hash in hashes { - let hash = *hash; - let daa_window_indices = session - .async_get_daa_window(hash) - .await? - .into_iter() - .map(|hash| *daa_window_hash_to_index.get(&hash).unwrap() as u64) - .collect_vec(); - let ghostdag_data_indices = session - .async_get_trusted_block_associated_ghostdag_data_block_hashes(hash) - .await? - .into_iter() - .map(|hash| *ghostdag_data_hash_to_index.get(&hash).unwrap() as u64) - .collect_vec(); - let block = session.async_get_block(hash).await?; - self.router - .enqueue(make_response!( - Payload::BlockWithTrustedDataV4, - BlockWithTrustedDataV4Message { block: Some((&block).into()), daa_window_indices, ghostdag_data_indices }, - request_id - )) - .await?; - } - - if hashes.len() == IBD_BATCH_SIZE { - // No timeout here, as we don't care if the syncee takes its time computing, - // since it only blocks this dedicated flow - drop(session); // Avoid holding the session through dequeue calls - dequeue!(self.incoming_route, Payload::RequestNextPruningPointAndItsAnticoneBlocks)?; - session = consensus.session().await; - } - } - - self.router - .enqueue(make_response!(Payload::DoneBlocksWithTrustedData, DoneBlocksWithTrustedDataMessage {}, request_id)) - .await?; - debug!("Finished sending pruning point anticone") - } - } -} diff --git a/protocol/flows/src/v5/address.rs b/protocol/flows/src/v7/address.rs similarity index 100% rename from protocol/flows/src/v5/address.rs rename to protocol/flows/src/v7/address.rs diff --git a/protocol/flows/src/v5/blockrelay/flow.rs b/protocol/flows/src/v7/blockrelay/flow.rs similarity index 94% rename from protocol/flows/src/v5/blockrelay/flow.rs rename to protocol/flows/src/v7/blockrelay/flow.rs index 4811c061d8..96d2e0470a 100644 --- a/protocol/flows/src/v5/blockrelay/flow.rs +++ b/protocol/flows/src/v7/blockrelay/flow.rs @@ -92,6 +92,7 @@ impl HandleRelayInvsFlow { // Loop over incoming block inv messages let inv = self.invs_route.dequeue().await?; let session = self.ctx.consensus().unguarded_session(); + let is_ibd_in_transitional_state = session.async_is_consensus_in_transitional_ibd_state().await; match session.async_get_block_status(inv.hash).await { None | Some(BlockStatus::StatusHeaderOnly) => {} // Continue processing this missing inv @@ -149,6 +150,11 @@ impl HandleRelayInvsFlow { ); continue; } + // if in a transitional ibd state, do not wait, sync immediately + if is_ibd_in_transitional_state { + self.try_trigger_ibd(block)?; + continue; + } let BlockValidationFutures { block_task, mut virtual_state_task } = session.validate_and_insert_block(block.clone()); @@ -209,9 +215,8 @@ impl HandleRelayInvsFlow { // can continue processing the following relay blocks let ctx = self.ctx.clone(); tokio::spawn(async move { - let daa_score = block.header.daa_score; ctx.on_new_block(&session, ancestor_batch, block, virtual_state_task).await; - ctx.log_new_block_event(BlockLogEvent::Relay(inv.hash), daa_score); + ctx.log_block_event(BlockLogEvent::Relay(inv.hash)); }); } } @@ -290,12 +295,7 @@ impl HandleRelayInvsFlow { None | Some(OrphanOutput::Unknown) => {} } } else { - // Send the block to IBD flow via the dedicated job channel. 
If the channel has a pending job, we prefer - the block with higher blue work, since it is usually more recent - match self.ibd_sender.try_send(block, |b, c| if b.header.blue_work > c.header.blue_work { b } else { c }) { - Ok(_) | Err(TrySendError::Full(_)) => {} - Err(TrySendError::Closed(_)) => return Err(ProtocolError::ConnectionClosed), // This indicates that IBD flow has exited - } + self.try_trigger_ibd(block)?; } Ok(None) } @@ -356,4 +356,13 @@ impl HandleRelayInvsFlow { } Ok(false) } + + // Send the block to IBD flow via the dedicated job channel. If the channel has a pending job, we prefer + // the block with higher blue work, since it is usually more recent + fn try_trigger_ibd(&self, block: Block) -> Result<(), ProtocolError> { + match self.ibd_sender.try_send(block.clone(), |b, c| if b.header.blue_work > c.header.blue_work { b } else { c }) { + Ok(_) | Err(TrySendError::Full(_)) => Ok(()), + Err(TrySendError::Closed(_)) => Err(ProtocolError::ConnectionClosed), // This indicates that IBD flow has exited + } + } } diff --git a/protocol/flows/src/v5/blockrelay/handle_requests.rs b/protocol/flows/src/v7/blockrelay/handle_requests.rs similarity index 84% rename from protocol/flows/src/v5/blockrelay/handle_requests.rs rename to protocol/flows/src/v7/blockrelay/handle_requests.rs index 1519b867fc..2d6179b8ac 100644 --- a/protocol/flows/src/v5/blockrelay/handle_requests.rs +++ b/protocol/flows/src/v7/blockrelay/handle_requests.rs @@ -50,6 +50,13 @@ impl HandleRelayBlockRequests { } async fn send_sink(&mut self) -> Result<(), ProtocolError> { + let session = self.ctx.consensus().unguarded_session(); + let is_in_transitional_ibd_state = session.async_is_consensus_in_transitional_ibd_state().await; + drop(session); + // The sink may be missing its block body while in a transitional state, hence letting peers sync off it must be prevented in the meanwhile + if is_in_transitional_ibd_state { + return Ok(()); + } let sink = self.ctx.consensus().unguarded_session().async_get_sink().await; if sink == self.ctx.config.genesis.hash { return Ok(()); diff --git a/protocol/flows/src/v5/blockrelay/mod.rs b/protocol/flows/src/v7/blockrelay/mod.rs similarity index 100% rename from protocol/flows/src/v5/blockrelay/mod.rs rename to protocol/flows/src/v7/blockrelay/mod.rs diff --git a/protocol/flows/src/v5/mod.rs b/protocol/flows/src/v7/mod.rs similarity index 90% rename from protocol/flows/src/v5/mod.rs rename to protocol/flows/src/v7/mod.rs index 6dba3f4182..2b8caefc45 100644 --- a/protocol/flows/src/v5/mod.rs +++ b/protocol/flows/src/v7/mod.rs @@ -1,7 +1,7 @@ -use self::{ +use crate::ibd::IbdFlow; +use crate::v7::{ address::{ReceiveAddressesFlow, SendAddressesFlow}, blockrelay::{flow::HandleRelayInvsFlow, handle_requests::HandleRelayBlockRequests}, - ibd::IbdFlow, ping::{ReceivePingsFlow, SendPingsFlow}, request_antipast::HandleAntipastRequests, request_block_locator::RequestBlockLocatorFlow, @@ -14,14 +14,12 @@ txrelay::flow::{RelayTransactionsFlow, RequestTransactionsFlow}, }; use crate::{flow_context::FlowContext, flow_trait::Flow}; - use kaspa_p2p_lib::{KaspadMessagePayloadType, Router, SharedIncomingRoute}; use kaspa_utils::channel; use std::sync::Arc; pub(crate) mod address; pub(crate) mod blockrelay; -pub(crate) mod ibd; pub(crate) mod ping; pub(crate) mod request_antipast; pub(crate) mod request_block_locator; @@ -37,7 +35,8 @@ pub fn register(ctx: FlowContext, router: Arc<Router>) -> Vec<Box<dyn Flow>> { // IBD flow <-> invs flow communication uses a job channel in order to always // maintain at most a single 
pending job which can be updated let (ibd_sender, relay_receiver) = channel::job(); - let flows: Vec<Box<dyn Flow>> = vec![ + let body_only_ibd_permitted = false; + let mut flows: Vec<Box<dyn Flow>> = vec![ Box::new(IbdFlow::new( ctx.clone(), router.clone(), @@ -58,15 +57,7 @@ pub fn register(ctx: FlowContext, router: Arc<Router>) -> Vec<Box<dyn Flow>> { KaspadMessagePayloadType::DonePruningPointUtxoSetChunks, ]), relay_receiver, - )), - Box::new(HandleRelayInvsFlow::new( - ctx.clone(), - router.clone(), - SharedIncomingRoute::new( - router.subscribe_with_capacity(vec![KaspadMessagePayloadType::InvRelayBlock], ctx.block_invs_channel_size()), - ), - router.subscribe(vec![KaspadMessagePayloadType::Block, KaspadMessagePayloadType::BlockLocator]), - ibd_sender, + body_only_ibd_permitted, )), Box::new(HandleRelayBlockRequests::new( ctx.clone(), @@ -138,12 +129,26 @@ pub fn register(ctx: FlowContext, router: Arc<Router>) -> Vec<Box<dyn Flow>> { router.subscribe(vec![KaspadMessagePayloadType::RequestAddresses]), )), Box::new(RequestBlockLocatorFlow::new( - ctx, + ctx.clone(), router.clone(), router.subscribe(vec![KaspadMessagePayloadType::RequestBlockLocator]), )), ]; + let invs_route = router.subscribe_with_capacity(vec![KaspadMessagePayloadType::InvRelayBlock], ctx.block_invs_channel_size()); + let shared_invs_route = SharedIncomingRoute::new(invs_route); + + let num_relay_flows = (ctx.config.bps().after() as usize / 2).max(1); + flows.extend((0..num_relay_flows).map(|_| { + Box::new(HandleRelayInvsFlow::new( + ctx.clone(), + router.clone(), + shared_invs_route.clone(), + router.subscribe(vec![]), + ibd_sender.clone(), + )) as Box<dyn Flow> + })); + // The reject message is handled as a special case by the router // KaspadMessagePayloadType::Reject, diff --git a/protocol/flows/src/v5/ping.rs b/protocol/flows/src/v7/ping.rs similarity index 100% rename from protocol/flows/src/v5/ping.rs rename to protocol/flows/src/v7/ping.rs diff --git a/protocol/flows/src/v5/request_antipast.rs b/protocol/flows/src/v7/request_antipast.rs similarity index 93% rename from protocol/flows/src/v5/request_antipast.rs rename to protocol/flows/src/v7/request_antipast.rs index 521418a288..92b7e6dbde 100644 --- a/protocol/flows/src/v5/request_antipast.rs +++ b/protocol/flows/src/v7/request_antipast.rs @@ -46,9 +46,8 @@ impl HandleAntipastRequests { // intersected by past of the relayed block. We do not expect the relay block to be too much after // the sink (in fact usually it should be in its past or anticone), hence we bound the expected traversal to be // in the order of `mergeset_size_limit`. 
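Since the relay block is expected to be near the sink, the change below caps the antipast traversal at `mergeset_size_limit * 4` and treats anything larger as an anomaly. A minimal sketch of such a budgeted traversal (`neighbors` is a hypothetical stand-in for the real DAG accessors, not an API from the codebase):

```rust
use std::collections::HashSet;
use std::hash::Hash;

// A traversal with an explicit visit budget, in the spirit of the bound applied below.
fn bounded_traversal<T: Copy + Eq + Hash>(
    start: T,
    neighbors: impl Fn(T) -> Vec<T>, // stand-in for the real DAG accessors
    max_visits: usize,               // e.g. mergeset_size_limit * 4
) -> Result<Vec<T>, &'static str> {
    let (mut stack, mut seen, mut out) = (vec![start], HashSet::new(), Vec::new());
    while let Some(node) = stack.pop() {
        if !seen.insert(node) {
            continue; // already visited
        }
        if seen.len() > max_visits {
            // Mirrors how a flow would reject a peer that drives the traversal
            // far beyond the expected size.
            return Err("traversal exceeded expected bound");
        }
        out.push(node);
        stack.extend(neighbors(node));
    }
    Ok(out)
}
```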
- let hashes = session - .async_get_antipast_from_pov(block, context, Some(self.ctx.config.mergeset_size_limit().upper_bound() * 4)) - .await?; + let hashes = + session.async_get_antipast_from_pov(block, context, Some(self.ctx.config.mergeset_size_limit().after() * 4)).await?; let mut headers = session .spawn_blocking(|c| hashes.into_iter().map(|h| c.get_header(h)).collect::<Result<Vec<_>, ConsensusError>>()) .await?; diff --git a/protocol/flows/src/v5/request_block_locator.rs b/protocol/flows/src/v7/request_block_locator.rs similarity index 100% rename from protocol/flows/src/v5/request_block_locator.rs rename to protocol/flows/src/v7/request_block_locator.rs diff --git a/protocol/flows/src/v5/request_headers.rs b/protocol/flows/src/v7/request_headers.rs similarity index 98% rename from protocol/flows/src/v5/request_headers.rs rename to protocol/flows/src/v7/request_headers.rs index 38f2fcac81..cfc54f8919 100644 --- a/protocol/flows/src/v5/request_headers.rs +++ b/protocol/flows/src/v7/request_headers.rs @@ -37,7 +37,7 @@ impl RequestHeadersFlow { async fn start_impl(&mut self) -> Result<(), ProtocolError> { const MAX_BLOCKS: usize = 1 << 10; // Internal consensus logic requires that `max_blocks > mergeset_size_limit` - let max_blocks = max(MAX_BLOCKS, self.ctx.config.mergeset_size_limit().upper_bound() as usize + 1); + let max_blocks = max(MAX_BLOCKS, self.ctx.config.mergeset_size_limit().after() as usize + 1); loop { let (msg, request_id) = dequeue_with_request_id!(self.incoming_route, Payload::RequestHeaders)?; diff --git a/protocol/flows/src/v5/request_ibd_blocks.rs b/protocol/flows/src/v7/request_ibd_blocks.rs similarity index 100% rename from protocol/flows/src/v5/request_ibd_blocks.rs rename to protocol/flows/src/v7/request_ibd_blocks.rs diff --git a/protocol/flows/src/v5/request_ibd_chain_block_locator.rs b/protocol/flows/src/v7/request_ibd_chain_block_locator.rs similarity index 100% rename from protocol/flows/src/v5/request_ibd_chain_block_locator.rs rename to protocol/flows/src/v7/request_ibd_chain_block_locator.rs diff --git a/protocol/flows/src/v5/request_pp_proof.rs b/protocol/flows/src/v7/request_pp_proof.rs similarity index 100% rename from protocol/flows/src/v5/request_pp_proof.rs rename to protocol/flows/src/v7/request_pp_proof.rs diff --git a/protocol/flows/src/v6/request_pruning_point_and_anticone.rs b/protocol/flows/src/v7/request_pruning_point_and_anticone.rs similarity index 97% rename from protocol/flows/src/v6/request_pruning_point_and_anticone.rs rename to protocol/flows/src/v7/request_pruning_point_and_anticone.rs index 34bff88423..a24cb7a9b1 100644 --- a/protocol/flows/src/v6/request_pruning_point_and_anticone.rs +++ b/protocol/flows/src/v7/request_pruning_point_and_anticone.rs @@ -16,7 +16,7 @@ use kaspa_p2p_lib::{ use log::debug; use std::sync::Arc; -use crate::{flow_context::FlowContext, flow_trait::Flow, v5::ibd::IBD_BATCH_SIZE}; +use crate::{flow_context::FlowContext, flow_trait::Flow, ibd::IBD_BATCH_SIZE}; pub struct PruningPointAndItsAnticoneRequestsFlow { ctx: FlowContext, diff --git a/protocol/flows/src/v5/request_pruning_point_utxo_set.rs b/protocol/flows/src/v7/request_pruning_point_utxo_set.rs similarity index 97% rename from protocol/flows/src/v5/request_pruning_point_utxo_set.rs rename to protocol/flows/src/v7/request_pruning_point_utxo_set.rs index e3eb5cc153..13c1799925 100644 --- a/protocol/flows/src/v5/request_pruning_point_utxo_set.rs +++ b/protocol/flows/src/v7/request_pruning_point_utxo_set.rs @@ -1,4 +1,4 @@ -use crate::{flow_context::FlowContext, 
flow_trait::Flow, v5::ibd::IBD_BATCH_SIZE}; +use crate::{flow_context::FlowContext, flow_trait::Flow, ibd::IBD_BATCH_SIZE}; use itertools::Itertools; use kaspa_consensus_core::errors::consensus::ConsensusError; use kaspa_core::debug; diff --git a/protocol/flows/src/v5/txrelay/flow.rs b/protocol/flows/src/v7/txrelay/flow.rs similarity index 100% rename from protocol/flows/src/v5/txrelay/flow.rs rename to protocol/flows/src/v7/txrelay/flow.rs diff --git a/protocol/flows/src/v5/txrelay/mod.rs b/protocol/flows/src/v7/txrelay/mod.rs similarity index 100% rename from protocol/flows/src/v5/txrelay/mod.rs rename to protocol/flows/src/v7/txrelay/mod.rs diff --git a/protocol/flows/src/v6/mod.rs b/protocol/flows/src/v8/mod.rs similarity index 91% rename from protocol/flows/src/v6/mod.rs rename to protocol/flows/src/v8/mod.rs index b73fae2d3f..c6588e0853 100644 --- a/protocol/flows/src/v6/mod.rs +++ b/protocol/flows/src/v8/mod.rs @@ -1,7 +1,6 @@ -use crate::v5::{ +use crate::v7::{ address::{ReceiveAddressesFlow, SendAddressesFlow}, blockrelay::{flow::HandleRelayInvsFlow, handle_requests::HandleRelayBlockRequests}, - ibd::IbdFlow, ping::{ReceivePingsFlow, SendPingsFlow}, request_antipast::HandleAntipastRequests, request_block_locator::RequestBlockLocatorFlow, @@ -9,24 +8,24 @@ use crate::v5::{ request_ibd_blocks::HandleIbdBlockRequests, request_ibd_chain_block_locator::RequestIbdChainBlockLocatorFlow, request_pp_proof::RequestPruningPointProofFlow, + request_pruning_point_and_anticone::PruningPointAndItsAnticoneRequestsFlow, request_pruning_point_utxo_set::RequestPruningPointUtxoSetFlow, txrelay::flow::{RelayTransactionsFlow, RequestTransactionsFlow}, }; +pub(crate) mod request_block_bodies; use crate::{flow_context::FlowContext, flow_trait::Flow}; +use crate::ibd::IbdFlow; use kaspa_p2p_lib::{KaspadMessagePayloadType, Router, SharedIncomingRoute}; use kaspa_utils::channel; +use request_block_bodies::HandleBlockBodyRequests; use std::sync::Arc; -use crate::v6::request_pruning_point_and_anticone::PruningPointAndItsAnticoneRequestsFlow; - -pub(crate) mod request_pruning_point_and_anticone; - pub fn register(ctx: FlowContext, router: Arc<Router>) -> Vec<Box<dyn Flow>> { // IBD flow <-> invs flow communication uses a job channel in order to always // maintain at most a single pending job which can be updated let (ibd_sender, relay_receiver) = channel::job(); - + let body_only_ibd_permitted = true; let mut flows: Vec<Box<dyn Flow>> = vec![ Box::new(IbdFlow::new( ctx.clone(), @@ -40,6 +39,7 @@ pub fn register(ctx: FlowContext, router: Arc<Router>) -> Vec<Box<dyn Flow>> { KaspadMessagePayloadType::DoneBlocksWithTrustedData, KaspadMessagePayloadType::IbdChainBlockLocator, KaspadMessagePayloadType::IbdBlock, + KaspadMessagePayloadType::BlockBody, KaspadMessagePayloadType::TrustedData, KaspadMessagePayloadType::PruningPoints, KaspadMessagePayloadType::PruningPointProof, @@ -48,6 +48,7 @@ KaspadMessagePayloadType::DonePruningPointUtxoSetChunks, ]), relay_receiver, + body_only_ibd_permitted, )), Box::new(HandleRelayBlockRequests::new( ctx.clone(), @@ -92,6 +93,11 @@ router.subscribe(vec![KaspadMessagePayloadType::RequestIbdBlocks]), )), + Box::new(HandleBlockBodyRequests::new( + ctx.clone(), + router.clone(), + router.subscribe(vec![KaspadMessagePayloadType::RequestBlockBodies]), + )), Box::new(HandleAntipastRequests::new( ctx.clone(), router.clone(), @@ -128,7 +134,7 @@ let invs_route = 
router.subscribe_with_capacity(vec![KaspadMessagePayloadType::InvRelayBlock], ctx.block_invs_channel_size()); let shared_invs_route = SharedIncomingRoute::new(invs_route); - let num_relay_flows = (ctx.config.bps().upper_bound() as usize / 2).max(1); + let num_relay_flows = (ctx.config.bps().after() as usize / 2).max(1); flows.extend((0..num_relay_flows).map(|_| { Box::new(HandleRelayInvsFlow::new( ctx.clone(), diff --git a/protocol/flows/src/v8/request_block_bodies.rs b/protocol/flows/src/v8/request_block_bodies.rs new file mode 100644 index 0000000000..72bd58bf51 --- /dev/null +++ b/protocol/flows/src/v8/request_block_bodies.rs @@ -0,0 +1,43 @@ +use crate::{flow_context::FlowContext, flow_trait::Flow}; +use kaspa_core::debug; +use kaspa_p2p_lib::{ + common::ProtocolError, dequeue_with_request_id, make_response, pb::kaspad_message::Payload, IncomingRoute, Router, +}; +use std::sync::Arc; + +pub struct HandleBlockBodyRequests { + ctx: FlowContext, + router: Arc<Router>, + incoming_route: IncomingRoute, +} + +#[async_trait::async_trait] +impl Flow for HandleBlockBodyRequests { + fn router(&self) -> Option<Arc<Router>> { + Some(self.router.clone()) + } + + async fn start(&mut self) -> Result<(), ProtocolError> { + self.start_impl().await + } +} + +impl HandleBlockBodyRequests { + pub fn new(ctx: FlowContext, router: Arc<Router>, incoming_route: IncomingRoute) -> Self { + Self { ctx, router, incoming_route } + } + + async fn start_impl(&mut self) -> Result<(), ProtocolError> { + loop { + let (msg, request_id) = dequeue_with_request_id!(self.incoming_route, Payload::RequestBlockBodies)?; + let hashes: Vec<_> = msg.try_into()?; + debug!("got request for {} block bodies", hashes.len()); + let session = self.ctx.consensus().unguarded_session(); + + for hash in hashes { + let body = session.async_get_block_body(hash).await?; + self.router.enqueue(make_response!(Payload::BlockBody, body.as_ref().into(), request_id)).await?; + } + } + } +} diff --git a/protocol/mining/src/rule_engine.rs b/protocol/mining/src/rule_engine.rs index f5f2736e86..7eb1dc98a3 100644 --- a/protocol/mining/src/rule_engine.rs +++ b/protocol/mining/src/rule_engine.rs @@ -64,16 +64,15 @@ impl MiningRuleEngine { if elapsed_time.as_secs() > 0 { let session = self.consensus_manager.consensus().unguarded_session(); - let sink_daa_timestamp = session.async_get_sink_daa_score_timestamp().await; let finality_point = session.async_finality_point().await; let finality_point_timestamp = session.async_get_header(finality_point).await.unwrap().timestamp; let extra_data = ExtraData { finality_point_timestamp, - target_time_per_block: self.config.target_time_per_block().get(sink_daa_timestamp.daa_score), + target_time_per_block: self.config.target_time_per_block().after(), has_sufficient_peer_connectivity: self.has_sufficient_peer_connectivity(), - finality_duration: self.config.finality_duration_in_milliseconds().get(sink_daa_timestamp.daa_score), + finality_duration: self.config.finality_duration_in_milliseconds().after(), elapsed_time, }; @@ -99,7 +98,7 @@ impl MiningRuleEngine { mining_rules: Arc, ) -> Self { let use_sync_rate_rule = Arc::new(AtomicBool::new(false)); - let rules: Vec> = vec![Arc::new(SyncRateRule::new(use_sync_rate_rule.clone()))]; + let rules: Vec> = vec![Arc::new(SyncRateRule::new(use_sync_rate_rule.clone()))]; Self { consensus_manager, config, processing_counters, tick_service, hub, use_sync_rate_rule, mining_rules, rules } } @@ -131,8 +130,7 @@ impl MiningRuleEngine { // enter the DAA window of fully-synced nodes and thus contribute to overall 
network difficulty // // [Crescendo]: both durations are nearly equal so this decision is negligible - let synced_threshold = - self.config.expected_difficulty_window_duration_in_milliseconds().get(sink_daa_score_timestamp.daa_score) / 4; + let synced_threshold = self.config.expected_difficulty_window_duration_in_milliseconds().after() / 4; // Roughly 10mins in all networks unix_now() < sink_timestamp + synced_threshold diff --git a/protocol/p2p/proto/messages.proto b/protocol/p2p/proto/messages.proto index 83f0f439f8..94cd7c39f7 100644 --- a/protocol/p2p/proto/messages.proto +++ b/protocol/p2p/proto/messages.proto @@ -52,6 +52,8 @@ message KaspadMessage { IbdChainBlockLocatorMessage ibdChainBlockLocator = 54; RequestAntipastMessage requestAntipast = 55; RequestNextPruningPointAndItsAnticoneBlocksMessage requestNextPruningPointAndItsAnticoneBlocks = 56; + BlockBodyMessage blockBody = 57; + RequestBlockBodiesMessage requestBlockBodies = 58; } } diff --git a/protocol/p2p/proto/p2p.proto b/protocol/p2p/proto/p2p.proto index f9395ed375..149ad62792 100644 --- a/protocol/p2p/proto/p2p.proto +++ b/protocol/p2p/proto/p2p.proto @@ -63,6 +63,10 @@ message BlockMessage{ repeated TransactionMessage transactions = 2; } +message BlockBodyMessage{ + repeated TransactionMessage transactions = 1; +} + message BlockHeader{ uint32 version = 1; repeated BlockLevelParents parents = 12; @@ -183,6 +187,10 @@ message RequestIBDBlocksMessage{ repeated Hash hashes = 1; } +message RequestBlockBodiesMessage{ + repeated Hash hashes = 1; +} + message UnexpectedPruningPointMessage{ } diff --git a/protocol/p2p/src/convert/block.rs b/protocol/p2p/src/convert/block.rs index 9d2011b6cf..2e66539f65 100644 --- a/protocol/p2p/src/convert/block.rs +++ b/protocol/p2p/src/convert/block.rs @@ -1,7 +1,7 @@ use super::{error::ConversionError, option::TryIntoOptionEx}; use crate::pb as protowire; use kaspa_consensus_core::{block::Block, tx::Transaction}; - +type BlockBody = Vec<Transaction>; // ---------------------------------------------------------------------------- // consensus_core to protowire // ---------------------------------------------------------------------------- @@ -11,6 +11,11 @@ impl From<&Block> for protowire::BlockMessage { Self { header: Some(block.header.as_ref().into()), transactions: block.transactions.iter().map(|tx| tx.into()).collect() } } } +impl From<&BlockBody> for protowire::BlockBodyMessage { + fn from(block_body: &BlockBody) -> Self { + Self { transactions: block_body.iter().map(|tx| tx.into()).collect() } + } +} // ---------------------------------------------------------------------------- // protowire to consensus_core @@ -26,3 +31,12 @@ impl TryFrom<protowire::BlockMessage> for Block { )) } } + +impl TryFrom<protowire::BlockBodyMessage> for BlockBody { + type Error = ConversionError; + fn try_from(body_message: protowire::BlockBodyMessage) -> Result<Self, Self::Error> { + let blk_body: BlockBody = + body_message.transactions.into_iter().map(|i| i.try_into()).collect::<Result<Vec<Transaction>, ConversionError>>()?; + Ok(blk_body) + } +} diff --git a/protocol/p2p/src/convert/error.rs b/protocol/p2p/src/convert/error.rs index 14b2cec1a8..d59b13745a 100644 --- a/protocol/p2p/src/convert/error.rs +++ b/protocol/p2p/src/convert/error.rs @@ -1,4 +1,4 @@ -use kaspa_consensus_core::subnets::SubnetworkConversionError; +use kaspa_consensus_core::subnets::SubnetworkConversionError; use thiserror::Error; #[derive(Clone, Debug, Error)] @@ -21,12 +21,15 @@ pub enum ConversionError { #[error("Integer parsing error: {0}")] IntCastingError(#[from] std::num::TryFromIntError), - #[error(transparent)] + #[error(transparent)] 
AddressParsingError(#[from] std::net::AddrParseError), - #[error(transparent)] + #[error(transparent)] IdentityError(#[from] uuid::Error), - - #[error(transparent)] - SubnetParsingError(#[from] SubnetworkConversionError), + + #[error(transparent)] + SubnetParsingError(#[from] SubnetworkConversionError), + + #[error(transparent)] + CompressedParentsError(#[from] kaspa_consensus_core::errors::header::CompressedParentsError), } diff --git a/protocol/p2p/src/convert/header.rs b/protocol/p2p/src/convert/header.rs index 98625f186d..d9f4a52849 100644 --- a/protocol/p2p/src/convert/header.rs +++ b/protocol/p2p/src/convert/header.rs @@ -13,7 +13,7 @@ impl From<&Header> for protowire::BlockHeader { fn from(item: &Header) -> Self { Self { version: item.version.into(), - parents: item.parents_by_level.iter().map(protowire::BlockLevelParents::from).collect(), + parents: item.parents_by_level.expanded_iter().map(protowire::BlockLevelParents::from).collect(), hash_merkle_root: Some(item.hash_merkle_root.into()), accepted_id_merkle_root: Some(item.accepted_id_merkle_root.into()), utxo_commitment: Some(item.utxo_commitment.into()), @@ -29,8 +29,8 @@ impl From<&Header> for protowire::BlockHeader { } } -impl From<&Vec> for protowire::BlockLevelParents { - fn from(item: &Vec) -> Self { +impl From<&[Hash]> for protowire::BlockLevelParents { + fn from(item: &[Hash]) -> Self { Self { parent_hashes: item.iter().map(|h| h.into()).collect() } } } @@ -44,7 +44,7 @@ impl TryFrom for Header { fn try_from(item: protowire::BlockHeader) -> Result { Ok(Self::new_finalized( item.version.try_into()?, - item.parents.into_iter().map(Vec::::try_from).collect::>, ConversionError>>()?, + item.parents.into_iter().map(Vec::::try_from).collect::>, ConversionError>>()?.try_into()?, item.hash_merkle_root.try_into_ex()?, item.accepted_id_merkle_root.try_into_ex()?, item.utxo_commitment.try_into_ex()?, diff --git a/protocol/p2p/src/convert/messages.rs b/protocol/p2p/src/convert/messages.rs index cfe90fd6d0..654eebd285 100644 --- a/protocol/p2p/src/convert/messages.rs +++ b/protocol/p2p/src/convert/messages.rs @@ -182,6 +182,13 @@ impl TryFrom for Vec { msg.hashes.into_iter().map(|v| v.try_into()).collect() } } +impl TryFrom for Vec { + type Error = ConversionError; + + fn try_from(msg: protowire::RequestBlockBodiesMessage) -> Result { + msg.hashes.into_iter().map(|v| v.try_into()).collect() + } +} impl TryFrom for Vec { type Error = ConversionError; diff --git a/protocol/p2p/src/core/payload_type.rs b/protocol/p2p/src/core/payload_type.rs index 3f265b7d90..03a6bdc043 100644 --- a/protocol/p2p/src/core/payload_type.rs +++ b/protocol/p2p/src/core/payload_type.rs @@ -46,6 +46,8 @@ pub enum KaspadMessagePayloadType { IbdChainBlockLocator, RequestAntipast, RequestNextPruningPointAndItsAnticoneBlocks, + BlockBody, + RequestBlockBodies, } impl From<&KaspadMessagePayload> for KaspadMessagePayloadType { @@ -100,6 +102,8 @@ impl From<&KaspadMessagePayload> for KaspadMessagePayloadType { KaspadMessagePayload::RequestNextPruningPointAndItsAnticoneBlocks(_) => { KaspadMessagePayloadType::RequestNextPruningPointAndItsAnticoneBlocks } + KaspadMessagePayload::BlockBody(_) => KaspadMessagePayloadType::BlockBody, + KaspadMessagePayload::RequestBlockBodies(_) => KaspadMessagePayloadType::RequestBlockBodies, } } } diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index 8c673a7721..9c68a71d03 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -233,10 +233,10 @@ async fn main() { let coinbase_maturity = match info.network.suffix 
{ Some(11) => panic!("TN11 is not supported on this version"), - None | Some(_) => TESTNET_PARAMS.coinbase_maturity().upper_bound(), + None | Some(_) => TESTNET_PARAMS.coinbase_maturity().after(), }; info!( - "Node block-DAG info: \n\tNetwork: {}, \n\tBlock count: {}, \n\tHeader count: {}, \n\tDifficulty: {}, + "Node block-DAG info: \n\tNetwork: {}, \n\tBlock count: {}, \n\tHeader count: {}, \n\tDifficulty: {}, \tMedian time: {}, \n\tDAA score: {}, \n\tPruning point: {}, \n\tTips: {}, \n\t{} virtual parents: ...{}, \n\tCoinbase maturity: {}", info.network, info.block_count, diff --git a/rpc/core/Cargo.toml b/rpc/core/Cargo.toml index f2e9f72f9e..190fb09476 100644 --- a/rpc/core/Cargo.toml +++ b/rpc/core/Cargo.toml @@ -45,6 +45,7 @@ paste.workspace = true rand.workspace = true serde-wasm-bindgen.workspace = true serde.workspace = true +serde_nested_with.workspace = true smallvec.workspace = true thiserror.workspace = true uuid.workspace = true diff --git a/rpc/core/src/api/ops.rs b/rpc/core/src/api/ops.rs index 4541ddc56d..1ad1344185 100644 --- a/rpc/core/src/api/ops.rs +++ b/rpc/core/src/api/ops.rs @@ -138,6 +138,8 @@ pub enum RpcApiOps { GetCurrentBlockColor = 149, /// Get UTXO Return Addresses GetUtxoReturnAddress = 150, + /// Get Virtual Chain from Block V2 + GetVirtualChainFromBlockV2 = 151, } impl RpcApiOps { diff --git a/rpc/core/src/api/rpc.rs b/rpc/core/src/api/rpc.rs index f40f90e84d..01ff570cd4 100644 --- a/rpc/core/src/api/rpc.rs +++ b/rpc/core/src/api/rpc.rs @@ -482,6 +482,24 @@ pub trait RpcApi: Sync + Send + AnySync { request: GetCurrentBlockColorRequest, ) -> RpcResult; + async fn get_virtual_chain_from_block_v2( + &self, + start_hash: RpcHash, + data_verbosity_level: Option, + min_confirmation_count: Option, + ) -> RpcResult { + self.get_virtual_chain_from_block_v2_call( + None, + GetVirtualChainFromBlockV2Request::new(start_hash, data_verbosity_level, min_confirmation_count), + ) + .await + } + async fn get_virtual_chain_from_block_v2_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetVirtualChainFromBlockV2Request, + ) -> RpcResult; + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Notification API diff --git a/rpc/core/src/convert/block.rs b/rpc/core/src/convert/block.rs index 8cb0ab01e3..4ac84038b9 100644 --- a/rpc/core/src/convert/block.rs +++ b/rpc/core/src/convert/block.rs @@ -2,7 +2,7 @@ use std::sync::Arc; -use crate::{RpcBlock, RpcError, RpcRawBlock, RpcResult, RpcTransaction}; +use crate::{RpcBlock, RpcError, RpcOptionalBlock, RpcOptionalTransaction, RpcRawBlock, RpcResult, RpcTransaction}; use kaspa_consensus_core::block::{Block, MutableBlock}; // ---------------------------------------------------------------------------- @@ -56,7 +56,7 @@ impl TryFrom for Block { type Error = RpcError; fn try_from(item: RpcBlock) -> RpcResult { Ok(Self { - header: Arc::new(item.header.into()), + header: Arc::new(item.header.try_into()?), transactions: Arc::new( item.transactions .into_iter() @@ -71,7 +71,53 @@ impl TryFrom for Block { type Error = RpcError; fn try_from(item: RpcRawBlock) -> RpcResult { Ok(Self { - header: Arc::new(item.header.into()), + header: Arc::new(item.header.try_into()?), + transactions: Arc::new( + item.transactions + .into_iter() + .map(kaspa_consensus_core::tx::Transaction::try_from) + .collect::>>()?, + ), + }) + } +} + +// ---------------------------------------------------------------------------- +// consensus_core to optional rpc_core +// ---------------------------------------------------------------------------- + 
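// A minimal usage sketch (assumes only the conversions defined just below): RpcOptionalBlock
// mirrors RpcBlock with every field nullable, so a server can omit data according to the
// requested verbosity level. The consensus -> RPC direction is total, so a freshly
// converted block always survives the fallible conversion back:
//
//     let rpc_block = RpcOptionalBlock::from(&block); // every field populated as Some(..)
//     assert!(rpc_block.verbose_data.is_none());      // verbose data is filled in separately
//     let roundtrip = Block::try_from(rpc_block)?;    // Ok: nothing has been pruned yet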
+impl From<&Block> for RpcOptionalBlock { + fn from(item: &Block) -> Self { + Self { + header: Some(item.header.as_ref().into()), + transactions: item.transactions.iter().map(RpcOptionalTransaction::from).collect(), + // TODO: Implement a populating process inspired from kaspad\app\rpc\rpccontext\verbosedata.go + verbose_data: None, + } + } +} + +impl From<&MutableBlock> for RpcOptionalBlock { + fn from(item: &MutableBlock) -> Self { + Self { + header: Some(item.header.as_ref().into()), + transactions: item.transactions.iter().map(RpcOptionalTransaction::from).collect(), + verbose_data: None, + } + } +} + +// ---------------------------------------------------------------------------- +// optional rpc_core to consensus_core +// ---------------------------------------------------------------------------- + +impl TryFrom for Block { + type Error = RpcError; + fn try_from(item: RpcOptionalBlock) -> RpcResult { + Ok(Self { + header: Arc::new( + (item.header.ok_or(RpcError::MissingRpcFieldError("RpcBlock".to_string(), "header".to_string()))?).try_into()?, + ), transactions: Arc::new( item.transactions .into_iter() diff --git a/rpc/core/src/convert/mod.rs b/rpc/core/src/convert/mod.rs index bc5c0e64b9..63c2595164 100644 --- a/rpc/core/src/convert/mod.rs +++ b/rpc/core/src/convert/mod.rs @@ -7,3 +7,4 @@ pub mod notification; pub mod scope; pub mod tx; pub mod utxo; +pub mod verbosity; diff --git a/rpc/core/src/convert/tx.rs b/rpc/core/src/convert/tx.rs index 5485f99df7..63a749bf42 100644 --- a/rpc/core/src/convert/tx.rs +++ b/rpc/core/src/convert/tx.rs @@ -1,6 +1,9 @@ //! Conversion of Transaction related types -use crate::{RpcError, RpcResult, RpcTransaction, RpcTransactionInput, RpcTransactionOutput}; +use crate::{ + RpcError, RpcOptionalTransaction, RpcOptionalTransactionInput, RpcOptionalTransactionOutput, RpcResult, RpcTransaction, + RpcTransactionInput, RpcTransactionOutput, +}; use kaspa_consensus_core::tx::{Transaction, TransactionInput, TransactionOutput}; // ---------------------------------------------------------------------------- @@ -81,3 +84,94 @@ impl TryFrom for TransactionInput { Ok(Self::new(item.previous_outpoint.into(), item.signature_script, item.sequence, item.sig_op_count)) } } + +// ---------------------------------------------------------------------------- +// consensus_core to optional rpc_core +// ---------------------------------------------------------------------------- + +impl From<&Transaction> for RpcOptionalTransaction { + fn from(item: &Transaction) -> Self { + Self { + version: Some(item.version), + inputs: item.inputs.iter().map(RpcOptionalTransactionInput::from).collect(), + outputs: item.outputs.iter().map(RpcOptionalTransactionOutput::from).collect(), + lock_time: Some(item.lock_time), + subnetwork_id: Some(item.subnetwork_id.clone()), + gas: Some(item.gas), + payload: Some(item.payload.clone()), + mass: Some(item.mass()), + verbose_data: None, + } + } +} + +impl From<&TransactionOutput> for RpcOptionalTransactionOutput { + fn from(item: &TransactionOutput) -> Self { + Self { value: Some(item.value), script_public_key: Some(item.script_public_key.clone()), verbose_data: None } + } +} + +impl From<&TransactionInput> for RpcOptionalTransactionInput { + fn from(item: &TransactionInput) -> Self { + Self { + previous_outpoint: Some(item.previous_outpoint.into()), + signature_script: Some(item.signature_script.clone()), + sequence: Some(item.sequence), + sig_op_count: Some(item.sig_op_count), + verbose_data: None, + } + } +} + +// 
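// A hedged round-trip sketch (assumes only the From/TryFrom impls in this file):
// converting a consensus transaction into the optional RPC type populates every field,
// so the fallible conversion back cannot hit a MissingRpcFieldError:
//
//     fn roundtrip(tx: &Transaction) -> RpcResult<Transaction> {
//         let optional = RpcOptionalTransaction::from(tx);
//         Transaction::try_from(optional)
//     }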
---------------------------------------------------------------------------- +// optional rpc_core to consensus_core +// ---------------------------------------------------------------------------- + +impl TryFrom for Transaction { + type Error = RpcError; + fn try_from(item: RpcOptionalTransaction) -> RpcResult { + let transaction = Transaction::new( + item.version.ok_or(RpcError::MissingRpcFieldError("RpcTransaction".to_owned(), "version".to_owned()))?, + item.inputs + .into_iter() + .map(kaspa_consensus_core::tx::TransactionInput::try_from) + .collect::>>()?, + item.outputs + .into_iter() + .map(kaspa_consensus_core::tx::TransactionOutput::try_from) + .collect::>>()?, + item.lock_time.ok_or(RpcError::MissingRpcFieldError("RpcTransaction".to_owned(), "lock_time".to_owned()))?, + item.subnetwork_id.ok_or(RpcError::MissingRpcFieldError("RpcTransaction".to_owned(), "subnetwork_id".to_owned()))?, + item.gas.ok_or(RpcError::MissingRpcFieldError("RpcTransaction".to_owned(), "gas".to_owned()))?, + item.payload.ok_or(RpcError::MissingRpcFieldError("RpcTransaction".to_owned(), "payload".to_owned()))?, + ); + transaction.set_mass(item.mass.ok_or(RpcError::MissingRpcFieldError("RpcTransaction".to_owned(), "mass".to_owned()))?); + Ok(transaction) + } +} + +impl TryFrom for TransactionOutput { + type Error = RpcError; + fn try_from(item: RpcOptionalTransactionOutput) -> RpcResult { + Ok(Self::new( + item.value.ok_or(RpcError::MissingRpcFieldError("RpcTransactionOutput".to_owned(), "value".to_owned()))?, + item.script_public_key + .ok_or(RpcError::MissingRpcFieldError("RpcTransactionOutput".to_owned(), "script_public_key".to_owned()))?, + )) + } +} + +impl TryFrom for TransactionInput { + type Error = RpcError; + fn try_from(item: RpcOptionalTransactionInput) -> RpcResult { + Ok(Self::new( + item.previous_outpoint + .ok_or(RpcError::MissingRpcFieldError("RpcTransactionInput".to_owned(), "previous_outpoint".to_owned()))? + .try_into()?, + item.signature_script + .ok_or(RpcError::MissingRpcFieldError("RpcTransactionInput".to_owned(), "signature_script".to_owned()))?, + item.sequence.ok_or(RpcError::MissingRpcFieldError("RpcTransactionInput".to_owned(), "sequence".to_owned()))?, + item.sig_op_count.ok_or(RpcError::MissingRpcFieldError("RpcTransactionInput".to_owned(), "sig_op_count".to_owned()))?, + )) + } +} diff --git a/rpc/core/src/convert/verbosity.rs b/rpc/core/src/convert/verbosity.rs new file mode 100644 index 0000000000..b63fa901c4 --- /dev/null +++ b/rpc/core/src/convert/verbosity.rs @@ -0,0 +1,192 @@ +use crate::{ + RpcAcceptanceDataVerbosity, RpcDataVerbosityLevel, RpcError, RpcHeaderVerbosity, RpcMergesetBlockAcceptanceDataVerbosity, + RpcTransactionInputVerboseDataVerbosity, RpcTransactionInputVerbosity, RpcTransactionOutputVerboseDataVerbosity, + RpcTransactionOutputVerbosity, RpcTransactionVerboseDataVerbosity, RpcTransactionVerbosity, RpcUtxoEntryVerboseDataVerbosity, + RpcUtxoEntryVerbosity, +}; + +macro_rules! impl_verbosity_from { + ( + for $target:ty, from $level:ty { + $( $field:ident : $handler:tt ),* $(,)? 
+ } + ) => { + impl ::core::convert::From<$level> for $target { + fn from(level: $level) -> Self { + Self { + $( + $field: impl_verbosity_from!(@eval level, $level, $handler), + )* + } + } + } + }; + + // (|level| expr) -> Some(expr_result) + (@eval $lev:ident, $levty:ty, (| $L:ident | $e:expr)) => { + ::core::option::Option::Some((|$L: $levty| { $e })($lev)) + }; + + // (Level) -> Some(bool) + (@eval $lev:ident, $levty:ty, ($min_level:expr)) => { + ::core::option::Option::Some($lev.is_at_least($min_level)) + }; + + // (none) -> None + (@eval $lev:ident, $levty:ty, (none)) => { + ::core::option::Option::None + }; +} + +impl RpcDataVerbosityLevel { + /// Check if this verbosity level is at least the specified level + pub const fn is_at_least(&self, other: Self) -> bool { + *self as u8 >= other as u8 + } +} + +impl From for i32 { + fn from(v: RpcDataVerbosityLevel) -> Self { + v as i32 + } +} + +impl TryFrom for RpcDataVerbosityLevel { + type Error = RpcError; + fn try_from(v: i32) -> Result { + match v { + 0 => Ok(Self::None), + 1 => Ok(Self::Low), + 2 => Ok(Self::High), + 3 => Ok(Self::Full), + _ => Err(RpcError::NotImplemented), + } + } +} + +impl_verbosity_from! { + for RpcHeaderVerbosity, from RpcDataVerbosityLevel { + include_hash: (RpcDataVerbosityLevel::None), + include_version: (RpcDataVerbosityLevel::Low), + include_timestamp: (RpcDataVerbosityLevel::Low), + include_bits: (RpcDataVerbosityLevel::Low), + include_nonce: (RpcDataVerbosityLevel::Low), + include_daa_score: (RpcDataVerbosityLevel::Low), + include_blue_work: (RpcDataVerbosityLevel::Low), + include_blue_score: (RpcDataVerbosityLevel::Low), + include_parents_by_level: (RpcDataVerbosityLevel::High), + include_hash_merkle_root: (RpcDataVerbosityLevel::High), + include_accepted_id_merkle_root: (RpcDataVerbosityLevel::High), + include_utxo_commitment: (RpcDataVerbosityLevel::Full), + include_pruning_point: (RpcDataVerbosityLevel::Full), + } +} + +impl_verbosity_from! { + for RpcUtxoEntryVerboseDataVerbosity, from RpcDataVerbosityLevel { + include_script_public_key_type: (RpcDataVerbosityLevel::Low), + include_script_public_key_address: (RpcDataVerbosityLevel::Low), + } +} + +impl_verbosity_from! { + for RpcUtxoEntryVerbosity, from RpcDataVerbosityLevel { + include_amount: (RpcDataVerbosityLevel::High), + include_script_public_key: (RpcDataVerbosityLevel::High), + include_block_daa_score: (RpcDataVerbosityLevel::Full), + include_is_coinbase: (RpcDataVerbosityLevel::High), + verbose_data_verbosity: (|level| { + RpcUtxoEntryVerboseDataVerbosity::from(level) + }), + } +} + +impl_verbosity_from! { + for RpcTransactionInputVerbosity, from RpcDataVerbosityLevel { + include_signature_script: (RpcDataVerbosityLevel::Low), + include_sequence: (RpcDataVerbosityLevel::High), + include_sig_op_count: (RpcDataVerbosityLevel::High), + include_previous_outpoint: (RpcDataVerbosityLevel::High), + verbose_data_verbosity: (|level| { + RpcTransactionInputVerboseDataVerbosity::from(level) + }), + + } +} + +impl_verbosity_from! { + for RpcTransactionInputVerboseDataVerbosity, from RpcDataVerbosityLevel { + utxo_entry_verbosity: (|level| { + RpcUtxoEntryVerbosity::from(level) + }), + } +} + +impl_verbosity_from! { + for RpcTransactionOutputVerbosity, from RpcDataVerbosityLevel { + include_amount: (RpcDataVerbosityLevel::Low), + include_script_public_key: (RpcDataVerbosityLevel::Low), + verbose_data_verbosity: (|level| { + RpcTransactionOutputVerboseDataVerbosity::from(level) + }), + } +} + +impl_verbosity_from! 
{ + for RpcTransactionOutputVerboseDataVerbosity, from RpcDataVerbosityLevel { + include_script_public_key_type: (RpcDataVerbosityLevel::Low), + include_script_public_key_address: (RpcDataVerbosityLevel::Low), + } +} + +impl_verbosity_from! { + for RpcTransactionVerbosity, from RpcDataVerbosityLevel { + include_payload: (RpcDataVerbosityLevel::High), + include_mass: (RpcDataVerbosityLevel::High), + include_version: (RpcDataVerbosityLevel::Full), + include_lock_time: (RpcDataVerbosityLevel::Full), + include_subnetwork_id: (RpcDataVerbosityLevel::Full), + include_gas: (RpcDataVerbosityLevel::Full), + input_verbosity: (|level| { + RpcTransactionInputVerbosity::from(level) + }), + output_verbosity: (|level| { + RpcTransactionOutputVerbosity::from(level) + }), + verbose_data_verbosity: (|level| { + RpcTransactionVerboseDataVerbosity::from(level) + }), + } +} + +impl_verbosity_from! { + for RpcTransactionVerboseDataVerbosity, from RpcDataVerbosityLevel { + include_transaction_id: (RpcDataVerbosityLevel::Low), + include_compute_mass: (RpcDataVerbosityLevel::High), + include_block_hash: (RpcDataVerbosityLevel::Low), + include_block_time: (RpcDataVerbosityLevel::Low), + include_hash: (RpcDataVerbosityLevel::Low), + } +} + +impl_verbosity_from! { + for RpcAcceptanceDataVerbosity, from RpcDataVerbosityLevel { + accepting_chain_header_verbosity: (|level| { + RpcHeaderVerbosity::from(level) + }), + mergeset_block_acceptance_data_verbosity: (|level| { + RpcMergesetBlockAcceptanceDataVerbosity::from(level) + }), + } +} + +impl_verbosity_from! { + for RpcMergesetBlockAcceptanceDataVerbosity, from RpcDataVerbosityLevel { + merged_header_verbosity: (|level| { + RpcHeaderVerbosity::from(level) + }), + accepted_transactions_verbosity: (|level| { + RpcTransactionVerbosity::from(level) + }), + } +} diff --git a/rpc/core/src/error.rs b/rpc/core/src/error.rs index 54763c71ba..8aee1fb1a4 100644 --- a/rpc/core/src/error.rs +++ b/rpc/core/src/error.rs @@ -2,7 +2,10 @@ //! [`RpcError`] enum used by RPC primitives. //! -use kaspa_consensus_core::{subnets::SubnetworkConversionError, tx::TransactionId, utxo::utxo_inquirer::UtxoInquirerError}; +use kaspa_consensus_core::{ + errors::header::CompressedParentsError, subnets::SubnetworkConversionError, tx::TransactionId, + utxo::utxo_inquirer::UtxoInquirerError, +}; use kaspa_utils::networking::IpAddress; use std::{net::AddrParseError, num::TryFromIntError}; use thiserror::Error; @@ -137,6 +140,15 @@ pub enum RpcError { #[error("utxo return address could not be found -> {0}")] UtxoReturnAddressNotFound(UtxoInquirerError), + + #[error("consensus converter required {0} - but was not found")] + ConsensusConverterNotFound(String), + + #[error("consensus is currently in a transitional ibd state")] + ConsensusInTransitionalIbdState, + + #[error(transparent)] + CompressedParentsError(#[from] CompressedParentsError), } impl From for RpcError { diff --git a/rpc/core/src/model/block.rs b/rpc/core/src/model/block.rs index 3f4870dc69..f8ed1d2666 100644 --- a/rpc/core/src/model/block.rs +++ b/rpc/core/src/model/block.rs @@ -132,7 +132,7 @@ cfg_if::cfg_if! { const TS_BLOCK: &'static str = r#" /** * Interface defining the structure of a block. - * + * * @category Consensus */ export interface IBlock { @@ -143,7 +143,7 @@ cfg_if::cfg_if! { /** * Interface defining the structure of a block verbose data. - * + * * @category Node RPC */ export interface IBlockVerboseData { @@ -161,11 +161,11 @@ cfg_if::cfg_if! { /** * Interface defining the structure of a raw block. 
- * + * Raw block is a structure used by GetBlockTemplate and SubmitBlock RPCs * and differs from `IBlock` in that it does not include verbose data and carries * `IRawHeader` that does not include a cached block hash. - * + * @category Consensus */ export interface IRawBlock { diff --git a/rpc/core/src/model/header.rs b/rpc/core/src/model/header.rs index fda6b70e1e..c9814d71a9 100644 --- a/rpc/core/src/model/header.rs +++ b/rpc/core/src/model/header.rs @@ -1,3 +1,4 @@ +use crate::RpcError; use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_consensus_core::{header::Header, BlueWorkType}; use kaspa_hashes::Hash; @@ -65,7 +66,7 @@ impl From<Header>
for RpcHeader { Self { hash: header.hash, version: header.version, - parents_by_level: header.parents_by_level, + parents_by_level: header.parents_by_level.into(), hash_merkle_root: header.hash_merkle_root, accepted_id_merkle_root: header.accepted_id_merkle_root, utxo_commitment: header.utxo_commitment, @@ -85,7 +86,7 @@ impl From<&Header> for RpcHeader { Self { hash: header.hash, version: header.version, - parents_by_level: header.parents_by_level.clone(), + parents_by_level: (&header.parents_by_level).into(), hash_merkle_root: header.hash_merkle_root, accepted_id_merkle_root: header.accepted_id_merkle_root, utxo_commitment: header.utxo_commitment, @@ -100,12 +101,13 @@ impl From<&Header> for RpcHeader { } } -impl From for Header { - fn from(header: RpcHeader) -> Self { - Self { +impl TryFrom for Header { + type Error = RpcError; + fn try_from(header: RpcHeader) -> Result { + Ok(Self { hash: header.hash, version: header.version, - parents_by_level: header.parents_by_level, + parents_by_level: header.parents_by_level.try_into()?, hash_merkle_root: header.hash_merkle_root, accepted_id_merkle_root: header.accepted_id_merkle_root, utxo_commitment: header.utxo_commitment, @@ -116,16 +118,18 @@ impl From for Header { blue_work: header.blue_work, blue_score: header.blue_score, pruning_point: header.pruning_point, - } + }) } } -impl From<&RpcHeader> for Header { - fn from(header: &RpcHeader) -> Self { - Self { +impl TryFrom<&RpcHeader> for Header { + type Error = RpcError; + + fn try_from(header: &RpcHeader) -> Result { + Ok(Self { hash: header.hash, version: header.version, - parents_by_level: header.parents_by_level.clone(), + parents_by_level: header.parents_by_level.clone().try_into()?, hash_merkle_root: header.hash_merkle_root, accepted_id_merkle_root: header.accepted_id_merkle_root, utxo_commitment: header.utxo_commitment, @@ -136,7 +140,7 @@ impl From<&RpcHeader> for Header { blue_work: header.blue_work, blue_score: header.blue_score, pruning_point: header.pruning_point, - } + }) } } @@ -198,11 +202,13 @@ impl Deserializer for RpcHeader { } } -impl From for Header { - fn from(header: RpcRawHeader) -> Self { - Self::new_finalized( +impl TryFrom for Header { + type Error = RpcError; + + fn try_from(header: RpcRawHeader) -> Result { + Ok(Self::new_finalized( header.version, - header.parents_by_level, + header.parents_by_level.try_into()?, header.hash_merkle_root, header.accepted_id_merkle_root, header.utxo_commitment, @@ -213,15 +219,17 @@ impl From for Header { header.blue_work, header.blue_score, header.pruning_point, - ) + )) } } -impl From<&RpcRawHeader> for Header { - fn from(header: &RpcRawHeader) -> Self { - Self::new_finalized( +impl TryFrom<&RpcRawHeader> for Header { + type Error = RpcError; + + fn try_from(header: &RpcRawHeader) -> Result { + Ok(Self::new_finalized( header.version, - header.parents_by_level.clone(), + header.parents_by_level.clone().try_into()?, header.hash_merkle_root, header.accepted_id_merkle_root, header.utxo_commitment, @@ -232,7 +240,7 @@ impl From<&RpcRawHeader> for Header { header.blue_work, header.blue_score, header.pruning_point, - ) + )) } } @@ -240,7 +248,7 @@ impl From<&Header> for RpcRawHeader { fn from(header: &Header) -> Self { Self { version: header.version, - parents_by_level: header.parents_by_level.clone(), + parents_by_level: header.parents_by_level.clone().into(), hash_merkle_root: header.hash_merkle_root, accepted_id_merkle_root: header.accepted_id_merkle_root, utxo_commitment: header.utxo_commitment, @@ -259,7 +267,7 @@ impl From
for RpcRawHeader { fn from(header: Header) -> Self { Self { version: header.version, - parents_by_level: header.parents_by_level, + parents_by_level: header.parents_by_level.into(), hash_merkle_root: header.hash_merkle_root, accepted_id_merkle_root: header.accepted_id_merkle_root, utxo_commitment: header.utxo_commitment, diff --git a/rpc/core/src/model/hex_cnv.rs b/rpc/core/src/model/hex_cnv.rs index a84264f0b2..9531287c6d 100644 --- a/rpc/core/src/model/hex_cnv.rs +++ b/rpc/core/src/model/hex_cnv.rs @@ -115,13 +115,13 @@ mod tests { #[test] fn test_smallvec_hex_convert() { - type TestVec = SmallVec<[u8; 36]>; + type TestVec = SmallVec<[u8; 35]>; let v: TestVec = smallvec![0x0, 0xab, 0x55, 0x30, 0x1f, 0x63]; let k = "00ab55301f63"; assert_eq!(k.len(), v.len() * 2); assert_eq!(k.to_string(), v.to_rpc_hex()); - assert_eq!(SmallVec::<[u8; 36]>::from_rpc_hex(k).unwrap(), v); + assert_eq!(SmallVec::<[u8; 35]>::from_rpc_hex(k).unwrap(), v); assert!(TestVec::from_rpc_hex("not a number").is_err()); assert!(TestVec::from_rpc_hex("ab01").is_ok()); diff --git a/rpc/core/src/model/message.rs b/rpc/core/src/model/message.rs index fa1f12cbe9..7a0a4c23b1 100644 --- a/rpc/core/src/model/message.rs +++ b/rpc/core/src/model/message.rs @@ -2733,6 +2733,77 @@ impl Deserializer for GetUtxoReturnAddressResponse { } } +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetVirtualChainFromBlockV2Request { + pub start_hash: RpcHash, + pub data_verbosity_level: Option, + pub min_confirmation_count: Option, +} + +impl GetVirtualChainFromBlockV2Request { + pub fn new(start_hash: RpcHash, data_verbosity_level: Option, min_confirmation_count: Option) -> Self { + Self { start_hash, data_verbosity_level, min_confirmation_count } + } +} + +impl Serializer for GetVirtualChainFromBlockV2Request { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcHash, &self.start_hash, writer)?; + serialize!(Option, &self.data_verbosity_level, writer)?; + store!(Option, &self.min_confirmation_count, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetVirtualChainFromBlockV2Request { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let start_hash = load!(RpcHash, reader)?; + let data_verbosity_level = deserialize!(Option, reader)?; + let min_confirmation_count = load!(Option, reader)?; + + Ok(Self { start_hash, data_verbosity_level, min_confirmation_count }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetVirtualChainFromBlockV2Response { + /// always present, no matter the verbosity level + pub removed_chain_block_hashes: Arc>, + /// always present, no matter the verbosity level + pub added_chain_block_hashes: Arc>, + /// struct properties are optionally returned depending on the verbosity level + pub chain_block_accepted_transactions: Arc>, +} + +impl Serializer for GetVirtualChainFromBlockV2Response { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.removed_chain_block_hashes, writer)?; + store!(Vec, &self.added_chain_block_hashes, writer)?; + serialize!(Vec, &self.chain_block_accepted_transactions, writer)?; + Ok(()) + } +} + +impl Deserializer for GetVirtualChainFromBlockV2Response { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let removed_chain_block_hashes = load!(Vec, reader)?; + let added_chain_block_hashes = load!(Vec, 
reader)?; + let chain_block_accepted_transactions = deserialize!(Vec, reader)?; + Ok(Self { + removed_chain_block_hashes: removed_chain_block_hashes.into(), + added_chain_block_hashes: added_chain_block_hashes.into(), + chain_block_accepted_transactions: chain_block_accepted_transactions.into(), + }) + } +} + // ---------------------------------------------------------------------------- // Subscriptions & notifications // ---------------------------------------------------------------------------- diff --git a/rpc/core/src/model/mod.rs b/rpc/core/src/model/mod.rs index a7c2556249..d5cd18e303 100644 --- a/rpc/core/src/model/mod.rs +++ b/rpc/core/src/model/mod.rs @@ -11,11 +11,13 @@ pub mod hex_cnv; pub mod mempool; pub mod message; pub mod network; +pub mod optional; pub mod peer; pub mod script_class; pub mod subnets; mod tests; pub mod tx; +pub mod verbosity; pub use address::*; pub use block::*; @@ -27,6 +29,8 @@ pub use hex_cnv::*; pub use mempool::*; pub use message::*; pub use network::*; +pub use optional::*; pub use peer::*; pub use subnets::*; pub use tx::*; +pub use verbosity::*; diff --git a/rpc/core/src/model/optional/block.rs b/rpc/core/src/model/optional/block.rs new file mode 100644 index 0000000000..b9b2214f29 --- /dev/null +++ b/rpc/core/src/model/optional/block.rs @@ -0,0 +1,38 @@ +use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; + +use crate::{RpcBlockVerboseData, RpcOptionalHeader, RpcOptionalTransaction}; + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalBlock { + pub header: Option, + pub transactions: Vec, + pub verbose_data: Option, +} + +impl Serializer for RpcOptionalBlock { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(Option, &self.header, writer)?; + serialize!(Vec, &self.transactions, writer)?; + serialize!(Option, &self.verbose_data, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcOptionalBlock { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + match _version { + 1 => { + let header = Some(deserialize!(RpcOptionalHeader, reader)?); + let transactions = deserialize!(Vec, reader)?; + let verbose_data = deserialize!(Option, reader)?; + Ok(Self { header, transactions, verbose_data }) + } + _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _version))), + } + } +} diff --git a/rpc/core/src/model/optional/header.rs b/rpc/core/src/model/optional/header.rs new file mode 100644 index 0000000000..092fe7cace --- /dev/null +++ b/rpc/core/src/model/optional/header.rs @@ -0,0 +1,232 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use kaspa_consensus_core::{header::Header, BlueWorkType}; +use kaspa_hashes::Hash; +use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; + +use crate::{RpcError, RpcResult}; + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalHeader { + /// Level: None - Cached hash + pub hash: Option, + /// Level: Low + pub version: Option, + /// Level: High + pub parents_by_level: Vec>, + /// Level: High + pub hash_merkle_root: Option, + /// Level: High + pub accepted_id_merkle_root: Option, + /// Level: Full + pub utxo_commitment: Option, + /// Level: Low - Timestamp is in milliseconds + pub timestamp: Option, + /// Level: Low + pub bits: Option, + /// Level: Low + pub nonce: Option, + /// Level: 
Low + pub daa_score: Option<u64>, + /// Level: Low + pub blue_work: Option<BlueWorkType>, + /// Level: Low + pub blue_score: Option<u64>, + /// Level: Full + pub pruning_point: Option<Hash>, +} + +impl RpcOptionalHeader { + pub fn is_empty(&self) -> bool { + self.hash.is_none() + && self.version.is_none() + && self.parents_by_level.is_empty() + && self.hash_merkle_root.is_none() + && self.accepted_id_merkle_root.is_none() + && self.utxo_commitment.is_none() + && self.timestamp.is_none() + && self.bits.is_none() + && self.nonce.is_none() + && self.daa_score.is_none() + && self.blue_work.is_none() + && self.blue_score.is_none() + && self.pruning_point.is_none() + } + pub fn direct_parents(&self) -> &[Hash] { + if self.parents_by_level.is_empty() { + &[] + } else { + &self.parents_by_level[0] + } + } +} + +impl AsRef<RpcOptionalHeader> for RpcOptionalHeader { + fn as_ref(&self) -> &RpcOptionalHeader { + self + } +} + +impl From<Header>
for RpcOptionalHeader { + fn from(header: Header) -> Self { + Self { + hash: Some(header.hash), + version: Some(header.version), + parents_by_level: header.parents_by_level.into(), + hash_merkle_root: Some(header.hash_merkle_root), + accepted_id_merkle_root: Some(header.accepted_id_merkle_root), + utxo_commitment: Some(header.utxo_commitment), + timestamp: Some(header.timestamp), + bits: Some(header.bits), + nonce: Some(header.nonce), + daa_score: Some(header.daa_score), + blue_work: Some(header.blue_work), + blue_score: Some(header.blue_score), + pruning_point: Some(header.pruning_point), + } + } +} + +impl From<&Header> for RpcOptionalHeader { + fn from(header: &Header) -> Self { + Self { + hash: Some(header.hash), + version: Some(header.version), + parents_by_level: header.parents_by_level.clone().into(), + hash_merkle_root: Some(header.hash_merkle_root), + accepted_id_merkle_root: Some(header.accepted_id_merkle_root), + utxo_commitment: Some(header.utxo_commitment), + timestamp: Some(header.timestamp), + bits: Some(header.bits), + nonce: Some(header.nonce), + daa_score: Some(header.daa_score), + blue_work: Some(header.blue_work), + blue_score: Some(header.blue_score), + pruning_point: Some(header.pruning_point), + } + } +} + +impl TryFrom for Header { + type Error = RpcError; + + fn try_from(header: RpcOptionalHeader) -> RpcResult { + Ok(Self { + hash: header.hash.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "hash".to_owned()))?, + version: header.version.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "version".to_owned()))?, + parents_by_level: header.parents_by_level.try_into()?, + hash_merkle_root: header + .hash_merkle_root + .ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "hash_merkle_root".to_owned()))?, + accepted_id_merkle_root: header + .accepted_id_merkle_root + .ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "accepted_id_merkle_root".to_owned()))?, + utxo_commitment: header + .utxo_commitment + .ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "utxo_commitment".to_owned()))?, + timestamp: header.timestamp.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "timestamp".to_owned()))?, + bits: header.bits.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "bits".to_owned()))?, + nonce: header.nonce.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "nonce".to_owned()))?, + daa_score: header.daa_score.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "daa_score".to_owned()))?, + blue_work: header.blue_work.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "blue_work".to_owned()))?, + blue_score: header.blue_score.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "blue_score".to_owned()))?, + pruning_point: header + .pruning_point + .ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "pruning_point".to_owned()))?, + }) + } +} + +impl TryFrom<&RpcOptionalHeader> for Header { + type Error = RpcError; + + fn try_from(header: &RpcOptionalHeader) -> RpcResult { + Ok(Self { + hash: header.hash.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "hash".to_owned()))?, + version: header.version.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "version".to_owned()))?, + parents_by_level: header.parents_by_level.clone().try_into()?, + hash_merkle_root: header + .hash_merkle_root + .ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "hash_merkle_root".to_owned()))?, + accepted_id_merkle_root: header + 
.accepted_id_merkle_root + .ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "accepted_id_merkle_root".to_owned()))?, + utxo_commitment: header + .utxo_commitment + .ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "utxo_commitment".to_owned()))?, + timestamp: header.timestamp.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "timestamp".to_owned()))?, + bits: header.bits.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "bits".to_owned()))?, + nonce: header.nonce.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "nonce".to_owned()))?, + daa_score: header.daa_score.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "daa_score".to_owned()))?, + blue_work: header.blue_work.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "blue_work".to_owned()))?, + blue_score: header.blue_score.ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "blue_score".to_owned()))?, + pruning_point: header + .pruning_point + .ok_or(RpcError::MissingRpcFieldError("RpcHeader".to_owned(), "pruning_point".to_owned()))?, + }) + } +} + +impl Serializer for RpcOptionalHeader { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + + store!(Option, &self.hash, writer)?; + store!(Option, &self.version, writer)?; + store!(Vec>, &self.parents_by_level, writer)?; + store!(Option, &self.hash_merkle_root, writer)?; + store!(Option, &self.accepted_id_merkle_root, writer)?; + store!(Option, &self.utxo_commitment, writer)?; + store!(Option, &self.timestamp, writer)?; + store!(Option, &self.bits, writer)?; + store!(Option, &self.nonce, writer)?; + store!(Option, &self.daa_score, writer)?; + store!(Option, &self.blue_work, writer)?; + store!(Option, &self.blue_score, writer)?; + store!(Option, &self.pruning_point, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcOptionalHeader { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + + match _version { + 1 => { + let hash = load!(Option, reader)?; + let version = load!(Option, reader)?; + let parents_by_level = load!(Vec>, reader)?; + let hash_merkle_root = load!(Option, reader)?; + let accepted_id_merkle_root = load!(Option, reader)?; + let utxo_commitment = load!(Option, reader)?; + let timestamp = load!(Option, reader)?; + let bits = load!(Option, reader)?; + let nonce = load!(Option, reader)?; + let daa_score = load!(Option, reader)?; + let blue_work = load!(Option, reader)?; + let blue_score = load!(Option, reader)?; + let pruning_point = load!(Option, reader)?; + + Ok(Self { + hash, + version, + parents_by_level, + hash_merkle_root, + accepted_id_merkle_root, + utxo_commitment, + timestamp, + bits, + nonce, + daa_score, + blue_work, + blue_score, + pruning_point, + }) + } + _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _version))), + } + } +} diff --git a/rpc/core/src/model/optional/mod.rs b/rpc/core/src/model/optional/mod.rs new file mode 100644 index 0000000000..1949785b53 --- /dev/null +++ b/rpc/core/src/model/optional/mod.rs @@ -0,0 +1,7 @@ +pub mod block; +pub mod header; +pub mod tx; + +pub use block::*; +pub use header::*; +pub use tx::*; diff --git a/rpc/core/src/model/optional/tx.rs b/rpc/core/src/model/optional/tx.rs new file mode 100644 index 0000000000..8654175251 --- /dev/null +++ b/rpc/core/src/model/optional/tx.rs @@ -0,0 +1,594 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use kaspa_addresses::Address; +use kaspa_consensus_core::tx::{ + 
ScriptPublicKey, TransactionId, TransactionIndexType, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry, +}; +use kaspa_utils::{hex::ToHex, serde_bytes_fixed_ref}; +use serde::{Deserialize, Serialize}; +use serde_nested_with::serde_nested; +use workflow_serializer::prelude::*; + +use crate::{ + prelude::{RpcHash, RpcScriptClass, RpcSubnetworkId}, + RpcError, RpcResult, RpcScriptPublicKey, RpcTransactionId, +}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalUtxoEntry { + /// Level: High + pub amount: Option, + /// Level: High + pub script_public_key: Option, + /// Level: Full + pub block_daa_score: Option, + /// Level: High + pub is_coinbase: Option, + pub verbose_data: Option, +} + +impl RpcOptionalUtxoEntry { + pub fn is_empty(&self) -> bool { + self.amount.is_none() + && self.script_public_key.is_none() + && self.block_daa_score.is_none() + && self.is_coinbase.is_none() + && (self.verbose_data.is_none() || self.verbose_data.as_ref().is_some_and(|x| x.is_empty())) + } + + pub fn new( + amount: Option, + script_public_key: Option, + block_daa_score: Option, + is_coinbase: Option, + verbose_data: Option, + ) -> Self { + Self { amount, script_public_key, block_daa_score, is_coinbase, verbose_data } + } +} + +impl From for RpcOptionalUtxoEntry { + fn from(entry: UtxoEntry) -> Self { + Self { + amount: Some(entry.amount), + script_public_key: Some(entry.script_public_key), + block_daa_score: Some(entry.block_daa_score), + is_coinbase: Some(entry.is_coinbase), + verbose_data: None, + } + } +} + +impl TryFrom for UtxoEntry { + type Error = RpcError; + + fn try_from(entry: RpcOptionalUtxoEntry) -> RpcResult { + Ok(Self { + amount: entry.amount.ok_or(RpcError::MissingRpcFieldError("RpcUtxoEntry".to_string(), "amount".to_string()))?, + script_public_key: entry + .script_public_key + .ok_or(RpcError::MissingRpcFieldError("RpcUtxoEntry".to_string(), "script_public_key".to_string()))?, + block_daa_score: entry + .block_daa_score + .ok_or(RpcError::MissingRpcFieldError("RpcUtxoEntry".to_string(), "block_daa_score".to_string()))?, + is_coinbase: entry + .is_coinbase + .ok_or(RpcError::MissingRpcFieldError("RpcUtxoEntry".to_string(), "is_coinbase".to_string()))?, + }) + } +} + +impl Serializer for RpcOptionalUtxoEntry { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.amount, writer)?; + store!(Option, &self.script_public_key, writer)?; + store!(Option, &self.block_daa_score, writer)?; + store!(Option, &self.is_coinbase, writer)?; + serialize!(Option, &self.verbose_data, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcOptionalUtxoEntry { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + match _version { + 1 => { + let amount = load!(Option, reader)?; + let script_public_key = load!(Option, reader)?; + let block_daa_score = load!(Option, reader)?; + let is_coinbase = load!(Option, reader)?; + let verbose_data = deserialize!(Option, reader)?; + + Ok(Self { amount, script_public_key, block_daa_score, is_coinbase, verbose_data }) + } + _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _version))), + } + } +} + +#[derive(Eq, Hash, PartialEq, Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalUtxoEntryVerboseData { + /// Level: Low + pub 
script_public_key_type: Option<RpcScriptClass>, + /// Level: Low + pub script_public_key_address: Option<Address>
, +} + +impl RpcOptionalUtxoEntryVerboseData { + pub fn is_empty(&self) -> bool { + self.script_public_key_type.is_none() && self.script_public_key_address.is_none() + } + + pub fn new(script_public_key_type: Option<RpcScriptClass>, script_public_key_address: Option<Address>
) -> Self { + Self { script_public_key_type, script_public_key_address } + } +} + +impl Serializer for RpcOptionalUtxoEntryVerboseData { + fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option<RpcScriptClass>, &self.script_public_key_type, writer)?; + store!(Option<Address>
, &self.script_public_key_address, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcOptionalUtxoEntryVerboseData { + fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> { + let _version = load!(u8, reader)?; + let script_public_key_type = load!(Option<RpcScriptClass>, reader)?; + let script_public_key_address = load!(Option<Address>
, reader)?; + + Ok(Self { script_public_key_type, script_public_key_address }) + } +} + +/// Represents a Kaspa transaction outpoint +#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde_nested] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalTransactionOutpoint { + #[serde_nested(sub = "TransactionId", serde(with = "serde_bytes_fixed_ref"))] + pub transaction_id: Option, + pub index: Option, +} + +impl From for RpcOptionalTransactionOutpoint { + fn from(outpoint: TransactionOutpoint) -> Self { + Self { transaction_id: Some(outpoint.transaction_id), index: Some(outpoint.index) } + } +} + +impl TryFrom for TransactionOutpoint { + type Error = RpcError; + + fn try_from(outpoint: RpcOptionalTransactionOutpoint) -> RpcResult { + Ok(Self { + transaction_id: outpoint + .transaction_id + .ok_or(RpcError::MissingRpcFieldError("RpcTransactionOutpoint".to_string(), "transaction_id".to_string()))?, + index: outpoint.index.ok_or(RpcError::MissingRpcFieldError("RpcTransactionOutpoint".to_string(), "index".to_string()))?, + }) + } +} + +impl From for RpcOptionalTransactionOutpoint { + fn from(outpoint: kaspa_consensus_client::TransactionOutpoint) -> Self { + TransactionOutpoint::from(outpoint).into() + } +} + +impl TryFrom for kaspa_consensus_client::TransactionOutpoint { + type Error = RpcError; + + fn try_from(outpoint: RpcOptionalTransactionOutpoint) -> RpcResult { + Ok(TransactionOutpoint::try_from(outpoint)?.into()) + } +} + +impl Serializer for RpcOptionalTransactionOutpoint { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.transaction_id, writer)?; + store!(Option, &self.index, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcOptionalTransactionOutpoint { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + match _version { + 1 => { + let transaction_id = load!(Option, reader)?; + let index = load!(Option, reader)?; + Ok(Self { transaction_id, index }) + } + _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _version))), + } + } +} + +/// Represents a Kaspa transaction input +#[derive(Clone, Serialize, Deserialize)] +#[serde_nested] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalTransactionInput { + /// Level: High + pub previous_outpoint: Option, + #[serde_nested(sub = "Vec", serde(with = "hex::serde"))] + /// Level: Low + pub signature_script: Option>, + /// Level: High + pub sequence: Option, + /// Level: High + pub sig_op_count: Option, + pub verbose_data: Option, +} + +impl std::fmt::Debug for RpcOptionalTransactionInput { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RpcTransactionInput") + .field("previous_outpoint", &self.previous_outpoint) + .field("signature_script", &self.signature_script.as_ref().map(|v| v.to_hex())) + .field("sequence", &self.sequence) + .field("sig_op_count", &self.sig_op_count) + .field("verbose_data", &self.verbose_data) + .finish() + } +} + +impl From for RpcOptionalTransactionInput { + fn from(input: TransactionInput) -> Self { + Self { + previous_outpoint: Some(input.previous_outpoint.into()), + signature_script: Some(input.signature_script), + sequence: Some(input.sequence), + sig_op_count: Some(input.sig_op_count), + verbose_data: None, + } + } +} + +impl RpcOptionalTransactionInput { + /// Note: verbose data will not be automatically populated when converting from 
TransactionInput to RpcTransactionInput + pub fn from_transaction_inputs(other: Vec) -> Vec { + other.into_iter().map(Self::from).collect() + } + + pub fn is_empty(&self) -> bool { + self.previous_outpoint.is_none() + && self.signature_script.is_none() + && self.sequence.is_none() + && self.sig_op_count.is_none() + && (self.verbose_data.is_none() || self.verbose_data.as_ref().is_some_and(|x| x.is_empty())) + } +} + +impl Serializer for RpcOptionalTransactionInput { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + serialize!(Option, &self.previous_outpoint, writer)?; + store!(Option>, &self.signature_script, writer)?; + store!(Option, &self.sequence, writer)?; + store!(Option, &self.sig_op_count, writer)?; + serialize!(Option, &self.verbose_data, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcOptionalTransactionInput { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + Ok(match _version { + 1 => { + let previous_outpoint = deserialize!(Option, reader)?; + let signature_script = load!(Option>, reader)?; + let sequence = load!(Option, reader)?; + let sig_op_count = load!(Option, reader)?; + let verbose_data = deserialize!(Option, reader)?; + + Self { previous_outpoint, signature_script, sequence, sig_op_count, verbose_data } + } + _ => return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _version))), + }) + } +} + +/// Represent Kaspa transaction input verbose data +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalTransactionInputVerboseData { + pub utxo_entry: Option, +} + +impl RpcOptionalTransactionInputVerboseData { + pub fn is_empty(&self) -> bool { + self.utxo_entry.is_none() || self.utxo_entry.as_ref().is_some_and(|x| x.is_empty()) + } +} + +impl Serializer for RpcOptionalTransactionInputVerboseData { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + serialize!(Option, &self.utxo_entry, writer)?; + Ok(()) + } +} + +impl Deserializer for RpcOptionalTransactionInputVerboseData { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + let utxo_entry = deserialize!(Option, reader)?; + Ok(Self { utxo_entry }) + } +} + +/// Represents a Kaspad transaction output +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalTransactionOutput { + /// Level - Low + pub value: Option, + /// Level - Low + pub script_public_key: Option, + pub verbose_data: Option, +} + +impl RpcOptionalTransactionOutput { + pub fn is_empty(&self) -> bool { + self.value.is_none() + && self.script_public_key.is_none() + && (self.verbose_data.is_none() || self.verbose_data.as_ref().is_some_and(|x| x.is_empty())) + } + + pub fn from_transaction_outputs(other: Vec) -> Vec { + other.into_iter().map(Self::from).collect() + } +} + +impl From for RpcOptionalTransactionOutput { + fn from(output: TransactionOutput) -> Self { + Self { value: Some(output.value), script_public_key: Some(output.script_public_key), verbose_data: None } + } +} + +impl Serializer for RpcOptionalTransactionOutput { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.value, writer)?; + store!(Option, &self.script_public_key, writer)?; + serialize!(Option, &self.verbose_data, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcOptionalTransactionOutput { + fn 
deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> { + let _version = load!(u8, reader)?; + Ok(match _version { + 1 => { + let value = load!(Option<u64>, reader)?; + let script_public_key = load!(Option<RpcScriptPublicKey>, reader)?; + let verbose_data = deserialize!(Option<RpcOptionalTransactionOutputVerboseData>, reader)?; + + Self { value, script_public_key, verbose_data } + } + _ => return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _version))), + }) + } +} + +/// Represent Kaspa transaction output verbose data +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcOptionalTransactionOutputVerboseData { + /// Level: Low + pub script_public_key_type: Option<RpcScriptClass>, + /// Level: Low + pub script_public_key_address: Option<Address>
, +} + +impl RpcOptionalTransactionOutputVerboseData { + pub fn is_empty(&self) -> bool { + self.script_public_key_type.is_none() && self.script_public_key_address.is_none() + } +} + +impl Serializer for RpcOptionalTransactionOutputVerboseData { + fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option<RpcScriptClass>, &self.script_public_key_type, writer)?; + store!(Option<Address>
, &self.script_public_key_address, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcOptionalTransactionOutputVerboseData { + fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> { + let _version = load!(u8, reader)?; + Ok(match _version { + 1 => { + let script_public_key_type = load!(Option<RpcScriptClass>, reader)?; + let script_public_key_address = load!(Option<Address>
, reader)?;
+                Self { script_public_key_type, script_public_key_address }
+            }
+            _ => return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _version))),
+        })
+    }
+}
+
+/// Represents a Kaspa transaction in which every field is optional, populated according to the requested verbosity
+#[derive(Clone, Serialize, Deserialize)]
+#[serde_nested]
+#[serde(rename_all = "camelCase")]
+pub struct RpcOptionalTransaction {
+    /// Level: Full
+    pub version: Option<u16>,
+    pub inputs: Vec<RpcOptionalTransactionInput>,
+    pub outputs: Vec<RpcOptionalTransactionOutput>,
+    /// Level: Full
+    pub lock_time: Option<u64>,
+    /// Level: Full
+    pub subnetwork_id: Option<RpcSubnetworkId>,
+    /// Level: Full
+    pub gas: Option<u64>,
+    #[serde_nested(sub = "Vec<u8>", serde(with = "hex::serde"))]
+    /// Level: High
+    pub payload: Option<Vec<u8>>,
+    /// Level: High
+    pub mass: Option<u64>,
+    pub verbose_data: Option<RpcOptionalTransactionVerboseData>,
+}
+
+impl RpcOptionalTransaction {
+    pub fn is_empty(&self) -> bool {
+        self.version.is_none()
+            && (self.inputs.is_empty() || self.inputs.iter().all(|input| input.is_empty()))
+            && (self.outputs.is_empty() || self.outputs.iter().all(|output| output.is_empty()))
+            && self.lock_time.is_none()
+            && self.subnetwork_id.is_none()
+            && self.gas.is_none()
+            && self.payload.is_none()
+            && self.mass.is_none()
+            && (self.verbose_data.is_none() || self.verbose_data.as_ref().is_some_and(|x| x.is_empty()))
+    }
+}
+
+impl std::fmt::Debug for RpcOptionalTransaction {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RpcOptionalTransaction")
+            .field("version", &self.version)
+            .field("lock_time", &self.lock_time)
+            .field("subnetwork_id", &self.subnetwork_id)
+            .field("gas", &self.gas)
+            .field("payload", &self.payload.as_ref().map(|v| v.to_hex()))
+            .field("mass", &self.mass)
+            .field("inputs", &self.inputs) // Inputs and outputs are placed purposely at the end for better debug visibility
+            .field("outputs", &self.outputs)
+            .field("verbose_data", &self.verbose_data)
+            .finish()
+    }
+}
+
+impl Serializer for RpcOptionalTransaction {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Option<u16>, &self.version, writer)?;
+        serialize!(Vec<RpcOptionalTransactionInput>, &self.inputs, writer)?;
+        serialize!(Vec<RpcOptionalTransactionOutput>, &self.outputs, writer)?;
+        store!(Option<u64>, &self.lock_time, writer)?;
+        store!(Option<RpcSubnetworkId>, &self.subnetwork_id, writer)?;
+        store!(Option<u64>, &self.gas, writer)?;
+        store!(Option<Vec<u8>>, &self.payload, writer)?;
+        store!(Option<u64>, &self.mass, writer)?;
+        serialize!(Option<RpcOptionalTransactionVerboseData>, &self.verbose_data, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcOptionalTransaction {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _struct_version = load!(u16, reader)?;
+        Ok(match _struct_version {
+            1 => {
+                let version = load!(Option<u16>, reader)?;
+                let inputs = deserialize!(Vec<RpcOptionalTransactionInput>, reader)?;
+                let outputs = deserialize!(Vec<RpcOptionalTransactionOutput>, reader)?;
+                let lock_time = load!(Option<u64>, reader)?;
+                let subnetwork_id = load!(Option<RpcSubnetworkId>, reader)?;
+                let gas = load!(Option<u64>, reader)?;
+                let payload = load!(Option<Vec<u8>>, reader)?;
+                let mass = load!(Option<u64>, reader)?;
+                let verbose_data = deserialize!(Option<RpcOptionalTransactionVerboseData>, reader)?;
+
+                Self { version, inputs, outputs, lock_time, subnetwork_id, gas, payload, mass, verbose_data }
+            }
+            _ => {
+                return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _struct_version)))
+            }
+        })
+    }
+}
+
+/// Represents Kaspa transaction verbose data
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde_nested]
+#[serde(rename_all = "camelCase")]
+pub struct RpcOptionalTransactionVerboseData {
+    #[serde_nested(sub = "RpcTransactionId", serde(with = "serde_bytes_fixed_ref"))]
+    /// Level: Low
+    pub transaction_id: Option<RpcTransactionId>,
+    #[serde_nested(sub = "RpcHash", serde(with = "serde_bytes_fixed_ref"))]
+    /// Level: Low
+    pub hash: Option<RpcHash>,
+    /// Level: High
+    pub compute_mass: Option<u64>,
+    #[serde_nested(sub = "RpcHash", serde(with = "serde_bytes_fixed_ref"))]
+    /// Level: Low
+    pub block_hash: Option<RpcHash>,
+    /// Level: Low
+    pub block_time: Option<u64>,
+}
+
+impl RpcOptionalTransactionVerboseData {
+    pub fn is_empty(&self) -> bool {
+        self.transaction_id.is_none()
+            && self.hash.is_none()
+            && self.compute_mass.is_none()
+            && self.block_hash.is_none()
+            && self.block_time.is_none()
+    }
+}
+
+impl Serializer for RpcOptionalTransactionVerboseData {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        store!(Option<RpcTransactionId>, &self.transaction_id, writer)?;
+        store!(Option<RpcHash>, &self.hash, writer)?;
+        store!(Option<u64>, &self.compute_mass, writer)?;
+        store!(Option<RpcHash>, &self.block_hash, writer)?;
+        store!(Option<u64>, &self.block_time, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcOptionalTransactionVerboseData {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        Ok(match _version {
+            1 => {
+                let transaction_id = load!(Option<RpcTransactionId>, reader)?;
+                let hash = load!(Option<RpcHash>, reader)?;
+                let compute_mass = load!(Option<u64>, reader)?;
+                let block_hash = load!(Option<RpcHash>, reader)?;
+                let block_time = load!(Option<u64>, reader)?;
+
+                Self { transaction_id, hash, compute_mass, block_hash, block_time }
+            }
+            _ => return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unsupported version: {}", _version))),
+        })
+    }
+}
+
+/// Represents accepted transaction ids
+#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcOptionalAcceptedTransactionIds {
+    #[serde(with = "serde_bytes_fixed_ref")]
+    pub accepting_block_hash: RpcHash,
+    pub accepted_transaction_ids: Vec<RpcTransactionId>,
+}
diff --git a/rpc/core/src/model/tests.rs b/rpc/core/src/model/tests.rs
index fcb80c399a..99a6cf229a 100644
--- a/rpc/core/src/model/tests.rs
+++ b/rpc/core/src/model/tests.rs
@@ -1,7 +1,7 @@
 #[cfg(test)]
 mod mockery {
 
-    use crate::{model::*, RpcScriptClass};
+    use crate::model::*;
     use kaspa_addresses::{Prefix, Version};
     use kaspa_consensus_core::api::BlockCount;
     use kaspa_consensus_core::network::NetworkType;
@@ -11,6 +11,7 @@ mod mockery {
     use kaspa_math::Uint192;
     use kaspa_notify::subscription::Command;
     use kaspa_rpc_macros::test_wrpc_serializer as test;
+    use kaspa_txscript::script_class::ScriptClass;
     use kaspa_utils::networking::{ContextualNetAddress, IpAddress, NetAddress};
     use rand::Rng;
     use std::net::{IpAddr, Ipv4Addr};
@@ -55,7 +56,8 @@ mod mockery {
    where
        T: Mock,
    {
-        Mock::mock()
+        // forward to the type's Mock implementation
+        T::mock()
    }

    // this function tests serialization and deserialization of a type
@@ -203,9 +205,9 @@
        }
    }

-    impl Mock for RpcTransactionInputVerboseData {
+    impl Mock for RpcOptionalTransactionInputVerboseData {
        fn mock() -> Self {
-            RpcTransactionInputVerboseData {}
+            RpcOptionalTransactionInputVerboseData { utxo_entry: mock() }
        }
    }

@@ -221,9 +223,33 @@
        }
    }

+    impl Mock for RpcOptionalTransactionInput {
+        fn mock() -> Self {
+            RpcOptionalTransactionInput {
+                previous_outpoint: mock(),
+                signature_script: Some(Hash::mock().as_bytes().to_vec()),
+                sequence: mock(),
+                sig_op_count: mock(),
+                verbose_data: mock(),
+            }
+        }
+    }
+
+    impl Mock for RpcOptionalTransactionOutpoint {
+        fn mock() -> Self {
+            RpcOptionalTransactionOutpoint { transaction_id: mock(), index: mock() }
+        }
+    }
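For context on how these mocks get exercised: each `test!(...)` invocation round-trips a mocked value through the wRPC serializer. A minimal hand-rolled sketch of that round-trip is shown below, assuming the `Serializer`/`Deserializer` traits from `workflow_serializer` used throughout this PR, and using `serde_json` for the comparison since the model structs do not derive `PartialEq` (the real tests go through the `test_wrpc_serializer` macro instead):

```rust
use workflow_serializer::prelude::*; // Serializer / Deserializer traits, as used above

// Sketch: serialize a mocked value into a buffer, read it back, and compare
// the two values through their serde JSON projections.
fn roundtrip<T: Serializer + Deserializer + serde::Serialize>(value: &T) {
    // Serialize into an in-memory buffer (Vec<u8> implements std::io::Write).
    let mut buf: Vec<u8> = Vec::new();
    value.serialize(&mut buf).expect("serialization succeeds");

    // Read the value back from the same bytes (&[u8] implements std::io::Read).
    let mut reader = buf.as_slice();
    let restored = T::deserialize(&mut reader).expect("deserialization succeeds");

    // Assumption of this sketch: compare via serde_json because the RPC
    // model structs intentionally do not derive PartialEq.
    assert_eq!(serde_json::to_string(value).unwrap(), serde_json::to_string(&restored).unwrap());
}
```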
+ impl Mock for RpcTransactionOutputVerboseData { fn mock() -> Self { - RpcTransactionOutputVerboseData { script_public_key_type: RpcScriptClass::PubKey, script_public_key_address: mock() } + RpcTransactionOutputVerboseData { script_public_key_type: mock(), script_public_key_address: mock() } + } + } + + impl Mock for RpcOptionalTransactionOutputVerboseData { + fn mock() -> Self { + RpcOptionalTransactionOutputVerboseData { script_public_key_type: mock(), script_public_key_address: mock() } } } @@ -233,6 +259,12 @@ mod mockery { } } + impl Mock for RpcOptionalTransactionOutput { + fn mock() -> Self { + RpcOptionalTransactionOutput { value: mock(), script_public_key: mock(), verbose_data: mock() } + } + } + impl Mock for RpcTransactionVerboseData { fn mock() -> Self { RpcTransactionVerboseData { @@ -245,6 +277,103 @@ mod mockery { } } + impl Mock for RpcOptionalTransactionVerboseData { + fn mock() -> Self { + RpcOptionalTransactionVerboseData { + transaction_id: mock(), + hash: mock(), + compute_mass: mock(), + block_hash: mock(), + block_time: mock(), + } + } + } + + impl Mock for RpcUtxoEntryVerbosity { + fn mock() -> Self { + RpcUtxoEntryVerbosity { + include_amount: mock(), + include_script_public_key: mock(), + include_block_daa_score: mock(), + include_is_coinbase: mock(), + verbose_data_verbosity: mock(), + } + } + } + + impl Mock for RpcUtxoEntryVerboseDataVerbosity { + fn mock() -> Self { + RpcUtxoEntryVerboseDataVerbosity { include_script_public_key_type: mock(), include_script_public_key_address: mock() } + } + } + + impl Mock for RpcTransactionInputVerboseDataVerbosity { + fn mock() -> Self { + RpcTransactionInputVerboseDataVerbosity { utxo_entry_verbosity: mock() } + } + } + + impl Mock for RpcTransactionInputVerboseData { + fn mock() -> Self { + RpcTransactionInputVerboseData {} + } + } + + impl Mock for RpcTransactionInputVerbosity { + fn mock() -> Self { + RpcTransactionInputVerbosity { + include_previous_outpoint: mock(), + include_signature_script: mock(), + include_sequence: mock(), + include_sig_op_count: mock(), + verbose_data_verbosity: mock(), + } + } + } + + impl Mock for RpcTransactionOutputVerbosity { + fn mock() -> Self { + RpcTransactionOutputVerbosity { include_amount: mock(), include_script_public_key: mock(), verbose_data_verbosity: mock() } + } + } + + impl Mock for RpcTransactionOutputVerboseDataVerbosity { + fn mock() -> Self { + RpcTransactionOutputVerboseDataVerbosity { + include_script_public_key_type: mock(), + include_script_public_key_address: mock(), + } + } + } + + impl Mock for RpcTransactionVerboseDataVerbosity { + fn mock() -> Self { + RpcTransactionVerboseDataVerbosity { + include_transaction_id: mock(), + include_hash: mock(), + include_compute_mass: mock(), + include_block_hash: mock(), + include_block_time: mock(), + } + } + } + + impl Mock for RpcTransactionVerbosity { + fn mock() -> Self { + RpcTransactionVerbosity { + include_version: mock(), + input_verbosity: mock(), + output_verbosity: mock(), + include_lock_time: mock(), + include_subnetwork_id: mock(), + include_gas: mock(), + include_payload: mock(), + include_mass: mock(), + verbose_data_verbosity: mock(), + } + } + } + impl Mock for RpcTransaction { fn mock() -> Self { RpcTransaction { @@ -261,6 +390,42 @@ mod mockery { } } + impl Mock for RpcOptionalTransaction { + fn mock() -> Self { + RpcOptionalTransaction { + version: mock(), + inputs: mock(), + outputs: mock(), + lock_time: mock(), + subnetwork_id: mock(), + gas: mock(), + payload: Some(Hash::mock().as_bytes().to_vec()), + 
mass: mock(), + verbose_data: mock(), + } + } + } + + impl Mock for RpcOptionalHeader { + fn mock() -> Self { + RpcOptionalHeader { + version: mock(), + timestamp: mock(), + bits: mock(), + nonce: mock(), + hash_merkle_root: mock(), + accepted_id_merkle_root: mock(), + utxo_commitment: mock(), + hash: mock(), + parents_by_level: vec![mock()], + daa_score: mock(), + blue_score: mock(), + blue_work: mock(), + pruning_point: mock(), + } + } + } + impl Mock for RpcNodeId { fn mock() -> Self { RpcNodeId::new(Uuid::new_v4()) @@ -321,7 +486,7 @@ mod mockery { impl Mock for ScriptPublicKey { fn mock() -> Self { - let mut bytes: [u8; 36] = [0; 36]; + let mut bytes: [u8; 35] = [0; 35]; rand::thread_rng().fill(&mut bytes[..]); ScriptPublicKey::from_vec(0, bytes.to_vec()) } @@ -329,7 +494,36 @@ mod mockery { impl Mock for RpcUtxoEntry { fn mock() -> Self { - RpcUtxoEntry { amount: mock(), script_public_key: mock(), block_daa_score: mock(), is_coinbase: true } + RpcUtxoEntry { amount: mock(), script_public_key: mock(), block_daa_score: mock(), is_coinbase: mock() } + } + } + + impl Mock for RpcOptionalUtxoEntry { + fn mock() -> Self { + RpcOptionalUtxoEntry { + amount: mock(), + script_public_key: mock(), + block_daa_score: mock(), + is_coinbase: mock(), + verbose_data: mock(), + } + } + } + + impl Mock for RpcOptionalUtxoEntryVerboseData { + fn mock() -> Self { + RpcOptionalUtxoEntryVerboseData { script_public_key_type: mock(), script_public_key_address: mock() } + } + } + + impl Mock for ScriptClass { + fn mock() -> Self { + match rand::thread_rng().gen::() % 4 { + 0 => ScriptClass::NonStandard, + 1 => ScriptClass::PubKey, + 2 => ScriptClass::PubKeyECDSA, + _ => ScriptClass::ScriptHash, // 3 + } } } @@ -1068,6 +1262,32 @@ mod mockery { test!(GetDaaScoreTimestampEstimateResponse); + impl Mock for GetVirtualChainFromBlockV2Request { + fn mock() -> Self { + GetVirtualChainFromBlockV2Request { start_hash: mock(), data_verbosity_level: None, min_confirmation_count: mock() } + } + } + + test!(GetVirtualChainFromBlockV2Request); + + impl Mock for RpcChainBlockAcceptedTransactions { + fn mock() -> Self { + RpcChainBlockAcceptedTransactions { chain_block_header: mock(), accepted_transactions: mock() } + } + } + + impl Mock for GetVirtualChainFromBlockV2Response { + fn mock() -> Self { + GetVirtualChainFromBlockV2Response { + removed_chain_block_hashes: mock(), + added_chain_block_hashes: mock(), + chain_block_accepted_transactions: mock(), + } + } + } + + test!(GetVirtualChainFromBlockV2Response); + impl Mock for NotifyBlockAddedRequest { fn mock() -> Self { NotifyBlockAddedRequest { command: Command::Start } diff --git a/rpc/core/src/model/tx.rs b/rpc/core/src/model/tx.rs index 0c17e26f53..fdcf407d89 100644 --- a/rpc/core/src/model/tx.rs +++ b/rpc/core/src/model/tx.rs @@ -8,7 +8,10 @@ use kaspa_utils::{hex::ToHex, serde_bytes_fixed_ref}; use serde::{Deserialize, Serialize}; use workflow_serializer::prelude::*; -use crate::prelude::{RpcHash, RpcScriptClass, RpcSubnetworkId}; +use crate::{ + prelude::{RpcHash, RpcScriptClass, RpcSubnetworkId}, + RpcOptionalHeader, RpcOptionalTransaction, +}; /// Represents the ID of a Kaspa transaction pub type RpcTransactionId = TransactionId; @@ -313,7 +316,7 @@ impl std::fmt::Debug for RpcTransaction { .field("gas", &self.gas) .field("payload", &self.payload.to_hex()) .field("mass", &self.mass) - .field("inputs", &self.inputs) // Inputs and outputs are placed purposely at the end for better debug visibility + .field("inputs", &self.inputs) // Inputs and outputs are placed 
purposely at the end for better debug visibility .field("outputs", &self.outputs) .field("verbose_data", &self.verbose_data) .finish() @@ -398,3 +401,31 @@ pub struct RpcAcceptedTransactionIds { pub accepting_block_hash: RpcHash, pub accepted_transaction_ids: Vec, } + +/// Represents accepted transaction ids +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcChainBlockAcceptedTransactions { + pub chain_block_header: RpcOptionalHeader, + pub accepted_transactions: Vec, +} + +impl Serializer for RpcChainBlockAcceptedTransactions { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcOptionalHeader, &self.chain_block_header, writer)?; + serialize!(Vec, &self.accepted_transactions, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcChainBlockAcceptedTransactions { + fn deserialize(reader: &mut R) -> std::io::Result { + let _struct_version = load!(u16, reader)?; + let chain_block_header = deserialize!(RpcOptionalHeader, reader)?; + let accepted_transactions = deserialize!(Vec, reader)?; + + Ok(Self { chain_block_header, accepted_transactions }) + } +} diff --git a/rpc/core/src/model/verbosity.rs b/rpc/core/src/model/verbosity.rs new file mode 100644 index 0000000000..6d322d44f4 --- /dev/null +++ b/rpc/core/src/model/verbosity.rs @@ -0,0 +1,614 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, Copy)] +#[borsh(use_discriminant = true)] +#[repr(i32)] +pub enum RpcDataVerbosityLevel { + None = 0, + Low = 1, + High = 2, + Full = 3, +} + +impl Serializer for RpcDataVerbosityLevel { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + + let val: i32 = *self as i32; + writer.write_all(&val.to_le_bytes())?; + + Ok(()) + } +} + +impl Deserializer for RpcDataVerbosityLevel { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + let mut buf = [0u8; 4]; + reader.read_exact(&mut buf)?; + let val = i32::from_le_bytes(buf); + RpcDataVerbosityLevel::try_from(val) + .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid RpcDataVerbosityLevel")) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcHeaderVerbosity { + /// Cached hash + pub include_hash: Option, + pub include_version: Option, + pub include_parents_by_level: Option, + pub include_hash_merkle_root: Option, + pub include_accepted_id_merkle_root: Option, + pub include_utxo_commitment: Option, + /// Timestamp is in milliseconds + pub include_timestamp: Option, + pub include_bits: Option, + pub include_nonce: Option, + pub include_daa_score: Option, + pub include_blue_work: Option, + pub include_blue_score: Option, + pub include_pruning_point: Option, +} + +impl Serializer for RpcHeaderVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + + store!(Option, &self.include_hash, writer)?; + store!(Option, &self.include_version, writer)?; + store!(Option, &self.include_parents_by_level, writer)?; + store!(Option, &self.include_hash_merkle_root, writer)?; + store!(Option, &self.include_accepted_id_merkle_root, writer)?; + store!(Option, &self.include_utxo_commitment, writer)?; + store!(Option, &self.include_timestamp, writer)?; + store!(Option, 
&self.include_bits, writer)?; + store!(Option, &self.include_nonce, writer)?; + store!(Option, &self.include_daa_score, writer)?; + store!(Option, &self.include_blue_work, writer)?; + store!(Option, &self.include_blue_score, writer)?; + store!(Option, &self.include_pruning_point, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcHeaderVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + + let include_hash = load!(Option, reader)?; + let include_version = load!(Option, reader)?; + let include_parents_by_level = load!(Option, reader)?; + let include_hash_merkle_root = load!(Option, reader)?; + let include_accepted_id_merkle_root = load!(Option, reader)?; + let include_utxo_commitment = load!(Option, reader)?; + let include_timestamp = load!(Option, reader)?; + let include_bits = load!(Option, reader)?; + let include_nonce = load!(Option, reader)?; + let include_daa_score = load!(Option, reader)?; + let include_blue_work = load!(Option, reader)?; + let include_blue_score = load!(Option, reader)?; + let include_pruning_point = load!(Option, reader)?; + + Ok(Self { + include_hash, + include_version, + include_parents_by_level, + include_hash_merkle_root, + include_accepted_id_merkle_root, + include_utxo_commitment, + include_timestamp, + include_bits, + include_nonce, + include_daa_score, + include_blue_work, + include_blue_score, + include_pruning_point, + }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcUtxoEntryVerboseDataVerbosity { + pub include_script_public_key_type: Option, + pub include_script_public_key_address: Option, +} + +impl RpcUtxoEntryVerboseDataVerbosity { + pub fn new(include_script_public_key_type: Option, include_script_public_key_address: Option) -> Self { + Self { include_script_public_key_type, include_script_public_key_address } + } +} + +impl Serializer for RpcUtxoEntryVerboseDataVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.include_script_public_key_type, writer)?; + store!(Option, &self.include_script_public_key_address, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcUtxoEntryVerboseDataVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + let include_script_public_key_type = load!(Option, reader)?; + let include_script_public_key_address = load!(Option, reader)?; + + Ok(Self { include_script_public_key_type, include_script_public_key_address }) + } +} + +// RpcUtxoEntryVerbosity +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcUtxoEntryVerbosity { + pub include_amount: Option, + pub include_script_public_key: Option, + pub include_block_daa_score: Option, + pub include_is_coinbase: Option, + pub verbose_data_verbosity: Option, +} + +impl RpcUtxoEntryVerbosity { + pub fn new( + include_amount: Option, + include_script_public_key: Option, + include_block_daa_score: Option, + include_is_coinbase: Option, + verbose_data_verbosity: Option, + ) -> Self { + Self { include_amount, include_script_public_key, include_block_daa_score, include_is_coinbase, verbose_data_verbosity } + } +} + +impl Serializer for RpcUtxoEntryVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.include_amount, writer)?; + store!(Option, &self.include_script_public_key, writer)?; + store!(Option, 
&self.include_block_daa_score, writer)?; + store!(Option, &self.include_is_coinbase, writer)?; + serialize!(Option, &self.verbose_data_verbosity, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcUtxoEntryVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + let include_amount = load!(Option, reader)?; + let include_script_public_key = load!(Option, reader)?; + let include_block_daa_score = load!(Option, reader)?; + let include_is_coinbase = load!(Option, reader)?; + let verbose_data_verbosity = deserialize!(Option, reader)?; + + Ok(Self { include_amount, include_script_public_key, include_block_daa_score, include_is_coinbase, verbose_data_verbosity }) + } +} + +// RpcTransactionInputVerbosity +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcTransactionInputVerbosity { + pub include_previous_outpoint: Option, + pub include_signature_script: Option, + pub include_sequence: Option, + pub include_sig_op_count: Option, + pub verbose_data_verbosity: Option, +} + +impl RpcTransactionInputVerbosity { + pub fn new( + include_previous_outpoint: Option, + include_signature_script: Option, + include_sequence: Option, + include_sig_op_count: Option, + verbose_data_verbosity: Option, + ) -> Self { + Self { include_previous_outpoint, include_signature_script, include_sequence, include_sig_op_count, verbose_data_verbosity } + } +} + +impl Serializer for RpcTransactionInputVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.include_previous_outpoint, writer)?; + store!(Option, &self.include_signature_script, writer)?; + store!(Option, &self.include_sequence, writer)?; + store!(Option, &self.include_sig_op_count, writer)?; + serialize!(Option, &self.verbose_data_verbosity, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcTransactionInputVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + let include_previous_outpoint = load!(Option, reader)?; + let include_signature_script = load!(Option, reader)?; + let include_sequence = load!(Option, reader)?; + let include_sig_op_count = load!(Option, reader)?; + let verbose_data_verbosity = deserialize!(Option, reader)?; + + Ok(Self { + include_previous_outpoint, + include_signature_script, + include_sequence, + include_sig_op_count, + verbose_data_verbosity, + }) + } +} + +// RpcTransactionInputVerboseDataVerbosity +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcTransactionInputVerboseDataVerbosity { + pub utxo_entry_verbosity: Option, +} + +impl RpcTransactionInputVerboseDataVerbosity { + pub fn new(utxo_entry_verbosity: Option) -> Self { + Self { utxo_entry_verbosity } + } +} + +impl Serializer for RpcTransactionInputVerboseDataVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + serialize!(Option, &self.utxo_entry_verbosity, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcTransactionInputVerboseDataVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + let utxo_entry_verbosity = deserialize!(Option, reader)?; + + Ok(Self { utxo_entry_verbosity }) + } +} + +// RpcTransactionOutputVerbosity + +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcTransactionOutputVerbosity { + pub include_amount: 
Option, + pub include_script_public_key: Option, + pub verbose_data_verbosity: Option, +} + +impl RpcTransactionOutputVerbosity { + pub fn new( + include_amount: Option, + include_script_public_key: Option, + verbose_data_verbosity: Option, + ) -> Self { + Self { include_amount, include_script_public_key, verbose_data_verbosity } + } +} + +impl Serializer for RpcTransactionOutputVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.include_amount, writer)?; + store!(Option, &self.include_script_public_key, writer)?; + serialize!(Option, &self.verbose_data_verbosity, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcTransactionOutputVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + let include_amount = load!(Option, reader)?; + let include_script_public_key = load!(Option, reader)?; + let verbose_data_verbosity = deserialize!(Option, reader)?; + + Ok(Self { include_amount, include_script_public_key, verbose_data_verbosity }) + } +} + +// RpcTransactionOutputVerboseDataVerbosity +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcTransactionOutputVerboseDataVerbosity { + pub include_script_public_key_type: Option, + pub include_script_public_key_address: Option, +} + +impl RpcTransactionOutputVerboseDataVerbosity { + pub fn new(include_script_public_key_type: Option, include_script_public_key_address: Option) -> Self { + Self { include_script_public_key_type, include_script_public_key_address } + } +} + +impl Serializer for RpcTransactionOutputVerboseDataVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.include_script_public_key_type, writer)?; + store!(Option, &self.include_script_public_key_address, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcTransactionOutputVerboseDataVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + let include_script_public_key_type = load!(Option, reader)?; + let include_script_public_key_address = load!(Option, reader)?; + + Ok(Self { include_script_public_key_type, include_script_public_key_address }) + } +} + +// RpcTransactionVerbosity +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcTransactionVerbosity { + pub include_version: Option, + pub input_verbosity: Option, + pub output_verbosity: Option, + pub include_lock_time: Option, + pub include_subnetwork_id: Option, + pub include_gas: Option, + pub include_payload: Option, + pub include_mass: Option, + pub verbose_data_verbosity: Option, +} + +impl RpcTransactionVerbosity { + pub fn new( + include_version: Option, + input_verbosity: Option, + output_verbosity: Option, + include_lock_time: Option, + include_subnetwork_id: Option, + include_gas: Option, + include_payload: Option, + include_mass: Option, + verbose_data_verbosity: Option, + ) -> Self { + Self { + include_version, + input_verbosity, + output_verbosity, + include_lock_time, + include_subnetwork_id, + include_gas, + include_payload, + include_mass, + verbose_data_verbosity, + } + } + + pub fn requires_populated_transaction(&self) -> bool { + self.input_verbosity + .as_ref() + .is_some_and(|active| active.verbose_data_verbosity.as_ref().is_some_and(|active| active.utxo_entry_verbosity.is_some())) + } + + pub fn requires_block_hash(&self) -> bool { + 
self.verbose_data_verbosity.as_ref().is_some_and(|active| active.include_block_hash.is_some()) + } + + pub fn requires_block_time(&self) -> bool { + self.verbose_data_verbosity.as_ref().is_some_and(|active| active.include_block_time.is_some()) + } +} + +impl Serializer for RpcTransactionVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.include_version, writer)?; + serialize!(Option, &self.input_verbosity, writer)?; + serialize!(Option, &self.output_verbosity, writer)?; + store!(Option, &self.include_lock_time, writer)?; + store!(Option, &self.include_subnetwork_id, writer)?; + store!(Option, &self.include_gas, writer)?; + store!(Option, &self.include_payload, writer)?; + store!(Option, &self.include_mass, writer)?; + serialize!(Option, &self.verbose_data_verbosity, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcTransactionVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + let include_version = load!(Option, reader)?; + let input_verbosity = deserialize!(Option, reader)?; + let output_verbosity = deserialize!(Option, reader)?; + let include_lock_time = load!(Option, reader)?; + let include_subnetwork_id = load!(Option, reader)?; + let include_gas = load!(Option, reader)?; + let include_payload = load!(Option, reader)?; + let include_mass = load!(Option, reader)?; + let verbose_data_verbosity = deserialize!(Option, reader)?; + + Ok(Self { + include_version, + input_verbosity, + output_verbosity, + include_lock_time, + include_subnetwork_id, + include_gas, + include_payload, + include_mass, + verbose_data_verbosity, + }) + } +} + +// RpcTransactionVerboseDataVerbosity +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcTransactionVerboseDataVerbosity { + pub include_transaction_id: Option, + pub include_hash: Option, + pub include_compute_mass: Option, + pub include_block_hash: Option, + pub include_block_time: Option, +} + +impl RpcTransactionVerboseDataVerbosity { + pub fn new( + include_transaction_id: Option, + include_hash: Option, + include_compute_mass: Option, + include_block_hash: Option, + include_block_time: Option, + ) -> Self { + Self { include_transaction_id, include_hash, include_compute_mass, include_block_hash, include_block_time } + } +} + +impl Serializer for RpcTransactionVerboseDataVerbosity { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(Option, &self.include_transaction_id, writer)?; + store!(Option, &self.include_hash, writer)?; + store!(Option, &self.include_compute_mass, writer)?; + store!(Option, &self.include_block_hash, writer)?; + store!(Option, &self.include_block_time, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcTransactionVerboseDataVerbosity { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + + let include_transaction_id = load!(Option, reader)?; + let include_hash = load!(Option, reader)?; + let include_compute_mass = load!(Option, reader)?; + let include_block_hash = load!(Option, reader)?; + let include_block_time = load!(Option, reader)?; + + Ok(Self { include_transaction_id, include_hash, include_compute_mass, include_block_hash, include_block_time }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RpcAcceptanceDataVerbosity { + pub accepting_chain_header_verbosity: Option, + pub 
mergeset_block_acceptance_data_verbosity: Option<RpcMergesetBlockAcceptanceDataVerbosity>,
+}
+
+impl RpcAcceptanceDataVerbosity {
+    pub fn new(
+        accepting_chain_header_verbosity: Option<RpcHeaderVerbosity>,
+        mergeset_block_acceptance_data_verbosity: Option<RpcMergesetBlockAcceptanceDataVerbosity>,
+    ) -> Self {
+        Self { accepting_chain_header_verbosity, mergeset_block_acceptance_data_verbosity }
+    }
+
+    pub fn requires_merged_header(&self, default: bool) -> bool {
+        self.mergeset_block_acceptance_data_verbosity.as_ref().map_or(default, |active| active.requires_merged_header())
+    }
+
+    pub fn requires_accepted_header(&self, default: bool) -> bool {
+        self.mergeset_block_acceptance_data_verbosity.as_ref().map_or(default, |active| active.requires_merged_block_hash())
+    }
+
+    pub fn requires_accepted_transactions(&self, default: bool) -> bool {
+        self.mergeset_block_acceptance_data_verbosity
+            .as_ref()
+            .map_or(default, |active| active.accepted_transactions_verbosity.is_some())
+    }
+}
+
+impl Serializer for RpcAcceptanceDataVerbosity {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        store!(Option<RpcHeaderVerbosity>, &self.accepting_chain_header_verbosity, writer)?;
+        serialize!(Option<RpcMergesetBlockAcceptanceDataVerbosity>, &self.mergeset_block_acceptance_data_verbosity, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcAcceptanceDataVerbosity {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        let accepting_chain_header_verbosity = load!(Option<RpcHeaderVerbosity>, reader)?;
+        let mergeset_block_acceptance_data_verbosity = deserialize!(Option<RpcMergesetBlockAcceptanceDataVerbosity>, reader)?;
+
+        Ok(Self { accepting_chain_header_verbosity, mergeset_block_acceptance_data_verbosity })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcMergesetBlockAcceptanceDataVerbosity {
+    pub merged_header_verbosity: Option<RpcHeaderVerbosity>,
+    pub accepted_transactions_verbosity: Option<RpcTransactionVerbosity>,
+}
+
+impl RpcMergesetBlockAcceptanceDataVerbosity {
+    pub fn new(
+        merged_header_verbosity: Option<RpcHeaderVerbosity>,
+        accepted_transactions_verbosity: Option<RpcTransactionVerbosity>,
+    ) -> Self {
+        Self { merged_header_verbosity, accepted_transactions_verbosity }
+    }
+
+    pub fn requires_merged_header(&self) -> bool {
+        self.merged_header_verbosity.is_some()
+            || self.accepted_transactions_verbosity.as_ref().is_some_and(|active| {
+                active.verbose_data_verbosity.as_ref().is_some_and(|active| active.include_block_hash.unwrap_or(false))
+            })
+    }
+
+    pub fn requires_merged_block_hash(&self) -> bool {
+        self.merged_header_verbosity.as_ref().is_some_and(|active| active.include_hash.unwrap_or(false))
+            || self.accepted_transactions_verbosity.as_ref().is_some_and(|active| {
+                active.verbose_data_verbosity.as_ref().is_some_and(|active| active.include_block_hash.unwrap_or(false))
+            })
+    }
+}
+
+impl Serializer for RpcMergesetBlockAcceptanceDataVerbosity {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        serialize!(Option<RpcHeaderVerbosity>, &self.merged_header_verbosity, writer)?;
+        serialize!(Option<RpcTransactionVerbosity>, &self.accepted_transactions_verbosity, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcMergesetBlockAcceptanceDataVerbosity {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        let merged_header_verbosity = deserialize!(Option<RpcHeaderVerbosity>, reader)?;
+        let accepted_transactions_verbosity = deserialize!(Option<RpcTransactionVerbosity>, reader)?;
+
+        Ok(Self { merged_header_verbosity, accepted_transactions_verbosity })
+    }
+}
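Since every field in these verbosity structs is an `Option`, a client composes a request by opting into exactly the subtrees it wants; leaving any level as `None` prunes the whole branch from the response. A rough usage sketch follows, assuming these types are re-exported from `kaspa_rpc_core` like the rest of the model and relying on the `Default` derives above:

```rust
use kaspa_rpc_core::{
    RpcAcceptanceDataVerbosity, RpcMergesetBlockAcceptanceDataVerbosity, RpcTransactionVerbosity,
    RpcTransactionVerboseDataVerbosity,
};

// Request only accepted transaction ids plus the hash of the accepting block;
// every subtree left as `None` (or defaulted) is omitted from the response.
fn txid_and_block_hash_verbosity() -> RpcAcceptanceDataVerbosity {
    RpcAcceptanceDataVerbosity {
        accepting_chain_header_verbosity: None,
        mergeset_block_acceptance_data_verbosity: Some(RpcMergesetBlockAcceptanceDataVerbosity {
            merged_header_verbosity: None,
            accepted_transactions_verbosity: Some(RpcTransactionVerbosity {
                verbose_data_verbosity: Some(RpcTransactionVerboseDataVerbosity {
                    include_transaction_id: Some(true),
                    include_block_hash: Some(true),
                    ..Default::default()
                }),
                ..Default::default()
            }),
        }),
    }
}

fn main() {
    let verbosity = txid_and_block_hash_verbosity();
    // The helper predicates let the server decide what it has to load:
    assert!(verbosity.requires_accepted_transactions(false));
    // Asking for the accepted block hash forces retrieval of the merged headers.
    assert!(verbosity.requires_merged_header(false));
}
```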
diff --git a/rpc/core/src/wasm/convert.rs b/rpc/core/src/wasm/convert.rs
index 319f74bf0c..49e62aeadd 100644
--- a/rpc/core/src/wasm/convert.rs
+++ b/rpc/core/src/wasm/convert.rs
@@ -76,5 +76,54 @@ cfg_if::cfg_if! {
                 }
             }
         }
+
+        impl From<TransactionInput> for RpcOptionalTransactionInput {
+            fn from(tx_input: TransactionInput) -> Self {
+                let inner = tx_input.inner();
+                RpcOptionalTransactionInput {
+                    previous_outpoint: Some(inner.previous_outpoint.clone().into()),
+                    signature_script: Some(inner.signature_script.clone().unwrap_or_default()),
+                    sequence: Some(inner.sequence),
+                    sig_op_count: Some(inner.sig_op_count),
+                    verbose_data: None,
+                }
+            }
+        }
+
+        impl From<TransactionOutput> for RpcOptionalTransactionOutput {
+            fn from(output: TransactionOutput) -> Self {
+                let inner = output.inner();
+                RpcOptionalTransactionOutput { value: Some(inner.value), script_public_key: Some(inner.script_public_key.clone()), verbose_data: None }
+            }
+        }
+
+        impl From<Transaction> for RpcOptionalTransaction {
+            fn from(tx: Transaction) -> Self {
+                RpcOptionalTransaction::from(&tx)
+            }
+        }
+
+        impl From<&Transaction> for RpcOptionalTransaction {
+            fn from(tx: &Transaction) -> Self {
+                let inner = tx.inner();
+                let inputs: Vec<RpcOptionalTransactionInput> =
+                    inner.inputs.clone().into_iter().map(|input| input.into()).collect::<Vec<RpcOptionalTransactionInput>>();
+                let outputs: Vec<RpcOptionalTransactionOutput> =
+                    inner.outputs.clone().into_iter().map(|output| output.into()).collect::<Vec<RpcOptionalTransactionOutput>>();
+
+                RpcOptionalTransaction {
+                    version: Some(inner.version),
+                    inputs,
+                    outputs,
+                    lock_time: Some(inner.lock_time),
+                    subnetwork_id: Some(inner.subnetwork_id.clone()),
+                    gas: Some(inner.gas),
+                    payload: Some(inner.payload.clone()),
+                    mass: Some(inner.mass),
+                    verbose_data: None,
+                }
+            }
+        }
     }
 }
diff --git a/rpc/core/src/wasm/message.rs b/rpc/core/src/wasm/message.rs
index 53ba49fe29..9f1c5095d3 100644
--- a/rpc/core/src/wasm/message.rs
+++ b/rpc/core/src/wasm/message.rs
@@ -44,6 +44,30 @@ const TS_ACCEPTED_TRANSACTION_IDS: &'static str = r#"
 }
 "#;
 
+#[wasm_bindgen(typescript_custom_section)]
+const TS_ADDED_ACCEPTANCE_DATA: &'static str = r#"
+    /**
+     * Chain block accepted transactions
+     *
+     * @category Node RPC
+     */
+    export interface IChainBlockAddedTransactions {
+        chainBlockHeader: Header;
+        acceptedTransactions: Transaction[];
+    }
+"#;
+
+// DataVerbosityLevel
+#[wasm_bindgen(typescript_custom_section)]
+const TS_DATA_VERBOSITY_LEVEL: &'static str = r#"
+    /**
+     * Data verbosity level
+     *
+     * @category Node RPC
+     */
+    export type DataVerbosityLevel = "None" | "Low" | "High" | "Full";
+"#;
+
 // ---
 
 declare! {
@@ -1263,6 +1287,49 @@ try_from! ( args: GetVirtualChainFromBlockResponse, IGetVirtualChainFromBlockRes
     Ok(to_value(&args)?.into())
 });
 
+declare! {
+    IGetVirtualChainFromBlockV2Request,
+    r#"
+    /**
+     * Argument interface for the {@link RpcClient.getVirtualChainFromBlockV2} RPC method.
+     *
+     * @category Node RPC
+     */
+    export interface IGetVirtualChainFromBlockV2Request {
+        startHash : HexString;
+        dataVerbosityLevel?: DataVerbosityLevel;
+        /**
+         * If provided, only chain blocks with at least minConfirmationCount confirmations are returned,
+         * where a block's confirmation count is its distance from the virtual chain tip.
+         * If omitted, it is interpreted as 0.
+         */
+        minConfirmationCount?: number;
+    }
+    "#,
+}
+
+try_from! ( args: IGetVirtualChainFromBlockV2Request, GetVirtualChainFromBlockV2Request, {
+    Ok(from_value(args.into())?)
+});
+
+declare! {
+    IGetVirtualChainFromBlockV2Response,
+    r#"
+    /**
+     * Return interface for the {@link RpcClient.getVirtualChainFromBlockV2} RPC method.
+     *
+     * @category Node RPC
+     */
+    export interface IGetVirtualChainFromBlockV2Response {
+        removedChainBlockHashes : HexString[];
+        addedChainBlockHashes : HexString[];
+        chainBlockAcceptedTransactions : IChainBlockAddedTransactions[];
+    }
+    "#,
+}
+
+try_from! ( args: GetVirtualChainFromBlockV2Response, IGetVirtualChainFromBlockV2Response, {
+    Ok(to_value(&args)?.into())
+});
+
 // ---
 
 declare! {
diff --git a/rpc/grpc/client/src/error.rs b/rpc/grpc/client/src/error.rs
index 31bd217d67..c237e9f9ee 100644
--- a/rpc/grpc/client/src/error.rs
+++ b/rpc/grpc/client/src/error.rs
@@ -2,7 +2,7 @@ use kaspa_notify::error::Error as NotifyError;
 use kaspa_rpc_core::RpcError;
 use thiserror::Error;
 
-pub type BoxedStdError = Box<(dyn std::error::Error + Sync + std::marker::Send + 'static)>;
+pub type BoxedStdError = Box<dyn std::error::Error + Sync + Send + 'static>;
 
 #[derive(Debug, Error)]
 pub enum Error {
diff --git a/rpc/grpc/client/src/lib.rs b/rpc/grpc/client/src/lib.rs
index faa2a3522a..5d45d9f5b3 100644
--- a/rpc/grpc/client/src/lib.rs
+++ b/rpc/grpc/client/src/lib.rs
@@ -277,6 +277,7 @@ impl RpcApi for GrpcClient {
     route!(get_fee_estimate_experimental_call, GetFeeEstimateExperimental);
     route!(get_current_block_color_call, GetCurrentBlockColor);
     route!(get_utxo_return_address_call, GetUtxoReturnAddress);
+    route!(get_virtual_chain_from_block_v2_call, GetVirtualChainFromBlockV2);
 
     // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     // Notification API
diff --git a/rpc/grpc/core/Cargo.toml b/rpc/grpc/core/Cargo.toml
index 05ad952eb2..29f6500b64 100644
--- a/rpc/grpc/core/Cargo.toml
+++ b/rpc/grpc/core/Cargo.toml
@@ -38,5 +38,8 @@ workflow-core.workspace = true
 [build-dependencies]
 tonic-build = { workspace = true, features = ["prost"] }
 
+[dev-dependencies]
+itertools.workspace = true
+
 [lints]
 workspace = true
diff --git a/rpc/grpc/core/proto/messages.proto b/rpc/grpc/core/proto/messages.proto
index ccb2798b67..af0a91614d 100644
--- a/rpc/grpc/core/proto/messages.proto
+++ b/rpc/grpc/core/proto/messages.proto
@@ -65,7 +65,8 @@ message KaspadRequest {
         GetFeeEstimateRequestMessage getFeeEstimateRequest = 1106;
         GetFeeEstimateExperimentalRequestMessage getFeeEstimateExperimentalRequest = 1108;
         GetCurrentBlockColorRequestMessage getCurrentBlockColorRequest = 1110;
-        GetUtxoReturnAddressRequestMessage GetUtxoReturnAddressRequest = 1112;
+        GetUtxoReturnAddressRequestMessage getUtxoReturnAddressRequest = 1112;
+        GetVirtualChainFromBlockV2RequestMessage getVirtualChainFromBlockV2Request = 1114;
     }
 }
 
@@ -131,7 +132,8 @@ message KaspadResponse {
         GetFeeEstimateResponseMessage getFeeEstimateResponse = 1107;
         GetFeeEstimateExperimentalResponseMessage getFeeEstimateExperimentalResponse = 1109;
         GetCurrentBlockColorResponseMessage getCurrentBlockColorResponse = 1111;
-        GetUtxoReturnAddressResponseMessage GetUtxoReturnAddressResponse = 1113;
+        GetUtxoReturnAddressResponseMessage getUtxoReturnAddressResponse = 1113;
+        GetVirtualChainFromBlockV2ResponseMessage getVirtualChainFromBlockV2Response = 1115;
     }
 }
 
diff --git a/rpc/grpc/core/proto/rpc.proto b/rpc/grpc/core/proto/rpc.proto
index 50e0c57964..fda862e261 100644
--- a/rpc/grpc/core/proto/rpc.proto
+++ b/rpc/grpc/core/proto/rpc.proto
@@ -33,8 +33,9 @@ message RpcBlockHeader {
    uint64 nonce = 8;
    uint64 daaScore = 9;
    string blueWork = 10;
-    string pruningPoint = 14;
    uint64 blueScore = 13;
+    string pruningPoint = 14;
+    string hash = 15;
 }
 
 message RpcBlockLevelParents {
@@ -66,6 +67,14 @@ message RpcTransaction {
    uint64 mass = 10;
 }
 
+message RpcTransactionVerboseData{
+    string transactionId = 1;
+    string hash = 2;
+    uint64 computeMass = 4;
+    string blockHash = 12;
+    uint64 blockTime = 14;
+}
+
 message RpcTransactionInput {
    RpcOutpoint previousOutpoint = 1;
    string signatureScript = 2;
@@ -74,6 +83,23 @@ message RpcTransactionInput {
    RpcTransactionInputVerboseData verboseData = 4;
 }
 
+message RpcTransactionInputVerboseData {
+    RpcUtxoEntry utxoEntry = 1; // This field is new; if it is null, the UTXO could not be found or the input is a coinbase.
+}
+
+message RpcUtxoEntry {
+    uint64 amount = 1;
+    RpcScriptPublicKey scriptPublicKey = 2;
+    uint64 blockDaaScore = 3;
+    bool isCoinbase = 4;
+    RpcUtxoEntryVerboseData verboseData = 5;
+}
+
+message RpcUtxoEntryVerboseData {
+    string scriptPublicKeyType = 5;
+    string scriptPublicKeyAddress = 6;
+}
+
 message RpcScriptPublicKey {
    uint32 version = 1;
    string scriptPublicKey = 2;
@@ -90,24 +116,6 @@ message RpcOutpoint {
    uint32 index = 2;
 }
 
-message RpcUtxoEntry {
-    uint64 amount = 1;
-    RpcScriptPublicKey scriptPublicKey = 2;
-    uint64 blockDaaScore = 3;
-    bool isCoinbase = 4;
-}
-
-message RpcTransactionVerboseData{
-    string transactionId = 1;
-    string hash = 2;
-    uint64 computeMass = 4;
-    string blockHash = 12;
-    uint64 blockTime = 14;
-}
-
-message RpcTransactionInputVerboseData{
-}
-
 message RpcTransactionOutputVerboseData{
    string scriptPublicKeyType = 5;
    string scriptPublicKeyAddress = 6;
@@ -557,7 +565,7 @@ message GetUtxosByAddressesResponseMessage {
 }
 
 // GetBalanceByAddressRequest returns the total balance in unspent transactions towards a given address
-// 
+//
 // This call is only available when this kaspad was started with `--utxoindex`
 message GetBalanceByAddressRequestMessage {
     string address = 1;
@@ -792,7 +800,7 @@ message ConnectionMetrics {
     uint32 jsonLiveConnections = 41;
     uint64 jsonConnectionAttempts = 42;
     uint64 jsonHandshakeFailures = 43;
-    
+
     uint32 activePeers = 51;
 }
 
@@ -815,7 +823,7 @@ message ConsensusMetrics{
     uint64 txsCounts = 5;
     uint64 chainBlockCounts = 6;
     uint64 massCounts = 7;
-    
+
     uint64 blockCount = 11;
     uint64 headerCount = 12;
     uint64 mempoolSize = 13;
@@ -917,7 +925,7 @@ message RpcFeerateBucket {
 }
 
 // Data required for making fee estimates.
-// 
+//
 // Feerate values represent fee/mass of a transaction in `sompi/gram` units.
// Given a feerate value recommendation, calculate the required fee by // taking the transaction mass and multiplying it by feerate: `fee = feerate * mass(tx)` @@ -966,22 +974,135 @@ message GetFeeEstimateExperimentalResponseMessage { RPCError error = 1000; } -message GetCurrentBlockColorRequestMessage { - string hash = 1; +message GetUtxoReturnAddressRequestMessage { + string txid = 1; + uint64 accepting_block_daa_score = 2; } -message GetCurrentBlockColorResponseMessage { - bool blue = 1; +message GetUtxoReturnAddressResponseMessage { + string return_address = 1; + RPCError error = 1000; +} + +// ### messages for vspc v2 ### + +enum RpcDataVerbosityLevel { + NONE = 0; + LOW = 1; + HIGH = 2; + FULL = 3; +} + +message RpcOptionalHeader { + optional string hash = 2; + optional uint32 version = 3; + repeated RpcBlockLevelParents parentsByLevel = 4; + optional string hashMerkleRoot = 5; + optional string acceptedIdMerkleRoot = 6; + optional string utxoCommitment = 7; + optional int64 timestamp = 8; + optional uint32 bits = 9; + optional uint64 nonce = 10; + optional uint64 daaScore = 11; + optional string blueWork = 12; + optional uint64 blueScore = 13; + optional string pruningPoint = 14; +} + +message RpcOptionalTransactionOutpoint { + optional string transactionId = 2; + optional uint32 index = 3; +} + +message RpcOptionalUtxoEntryVerboseData { + optional string scriptPublicKeyType = 2; + optional string scriptPublicKeyAddress = 3; +} + +message RpcOptionalUtxoEntry { + optional uint64 amount = 2; + optional RpcScriptPublicKey scriptPublicKey = 3; + optional uint64 blockDaaScore = 4; + optional bool isCoinbase = 5; + optional RpcOptionalUtxoEntryVerboseData verboseData = 6; +} + +message RpcOptionalTransactionInputVerboseData { + optional RpcOptionalUtxoEntry utxoEntry = 2; +} + +message RpcOptionalTransactionInput { + optional RpcOptionalTransactionOutpoint previousOutpoint = 2; + optional string signatureScript = 3; + optional uint64 sequence = 4; + optional uint32 sigOpCount = 5; + optional RpcOptionalTransactionInputVerboseData verboseData = 6; +} + +message RpcOptionalTransactionOutputVerboseData { + optional string scriptPublicKeyType = 2; + optional string scriptPublicKeyAddress = 3; +} + +message RpcOptionalTransactionOutput { + optional uint64 value = 2; + optional RpcScriptPublicKey scriptPublicKey = 3; + optional RpcOptionalTransactionOutputVerboseData verboseData = 4; +} + +message RpcOptionalTransactionVerboseData { + optional string transactionId = 2; + optional string hash = 3; + optional uint64 computeMass = 4; + optional string blockHash = 5; + optional uint64 blockTime = 6; +} + +message RpcOptionalTransaction { + optional uint32 version = 2; + repeated RpcOptionalTransactionInput inputs = 3; + repeated RpcOptionalTransactionOutput outputs = 4; + optional uint64 lockTime = 5; + optional string subnetworkId = 6; + optional uint64 gas = 7; + optional string payload = 8; + optional uint64 mass = 9; + optional RpcOptionalTransactionVerboseData verboseData = 10; +} + +message RpcChainBlockAcceptedTransactions { + RpcOptionalHeader chainBlockHeader = 2; + repeated RpcOptionalTransaction acceptedTransactions = 3; +} + +/// GetVirtualChainFromBlockV2RequestMessage requests the virtual selected +/// parent chain from some startHash to this kaspad's current virtual +message GetVirtualChainFromBlockV2RequestMessage { + string startHash = 1; + + optional RpcDataVerbosityLevel dataVerbosityLevel = 2; + optional uint64 minConfirmationCount = 3; +} + +message 
GetVirtualChainFromBlockV2ResponseMessage { + // The chain blocks that were removed, in high-to-low order + repeated string removedChainBlockHashes = 1; + + // The chain blocks that were added, in low-to-high order + repeated string addedChainBlockHashes = 2; + + // Will be filled depending on the supplied RpcDataVerbosityLevel. + repeated RpcChainBlockAcceptedTransactions chainBlockAcceptedTransactions = 3; RPCError error = 1000; } -message GetUtxoReturnAddressRequestMessage { - string txid = 1; - uint64 accepting_block_daa_score = 2; +message GetCurrentBlockColorRequestMessage { + string hash = 1; } -message GetUtxoReturnAddressResponseMessage { - string return_address = 1; +message GetCurrentBlockColorResponseMessage { + bool blue = 1; + RPCError error = 1000; } diff --git a/rpc/grpc/core/src/convert/acceptance_data.rs b/rpc/grpc/core/src/convert/acceptance_data.rs new file mode 100644 index 0000000000..09c3715fed --- /dev/null +++ b/rpc/grpc/core/src/convert/acceptance_data.rs @@ -0,0 +1,29 @@ +use crate::protowire::{self}; +use crate::{from, try_from}; +use kaspa_rpc_core::RpcError; + +// ---------------------------------------------------------------------------- +// rpc_core to protowire +// ---------------------------------------------------------------------------- + +from!(item: &kaspa_rpc_core::RpcDataVerbosityLevel, protowire::RpcDataVerbosityLevel, { + match item { + kaspa_rpc_core::RpcDataVerbosityLevel::None => protowire::RpcDataVerbosityLevel::None, + kaspa_rpc_core::RpcDataVerbosityLevel::Low => protowire::RpcDataVerbosityLevel::Low, + kaspa_rpc_core::RpcDataVerbosityLevel::High => protowire::RpcDataVerbosityLevel::High, + kaspa_rpc_core::RpcDataVerbosityLevel::Full => protowire::RpcDataVerbosityLevel::Full, + } +}); + +// ---------------------------------------------------------------------------- +// protowire to rpc_core +// ---------------------------------------------------------------------------- + +try_from!(item: &protowire::RpcDataVerbosityLevel, kaspa_rpc_core::RpcDataVerbosityLevel, { + match item { + protowire::RpcDataVerbosityLevel::None => kaspa_rpc_core::RpcDataVerbosityLevel::None, + protowire::RpcDataVerbosityLevel::Low => kaspa_rpc_core::RpcDataVerbosityLevel::Low, + protowire::RpcDataVerbosityLevel::High => kaspa_rpc_core::RpcDataVerbosityLevel::High, + protowire::RpcDataVerbosityLevel::Full => kaspa_rpc_core::RpcDataVerbosityLevel::Full + } +}); diff --git a/rpc/grpc/core/src/convert/block.rs b/rpc/grpc/core/src/convert/block.rs index 6ab9e37fa0..94db02b429 100644 --- a/rpc/grpc/core/src/convert/block.rs +++ b/rpc/grpc/core/src/convert/block.rs @@ -7,6 +7,14 @@ use std::str::FromStr; // rpc_core to protowire // ---------------------------------------------------------------------------- +from!(item: &kaspa_rpc_core::RpcOptionalBlock, protowire::RpcBlock, { + Self { + header: item.header.as_ref().map(protowire::RpcBlockHeader::from), + transactions: item.transactions.iter().map(protowire::RpcTransaction::from).collect(), + verbose_data: item.verbose_data.as_ref().map(|x| x.into()), + } +}); + from!(item: &kaspa_rpc_core::RpcBlock, protowire::RpcBlock, { Self { header: Some(protowire::RpcBlockHeader::from(&item.header)), @@ -42,19 +50,19 @@ from!(item: &kaspa_rpc_core::RpcBlockVerboseData, protowire::RpcBlockVerboseData // protowire to rpc_core // ---------------------------------------------------------------------------- -try_from!(item: &protowire::RpcBlock, kaspa_rpc_core::RpcBlock, { +try_from!(item: &protowire::RpcBlock, 
kaspa_rpc_core::RpcOptionalBlock, { Self { header: item .header .as_ref() - .ok_or_else(|| RpcError::MissingRpcFieldError("RpcBlock".to_string(), "header".to_string()))? - .try_into()?, - transactions: item.transactions.iter().map(kaspa_rpc_core::RpcTransaction::try_from).collect::, _>>()?, + .map(kaspa_rpc_core::RpcOptionalHeader::try_from) + .transpose()?, + transactions: item.transactions.iter().map(kaspa_rpc_core::RpcOptionalTransaction::try_from).collect::, _>>()?, verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcBlockVerboseData::try_from).transpose()?, } }); -try_from!(item: &protowire::RpcBlock, kaspa_rpc_core::RpcRawBlock, { +try_from!(item: &protowire::RpcBlock, kaspa_rpc_core::RpcBlock, { Self { header: item .header @@ -62,6 +70,14 @@ try_from!(item: &protowire::RpcBlock, kaspa_rpc_core::RpcRawBlock, { .ok_or_else(|| RpcError::MissingRpcFieldError("RpcBlock".to_string(), "header".to_string()))? .try_into()?, transactions: item.transactions.iter().map(kaspa_rpc_core::RpcTransaction::try_from).collect::, _>>()?, + verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcBlockVerboseData::try_from).transpose()?, + } +}); + +try_from!(item: &protowire::RpcBlock, kaspa_rpc_core::RpcRawBlock, { + Self { + header: kaspa_rpc_core::RpcRawHeader::try_from(item.header.as_ref().ok_or(RpcError::MissingRpcFieldError("RpcBlock".to_string(), "header".to_string()))?)?, + transactions: item.transactions.iter().map(kaspa_rpc_core::RpcTransaction::try_from).collect::, _>>()?, } }); diff --git a/rpc/grpc/core/src/convert/header.rs b/rpc/grpc/core/src/convert/header.rs index 5d763034a7..0c7dc69481 100644 --- a/rpc/grpc/core/src/convert/header.rs +++ b/rpc/grpc/core/src/convert/header.rs @@ -11,7 +11,7 @@ use std::str::FromStr; from!(item: &kaspa_rpc_core::RpcHeader, protowire::RpcBlockHeader, { Self { version: item.version.into(), - parents: item.parents_by_level.iter().map(protowire::RpcBlockLevelParents::from).collect(), + parents: item.parents_by_level.iter().map(|x| x.as_slice().into()).collect(), hash_merkle_root: item.hash_merkle_root.to_string(), accepted_id_merkle_root: item.accepted_id_merkle_root.to_string(), utxo_commitment: item.utxo_commitment.to_string(), @@ -22,13 +22,37 @@ from!(item: &kaspa_rpc_core::RpcHeader, protowire::RpcBlockHeader, { blue_work: item.blue_work.to_rpc_hex(), blue_score: item.blue_score, pruning_point: item.pruning_point.to_string(), + hash: item.hash.to_string(), + } +}); + +from!(item: &kaspa_rpc_core::RpcOptionalHeader, protowire::RpcBlockHeader, { + Self { + hash: item.hash.map(|x| x.to_string()).unwrap_or_default(), + version: item.version.map(|x| x.into()).unwrap_or_default(), + parents: item + .parents_by_level + .iter() + .map(|level| level.as_slice().into()) + .collect(), + hash_merkle_root: item.hash_merkle_root.map(|x| x.to_string()).unwrap_or_default(), + accepted_id_merkle_root: item.accepted_id_merkle_root.map(|x| x.to_string()).unwrap_or_default(), + utxo_commitment: item.utxo_commitment.map(|x| x.to_string()).unwrap_or_default(), + timestamp: item.timestamp.map(|x| x.try_into().expect("timestamp is always convertible to i64")).unwrap_or_default(), + bits: item.bits.unwrap_or_default(), + nonce: item.nonce.unwrap_or_default(), + daa_score: item.daa_score.unwrap_or_default(), + blue_work: item.blue_work.map(|x| x.to_rpc_hex()).unwrap_or_default(), + blue_score: item.blue_score.unwrap_or_default(), + pruning_point: item.pruning_point.map(|x| x.to_string()).unwrap_or_default(), } }); from!(item: &kaspa_rpc_core::RpcRawHeader, 
protowire::RpcBlockHeader, {
     Self {
+        hash: Default::default(), // We don't include the hash for the raw header
         version: item.version.into(),
-        parents: item.parents_by_level.iter().map(protowire::RpcBlockLevelParents::from).collect(),
+        parents: item.parents_by_level.iter().map(|x| x.as_slice().into()).collect(),
         hash_merkle_root: item.hash_merkle_root.to_string(),
         accepted_id_merkle_root: item.accepted_id_merkle_root.to_string(),
         utxo_commitment: item.utxo_commitment.to_string(),
@@ -42,7 +66,7 @@ from!(item: &kaspa_rpc_core::RpcRawHeader, protowire::RpcBlockHeader, {
     }
 });
 
-from!(item: &Vec<RpcHash>, protowire::RpcBlockLevelParents, { Self { parent_hashes: item.iter().map(|x| x.to_string()).collect() } });
+from!(item: &[RpcHash], protowire::RpcBlockLevelParents, { Self { parent_hashes: item.iter().map(|x| x.to_string()).collect() } });
 
 // ----------------------------------------------------------------------------
 // protowire to rpc_core
 // ----------------------------------------------------------------------------
@@ -52,7 +76,7 @@ try_from!(item: &protowire::RpcBlockHeader, kaspa_rpc_core::RpcHeader, {
     // We re-hash the block to remain as most trustless as possible
     let header = Header::new_finalized(
         item.version.try_into()?,
-        item.parents.iter().map(Vec::<RpcHash>::try_from).collect::<RpcResult<Vec<Vec<RpcHash>>>>()?,
+        item.parents.iter().map(Vec::<RpcHash>::try_from).collect::<RpcResult<Vec<Vec<RpcHash>>>>()?.try_into()?,
         RpcHash::from_str(&item.hash_merkle_root)?,
         RpcHash::from_str(&item.accepted_id_merkle_root)?,
         RpcHash::from_str(&item.utxo_commitment)?,
@@ -85,6 +109,26 @@ try_from!(item: &protowire::RpcBlockHeader, kaspa_rpc_core::RpcRawHeader, {
     }
 });
 
+try_from!(item: &protowire::RpcBlockHeader, kaspa_rpc_core::RpcOptionalHeader, {
+    // We re-hash the block to remain as trustless as possible
+    let header = Header::new_finalized(
+        item.version.try_into()?,
+        item.parents.iter().map(Vec::<RpcHash>::try_from).collect::<RpcResult<Vec<Vec<RpcHash>>>>()?.try_into()?,
+        RpcHash::from_str(&item.hash_merkle_root)?,
+        RpcHash::from_str(&item.accepted_id_merkle_root)?,
+        RpcHash::from_str(&item.utxo_commitment)?,
+        item.timestamp.try_into()?,
+        item.bits,
+        item.nonce,
+        item.daa_score,
+        kaspa_rpc_core::RpcBlueWorkType::from_rpc_hex(&item.blue_work)?,
+        item.blue_score,
+        RpcHash::from_str(&item.pruning_point)?,
+    );
+
+    kaspa_rpc_core::RpcOptionalHeader::from(header)
+});
+
 try_from!(item: &protowire::RpcBlockLevelParents, Vec<RpcHash>, {
     item.parent_hashes.iter().map(|x| RpcHash::from_str(x)).collect::<Result<Vec<RpcHash>, _>>()?
}); @@ -92,6 +136,7 @@ try_from!(item: &protowire::RpcBlockLevelParents, Vec, { #[cfg(test)] mod tests { use crate::protowire; + use itertools::Itertools; use kaspa_consensus_core::{block::Block, header::Header}; use kaspa_rpc_core::{RpcBlock, RpcHash, RpcHeader}; @@ -102,51 +147,52 @@ mod tests { RpcHash::from_u64_word(c) } - fn test_parents_by_level_rxr(r: &[Vec], r2: &[Vec]) { - for i in 0..r.len() { - for j in 0..r[i].len() { - assert_eq!(r[i][j], r2[i][j]); - } - } + fn test_parents_by_level_rxr(rpc_parents_1: &[Vec], rpc_parents_2: &[Vec]) { + assert_eq!(rpc_parents_1, rpc_parents_2); } - fn test_parents_by_level_rxp(r: &[Vec], p: &[protowire::RpcBlockLevelParents]) { - for i in 0..r.len() { - for j in 0..r[i].len() { - assert_eq!(r[i][j].to_string(), p[i].parent_hashes[j]); + fn test_parents_by_level_rxp(rpc_parents: &[Vec], proto_parents: &[protowire::RpcBlockLevelParents]) { + for (r_level_parents, proto_level_parents) in rpc_parents.iter().zip_eq(proto_parents.iter()) { + for (r_parent, proto_parent) in r_level_parents.iter().zip_eq(proto_level_parents.parent_hashes.iter()) { + assert_eq!(r_parent.to_string(), *proto_parent); } } } #[test] fn test_rpc_block_level_parents() { - let p = protowire::RpcBlockLevelParents { + let proto_block_level_parents = protowire::RpcBlockLevelParents { parent_hashes: vec![new_unique().to_string(), new_unique().to_string(), new_unique().to_string()], }; - let r: Vec = (&p).try_into().unwrap(); - let p2: protowire::RpcBlockLevelParents = (&r).into(); - for (i, _) in r.iter().enumerate() { - assert_eq!(p.parent_hashes[i], r[i].to_string()); - assert_eq!(p2.parent_hashes[i], r[i].to_string()); - assert_eq!(p.parent_hashes[i], p2.parent_hashes[i]); + let rpc_block_level_parents: Vec = (&proto_block_level_parents).try_into().unwrap(); + let proto_block_level_parents_reconverted: protowire::RpcBlockLevelParents = rpc_block_level_parents.as_slice().into(); + for (i, _) in rpc_block_level_parents.iter().enumerate() { + assert_eq!(proto_block_level_parents.parent_hashes[i], rpc_block_level_parents[i].to_string()); + assert_eq!(proto_block_level_parents_reconverted.parent_hashes[i], rpc_block_level_parents[i].to_string()); + assert_eq!(proto_block_level_parents.parent_hashes[i], proto_block_level_parents_reconverted.parent_hashes[i]); } - assert_eq!(p, p2); - - let r: Vec = vec![new_unique(), new_unique()]; - let p: protowire::RpcBlockLevelParents = (&r).into(); - let r2: Vec = (&p).try_into().unwrap(); - for i in 0..r.len() { - assert_eq!(p.parent_hashes[i], r[i].to_string()); - assert_eq!(p.parent_hashes[i], r2[i].to_string()); - assert_eq!(r[i], r2[i]); + assert_eq!(proto_block_level_parents, proto_block_level_parents_reconverted); + + let rpc_block_level_parents: Vec = vec![new_unique(), new_unique()]; + let proto_block_level_parents: protowire::RpcBlockLevelParents = rpc_block_level_parents.as_slice().into(); + let rpc_block_level_parents_reconverted: Vec = (&proto_block_level_parents).try_into().unwrap(); + + assert_eq!(rpc_block_level_parents, rpc_block_level_parents_reconverted); + for ((p_hash, r1_hash), r2_hash) in + proto_block_level_parents.parent_hashes.iter().zip_eq(rpc_block_level_parents).zip_eq(rpc_block_level_parents_reconverted) + { + assert_eq!(p_hash, &r1_hash.to_string()); + assert_eq!(p_hash, &r2_hash.to_string()); + assert_eq!(r1_hash, r2_hash); } - assert_eq!(r, r2); } #[test] fn test_rpc_header() { - let r = Header::new_finalized( + let header = Header::new_finalized( 0, - vec![vec![new_unique(), new_unique(), new_unique()], 
+            vec![vec![new_unique(), new_unique(), new_unique()], vec![new_unique()], vec![new_unique(), new_unique()]]
+                .try_into()
+                .unwrap(),
             new_unique(),
             new_unique(),
             new_unique(),
@@ -158,27 +204,29 @@ mod tests {
             1928374,
             new_unique(),
         );
-        let r = RpcHeader::from(r);
-        let p: protowire::RpcBlockHeader = (&r).into();
-        let r2: RpcHeader = (&p).try_into().unwrap();
-        let p2: protowire::RpcBlockHeader = (&r2).into();
-
-        assert_eq!(r.parents_by_level, r2.parents_by_level);
-        assert_eq!(p.parents, p2.parents);
-        test_parents_by_level_rxr(&r.parents_by_level, &r2.parents_by_level);
-        test_parents_by_level_rxp(&r.parents_by_level, &p.parents);
-        test_parents_by_level_rxp(&r.parents_by_level, &p2.parents);
-        test_parents_by_level_rxp(&r2.parents_by_level, &p2.parents);
-
-        assert_eq!(r.hash, r2.hash);
-        assert_eq!(p, p2);
+        let rpc_header = RpcHeader::from(header);
+        let proto_header: protowire::RpcBlockHeader = (&rpc_header).into();
+        let reconverted_rpc_header: RpcHeader = (&proto_header).try_into().unwrap();
+        let reconverted_proto_header: protowire::RpcBlockHeader = (&reconverted_rpc_header).into();
+
+        assert_eq!(rpc_header.parents_by_level, reconverted_rpc_header.parents_by_level);
+        assert_eq!(proto_header.parents, reconverted_proto_header.parents.to_vec());
+        test_parents_by_level_rxr(&rpc_header.parents_by_level, &reconverted_rpc_header.parents_by_level);
+        test_parents_by_level_rxp(&rpc_header.parents_by_level, &proto_header.parents);
+        test_parents_by_level_rxp(&rpc_header.parents_by_level, &reconverted_proto_header.parents);
+        test_parents_by_level_rxp(&reconverted_rpc_header.parents_by_level, &reconverted_proto_header.parents);
+
+        assert_eq!(rpc_header.hash, reconverted_rpc_header.hash);
+        assert_eq!(proto_header, reconverted_proto_header);
     }
 
     #[test]
     fn test_rpc_block() {
-        let h = Header::new_finalized(
+        let header = Header::new_finalized(
             0,
-            vec![vec![new_unique(), new_unique(), new_unique()], vec![new_unique()], vec![new_unique(), new_unique()]],
+            vec![vec![new_unique(), new_unique(), new_unique()], vec![new_unique()], vec![new_unique(), new_unique()]]
+                .try_into()
+                .unwrap(),
             new_unique(),
             new_unique(),
             new_unique(),
@@ -190,25 +238,30 @@ mod tests {
             1928374,
             new_unique(),
         );
-        let b = Block::from_header(h);
-        let r: RpcBlock = (&b).into();
-        let p: protowire::RpcBlock = (&r).into();
-        let r2: RpcBlock = (&p).try_into().unwrap();
-        let b2: Block = r2.clone().try_into().unwrap();
-        let r3: RpcBlock = (&b2).into();
-        let p2: protowire::RpcBlock = (&r3).into();
-
-        assert_eq!(r.header.parents_by_level, r2.header.parents_by_level);
-        assert_eq!(p.header.as_ref().unwrap().parents, p2.header.as_ref().unwrap().parents);
-        test_parents_by_level_rxr(&r.header.parents_by_level, &r2.header.parents_by_level);
-        test_parents_by_level_rxr(&r.header.parents_by_level, &r3.header.parents_by_level);
-        test_parents_by_level_rxr(&b.header.parents_by_level, &r2.header.parents_by_level);
-        test_parents_by_level_rxr(&b.header.parents_by_level, &b2.header.parents_by_level);
-        test_parents_by_level_rxp(&r.header.parents_by_level, &p.header.as_ref().unwrap().parents);
-        test_parents_by_level_rxp(&r.header.parents_by_level, &p2.header.as_ref().unwrap().parents);
-        test_parents_by_level_rxp(&r2.header.parents_by_level, &p2.header.as_ref().unwrap().parents);
-
-        assert_eq!(b.hash(), b2.hash());
-        assert_eq!(p, p2);
+        let consensus_block = Block::from_header(header);
+        let rpc_block: RpcBlock = (&consensus_block).into();
+        let proto_block: protowire::RpcBlock = (&rpc_block).into();
+        let rpc_block_converted_from_proto: RpcBlock = (&proto_block).try_into().unwrap();
+        let consensus_block_reconverted: Block = rpc_block_converted_from_proto.clone().try_into().unwrap();
+        let rpc_block_reconverted_from_consensus: RpcBlock = (&consensus_block_reconverted).into();
+        let proto_block_reconverted: protowire::RpcBlock = (&rpc_block_reconverted_from_consensus).into();
+        let consensus_parents = Vec::from(&consensus_block.header.parents_by_level);
+        let consensus_reconverted_parents = Vec::from(&consensus_block_reconverted.header.parents_by_level);
+
+        assert_eq!(rpc_block.header.parents_by_level, rpc_block_converted_from_proto.header.parents_by_level);
+        assert_eq!(proto_block.header.as_ref().unwrap().parents, proto_block_reconverted.header.as_ref().unwrap().parents);
+        test_parents_by_level_rxr(&rpc_block.header.parents_by_level, &rpc_block_converted_from_proto.header.parents_by_level);
+        test_parents_by_level_rxr(&rpc_block.header.parents_by_level, &rpc_block_reconverted_from_consensus.header.parents_by_level);
+        test_parents_by_level_rxr(&consensus_parents, &rpc_block_converted_from_proto.header.parents_by_level);
+        test_parents_by_level_rxr(&consensus_parents, &consensus_reconverted_parents);
+        test_parents_by_level_rxp(&rpc_block.header.parents_by_level, &proto_block.header.as_ref().unwrap().parents);
+        test_parents_by_level_rxp(&rpc_block.header.parents_by_level, &proto_block_reconverted.header.as_ref().unwrap().parents);
+        test_parents_by_level_rxp(
+            &rpc_block_converted_from_proto.header.parents_by_level,
+            &proto_block_reconverted.header.as_ref().unwrap().parents,
+        );
+
+        assert_eq!(consensus_block.hash(), consensus_block_reconverted.hash());
+        assert_eq!(proto_block, proto_block_reconverted);
     }
 }
diff --git a/rpc/grpc/core/src/convert/kaspad.rs b/rpc/grpc/core/src/convert/kaspad.rs
index 7243fd401a..3d1d51a980 100644
--- a/rpc/grpc/core/src/convert/kaspad.rs
+++ b/rpc/grpc/core/src/convert/kaspad.rs
@@ -64,6 +64,7 @@ pub mod kaspad_request_convert {
     impl_into_kaspad_request!(GetFeeEstimateExperimental);
     impl_into_kaspad_request!(GetCurrentBlockColor);
     impl_into_kaspad_request!(GetUtxoReturnAddress);
+    impl_into_kaspad_request!(GetVirtualChainFromBlockV2);
 
     impl_into_kaspad_request!(NotifyBlockAdded);
     impl_into_kaspad_request!(NotifyNewBlockTemplate);
@@ -202,6 +203,7 @@ pub mod kaspad_response_convert {
     impl_into_kaspad_response!(GetFeeEstimateExperimental);
     impl_into_kaspad_response!(GetCurrentBlockColor);
     impl_into_kaspad_response!(GetUtxoReturnAddress);
+    impl_into_kaspad_response!(GetVirtualChainFromBlockV2);
 
     impl_into_kaspad_notify_response!(NotifyBlockAdded);
     impl_into_kaspad_notify_response!(NotifyNewBlockTemplate);
diff --git a/rpc/grpc/core/src/convert/message.rs b/rpc/grpc/core/src/convert/message.rs
index 254710b5ff..2c039758c1 100644
--- a/rpc/grpc/core/src/convert/message.rs
+++ b/rpc/grpc/core/src/convert/message.rs
@@ -24,11 +24,11 @@ use kaspa_consensus_core::{network::NetworkId, Hash};
 use kaspa_core::debug;
 use kaspa_notify::subscription::Command;
 use kaspa_rpc_core::{
-    RpcContextualPeerAddress, RpcError, RpcExtraData, RpcHash, RpcIpAddress, RpcNetworkType, RpcPeerAddress, RpcResult,
-    SubmitBlockRejectReason, SubmitBlockReport,
+    RpcContextualPeerAddress, RpcDataVerbosityLevel, RpcError, RpcExtraData, RpcHash, RpcIpAddress, RpcNetworkType, RpcPeerAddress,
+    RpcResult, SubmitBlockRejectReason, SubmitBlockReport,
 };
 use kaspa_utils::hex::*;
-use std::str::FromStr;
+use std::{str::FromStr, sync::Arc};
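// ----------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the patch): the slice-based parents
// conversion introduced above means any `&[RpcHash]` converts to protowire and
// back without first cloning into a `Vec`. `new_unique()` stands in for the
// test helper defined in block.rs's tests module; names assume that module's
// imports are in scope.
//
//     let level: Vec<RpcHash> = vec![new_unique(), new_unique()];
//     let proto: protowire::RpcBlockLevelParents = level.as_slice().into();
//     let back: Vec<RpcHash> = (&proto).try_into().unwrap();
//     assert_eq!(level, back);
// ----------------------------------------------------------------------------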
 
 macro_rules! from {
     // Response capture
@@ -264,8 +264,6 @@ from!(item: RpcResult<&kaspa_rpc_core::GetSubnetworkResponse>, protowire::GetSub
     Self { gas_limit: item.gas_limit, error: None }
 });
 
-// ~~~
-
 from!(item: &kaspa_rpc_core::GetVirtualChainFromBlockRequest, protowire::GetVirtualChainFromBlockRequestMessage, {
     Self { start_hash: item.start_hash.to_string(), include_accepted_transaction_ids: item.include_accepted_transaction_ids, min_confirmation_count: item.min_confirmation_count }
 });
@@ -518,6 +516,23 @@ from!(item: RpcResult<&kaspa_rpc_core::GetSyncStatusResponse>, protowire::GetSyn
     }
 });
 
+from!(item: &kaspa_rpc_core::GetVirtualChainFromBlockV2Request, protowire::GetVirtualChainFromBlockV2RequestMessage, {
+    Self {
+        start_hash: item.start_hash.to_string(),
+        data_verbosity_level: item.data_verbosity_level.map(|v| v as i32),
+        min_confirmation_count: item.min_confirmation_count
+    }
+});
+
+from!(item: RpcResult<&kaspa_rpc_core::GetVirtualChainFromBlockV2Response>, protowire::GetVirtualChainFromBlockV2ResponseMessage, {
+    Self {
+        removed_chain_block_hashes: item.removed_chain_block_hashes.iter().map(|x| x.to_string()).collect(),
+        added_chain_block_hashes: item.added_chain_block_hashes.iter().map(|x| x.to_string()).collect(),
+        chain_block_accepted_transactions: item.chain_block_accepted_transactions.iter().map(|x| x.into()).collect(),
+        error: None,
+    }
+});
+
 from!(item: &kaspa_rpc_core::NotifyUtxosChangedRequest, protowire::NotifyUtxosChangedRequestMessage, {
     Self { addresses: item.addresses.iter().map(|x| x.into()).collect(), command: item.command.into() }
 });
@@ -771,6 +786,21 @@ try_from!(item: &protowire::GetVirtualChainFromBlockResponseMessage, RpcResult
 
+try_from!(item: &protowire::GetVirtualChainFromBlockV2ResponseMessage, RpcResult<kaspa_rpc_core::GetVirtualChainFromBlockV2Response>, {
+    Self {
+        removed_chain_block_hashes: Arc::new(item.removed_chain_block_hashes.iter().map(|x| RpcHash::from_str(x)).collect::<Result<Vec<_>, _>>()?),
+        added_chain_block_hashes: Arc::new(item.added_chain_block_hashes.iter().map(|x| RpcHash::from_str(x)).collect::<Result<Vec<_>, _>>()?),
+        chain_block_accepted_transactions: Arc::new(item.chain_block_accepted_transactions.iter().map(|x| x.try_into()).collect::<Result<Vec<_>, _>>()?),
+    }
+});
+
 try_from!(item: &protowire::GetBlocksRequestMessage, kaspa_rpc_core::GetBlocksRequest, {
     Self {
         low_hash: if item.low_hash.is_empty() {
             None
         } else {
             Some(RpcHash::from_str(&item.low_hash)?)
         },
diff --git a/rpc/grpc/core/src/convert/mod.rs b/rpc/grpc/core/src/convert/mod.rs
index d4948f57dc..2b813d087e 100644
--- a/rpc/grpc/core/src/convert/mod.rs
+++ b/rpc/grpc/core/src/convert/mod.rs
@@ -1,3 +1,4 @@
+pub mod acceptance_data;
 pub mod address;
 pub mod block;
 pub mod error;
@@ -8,5 +9,6 @@ pub mod mempool;
 pub mod message;
 pub mod metrics;
 pub mod notification;
+pub mod optional;
 pub mod peer;
 pub mod tx;
diff --git a/rpc/grpc/core/src/convert/notification.rs b/rpc/grpc/core/src/convert/notification.rs
index 2f2273af1c..28b42d4c3d 100644
--- a/rpc/grpc/core/src/convert/notification.rs
+++ b/rpc/grpc/core/src/convert/notification.rs
@@ -33,7 +33,7 @@ from!(item: &kaspa_rpc_core::Notification, Payload, {
         Notification::VirtualDaaScoreChanged(ref notification) => Payload::VirtualDaaScoreChangedNotification(notification.into()),
         Notification::PruningPointUtxoSetOverride(ref notification) => {
             Payload::PruningPointUtxoSetOverrideNotification(notification.into())
-        }
+        },
     }
 });
diff --git a/rpc/grpc/core/src/convert/optional/header.rs b/rpc/grpc/core/src/convert/optional/header.rs
new file mode 100644
index 0000000000..fd3bc5eac1
--- /dev/null
+++ b/rpc/grpc/core/src/convert/optional/header.rs
@@ -0,0 +1,48 @@
+use crate::protowire;
+use crate::{from, try_from};
+use kaspa_rpc_core::{FromRpcHex, RpcError, RpcHash, RpcResult, ToRpcHex};
+use std::str::FromStr;
+
+// ----------------------------------------------------------------------------
+// rpc_core to protowire
+// ----------------------------------------------------------------------------
+
+from!(item: &kaspa_rpc_core::RpcOptionalHeader, protowire::RpcOptionalHeader, {
+    Self {
+        version: item.version.map(|x| x.into()),
+        hash: item.hash.map(|x| x.to_string()),
+        parents_by_level: item.parents_by_level.iter().map(|x| x.as_slice().into()).collect(),
+        hash_merkle_root: item.hash_merkle_root.map(|x| x.to_string()),
+        accepted_id_merkle_root: item.accepted_id_merkle_root.map(|x| x.to_string()),
+        utxo_commitment: item.utxo_commitment.map(|x| x.to_string()),
+        timestamp: item.timestamp.map(|x| x as i64),
+        bits: item.bits,
+        nonce: item.nonce,
+        daa_score: item.daa_score,
+        blue_work: item.blue_work.map(|x| x.to_rpc_hex()),
+        blue_score: item.blue_score,
+        pruning_point: item.pruning_point.map(|x| x.to_string()),
+    }
+});
+
+// ----------------------------------------------------------------------------
+// protowire to rpc_core
+// ----------------------------------------------------------------------------
+
+try_from!(item: &protowire::RpcOptionalHeader, kaspa_rpc_core::RpcOptionalHeader, {
+    Self {
+        version: item.version.map(|x| x as u16),
+        hash: item.hash.as_ref().map(|x| RpcHash::from_str(x)).transpose()?,
+        parents_by_level: item.parents_by_level.iter().map(Vec::<RpcHash>::try_from).collect::<RpcResult<Vec<Vec<RpcHash>>>>()?,
+        hash_merkle_root: item.hash_merkle_root.as_ref().map(|x| RpcHash::from_str(x)).transpose()?,
+        accepted_id_merkle_root: item.accepted_id_merkle_root.as_ref().map(|x| RpcHash::from_str(x)).transpose()?,
+        utxo_commitment: item.utxo_commitment.as_ref().map(|x| RpcHash::from_str(x)).transpose()?,
+        timestamp: item.timestamp.map(|x| x as u64),
+        bits: item.bits,
+        nonce: item.nonce,
+        daa_score: item.daa_score,
+        blue_work: item.blue_work.as_ref().map(|x| kaspa_rpc_core::RpcBlueWorkType::from_rpc_hex(x)).transpose()?,
+        blue_score: item.blue_score,
+        pruning_point: item.pruning_point.as_ref().map(|x| RpcHash::from_str(x)).transpose()?,
+    }
+});
diff --git a/rpc/grpc/core/src/convert/optional/mod.rs
b/rpc/grpc/core/src/convert/optional/mod.rs
new file mode 100644
index 0000000000..fee8dc6442
--- /dev/null
+++ b/rpc/grpc/core/src/convert/optional/mod.rs
@@ -0,0 +1,2 @@
+pub mod header;
+pub mod tx;
diff --git a/rpc/grpc/core/src/convert/optional/tx.rs b/rpc/grpc/core/src/convert/optional/tx.rs
new file mode 100644
index 0000000000..206aad7168
--- /dev/null
+++ b/rpc/grpc/core/src/convert/optional/tx.rs
@@ -0,0 +1,170 @@
+use crate::protowire;
+use crate::{from, try_from};
+use kaspa_rpc_core::{FromRpcHex, RpcAddress, RpcError, RpcResult, RpcScriptClass, RpcSubnetworkId, ToRpcHex};
+use std::str::FromStr;
+
+// ----------------------------------------------------------------------------
+// rpc_core to protowire
+// ----------------------------------------------------------------------------
+
+from!(item: &kaspa_rpc_core::RpcOptionalTransaction, protowire::RpcOptionalTransaction, {
+    Self {
+        version: item.version.map(|x| x.into()),
+        inputs: item.inputs.iter().map(protowire::RpcOptionalTransactionInput::from).collect(),
+        outputs: item.outputs.iter().map(protowire::RpcOptionalTransactionOutput::from).collect(),
+        lock_time: item.lock_time,
+        subnetwork_id: item.subnetwork_id.as_ref().map(|x| x.to_string()),
+        gas: item.gas,
+        payload: item.payload.as_ref().map(|x| x.to_rpc_hex()),
+        mass: item.mass,
+        verbose_data: item.verbose_data.as_ref().map(|x| x.into()),
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionInput, protowire::RpcOptionalTransactionInput, {
+    Self {
+        previous_outpoint: item.previous_outpoint.as_ref().map(|x| x.into()),
+        signature_script: item.signature_script.as_ref().map(|x| x.to_rpc_hex()),
+        sequence: item.sequence,
+        sig_op_count: item.sig_op_count.map(|x| x.into()),
+        verbose_data: item.verbose_data.as_ref().map(|x| x.into()),
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionOutput, protowire::RpcOptionalTransactionOutput, {
+    Self {
+        value: item.value,
+        script_public_key: item.script_public_key.as_ref().map(|x| x.into()),
+        verbose_data: item.verbose_data.as_ref().map(|x| x.into()),
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionOutpoint, protowire::RpcOptionalTransactionOutpoint, {
+    Self {
+        transaction_id: item.transaction_id.map(|x| x.to_string()),
+        index: item.index,
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionVerboseData, protowire::RpcOptionalTransactionVerboseData, {
+    Self {
+        transaction_id: item.transaction_id.map(|v| v.to_string()),
+        hash: item.hash.map(|v| v.to_string()),
+        compute_mass: item.compute_mass,
+        block_hash: item.block_hash.map(|v| v.to_string()),
+        block_time: item.block_time,
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionInputVerboseData, protowire::RpcOptionalTransactionInputVerboseData, {
+    Self {
+        utxo_entry: item.utxo_entry.as_ref().map(|x| x.into()),
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionOutputVerboseData, protowire::RpcOptionalTransactionOutputVerboseData, {
+    Self {
+        script_public_key_type: item.script_public_key_type.as_ref().map(|x| x.to_string()),
+        script_public_key_address: item.script_public_key_address.as_ref().map(|x| x.to_string()),
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalUtxoEntry, protowire::RpcOptionalUtxoEntry, {
+    Self {
+        amount: item.amount,
+        script_public_key: item.script_public_key.as_ref().map(|x| x.into()),
+        block_daa_score: item.block_daa_score,
+        is_coinbase: item.is_coinbase,
+        verbose_data: item.verbose_data.as_ref().map(|x| x.into()),
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalUtxoEntryVerboseData, protowire::RpcOptionalUtxoEntryVerboseData, {
+    Self {
+        script_public_key_type: item.script_public_key_type.as_ref().map(|x| x.to_string()),
+        script_public_key_address: item.script_public_key_address.as_ref().map(|x| x.to_string()),
+    }
+});
+
+// ----------------------------------------------------------------------------
+// protowire to rpc_core
+// ----------------------------------------------------------------------------
+
+try_from!(item: &protowire::RpcOptionalTransaction, kaspa_rpc_core::RpcOptionalTransaction, {
+    Self {
+        version: item.version.map(|x| x as u16),
+        inputs: item.inputs.iter().map(kaspa_rpc_core::RpcOptionalTransactionInput::try_from).collect::<RpcResult<_>>()?,
+        outputs: item.outputs.iter().map(kaspa_rpc_core::RpcOptionalTransactionOutput::try_from).collect::<RpcResult<_>>()?,
+        lock_time: item.lock_time,
+        subnetwork_id: item.subnetwork_id.as_ref().map(|x| RpcSubnetworkId::from_str(x)).transpose()?,
+        gas: item.gas,
+        payload: item.payload.as_ref().map(|x| Vec::from_rpc_hex(x)).transpose()?,
+        mass: item.mass,
+        verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcOptionalTransactionVerboseData::try_from).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcOptionalTransactionInput, kaspa_rpc_core::RpcOptionalTransactionInput, {
+    Self {
+        previous_outpoint: item.previous_outpoint.as_ref().map(kaspa_rpc_core::RpcOptionalTransactionOutpoint::try_from).transpose()?,
+        signature_script: item.signature_script.as_ref().map(|x| Vec::from_rpc_hex(x)).transpose()?,
+        sequence: item.sequence,
+        sig_op_count: item.sig_op_count.map(|x| x as u8),
+        verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcOptionalTransactionInputVerboseData::try_from).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcOptionalTransactionOutput, kaspa_rpc_core::RpcOptionalTransactionOutput, {
+    Self {
+        value: item.value,
+        script_public_key: item.script_public_key.as_ref().map(kaspa_rpc_core::RpcScriptPublicKey::try_from).transpose()?,
+        verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcOptionalTransactionOutputVerboseData::try_from).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcOptionalTransactionOutpoint, kaspa_rpc_core::RpcOptionalTransactionOutpoint, {
+    Self {
+        transaction_id: item.transaction_id.as_ref().map(|x| kaspa_rpc_core::RpcHash::from_str(x)).transpose()?,
+        index: item.index,
+    }
+});
+
+try_from!(item: &protowire::RpcOptionalTransactionVerboseData, kaspa_rpc_core::RpcOptionalTransactionVerboseData, {
+    Self {
+        transaction_id: item.transaction_id.as_ref().map(|x| kaspa_rpc_core::RpcHash::from_str(x)).transpose()?,
+        hash: item.hash.as_ref().map(|x| kaspa_rpc_core::RpcHash::from_str(x)).transpose()?,
+        compute_mass: item.compute_mass,
+        block_hash: item.block_hash.as_ref().map(|x| kaspa_rpc_core::RpcHash::from_str(x)).transpose()?,
+        block_time: item.block_time,
+    }
+});
+
+try_from!(item: &protowire::RpcOptionalTransactionInputVerboseData, kaspa_rpc_core::RpcOptionalTransactionInputVerboseData, {
+    Self {
+        utxo_entry: item.utxo_entry.as_ref().map(kaspa_rpc_core::RpcOptionalUtxoEntry::try_from).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcOptionalTransactionOutputVerboseData, kaspa_rpc_core::RpcOptionalTransactionOutputVerboseData, {
+    Self {
+        script_public_key_type: item.script_public_key_type.as_ref().map(|x| RpcScriptClass::from_str(x)).transpose()?,
+        script_public_key_address: item.script_public_key_address.as_ref().map(|x| RpcAddress::try_from(x.as_str())).transpose()?,
+    }
+});
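// ----------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the patch): with the optional
// conversions above, absent fields survive a protowire round trip as `None`.
// Field names follow the conversions above; treat this as an illustration
// under those assumptions, not as test code from the patch.
//
//     let outpoint = kaspa_rpc_core::RpcOptionalTransactionOutpoint { transaction_id: None, index: Some(0) };
//     let proto: protowire::RpcOptionalTransactionOutpoint = (&outpoint).into();
//     let back = kaspa_rpc_core::RpcOptionalTransactionOutpoint::try_from(&proto).unwrap();
//     assert!(back.transaction_id.is_none());
//     assert_eq!(back.index, Some(0));
// ----------------------------------------------------------------------------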
+
+try_from!(item: &protowire::RpcOptionalUtxoEntry, kaspa_rpc_core::RpcOptionalUtxoEntry, {
+    Self {
+        amount: item.amount,
+        script_public_key: item.script_public_key.as_ref().map(|x| x.try_into()).transpose()?,
+        block_daa_score: item.block_daa_score,
+        is_coinbase: item.is_coinbase,
+        verbose_data: item.verbose_data.as_ref().map(|x| x.try_into()).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcOptionalUtxoEntryVerboseData, kaspa_rpc_core::RpcOptionalUtxoEntryVerboseData, {
+    Self {
+        script_public_key_type: item.script_public_key_type.as_ref().map(|x| RpcScriptClass::from_str(x)).transpose()?,
+        script_public_key_address: item.script_public_key_address.as_ref().map(|x| RpcAddress::try_from(x.as_str())).transpose()?,
+    }
+});
diff --git a/rpc/grpc/core/src/convert/tx.rs b/rpc/grpc/core/src/convert/tx.rs
index 7a75a0255e..dc4a1de9cf 100644
--- a/rpc/grpc/core/src/convert/tx.rs
+++ b/rpc/grpc/core/src/convert/tx.rs
@@ -1,6 +1,6 @@
-use crate::protowire;
+use crate::protowire::{self};
 use crate::{from, try_from};
-use kaspa_rpc_core::{FromRpcHex, RpcError, RpcHash, RpcResult, RpcScriptVec, ToRpcHex};
+use kaspa_rpc_core::{FromRpcHex, RpcAddress, RpcError, RpcHash, RpcResult, RpcScriptClass, RpcScriptVec, ToRpcHex};
 use std::str::FromStr;
 
 // ----------------------------------------------------------------------------
@@ -21,6 +21,20 @@ from!(item: &kaspa_rpc_core::RpcTransaction, protowire::RpcTransaction, {
     }
 });
 
+from!(item: &kaspa_rpc_core::RpcOptionalTransaction, protowire::RpcTransaction, {
+    Self {
+        version: item.version.unwrap_or_default().into(),
+        inputs: item.inputs.iter().map(protowire::RpcTransactionInput::from).collect(),
+        outputs: item.outputs.iter().map(protowire::RpcTransactionOutput::from).collect(),
+        lock_time: item.lock_time.unwrap_or_default(),
+        subnetwork_id: item.subnetwork_id.as_ref().map(|x| x.to_string()).unwrap_or_default(),
+        gas: item.gas.unwrap_or_default(),
+        payload: item.payload.as_ref().map(|x| x.to_rpc_hex()).unwrap_or_default(),
+        mass: item.mass.unwrap_or_default(),
+        verbose_data: item.verbose_data.as_ref().map(|x| x.into()),
+    }
+});
+
 from!(item: &kaspa_rpc_core::RpcTransactionInput, protowire::RpcTransactionInput, {
     Self {
         previous_outpoint: Some((&item.previous_outpoint).into()),
@@ -31,6 +45,16 @@ from!(item: &kaspa_rpc_core::RpcTransactionInput, protowire::RpcTransactionInput
     }
 });
 
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionInput, protowire::RpcTransactionInput, {
+    Self {
+        previous_outpoint: item.previous_outpoint.as_ref().map(protowire::RpcOutpoint::from),
+        signature_script: item.signature_script.as_ref().map(|x| x.to_rpc_hex()).unwrap_or_default(),
+        sequence: item.sequence.unwrap_or_default(),
+        sig_op_count: item.sig_op_count.map(|x| x.into()).unwrap_or_default(),
+        verbose_data: item.verbose_data.as_ref().map(protowire::RpcTransactionInputVerboseData::from),
+    }
+});
+
 from!(item: &kaspa_rpc_core::RpcTransactionOutput, protowire::RpcTransactionOutput, {
     Self {
         amount: item.value,
@@ -39,16 +63,53 @@ from!(item: &kaspa_rpc_core::RpcTransactionOutput, protowire::RpcTransactionOutp
     }
 });
 
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionOutput, protowire::RpcTransactionOutput, {
+    Self {
+        amount: item.value.unwrap_or_default(),
+        script_public_key: item.script_public_key.as_ref().map(|x| x.into()),
+        verbose_data: item.verbose_data.as_ref().map(|x| x.into()),
+    }
+});
+
 from!(item: &kaspa_rpc_core::RpcTransactionOutpoint, protowire::RpcOutpoint, { Self { transaction_id: item.transaction_id.to_string(), index: item.index } });
 
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionOutpoint, protowire::RpcOutpoint, {
+    Self { transaction_id: item.transaction_id.as_ref().map(|x| x.to_string()).unwrap_or_default(), index: item.index.unwrap_or_default() }
+});
+
 from!(item: &kaspa_rpc_core::RpcUtxoEntry, protowire::RpcUtxoEntry, {
     Self {
         amount: item.amount,
         script_public_key: Some((&item.script_public_key).into()),
         block_daa_score: item.block_daa_score,
         is_coinbase: item.is_coinbase,
+        verbose_data: None,
     }
 });
 
+from!(item: &kaspa_rpc_core::RpcOptionalUtxoEntry, protowire::RpcUtxoEntry, {
+    Self {
+        amount: item.amount.unwrap_or_default(),
+        script_public_key: item.script_public_key.as_ref().map(|x| x.into()),
+        block_daa_score: item.block_daa_score.unwrap_or_default(),
+        is_coinbase: item.is_coinbase.unwrap_or_default(),
+        verbose_data: item.verbose_data.as_ref().map(|x| x.into()),
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcOptionalUtxoEntryVerboseData, protowire::RpcUtxoEntryVerboseData, {
+    Self {
+        script_public_key_type: item.script_public_key_type.as_ref().map(|x| x.to_string()).unwrap_or_default(),
+        script_public_key_address: item.script_public_key_address.as_ref().map(|x| x.to_string()).unwrap_or_default(),
+    }
+});
+
+from!(item: &kaspa_rpc_core::RpcChainBlockAcceptedTransactions, protowire::RpcChainBlockAcceptedTransactions, {
+    Self {
+        chain_block_header: Some(protowire::RpcOptionalHeader::from(&item.chain_block_header)),
+        accepted_transactions: item.accepted_transactions.iter().map(protowire::RpcOptionalTransaction::from).collect(),
+    }
+});
+
@@ -56,6 +117,16 @@ from!(item: &kaspa_rpc_core::RpcScriptPublicKey, protowire::RpcScriptPublicKey,
     Self { version: item.version().into(), script_public_key: item.script().to_rpc_hex() }
 });
 
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionVerboseData, protowire::RpcTransactionVerboseData, {
+    Self {
+        transaction_id: item.transaction_id.map(|v| v.to_string()).unwrap_or_default(),
+        hash: item.hash.map(|v| v.to_string()).unwrap_or_default(),
+        compute_mass: item.compute_mass.unwrap_or_default(),
+        block_hash: item.block_hash.map(|v| v.to_string()).unwrap_or_default(),
+        block_time: item.block_time.unwrap_or_default(),
+    }
+});
+
 from!(item: &kaspa_rpc_core::RpcTransactionVerboseData, protowire::RpcTransactionVerboseData, {
     Self {
         transaction_id: item.transaction_id.to_string(),
@@ -66,7 +137,17 @@ from!(item: &kaspa_rpc_core::RpcTransactionVerboseData, protowire::RpcTransactio
     }
 });
 
-from!(&kaspa_rpc_core::RpcTransactionInputVerboseData, protowire::RpcTransactionInputVerboseData);
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionInputVerboseData, protowire::RpcTransactionInputVerboseData, {
+    Self {
+        utxo_entry: item.utxo_entry.as_ref().map(|x| x.into()),
+    }
+});
+
+from!(_item: &kaspa_rpc_core::RpcTransactionInputVerboseData, protowire::RpcTransactionInputVerboseData, {
+    Self {
+        utxo_entry: None,
+    }
+});
 
 from!(item: &kaspa_rpc_core::RpcTransactionOutputVerboseData, protowire::RpcTransactionOutputVerboseData, {
     Self {
@@ -75,6 +156,13 @@ from!(item: &kaspa_rpc_core::RpcTransactionOutputVerboseData, protowire::RpcTran
     }
 });
 
+from!(item: &kaspa_rpc_core::RpcOptionalTransactionOutputVerboseData, protowire::RpcTransactionOutputVerboseData, {
+    Self {
+        script_public_key_type: item.script_public_key_type.as_ref().map(|x| x.to_string()).unwrap_or_default(),
+        script_public_key_address: item.script_public_key_address.as_ref().map(|x| x.to_string()).unwrap_or_default(),
+    }
+});
+
 from!(item: &kaspa_rpc_core::RpcAcceptedTransactionIds, protowire::RpcAcceptedTransactionIds, {
     Self {
         accepting_block_hash: item.accepting_block_hash.to_string(),
@@ -116,6 +204,44 @@ try_from!(item: &protowire::RpcTransaction, kaspa_rpc_core::RpcTransaction, {
     }
 });
 
+try_from!(item: &protowire::RpcTransaction, kaspa_rpc_core::RpcOptionalTransaction, {
+    Self {
+        version: Some(item.version.try_into()?),
+        inputs: item
+            .inputs
+            .iter()
+            .map(kaspa_rpc_core::RpcOptionalTransactionInput::try_from)
+            .collect::<RpcResult<Vec<_>>>()?,
+        outputs: item
+            .outputs
+            .iter()
+            .map(kaspa_rpc_core::RpcOptionalTransactionOutput::try_from)
+            .collect::<RpcResult<Vec<_>>>()?,
+        lock_time: Some(item.lock_time),
+        subnetwork_id: Some(kaspa_rpc_core::RpcSubnetworkId::from_str(&item.subnetwork_id)?),
+        gas: Some(item.gas),
+        payload: Some(Vec::from_rpc_hex(&item.payload)?),
+        mass: Some(item.mass),
+        verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcOptionalTransactionVerboseData::try_from).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcTransactionInput, kaspa_rpc_core::RpcOptionalTransactionInput, {
+    Self {
+        previous_outpoint: item
+            .previous_outpoint
+            .as_ref()
+            .map(kaspa_rpc_core::RpcOptionalTransactionOutpoint::try_from)
+            .transpose()?,
+        signature_script: Some(Vec::from_rpc_hex(&item.signature_script)?),
+        sequence: Some(item.sequence),
+        sig_op_count: Some(item.sig_op_count.try_into()?),
+        verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcOptionalTransactionInputVerboseData::try_from).transpose()?,
+    }
+});
+
 try_from!(item: &protowire::RpcTransactionInput, kaspa_rpc_core::RpcTransactionInput, {
     Self {
         previous_outpoint: item
@@ -142,6 +268,25 @@ try_from!(item: &protowire::RpcTransactionOutput, kaspa_rpc_core::RpcTransaction
     }
 });
 
+try_from!(item: &protowire::RpcTransactionOutput, kaspa_rpc_core::RpcOptionalTransactionOutput, {
+    Self {
+        value: Some(item.amount),
+        script_public_key: item
+            .script_public_key
+            .as_ref()
+            .map(kaspa_rpc_core::RpcScriptPublicKey::try_from)
+            .transpose()?,
+        verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcOptionalTransactionOutputVerboseData::try_from).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcOutpoint, kaspa_rpc_core::RpcOptionalTransactionOutpoint, {
+    Self {
+        transaction_id: Some(RpcHash::from_str(&item.transaction_id)?),
+        index: Some(item.index),
+    }
+});
+
 try_from!(item: &protowire::RpcOutpoint, kaspa_rpc_core::RpcTransactionOutpoint, {
     Self { transaction_id: RpcHash::from_str(&item.transaction_id)?, index: item.index }
 });
@@ -159,6 +304,27 @@ try_from!(item: &protowire::RpcUtxoEntry, kaspa_rpc_core::RpcUtxoEntry, {
     }
 });
 
+try_from!(item: &protowire::RpcUtxoEntry, kaspa_rpc_core::RpcOptionalUtxoEntry, {
+    Self {
+        amount: Some(item.amount),
+        script_public_key: item
+            .script_public_key
+            .as_ref()
+            .map(|x| x.try_into())
+            .transpose()?,
+        block_daa_score: Some(item.block_daa_score),
+        is_coinbase: Some(item.is_coinbase),
+        verbose_data: item.verbose_data.as_ref().map(kaspa_rpc_core::RpcOptionalUtxoEntryVerboseData::try_from).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcUtxoEntryVerboseData, kaspa_rpc_core::RpcOptionalUtxoEntryVerboseData, {
+    Self {
+        script_public_key_type: Some(RpcScriptClass::from_str(&item.script_public_key_type)?),
+        script_public_key_address: Some(RpcAddress::try_from(item.script_public_key_address.as_ref())?),
+    }
+});
+
 try_from!(item: &protowire::RpcScriptPublicKey, kaspa_rpc_core::RpcScriptPublicKey, {
     Self::new(u16::try_from(item.version)?, RpcScriptVec::from_rpc_hex(item.script_public_key.as_str())?)
 });
 
@@ -173,8 +339,35 @@ try_from!(item: &protowire::RpcTransactionVerboseData, kaspa_rpc_core::RpcTransa
     }
 });
 
+try_from!(item: &protowire::RpcTransactionVerboseData, kaspa_rpc_core::RpcOptionalTransactionVerboseData, {
+    Self {
+        transaction_id: Some(RpcHash::from_str(item.transaction_id.as_ref())?),
+        hash: Some(RpcHash::from_str(item.hash.as_ref())?),
+        compute_mass: Some(item.compute_mass),
+        block_hash: if item.block_hash.is_empty() {
+            None
+        } else {
+            Some(RpcHash::from_str(item.block_hash.as_ref())?)
+        },
+        block_time: Some(item.block_time),
+    }
+});
+
 try_from!(&protowire::RpcTransactionInputVerboseData, kaspa_rpc_core::RpcTransactionInputVerboseData);
 
+try_from!(item: &protowire::RpcTransactionInputVerboseData, kaspa_rpc_core::RpcOptionalTransactionInputVerboseData, {
+    Self {
+        utxo_entry: item.utxo_entry.as_ref().map(kaspa_rpc_core::RpcOptionalUtxoEntry::try_from).transpose()?,
+    }
+});
+
+try_from!(item: &protowire::RpcTransactionOutputVerboseData, kaspa_rpc_core::RpcOptionalTransactionOutputVerboseData, {
+    Self {
+        script_public_key_type: Some(RpcScriptClass::from_str(item.script_public_key_type.as_ref())?),
+        script_public_key_address: Some(RpcAddress::try_from(item.script_public_key_address.as_ref())?),
+    }
+});
+
 try_from!(item: &protowire::RpcTransactionOutputVerboseData, kaspa_rpc_core::RpcTransactionOutputVerboseData, {
     Self {
         script_public_key_type: item.script_public_key_type.as_str().try_into()?,
@@ -189,6 +382,18 @@ try_from!(item: &protowire::RpcAcceptedTransactionIds, kaspa_rpc_core::RpcAccept
     }
 });
 
+try_from!(item: &protowire::RpcChainBlockAcceptedTransactions, kaspa_rpc_core::RpcChainBlockAcceptedTransactions, {
+    Self {
+        chain_block_header: item
+            .chain_block_header
+            .as_ref()
+            .map(kaspa_rpc_core::RpcOptionalHeader::try_from)
+            .transpose()?
+            .ok_or_else(|| RpcError::MissingRpcFieldError("RpcChainBlockAcceptedTransactions".to_string(), "chain_block_header".to_string()))?,
+        accepted_transactions: item.accepted_transactions.iter().map(kaspa_rpc_core::RpcOptionalTransaction::try_from).collect::<RpcResult<_>>()?,
+    }
+});
+
 try_from!(item: &protowire::RpcUtxosByAddressesEntry, kaspa_rpc_core::RpcUtxosByAddressesEntry, {
     let address = if item.address.is_empty() {
         None
     } else {
         Some(item.address.as_str().try_into()?)
     };
     Self {
diff --git a/rpc/grpc/core/src/ops.rs b/rpc/grpc/core/src/ops.rs
index 223774c74c..923d9511a1 100644
--- a/rpc/grpc/core/src/ops.rs
+++ b/rpc/grpc/core/src/ops.rs
@@ -88,6 +88,7 @@ pub enum KaspadPayloadOps {
     GetFeeEstimateExperimental,
     GetCurrentBlockColor,
     GetUtxoReturnAddress,
+    GetVirtualChainFromBlockV2,
 
     // Subscription commands for starting/stopping notifications
     NotifyBlockAdded,
diff --git a/wallet/native/Cargo.toml b/rpc/grpc/examples/simple_client/Cargo.toml
similarity index 53%
rename from wallet/native/Cargo.toml
rename to rpc/grpc/examples/simple_client/Cargo.toml
index 36ca53d9d4..fbddde266f 100644
--- a/wallet/native/Cargo.toml
+++ b/rpc/grpc/examples/simple_client/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
-name = "kaspa-wallet"
-description = "Kaspa wallet"
+name = "kaspa-grpc-simple-client-example"
+description = "Kaspa GRPC simple client example"
+publish = false
 rust-version.workspace = true
 version.workspace = true
 edition.workspace = true
@@ -9,16 +10,12 @@ include.workspace = true
 license.workspace = true
 repository.workspace = true
 
-[features]
-default = []
-
 [dependencies]
-async-std.workspace = true
-async-trait.workspace = true
-kaspa-cli.workspace = true
+futures.workspace = true
+kaspa-rpc-core.workspace = true
+kaspa-grpc-client.workspace = true
 tokio.workspace = true
-workflow-log.workspace = true
-workflow-terminal.workspace = true
+
 
 [lints]
 workspace = true
diff --git a/rpc/grpc/examples/simple_client/src/main.rs b/rpc/grpc/examples/simple_client/src/main.rs
new file mode 100644
index 0000000000..1a59664990
--- /dev/null
+++ b/rpc/grpc/examples/simple_client/src/main.rs
@@ -0,0 +1,74 @@
+// Example of a simple gRPC client that connects to a Kaspa node and collects some basic node and network data
+
+use kaspa_grpc_client::GrpcClient;
+use kaspa_rpc_core::notify::mode::NotificationMode;
+use kaspa_rpc_core::RpcResult;
+use kaspa_rpc_core::{api::rpc::RpcApi, GetBlockDagInfoResponse, GetServerInfoResponse};
+use std::process::ExitCode;
+
+#[tokio::main]
+async fn main() -> ExitCode {
+    match check_node_status().await {
+        Ok(_) => {
+            println!("Well done! You successfully completed your first client connection to a Kaspa node!");
+            ExitCode::SUCCESS
+        }
+        Err(error) => {
+            println!("An error occurred: {error}");
+            ExitCode::FAILURE
+        }
+    }
+}
+
+async fn check_node_status() -> RpcResult<()> {
+    let url = "grpc://localhost:16110".to_string();
+
+    let client =
+        GrpcClient::connect_with_args(NotificationMode::Direct, url, None, false, None, false, Some(500_000), Default::default())
+            .await
+            .unwrap();
+
+    // Retrieve and show Kaspa node information
+    let GetServerInfoResponse { is_synced, server_version, network_id, has_utxo_index, .. } = client.get_server_info().await?;
+
+    println!("Node version: {server_version}");
+    println!("Network: {network_id}");
+    println!("Node is synced: {is_synced}");
+    println!("Node is indexing UTXOs: {has_utxo_index}");
+
+    // Retrieve and show Kaspa network information
+    let GetBlockDagInfoResponse {
+        block_count,
+        header_count,
+        tip_hashes,
+        difficulty,
+        past_median_time,
+        virtual_parent_hashes,
+        pruning_point_hash,
+        virtual_daa_score,
+        sink,
+        ..
+    } = client.get_block_dag_info().await?;
+
+    println!("Block count: {block_count}");
+    println!("Header count: {header_count}");
+    println!("Tip hashes:");
+    for tip_hash in tip_hashes {
+        println!("{tip_hash}");
+    }
+    println!("Difficulty: {difficulty}");
+    println!("Past median time: {past_median_time}");
+    println!("Virtual parent hashes:");
+    for virtual_parent_hash in virtual_parent_hashes.clone() {
+        println!("{virtual_parent_hash}");
+    }
+    println!("Pruning point hash: {pruning_point_hash}");
+    println!("Virtual DAA score: {virtual_daa_score}");
+    println!("Sink: {sink}");
+
+    // Disconnect client from Kaspa node
+    client.disconnect().await?;
+
+    // Return function result
+    Ok(())
+}
diff --git a/rpc/grpc/server/src/request_handler/factory.rs b/rpc/grpc/server/src/request_handler/factory.rs
index 9fec86e476..002b6bc603 100644
--- a/rpc/grpc/server/src/request_handler/factory.rs
+++ b/rpc/grpc/server/src/request_handler/factory.rs
@@ -82,6 +82,7 @@ impl Factory {
             GetFeeEstimateExperimental,
             GetCurrentBlockColor,
             GetUtxoReturnAddress,
+            GetVirtualChainFromBlockV2,
             NotifyBlockAdded,
             NotifyNewBlockTemplate,
             NotifyFinalityConflict,
diff --git a/rpc/grpc/server/src/request_handler/method.rs b/rpc/grpc/server/src/request_handler/method.rs
index 7dc94bbe2c..6a7d77550c 100644
--- a/rpc/grpc/server/src/request_handler/method.rs
+++ b/rpc/grpc/server/src/request_handler/method.rs
@@ -33,7 +33,7 @@ pub type MethodFn<ServerContext, ConnectionContext, Request, Response> = Arc<Box<dyn Send + Sync + Fn(ServerContext, ConnectionContext, Request) -> MethodFnReturn<Response> + 'static>>;
 
 /// RPC method function return type
-pub type MethodFnReturn<T> = Pin<Box<(dyn Future<Output = GrpcServerResult<T>>)>>;
+pub type MethodFnReturn<T> = Pin<Box<dyn Future<Output = GrpcServerResult<T>>>>;
 
 /// RPC drop function type
 pub type DropFn = Arc<Box<dyn Send + Sync + Fn() -> GrpcServerResult<()>>>;
diff --git a/rpc/grpc/server/src/service.rs b/rpc/grpc/server/src/service.rs
index 0dd4bb8972..ac8ec02ee0 100644
--- a/rpc/grpc/server/src/service.rs
+++ b/rpc/grpc/server/src/service.rs
@@ -64,7 +64,7 @@ impl AsyncService for GrpcService {
         let manager = Manager::new(self.rpc_max_clients);
         let grpc_adaptor = Adaptor::server(
             self.net_address,
-            self.config.bps().upper_bound(),
+            self.config.bps().after(),
             manager,
             self.core_service.clone(),
             self.core_service.notifier(),
diff --git a/rpc/grpc/server/src/tests/rpc_core_mock.rs b/rpc/grpc/server/src/tests/rpc_core_mock.rs
index ada801850e..27d443e477 100644
--- a/rpc/grpc/server/src/tests/rpc_core_mock.rs
+++ b/rpc/grpc/server/src/tests/rpc_core_mock.rs
@@ -370,6 +370,14 @@ impl RpcApi for RpcCoreMock {
         Err(RpcError::NotImplemented)
     }
 
+    async fn get_virtual_chain_from_block_v2_call(
+        &self,
+        _connection: Option<&DynRpcConnection>,
+        _request: GetVirtualChainFromBlockV2Request,
+    ) -> RpcResult<GetVirtualChainFromBlockV2Response> {
+        Err(RpcError::NotImplemented)
+    }
+
     // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     // Notification API
 
diff --git a/rpc/macros/src/handler.rs b/rpc/macros/src/handler.rs
index 165c4e0ebc..98522a6dea 100644
--- a/rpc/macros/src/handler.rs
+++ b/rpc/macros/src/handler.rs
@@ -36,9 +36,13 @@ impl Handler {
             _ => (handler.to_token_stream().to_string(), vec![]),
         };
         //let name = handler.to_token_stream().to_string();
-        let fn_call = Ident::new(&format!("{}_call", name.to_case(Case::Snake)), Span::call_site());
-        let fn_with_suffix = fn_suffix.map(|suffix| Ident::new(&format!("{}_{suffix}", name.to_case(Case::Snake)), Span::call_site()));
-        let fn_no_suffix = Ident::new(&name.to_case(Case::Snake), Span::call_site());
+
+        // replace _v_2 with _v2
+        let snake_case_name = name.to_case(Case::Snake).replacen("_v_", "_v", 1);
+
+        let fn_call = Ident::new(&format!("{}_call", snake_case_name.clone()), Span::call_site());
+        let fn_with_suffix = fn_suffix.map(|suffix| Ident::new(&format!("{}_{suffix}", snake_case_name.clone()), Span::call_site()));
+        let fn_no_suffix = Ident::new(&snake_case_name, Span::call_site());
         let fn_camel = Ident::new(&name.to_case(Case::Camel), Span::call_site());
         let request_type = Ident::new(&format!("{name}Request"), Span::call_site());
         let response_type = Ident::new(&format!("{name}Response"), Span::call_site());
diff --git a/rpc/service/src/converter/consensus.rs b/rpc/service/src/converter/consensus.rs
index 5080280476..1e998c7026 100644
--- a/rpc/service/src/converter/consensus.rs
+++ b/rpc/service/src/converter/consensus.rs
@@ -1,26 +1,36 @@
 use async_trait::async_trait;
-use kaspa_addresses::Address;
+use kaspa_addresses::{Address, AddressError};
 use kaspa_consensus_core::{
+    acceptance_data::{AcceptanceData, MergesetBlockAcceptanceData},
     block::Block,
     config::Config,
     hashing::tx::hash,
     header::Header,
-    tx::{MutableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput},
+    tx::{
+        MutableTransaction, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput,
+        TransactionQueryResult, TransactionType, UtxoEntry,
+    },
     ChainPath,
 };
 use kaspa_consensus_notify::notification::{self as consensus_notify, Notification as ConsensusNotification};
 use kaspa_consensusmanager::{ConsensusManager, ConsensusProxy};
+use kaspa_hashes::Hash;
 use kaspa_math::Uint256;
 use kaspa_mining::model::{owner_txs::OwnerTransactions, TransactionIdSet};
 use kaspa_notify::converter::Converter;
 use kaspa_rpc_core::{
-    BlockAddedNotification, Notification, RpcAcceptedTransactionIds, RpcBlock, RpcBlockVerboseData, RpcHash, RpcMempoolEntry,
-    RpcMempoolEntryByAddress, RpcResult, RpcTransaction, RpcTransactionInput, RpcTransactionOutput, RpcTransactionOutputVerboseData,
-    RpcTransactionVerboseData,
+    BlockAddedNotification, Notification, RpcAcceptanceDataVerbosity, RpcAcceptedTransactionIds, RpcBlock, RpcBlockVerboseData,
+    RpcChainBlockAcceptedTransactions, RpcError, RpcHash, RpcHeaderVerbosity, RpcMempoolEntry, RpcMempoolEntryByAddress,
+    RpcMergesetBlockAcceptanceDataVerbosity, RpcOptionalHeader, RpcOptionalTransaction, RpcOptionalTransactionInput,
+    RpcOptionalTransactionInputVerboseData, RpcOptionalTransactionOutput, RpcOptionalTransactionOutputVerboseData,
+    RpcOptionalTransactionVerboseData, RpcOptionalUtxoEntry, RpcOptionalUtxoEntryVerboseData, RpcResult, RpcTransaction,
+    RpcTransactionInput, RpcTransactionInputVerboseDataVerbosity, RpcTransactionInputVerbosity, RpcTransactionOutput,
+    RpcTransactionOutputVerboseData, RpcTransactionOutputVerboseDataVerbosity, RpcTransactionOutputVerbosity,
+    RpcTransactionVerboseData, RpcTransactionVerboseDataVerbosity, RpcTransactionVerbosity, RpcUtxoEntryVerboseDataVerbosity,
+    RpcUtxoEntryVerbosity,
 };
 use kaspa_txscript::{extract_script_pub_key_address, script_class::ScriptClass};
 use std::{collections::HashMap, fmt::Debug, sync::Arc};
-
 /// Conversion of consensus_core to rpc_core structures
 pub struct ConsensusConverter {
     consensus_manager: Arc<ConsensusManager>,
@@ -124,7 +134,7 @@ impl ConsensusConverter {
         if include_verbose_data {
             let verbose_data = Some(RpcTransactionVerboseData {
                 transaction_id: transaction.id(),
-                hash: hash(transaction, false),
+                hash: hash(transaction),
                 compute_mass: consensus.calculate_transaction_non_contextual_masses(transaction).compute_mass,
                 // TODO: make block_hash an option
                 block_hash: header.map_or_else(RpcHash::default, |x| x.hash),
@@ -178,6 +188,477 @@ impl ConsensusConverter {
             })
             .collect())
     }
+
+    fn adapt_header_to_header_with_verbosity(
+        &self,
+        verbosity: &RpcHeaderVerbosity,
+        header: &Arc<Header>,
+    ) -> RpcResult<RpcOptionalHeader> {
+        Ok(RpcOptionalHeader {
+            hash: if verbosity.include_hash.unwrap_or(false) { Some(header.hash) } else { Default::default() },
+            version: if verbosity.include_version.unwrap_or(false) { Some(header.version) } else { Default::default() },
+            parents_by_level: if verbosity.include_parents_by_level.unwrap_or(false) {
+                header.parents_by_level.to_owned().into()
+            } else {
+                Default::default()
+            },
+            hash_merkle_root: if verbosity.include_hash_merkle_root.unwrap_or(false) {
+                Some(header.hash_merkle_root)
+            } else {
+                Default::default()
+            },
+            accepted_id_merkle_root: if verbosity.include_accepted_id_merkle_root.unwrap_or(false) {
+                Some(header.accepted_id_merkle_root)
+            } else {
+                Default::default()
+            },
+            utxo_commitment: if verbosity.include_utxo_commitment.unwrap_or(false) {
+                Some(header.utxo_commitment)
+            } else {
+                Default::default()
+            },
+            timestamp: if verbosity.include_timestamp.unwrap_or(false) { Some(header.timestamp) } else { Default::default() },
+            bits: if verbosity.include_bits.unwrap_or(false) { Some(header.bits) } else { Default::default() },
+            nonce: if verbosity.include_nonce.unwrap_or(false) { Some(header.nonce) } else { Default::default() },
+            daa_score: if verbosity.include_daa_score.unwrap_or(false) { Some(header.daa_score) } else { Default::default() },
+            blue_work: if verbosity.include_blue_work.unwrap_or(false) { Some(header.blue_work) } else { Default::default() },
+            blue_score: if verbosity.include_blue_score.unwrap_or(false) { Some(header.blue_score) } else { Default::default() },
+            pruning_point: if verbosity.include_pruning_point.unwrap_or(false) {
+                Some(header.pruning_point)
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    fn convert_utxo_entry_with_verbosity(
+        &self,
+        utxo: UtxoEntry,
+        verbosity: &RpcUtxoEntryVerbosity,
+    ) -> RpcResult<RpcOptionalUtxoEntry> {
+        Ok(RpcOptionalUtxoEntry {
+            amount: if verbosity.include_amount.unwrap_or(false) { Some(utxo.amount) } else { Default::default() },
+            script_public_key: if verbosity.include_script_public_key.unwrap_or(false) {
+                Some(utxo.script_public_key.clone())
+            } else {
+                Default::default()
+            },
+            block_daa_score: if verbosity.include_block_daa_score.unwrap_or(false) {
+                Some(utxo.block_daa_score)
+            } else {
+                Default::default()
+            },
+            is_coinbase: if verbosity.include_is_coinbase.unwrap_or(false) { Some(utxo.is_coinbase) } else { Default::default() },
+            verbose_data: if let Some(utxo_entry_verbosity) = verbosity.verbose_data_verbosity.as_ref() {
+                Some(self.get_utxo_verbose_data_with_verbosity(&utxo, utxo_entry_verbosity)?)
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    fn get_utxo_verbose_data_with_verbosity(
+        &self,
+        utxo: &UtxoEntry,
+        verbosity: &RpcUtxoEntryVerboseDataVerbosity,
+    ) -> RpcResult<RpcOptionalUtxoEntryVerboseData> {
+        Ok(RpcOptionalUtxoEntryVerboseData {
+            script_public_key_type: if verbosity.include_script_public_key_type.unwrap_or(false) {
+                Some(ScriptClass::from_script(&utxo.script_public_key))
+            } else {
+                Default::default()
+            },
+            script_public_key_address: if verbosity.include_script_public_key_address.unwrap_or(false) {
+                Some(
+                    extract_script_pub_key_address(&utxo.script_public_key, self.config.prefix())
+                        .map_err(|_| AddressError::InvalidAddress)?,
+                )
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    fn get_input_verbose_data_with_verbosity(
+        &self,
+        utxo: Option<UtxoEntry>,
+        verbosity: &RpcTransactionInputVerboseDataVerbosity,
+    ) -> RpcResult<RpcOptionalTransactionInputVerboseData> {
+        Ok(RpcOptionalTransactionInputVerboseData {
+            utxo_entry: if let Some(utxo_entry_verbosity) = verbosity.utxo_entry_verbosity.as_ref() {
+                if let Some(utxo) = utxo {
+                    Some(self.convert_utxo_entry_with_verbosity(utxo, utxo_entry_verbosity)?)
+                } else {
+                    return Err(RpcError::ConsensusConverterNotFound("UtxoEntry".to_string()));
+                }
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    fn get_transaction_verbose_data_with_verbosity(
+        &self,
+        transaction: &Transaction,
+        block_hash: Hash,
+        block_time: u64,
+        compute_mass: u64,
+        verbosity: &RpcTransactionVerboseDataVerbosity,
+    ) -> RpcResult<RpcOptionalTransactionVerboseData> {
+        Ok(RpcOptionalTransactionVerboseData {
+            transaction_id: if verbosity.include_transaction_id.unwrap_or(false) {
+                Some(transaction.id())
+            } else {
+                Default::default()
+            },
+            hash: if verbosity.include_hash.unwrap_or(false) { Some(hash(transaction)) } else { Default::default() },
+            compute_mass: if verbosity.include_compute_mass.unwrap_or(false) { Some(compute_mass) } else { Default::default() },
+            block_hash: if verbosity.include_block_hash.unwrap_or(false) { Some(block_hash) } else { Default::default() },
+            block_time: if verbosity.include_block_time.unwrap_or(false) { Some(block_time) } else { Default::default() },
+        })
+    }
+
+    fn get_transaction_output_verbose_data_with_verbosity(
+        &self,
+        output: &TransactionOutput,
+        verbosity: &RpcTransactionOutputVerboseDataVerbosity,
+    ) -> RpcResult<RpcOptionalTransactionOutputVerboseData> {
+        Ok(RpcOptionalTransactionOutputVerboseData {
+            script_public_key_type: if verbosity.include_script_public_key_type.unwrap_or(false) {
+                Some(ScriptClass::from_script(&output.script_public_key))
+            } else {
+                Default::default()
+            },
+            script_public_key_address: if verbosity.include_script_public_key_address.unwrap_or(false) {
+                Some(
+                    extract_script_pub_key_address(&output.script_public_key, self.config.prefix())
+                        .map_err(|_| AddressError::InvalidAddress)?,
+                )
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    fn convert_transaction_output_with_verbosity(
+        &self,
+        output: &TransactionOutput,
+        verbosity: &RpcTransactionOutputVerbosity,
+    ) -> RpcResult<RpcOptionalTransactionOutput> {
+        Ok(RpcOptionalTransactionOutput {
+            value: if verbosity.include_amount.unwrap_or(false) { Some(output.value) } else { Default::default() },
+            script_public_key: if verbosity.include_script_public_key.unwrap_or(false) {
+                Some(output.script_public_key.clone())
+            } else {
+                Default::default()
+            },
+            verbose_data: if let Some(output_verbose_data_verbosity) = verbosity.verbose_data_verbosity.as_ref() {
+                Some(self.get_transaction_output_verbose_data_with_verbosity(output, output_verbose_data_verbosity)?)
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    pub fn get_transaction_input_with_verbosity(
+        &self,
+        input: &TransactionInput,
+        utxo: Option<UtxoEntry>,
+        verbosity: &RpcTransactionInputVerbosity,
+    ) -> RpcResult<RpcOptionalTransactionInput> {
+        Ok(RpcOptionalTransactionInput {
+            previous_outpoint: if verbosity.include_previous_outpoint.unwrap_or(false) {
+                Some(input.previous_outpoint.into())
+            } else {
+                Default::default()
+            },
+            signature_script: if verbosity.include_signature_script.unwrap_or(false) {
+                Some(input.signature_script.clone())
+            } else {
+                Default::default()
+            },
+            sequence: if verbosity.include_sequence.unwrap_or(false) { Some(input.sequence) } else { Default::default() },
+            sig_op_count: if verbosity.include_sig_op_count.unwrap_or(false) { Some(input.sig_op_count) } else { Default::default() },
+            verbose_data: if let Some(input_verbose_data_verbosity) = verbosity.verbose_data_verbosity.as_ref() {
+                Some(self.get_input_verbose_data_with_verbosity(utxo, input_verbose_data_verbosity)?)
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    pub async fn convert_transaction_with_verbosity(
+        &self,
+        consensus: &ConsensusProxy,
+        transaction: &Transaction,
+        block_hash: Option<Hash>,
+        block_time: u64,
+        verbosity: &RpcTransactionVerbosity,
+    ) -> RpcResult<RpcOptionalTransaction> {
+        Ok(RpcOptionalTransaction {
+            version: if verbosity.include_version.unwrap_or(false) { Some(transaction.version) } else { Default::default() },
+            inputs: if let Some(ref input_verbosity) = verbosity.input_verbosity {
+                transaction
+                    .inputs
+                    .iter()
+                    .map(|x| self.get_transaction_input_with_verbosity(x, None, input_verbosity))
+                    .collect::<Result<Vec<_>, _>>()?
+            } else {
+                Default::default()
+            },
+            outputs: if let Some(ref output_verbosity) = verbosity.output_verbosity {
+                transaction
+                    .outputs
+                    .iter()
+                    .map(|x| self.convert_transaction_output_with_verbosity(x, output_verbosity))
+                    .collect::<Result<Vec<_>, _>>()?
+            } else {
+                Default::default()
+            },
+            lock_time: if verbosity.include_lock_time.unwrap_or(false) { Some(transaction.lock_time) } else { Default::default() },
+            subnetwork_id: if verbosity.include_subnetwork_id.unwrap_or(false) {
+                Some(transaction.subnetwork_id.clone())
+            } else {
+                Default::default()
+            },
+            gas: if verbosity.include_gas.unwrap_or(false) { Some(transaction.gas) } else { Default::default() },
+            payload: if verbosity.include_payload.unwrap_or(false) { Some(transaction.payload.clone()) } else { Default::default() },
+            mass: if verbosity.include_mass.unwrap_or(false) { Some(transaction.mass()) } else { Default::default() },
+            verbose_data: if let Some(verbose_data_verbosity) = verbosity.verbose_data_verbosity.as_ref() {
+                Some(self.get_transaction_verbose_data_with_verbosity(
+                    transaction,
+                    block_hash.unwrap(),
+                    block_time,
+                    consensus.calculate_transaction_non_contextual_masses(transaction).compute_mass,
+                    verbose_data_verbosity,
+                )?)
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    pub async fn convert_signable_transaction_with_verbosity(
+        &self,
+        consensus: &ConsensusProxy,
+        transaction: &SignableTransaction,
+        block_hash: Option<Hash>,
+        block_time: u64,
+        verbosity: &RpcTransactionVerbosity,
+    ) -> RpcResult<RpcOptionalTransaction> {
+        Ok(RpcOptionalTransaction {
+            version: if verbosity.include_version.unwrap_or(false) { Some(transaction.tx.version) } else { Default::default() },
+            inputs: if let Some(input_verbosity) = verbosity.input_verbosity.as_ref() {
+                transaction
+                    .tx
+                    .inputs
+                    .iter()
+                    .enumerate()
+                    .map(|(i, x)| self.get_transaction_input_with_verbosity(x, transaction.entries[i].clone(), input_verbosity))
+                    .collect::<Result<Vec<_>, _>>()?
+            } else {
+                Default::default()
+            },
+            outputs: if let Some(output_verbosity) = verbosity.output_verbosity.as_ref() {
+                transaction
+                    .tx
+                    .outputs
+                    .iter()
+                    .map(|x| self.convert_transaction_output_with_verbosity(x, output_verbosity))
+                    .collect::<Result<Vec<_>, _>>()?
+            } else {
+                Default::default()
+            },
+            lock_time: if verbosity.include_lock_time.unwrap_or(false) { Some(transaction.tx.lock_time) } else { Default::default() },
+            subnetwork_id: Some(transaction.tx.subnetwork_id.clone()),
+            gas: Some(transaction.tx.gas),
+            payload: Some(transaction.tx.payload.clone()),
+            mass: Some(transaction.tx.mass()),
+            verbose_data: if let Some(verbose_data_verbosity) = verbosity.verbose_data_verbosity.as_ref() {
+                Some(
+                    self.get_transaction_verbose_data_with_verbosity(
+                        &transaction.tx,
+                        block_hash.unwrap(),
+                        block_time,
+                        transaction
+                            .calculated_non_contextual_masses
+                            .unwrap_or(consensus.calculate_transaction_non_contextual_masses(transaction.tx.as_ref()))
+                            .compute_mass,
+                        verbose_data_verbosity,
+                    )?,
+                )
+            } else {
+                Default::default()
+            },
+        })
+    }
+
+    pub async fn get_accepted_transactions_with_verbosity(
+        &self,
+        consensus: &ConsensusProxy,
+        tx_ids: Option<Vec<TransactionId>>,
+        accepting_block: Hash,
+        merged_block_data: &MergesetBlockAcceptanceData,
+        verbosity: &RpcTransactionVerbosity,
+    ) -> RpcResult<Vec<RpcOptionalTransaction>> {
+        let merged_block_timestamp = consensus.async_get_header(merged_block_data.block_hash).await?.timestamp;
+
+        let txs = consensus
+            .async_get_transactions_by_block_acceptance_data(
+                accepting_block,
+                merged_block_data.clone(),
+                tx_ids,
+                if verbosity.requires_populated_transaction() {
+                    TransactionType::SignableTransaction
+                } else {
+                    TransactionType::Transaction
+                },
+            )
+            .await?;
+
+        Ok(match txs {
+            TransactionQueryResult::Transaction(txs) => {
+                let mut converted = Vec::with_capacity(txs.len());
+
+                for tx in txs.iter() {
+                    converted.push({
+                        let rpc_tx = self
+                            .convert_transaction_with_verbosity(
+                                consensus,
+                                tx,
+                                Some(merged_block_data.block_hash),
+                                merged_block_timestamp,
+                                verbosity,
+                            )
+                            .await?;
+
+                        if rpc_tx.is_empty() {
+                            continue;
+                        };
+
+                        rpc_tx
+                    });
+                }
+
+                converted
+            }
+            TransactionQueryResult::SignableTransaction(txs) => {
+                let mut converted = Vec::with_capacity(txs.len());
+
+                for tx in txs.iter() {
+                    converted.push({
+                        let rpc_tx = self
+                            .convert_signable_transaction_with_verbosity(
+                                consensus,
+                                tx,
+                                Some(merged_block_data.block_hash),
+                                merged_block_timestamp,
+                                verbosity,
+                            )
+                            .await?;
+
+                        if rpc_tx.is_empty() {
+                            continue;
+                        };
+
+                        rpc_tx
+                    });
+                }
+
+                converted
+            }
+        })
+    }
+
+    async fn get_mergeset_accepted_transactions_with_verbosity(
+        &self,
+        consensus: &ConsensusProxy,
+        accepting_block: Hash,
+        mergeset_blocks_acceptance_data: &Arc<AcceptanceData>,
+        verbosity: &RpcMergesetBlockAcceptanceDataVerbosity,
+    ) -> RpcResult<Vec<RpcOptionalTransaction>> {
+        let mut mergeset_accepted_transactions: Vec<RpcOptionalTransaction> = Vec::new();
+
+        for merged_block_acceptance_data in mergeset_blocks_acceptance_data.iter() {
+            let mut accepted_txs = if let Some(accepted_transaction_verbosity) = verbosity.accepted_transactions_verbosity.as_ref() {
+                self.get_accepted_transactions_with_verbosity(
+                    consensus,
+                    None,
+                    accepting_block,
+                    merged_block_acceptance_data,
+                    accepted_transaction_verbosity,
+                )
+                .await?
+            } else {
+                Vec::new()
+            };
+
+            mergeset_accepted_transactions.append(&mut accepted_txs);
+        }
+
+        Ok(mergeset_accepted_transactions)
+    }
+
+    pub async fn get_chain_blocks_accepted_transactions(
+        &self,
+        consensus: &ConsensusProxy,
+        verbosity: &RpcAcceptanceDataVerbosity,
+        chain_path: &ChainPath,
+        merged_blocks_limit: Option<usize>,
+    ) -> RpcResult<Vec<RpcChainBlockAcceptedTransactions>> {
+        if verbosity.accepting_chain_header_verbosity.is_none() && verbosity.mergeset_block_acceptance_data_verbosity.is_none() {
+            // specified verbosity doesn't need acceptance data
+            return Ok(Vec::new());
+        }
+
+        let chain_block_mergeset_acceptance_data_vec =
+            consensus.async_get_blocks_acceptance_data(chain_path.added.clone(), merged_blocks_limit).await.unwrap();
+        let mut rpc_acceptance_data =
+            Vec::<RpcChainBlockAcceptedTransactions>::with_capacity(chain_block_mergeset_acceptance_data_vec.len());
+
+        // for each chain block
+        for (accepting_chain_hash, chain_block_mergeset_acceptance_data) in
+            chain_path.added.iter().zip(chain_block_mergeset_acceptance_data_vec.iter())
+        {
+            // accepting chain block header is always needed to populate transactions
+            let accepting_chain_header = consensus.async_get_header(*accepting_chain_hash).await?;
+
+            // adapt it to fit target verbosity in response
+            let accepting_chain_header_with_verbosity: RpcOptionalHeader =
+                if let Some(verbosity) = verbosity.accepting_chain_header_verbosity.as_ref() {
+                    let header = self.adapt_header_to_header_with_verbosity(verbosity, &accepting_chain_header)?;
+                    if header.is_empty() {
+                        Default::default()
+                    } else {
+                        header
+                    }
+                } else {
+                    Default::default()
+                };
+
+            if let Some(mergeset_block_acceptance_data_verbosity) = verbosity.mergeset_block_acceptance_data_verbosity.as_ref() {
+                let mergeset_transactions_with_verbosity = self
+                    .get_mergeset_accepted_transactions_with_verbosity(
+                        consensus,
+                        *accepting_chain_hash,
+                        chain_block_mergeset_acceptance_data,
+                        mergeset_block_acceptance_data_verbosity,
+                    )
+                    .await?;
+
+                rpc_acceptance_data.push(RpcChainBlockAcceptedTransactions {
+                    chain_block_header: accepting_chain_header_with_verbosity,
+                    accepted_transactions: mergeset_transactions_with_verbosity,
+                });
+            } else {
+                rpc_acceptance_data.push(RpcChainBlockAcceptedTransactions {
+                    chain_block_header: accepting_chain_header_with_verbosity,
+                    accepted_transactions: Default::default(),
+                });
+            };
+        }
+        Ok(rpc_acceptance_data)
+    }
 }
 
 #[async_trait]
diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs
index 0993167d69..2251832bac 100644
--- a/rpc/service/src/service.rs
+++ b/rpc/service/src/service.rs
@@ -7,7 +7,7 @@ use async_trait::async_trait;
 use kaspa_consensus_core::api::counters::ProcessingCounters;
 use kaspa_consensus_core::daa_score_timestamp::DaaScoreTimestamp;
 use kaspa_consensus_core::errors::block::RuleError;
-use kaspa_consensus_core::mass::{calc_storage_mass, UtxoCell};
+use kaspa_consensus_core::tx::{TransactionQueryResult, TransactionType};
 use kaspa_consensus_core::utxo::utxo_inquirer::UtxoInquirerError;
 use kaspa_consensus_core::{
     block::Block,
@@ -285,62 +285,6 @@ impl RpcCoreService {
             (false, false) => Ok(TransactionQuery::TransactionsOnly),
         }
     }
-
-    fn sanity_check_storage_mass(&self, block: Block) {
-        // [Crescendo]: warn non updated miners to upgrade their rpc flow before Crescendo activation
-        if self.config.crescendo_activation.is_active(block.header.daa_score) {
-            return;
-        }
-
-        // It is sufficient to witness a single transaction with non default mass to conclude that miner rpc flow is correct
-        if block.transactions.iter().any(|tx| tx.mass() > 0) {
-            return;
-        }
// Iterate over non-coinbase transactions and search for a transaction which is proven to have positive storage mass - for tx in block.transactions.iter().skip(1) { - /* - Below we apply a workaround to compute a lower bound to the storage mass even without having full UTXO context (thus lacking input amounts). - Notes: - 1. We know that plurality is always 1 for std tx ins/outs (assuming the submitted block was built via the local std mempool). - 2. The submitted block was accepted by consensus hence all transactions passed the basic in-isolation validity checks - - |O| > |I| means that the formula used is C·|O| / H(O) - C·|I| / A(I). Additionally we know that sum(O) <= sum(I) (outs = ins minus fee). - Combined, we can use sum(O)/|I| as a lower bound for A(I). We simulate this by using sum(O)/|I| as the value of each (unknown) input. - Plugging in to the storage formula we obtain a lower bound for the real storage mass (intuitively, making inputs smaller only decreases the mass). - */ - if tx.outputs.len() > tx.inputs.len() { - let num_ins = tx.inputs.len() as u64; - let sum_outs = tx.outputs.iter().map(|o| o.value).sum::<u64>(); - if num_ins == 0 || sum_outs < num_ins { - // Sanity checks - continue; - } - - let avg_ins_lower = sum_outs / num_ins; // >= 1 - let storage_mass_lower = calc_storage_mass( - tx.is_coinbase(), - tx.inputs.iter().map(|_| UtxoCell { plurality: 1, amount: avg_ins_lower }), - tx.outputs.iter().map(|o| o.into()), - self.config.storage_mass_parameter, - ) - .unwrap_or(u64::MAX); - - // Despite being a lower bound, storage mass is still calculated to be positive, so we found our problem - if storage_mass_lower > 0 { - warn!("The RPC submitted block {} contains a transaction {} with mass = 0 while it should have been strictly positive. -This indicates that the RPC conversion flow used by the miner does not preserve the mass values received from GetBlockTemplate. -You must upgrade your miner flow to propagate the mass field correctly prior to the Crescendo hardfork activation. -Failure to do so will result in your blocks being considered invalid when Crescendo activates.", - block.hash(), - tx.id() - ); - // A single warning is sufficient - break; - } - } - } - } } #[async_trait] @@ -353,8 +297,13 @@ impl RpcApi for RpcCoreService { let session = self.consensus_manager.consensus().unguarded_session(); let sink_daa_score_timestamp = session.async_get_sink_daa_score_timestamp().await; + // do not attempt to submit blocks while in unstable ibd state.
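+ // (in the transitional state the sink may still be missing a block body, so submitted blocks cannot be reliably validated)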
+ if session.async_is_consensus_in_transitional_ibd_state().await { + return Err(RpcError::ConsensusInTransitionalIbdState); + } + // TODO: consider adding an error field to SubmitBlockReport to document both the report and error fields - let is_synced: bool = self.mining_rule_engine.should_mine(sink_daa_score_timestamp); + let is_synced = self.mining_rule_engine.should_mine(sink_daa_score_timestamp); if !self.config.enable_unsynced_mining && !is_synced { // error = "Block not submitted - node is not synced" @@ -374,13 +323,8 @@ impl RpcApi for RpcCoreService { let virtual_daa_score = session.get_virtual_daa_score(); // A simple heuristic check which signals that the mined block is out of date - // and should not be accepted unless user explicitly requests - // - // [Crescendo]: switch to the larger duration only after a full window with the new duration is reached post activation - let difficulty_window_duration = self - .config - .difficulty_window_duration_in_block_units() - .get(virtual_daa_score.saturating_sub(self.config.difficulty_window_duration_in_block_units().after())); + // and should not be accepted unless user explicitly requests. + let difficulty_window_duration = self.config.difficulty_window_duration_in_block_units().after(); if virtual_daa_score > difficulty_window_duration && block.header.daa_score < virtual_daa_score - difficulty_window_duration { @@ -391,13 +335,10 @@ impl RpcApi for RpcCoreService { trace!("incoming SubmitBlockRequest for block {}", hash); match self.flow_context.submit_rpc_block(&session, block.clone()).await { - Ok(_) => { - self.sanity_check_storage_mass(block); - Ok(SubmitBlockResponse { report: SubmitBlockReport::Success }) - } + Ok(_) => Ok(SubmitBlockResponse { report: SubmitBlockReport::Success }), Err(ProtocolError::RuleError(RuleError::BadMerkleRoot(h1, h2))) => { warn!( - "The RPC submitted block {} triggered a {} error: {}. + "The RPC submitted block {} triggered a {} error: {}. NOTE: This error usually indicates an RPC conversion error between the node and the miner. This is likely to reflect using a NON-SUPPORTED miner.", hash, stringify!(RuleError::BadMerkleRoot), @@ -432,10 +373,15 @@ NOTE: This error usually indicates an RPC conversion error between the node and } // Build block template + let session = self.consensus_manager.consensus().unguarded_session(); + + // do not attempt to mine blocks while in unstable ibd state. 
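+ // (mirrors the transitional-IBD guard in submit_block_call above)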
+ if session.async_is_consensus_in_transitional_ibd_state().await { + return Err(RpcError::ConsensusInTransitionalIbdState); + } let script_public_key = kaspa_txscript::pay_to_address_script(&request.pay_address); let extra_data = version().as_bytes().iter().chain(once(&(b'/'))).chain(&request.extra_data).cloned().collect::<Vec<_>>(); let miner_data: MinerData = MinerData::new(script_public_key, extra_data); - let session = self.consensus_manager.consensus().unguarded_session(); let block_template = self.mining_manager.clone().get_block_template(&session, miner_data).await?; // Check coinbase tx payload length @@ -504,15 +450,24 @@ NOTE: This error usually indicates an RPC conversion error between the node and } // We use +1 because low_hash is also returned // max_blocks MUST be >= mergeset_size_limit + 1 - let max_blocks = self.config.mergeset_size_limit().upper_bound() as usize + 1; + let max_blocks = self.config.mergeset_size_limit().after() as usize + 1; let (block_hashes, high_hash) = session.async_get_hashes_between(low_hash, sink_hash, max_blocks).await?; // If the high hash is equal to sink it means get_hashes_between didn't skip any hashes, and // there's space to add the sink anticone, otherwise we cannot add the anticone because // there's no guarantee that all of the anticone root ancestors will be present. - let sink_anticone = if high_hash == sink_hash { session.async_get_anticone(sink_hash).await? } else { vec![] }; - // Prepend low hash to make it inclusive and append the sink anticone - let block_hashes = once(low_hash).chain(block_hashes).chain(sink_anticone).collect::<Vec<_>>(); + let filtered_sink_anticone = if high_hash == sink_hash { + // Get the sink anticone and filter out duplicates: remove low_hash and any blocks already in block_hashes + // This prevents the bug where low_hash appears twice (once at the start and once in sink_anticone) + let sink_anticone = session.async_get_anticone(sink_hash).await?; + let mut seen_hashes: std::collections::HashSet<_> = once(low_hash).chain(block_hashes.iter().copied()).collect(); + sink_anticone.into_iter().filter(|hash| seen_hashes.insert(*hash)).collect() + } else { + vec![] + }; + + // Prepend low hash to make it inclusive and append the filtered sink anticone + let block_hashes = once(low_hash).chain(block_hashes).chain(filtered_sink_anticone).collect::<Vec<_>>(); let blocks = if request.include_blocks { let mut blocks = Vec::with_capacity(block_hashes.len()); for hash in block_hashes.iter().copied() { @@ -677,12 +632,17 @@ NOTE: This error usually indicates an RPC conversion error between the node and ) -> RpcResult<GetVirtualChainFromBlockResponse> { let session = self.consensus_manager.consensus().session().await; + // This RPC call attempts to retrieve transactions en route from the block to the virtual + // These transactions may not be present during a transitional state where the sink is missing a block body + if session.async_is_consensus_in_transitional_ibd_state().await { + return Err(RpcError::ConsensusInTransitionalIbdState); + } // batch_size is set to 10 times the mergeset_size_limit. // this means batch_size is 2480 on 10 bps, and 1800 on mainnet. // this bounds by number of merged blocks, if include_accepted_transactions = true // else it returns the batch_size amount on pure chain blocks. // Note: batch_size does not bound removed chain blocks, only added chain blocks.
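// e.g. with mergeset_size_limit().after() = 248 (10 bps) this yields 248 * 10 = 2480, and with 180 (mainnet) it yields 1800, matching the figures quoted above.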
- let batch_size = (self.config.mergeset_size_limit().upper_bound() * 10) as usize; + let batch_size = (self.config.mergeset_size_limit().after() * 10) as usize; let mut virtual_chain_batch = session.async_get_virtual_chain_from_block(request.start_hash, Some(batch_size)).await?; if let Some(min_confirmation_count) = request.min_confirmation_count { @@ -734,6 +694,12 @@ NOTE: This error usually indicates an RPC conversion error between the node and if !self.config.utxoindex { return Err(RpcError::NoUtxoIndex); } + let session = self.consensus_manager.consensus().unguarded_session(); + // do not retrieve utxos while in unstable ibd state. + if session.async_is_consensus_in_transitional_ibd_state().await { + return Err(RpcError::ConsensusInTransitionalIbdState); + } + // TODO: discuss if the entry order is part of the method requirements // (the current impl does not retain an entry order matching the request addresses order) let entry_map = self.get_utxo_set_by_script_public_key(request.addresses.iter()).await; @@ -748,6 +714,13 @@ NOTE: This error usually indicates an RPC conversion error between the node and if !self.config.utxoindex { return Err(RpcError::NoUtxoIndex); } + + let session = self.consensus_manager.consensus().unguarded_session(); + + // do not retrieve utxo balances while in unstable ibd state. + if session.async_is_consensus_in_transitional_ibd_state().await { + return Err(RpcError::ConsensusInTransitionalIbdState); + } let entry_map = self.get_balance_by_script_public_key(once(&request.address)).await; let balance = entry_map.values().sum(); Ok(GetBalanceByAddressResponse::new(balance)) @@ -761,6 +734,12 @@ NOTE: This error usually indicates an RPC conversion error between the node and if !self.config.utxoindex { return Err(RpcError::NoUtxoIndex); } + let session = self.consensus_manager.consensus().unguarded_session(); + + // do not retrieve utxo balances while in unstable ibd state. + if session.async_is_consensus_in_transitional_ibd_state().await { + return Err(RpcError::ConsensusInTransitionalIbdState); + } let entry_map = self.get_balance_by_script_public_key(request.addresses.iter()).await; let entries = request .addresses @@ -782,6 +761,12 @@ NOTE: This error usually indicates an RPC conversion error between the node and if !self.config.utxoindex { return Err(RpcError::NoUtxoIndex); } + let session = self.consensus_manager.consensus().unguarded_session(); + + // do not retrieve supply balances while in unstable ibd state. 
+ if session.async_is_consensus_in_transitional_ibd_state().await { + return Err(RpcError::ConsensusInTransitionalIbdState); + } let circulating_sompi = self.utxoindex.clone().unwrap().get_circulating_supply().await.map_err(|e| RpcError::General(e.to_string()))?; Ok(GetCoinSupplyResponse::new(MAX_SOMPI, circulating_sompi)) @@ -824,7 +809,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and // For daa_score later than the last header, we estimate in milliseconds based on the difference let time_adjustment = if header_idx == 0 { // estimate milliseconds = (daa_score * target_time_per_block) - (curr_daa_score - header.daa_score).saturating_mul(self.config.target_time_per_block().get(header.daa_score)) + (curr_daa_score - header.daa_score).saturating_mul(self.config.target_time_per_block().after()) } else { // "next" header is the one that we processed last iteration let next_header = &headers[header_idx - 1]; @@ -858,16 +843,8 @@ NOTE: This error usually indicates an RPC conversion error between the node and _request: GetFeeEstimateRequest, ) -> RpcResult<GetFeeEstimateResponse> { let mining_manager = self.mining_manager.clone(); - let consensus_manager = self.consensus_manager.clone(); - let estimate = self - .fee_estimate_cache - .get(async move { - mining_manager - .get_realtime_feerate_estimations(consensus_manager.consensus().unguarded_session().get_virtual_daa_score()) - .await - .into_rpc() - }) - .await; + let estimate = + self.fee_estimate_cache.get(async move { mining_manager.get_realtime_feerate_estimations().await.into_rpc() }).await; Ok(GetFeeEstimateResponse { estimate }) } @@ -902,23 +879,39 @@ NOTE: This error usually indicates an RPC conversion error between the node and ) -> RpcResult<GetUtxoReturnAddressResponse> { let session = self.consensus_manager.consensus().session().await; - match session.async_get_populated_transaction(request.txid, request.accepting_block_daa_score).await { - Ok(tx) => { - if tx.tx.inputs.is_empty() || tx.entries.is_empty() { - return Err(RpcError::UtxoReturnAddressNotFound(UtxoInquirerError::TxFromCoinbase)); + // do not retrieve utxos while in unstable ibd state. + if session.async_is_consensus_in_transitional_ibd_state().await { + return Err(RpcError::ConsensusInTransitionalIbdState); + } + + match session + .async_get_transactions_by_accepting_daa_score( + request.accepting_block_daa_score, + Some(vec![request.txid]), + TransactionType::SignableTransaction, + ) + .await?
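+ // (SignableTransaction results carry the populated input UTXO entries that the return-address lookup below requires; plain Transaction results are rejected)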
+ { + TransactionQueryResult::SignableTransaction(txs) => { + if txs.is_empty() { + return Err(RpcError::ConsensusError(UtxoInquirerError::TransactionNotFound.into())); + }; + + if txs[0].tx.inputs.is_empty() || txs[0].entries.is_empty() { + return Err(RpcError::ConsensusError(UtxoInquirerError::TxFromCoinbase.into())); } - if let Some(utxo_entry) = &tx.entries[0] { + if let Some(utxo_entry) = &txs[0].entries[0] { if let Ok(address) = extract_script_pub_key_address(&utxo_entry.script_public_key, self.config.prefix()) { Ok(GetUtxoReturnAddressResponse { return_address: address }) } else { - Err(RpcError::UtxoReturnAddressNotFound(UtxoInquirerError::NonStandard)) + Err(RpcError::ConsensusError(UtxoInquirerError::NonStandard.into())) } } else { - Err(RpcError::UtxoReturnAddressNotFound(UtxoInquirerError::UnfilledUtxoEntry)) + Err(RpcError::ConsensusError(UtxoInquirerError::UnfilledUtxoEntry.into())) } } - Err(error) => return Err(RpcError::UtxoReturnAddressNotFound(error)), + TransactionQueryResult::Transaction(_) => Err(RpcError::ConsensusError(UtxoInquirerError::TransactionNotFound.into())), } } @@ -964,8 +957,8 @@ NOTE: This error usually indicates an RPC conversion error between the node and if !self.config.unsafe_rpc && request.window_size > MAX_SAFE_WINDOW_SIZE { return Err(RpcError::WindowSizeExceedingMaximum(request.window_size, MAX_SAFE_WINDOW_SIZE)); } - if request.window_size as u64 > self.config.pruning_depth().lower_bound() { - return Err(RpcError::WindowSizeExceedingPruningDepth(request.window_size, self.config.prior_pruning_depth)); + if request.window_size as u64 > self.config.pruning_depth().after() { + return Err(RpcError::WindowSizeExceedingPruningDepth(request.window_size, self.config.pruning_depth().after())); } // In the previous golang implementation the convention for virtual was the following const. 
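For orientation, every handler above funnels through the same two-variant query result. The following stand-alone sketch shows only the dispatch shape; the enum and variant names are taken from this change (kaspa_consensus_core::tx), while the payload types are placeholders standing in for the real transaction types:

    // Stand-in mirrors of the enums this change introduces in kaspa_consensus_core::tx (sketch only).
    // Payload types are placeholders; the real variants carry the consensus transaction types.
    enum TransactionType {
        Transaction,         // plain transactions, no UTXO context attached
        SignableTransaction, // transactions with populated input UTXO entries
    }

    enum TransactionQueryResult {
        Transaction(Vec<u64>),
        SignableTransaction(Vec<u64>),
    }

    // Callers derive the query type from the requested verbosity...
    fn query_type(requires_populated: bool) -> TransactionType {
        if requires_populated {
            TransactionType::SignableTransaction
        } else {
            TransactionType::Transaction
        }
    }

    // ...then branch on the variant that comes back, as the handlers above do.
    fn count(result: &TransactionQueryResult) -> usize {
        match result {
            TransactionQueryResult::Transaction(txs) | TransactionQueryResult::SignableTransaction(txs) => txs.len(),
        }
    }

    fn main() {
        assert!(matches!(query_type(false), TransactionType::Transaction));
        assert_eq!(count(&TransactionQueryResult::SignableTransaction(vec![1, 2, 3])), 3);
    }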
@@ -1078,6 +1071,8 @@ NOTE: This error usually indicates an RPC conversion error between the node and _connection: Option<&DynRpcConnection>, _request: ResolveFinalityConflictRequest, ) -> RpcResult<ResolveFinalityConflictResponse> { + // TODO(Relaxed): implement this functionality + // When implementing, make sure to consider transitional IBD state if !self.config.unsafe_rpc { warn!("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring."); return Err(RpcError::UnavailableInSafeMode); @@ -1221,7 +1216,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and ) -> RpcResult<GetServerInfoResponse> { let session = self.consensus_manager.consensus().unguarded_session(); let sink_daa_score_timestamp = session.async_get_sink_daa_score_timestamp().await; - let is_synced: bool = self.mining_rule_engine.is_sink_recent_and_connected(sink_daa_score_timestamp); + let is_synced = self.mining_rule_engine.is_sink_recent_and_connected(sink_daa_score_timestamp); let virtual_daa_score = session.get_virtual_daa_score(); Ok(GetServerInfoResponse { @@ -1240,12 +1235,62 @@ NOTE: This error usually indicates an RPC conversion error between the node and _connection: Option<&DynRpcConnection>, _request: GetSyncStatusRequest, ) -> RpcResult<GetSyncStatusResponse> { - let sink_daa_score_timestamp = - self.consensus_manager.consensus().unguarded_session().async_get_sink_daa_score_timestamp().await; - let is_synced: bool = self.mining_rule_engine.is_sink_recent_and_connected(sink_daa_score_timestamp); + let session = self.consensus_manager.consensus().unguarded_session(); + + let sink_daa_score_timestamp = session.async_get_sink_daa_score_timestamp().await; + let is_synced = self.mining_rule_engine.is_sink_recent_and_connected(sink_daa_score_timestamp) + && !session.async_is_consensus_in_transitional_ibd_state().await; Ok(GetSyncStatusResponse { is_synced }) } + async fn get_virtual_chain_from_block_v2_call( + &self, + _connection: Option<&DynRpcConnection>, + request: GetVirtualChainFromBlockV2Request, + ) -> RpcResult<GetVirtualChainFromBlockV2Response> { + let session = self.consensus_manager.consensus().session().await; + // sets to full by default + let data_verbosity_level = request.data_verbosity_level.or(Some(RpcDataVerbosityLevel::Full)); + let verbosity: RpcAcceptanceDataVerbosity = data_verbosity_level.map(RpcAcceptanceDataVerbosity::from).unwrap_or_default(); + let batch_size = (self.config.mergeset_size_limit().upper_bound() * 10) as usize; + + let mut chain_path = session.async_get_virtual_chain_from_block(request.start_hash, Some(batch_size)).await?; + + // if min confirmation count is present, strip chain head if needed + // so the new head has at least min_confirmation_count confirmations + if let Some(min_confirmation_count) = request.min_confirmation_count { + if min_confirmation_count > 0 { + let sink_blue_score = session.async_get_sink_blue_score().await; + + while !chain_path.added.is_empty() { + let vc_last_accepted_block_hash = chain_path.added.last().unwrap(); + let vc_last_accepted_block = session.async_get_block(*vc_last_accepted_block_hash).await?; + + let distance = sink_blue_score.saturating_sub(vc_last_accepted_block.header.blue_score); + + if distance > min_confirmation_count { + break; + } + + chain_path.added.pop(); + } + } + } + + let chain_blocks_accepted_transactions = self + .consensus_converter + .get_chain_blocks_accepted_transactions(&session, &verbosity, &chain_path, Some(batch_size)) + .await?; + + chain_path.added.truncate(chain_blocks_accepted_transactions.len()); + + Ok(GetVirtualChainFromBlockV2Response { +
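+ // (added hashes were truncated above so they stay aligned one-to-one with chain_block_accepted_transactions)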
removed_chain_block_hashes: chain_path.removed.into(), + added_chain_block_hashes: chain_path.added.into(), + chain_block_accepted_transactions: chain_blocks_accepted_transactions.into(), + }) + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Notification API diff --git a/rpc/wrpc/client/src/client.rs b/rpc/wrpc/client/src/client.rs index f22bcf6255..497b156ae3 100644 --- a/rpc/wrpc/client/src/client.rs +++ b/rpc/wrpc/client/src/client.rs @@ -647,6 +647,7 @@ impl RpcApi for KaspaRpcClient { GetUtxoReturnAddress, GetUtxosByAddresses, GetVirtualChainFromBlock, + GetVirtualChainFromBlockV2, ResolveFinalityConflict, Shutdown, SubmitBlock, diff --git a/rpc/wrpc/examples/simple_client/src/main.rs b/rpc/wrpc/examples/simple_client/src/main.rs index 0d63a24c27..6092647cda 100644 --- a/rpc/wrpc/examples/simple_client/src/main.rs +++ b/rpc/wrpc/examples/simple_client/src/main.rs @@ -3,8 +3,7 @@ use kaspa_rpc_core::{api::rpc::RpcApi, GetBlockDagInfoResponse, GetServerInfoResponse}; use kaspa_wrpc_client::{ client::{ConnectOptions, ConnectStrategy}, - prelude::NetworkId, - prelude::NetworkType, + prelude::{NetworkId, NetworkType}, result::Result, KaspaRpcClient, Resolver, WrpcEncoding, }; diff --git a/rpc/wrpc/examples/vcc_v2/Cargo.toml b/rpc/wrpc/examples/vcc_v2/Cargo.toml new file mode 100644 index 0000000000..38abf1d370 --- /dev/null +++ b/rpc/wrpc/examples/vcc_v2/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "kaspa-wrpc-vcc-v2" +description = "Kaspa VCCV2 example" +publish = false +rust-version.workspace = true +version.workspace = true +edition.workspace = true +authors.workspace = true +include.workspace = true +license.workspace = true +repository.workspace = true + +[dependencies] +futures.workspace = true +kaspa-rpc-core.workspace = true +kaspa-wrpc-client.workspace = true +kaspa-addresses.workspace = true +tokio.workspace = true + + +[lints] +workspace = true diff --git a/rpc/wrpc/examples/vcc_v2/src/main.rs b/rpc/wrpc/examples/vcc_v2/src/main.rs new file mode 100644 index 0000000000..964c8b9b03 --- /dev/null +++ b/rpc/wrpc/examples/vcc_v2/src/main.rs @@ -0,0 +1,115 @@ +// Example of VCCv2 endpoint + +use kaspa_addresses::Address; +use kaspa_rpc_core::{api::rpc::RpcApi, RpcDataVerbosityLevel, RpcHash, RpcOptionalTransaction}; +use kaspa_wrpc_client::{ + client::{ConnectOptions, ConnectStrategy}, + prelude::{NetworkId, NetworkType}, + result::Result, + KaspaRpcClient, WrpcEncoding, +}; +use std::time::Duration; +use std::{collections::HashSet, process::ExitCode}; + +#[tokio::main] +async fn main() -> ExitCode { + match get_vcc_v2().await { + Ok(_) => { + println!("Well done! You successfully completed your first client connection to a Kaspa node!"); + ExitCode::SUCCESS + } + Err(error) => { + println!("An error occurred: {error}"); + ExitCode::FAILURE + } + } +} + +/// Helper to extract the sender address from the first input, if everything is present.
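+/// Returns None whenever the chosen verbosity level did not populate the input's verbose data, its UTXO entry, or the entry's address.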
+fn first_input_sender_address(tx: &RpcOptionalTransaction) -> Option<&Address> { + let first_input = tx.inputs.first()?; + + let utxo_entry = first_input.verbose_data.as_ref()?.utxo_entry.as_ref()?; + + utxo_entry.verbose_data.as_ref()?.script_public_key_address.as_ref() +} + +async fn get_vcc_v2() -> Result<()> { + let encoding = WrpcEncoding::Borsh; + + let url = Some("ws://127.0.0.1:17110"); + let resolver = None; + + let network_type = NetworkType::Mainnet; + let selected_network = Some(NetworkId::new(network_type)); + + // Advanced options + let subscription_context = None; + + // Create new wRPC client with parameters defined above + let client = KaspaRpcClient::new(encoding, url, resolver, selected_network, subscription_context)?; + + // Advanced connection options + let timeout = 5_000; + let options = ConnectOptions { + block_async_connect: true, + connect_timeout: Some(Duration::from_millis(timeout)), + strategy: ConnectStrategy::Fallback, + ..Default::default() + }; + + // Connect to selected Kaspa node + client.connect(Some(options)).await?; + + let dag_info = client.get_block_dag_info().await?; + // Using the pruning point as a demonstration; in real usage you would probably start from a checkpoint + // and keep iterating from checkpoint to checkpoint to get a live view (high-reactivity environment) + let pp_hash = dag_info.pruning_point_hash; + + let response = client.get_virtual_chain_from_block_v2(pp_hash, Some(RpcDataVerbosityLevel::High), None).await?; + + // keep track of accepted transaction ids + let mut global_seen_tx = HashSet::<RpcHash>::with_capacity(30_000); + + for acd in response.chain_block_accepted_transactions.iter() { + let header = acd.chain_block_header.clone(); + + let Some(mergeset_block_hash) = header.hash else { + eprintln!("chain_block_header.hash is missing"); + continue; + }; + + println!("mergeset of {} has {} accepted transactions", mergeset_block_hash, acd.accepted_transactions.len()); + + for tx in &acd.accepted_transactions { + let Some(verbose) = tx.verbose_data.as_ref() else { + eprintln!("transaction.verbose_data is missing"); + continue; + }; + + let Some(id) = verbose.transaction_id else { + eprintln!("transaction_id is missing in verbose_data"); + continue; + }; + + // Example: use transaction payload here (borrow instead of cloning) + let _payload = &tx.payload; + + // Example: use first input's sender address, if available + if let Some(sender_address) = first_input_sender_address(tx) { + println!("{id} - {sender_address}"); + } + + global_seen_tx.insert(id); + } + } + + println!("total transaction count: {}", global_seen_tx.len()); + + // Disconnect client from Kaspa node + client.disconnect().await?; + + // Return function result + Ok(()) +} diff --git a/rpc/wrpc/server/src/router.rs b/rpc/wrpc/server/src/router.rs index b4c74a3374..7dc3d6b9bd 100644 --- a/rpc/wrpc/server/src/router.rs +++ b/rpc/wrpc/server/src/router.rs @@ -69,6 +69,7 @@ impl Router { GetSystemInfo, GetUtxosByAddresses, GetVirtualChainFromBlock, + GetVirtualChainFromBlockV2, ResolveFinalityConflict, Shutdown, SubmitBlock, diff --git a/rpc/wrpc/wasm/src/client.rs b/rpc/wrpc/wasm/src/client.rs index ff42a69028..cead974c21 100644 --- a/rpc/wrpc/wasm/src/client.rs +++ b/rpc/wrpc/wasm/src/client.rs @@ -37,7 +37,7 @@ declare! { r#" /** * RPC client configuration options - * + * * @category Node RPC */ export interface IRpcConfig { @@ -1056,6 +1056,10 @@ build_wrpc_wasm_bindgen_interface!( /// Returned information: None. Unban, /// Get UTXO Return Addresses.
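+ /// Returned information: Return address of the queried transaction.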
- GetUtxoReturnAddress + GetUtxoReturnAddress, + /// Retrieves the virtual chain corresponding to a specified block hash. + /// Returned information: Virtual chain information. (Version 2) + /// May be used to get fully populated transactions + GetVirtualChainFromBlockV2 ] ); diff --git a/simpa/Cargo.toml b/simpa/Cargo.toml index 3cefb83e5a..795896f163 100644 --- a/simpa/Cargo.toml +++ b/simpa/Cargo.toml @@ -36,6 +36,7 @@ rand.workspace = true rayon.workspace = true secp256k1.workspace = true tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } +serde_json.workspace = true [features] heap = ["dhat", "kaspa-alloc/heap"] diff --git a/simpa/src/main.rs b/simpa/src/main.rs index d68e9b1986..aa5be9c4ec 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -13,11 +13,11 @@ use kaspa_consensus::{ headers::HeaderStoreReader, relations::RelationsStoreReader, }, - params::{ForkActivation, Params, TenBps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, SIMNET_PARAMS}, + params::{ForkActivation, OverrideParams, Params, TenBps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, SIMNET_PARAMS}, }; use kaspa_consensus_core::{ api::ConsensusApi, block::Block, blockstatus::BlockStatus, config::bps::calculate_ghostdag_k, errors::block::BlockProcessResult, - mining_rules::MiningRules, BlockHashSet, BlockLevel, HashMapCustomHasher, + mining_rules::MiningRules, tx::TransactionType, BlockHashSet, BlockLevel, HashMapCustomHasher, }; use kaspa_consensus_notify::root::ConsensusNotificationRoot; use kaspa_core::{ @@ -126,6 +126,9 @@ struct Args { long_payload: bool, #[arg(long)] retention_period_days: Option<f64>, + + #[arg(long)] + override_params_output: Option<String>, } #[cfg(feature = "heap")] @@ -218,6 +221,13 @@ fn main_impl(mut args: Args) { if let Some(rocksdb_mem_budget) = args.rocksdb_mem_budget { conn_builder = conn_builder.with_mem_budget(rocksdb_mem_budget); } + + if let Some(output_path) = args.override_params_output { + let override_params: OverrideParams = config.params.clone().into(); + let override_params_json = serde_json::to_string_pretty(&override_params).unwrap(); + std::fs::write(output_path, override_params_json).expect("Unable to write override_params to file"); + } + // Load an existing consensus or run the simulation let (consensus, _lifetime) = if let Some(input_dir) = args.input_dir { let mut config = (*config).clone(); @@ -286,7 +296,13 @@ fn main_impl(mut args: Args) { let block = consensus.get_block(hash).unwrap(); cbad.accepted_transactions.iter().for_each(|ate| { assert!( - consensus.get_populated_transaction(ate.transaction_id, block.header.daa_score).is_ok(), + consensus + .get_transactions_by_accepting_daa_score( + block.header.daa_score, + Some(vec![ate.transaction_id]), + TransactionType::SignableTransaction + ) + .is_ok(), "Expected to find tx {} at accepted daa {} via get_transactions_by_accepting_daa_score", ate.transaction_id, block.header.daa_score diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index 6848f72e27..d482522baf 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -180,7 +180,7 @@ impl Miner { let entry = utxo_view.get(&outpoint)?; if entry.amount < 2 || (entry.is_coinbase - && (virtual_daa_score as i64 - entry.block_daa_score as i64) <= self.params.coinbase_maturity().upper_bound() as i64) + && (virtual_daa_score as i64 - entry.block_daa_score as i64) <= self.params.coinbase_maturity().after() as i64) { return None; } diff --git a/testing/integration/src/common/utils.rs b/testing/integration/src/common/utils.rs index
10fb9cb671..b1dced53e6 100644 --- a/testing/integration/src/common/utils.rs +++ b/testing/integration/src/common/utils.rs @@ -188,7 +188,7 @@ pub async fn mine_block(pay_address: Address, submitting_client: &GrpcClient, li // Mine a block let template = submitting_client.get_block_template(pay_address.clone(), vec![]).await.unwrap(); - let header: Header = (&template.block.header).into(); + let header: Header = (&template.block.header).try_into().unwrap(); let block_hash = header.hash; submitting_client.submit_block(template.block, false).await.unwrap(); diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index e0c1c6beeb..c08d723081 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -53,9 +53,7 @@ use crate::common; use flate2::read::GzDecoder; use futures_util::future::try_join_all; use itertools::Itertools; -use kaspa_consensus_core::errors::tx::TxRuleError; use kaspa_consensus_core::hashing::sighash::calc_schnorr_signature_hash; -use kaspa_consensus_core::merkle::calc_hash_merkle_root; use kaspa_consensus_core::muhash::MuHashExtensions; use kaspa_core::core::Core; use kaspa_core::signals::Shutdown; @@ -77,6 +75,7 @@ use std::cmp::{max, Ordering}; use std::collections::HashSet; use std::path::Path; use std::sync::Arc; +use std::time::{Duration, Instant}; use std::{ collections::HashMap, fs::File, @@ -207,7 +206,9 @@ async fn consensus_sanity_test() { let wait_handles = consensus.init(); consensus - .validate_and_insert_block(consensus.build_block_with_parents(genesis_child, vec![MAINNET_PARAMS.genesis.hash]).to_immutable()) + .validate_and_insert_block( + consensus.build_header_only_block_with_parents(genesis_child, vec![MAINNET_PARAMS.genesis.hash]).to_immutable(), + ) .virtual_state_task .await .unwrap(); @@ -372,7 +373,7 @@ async fn block_window_test() { for test_block in test_blocks { info!("Processing block {}", test_block.id); let block_id = string_to_hash(test_block.id); - let block = consensus.build_block_with_parents( + let block = consensus.build_header_only_block_with_parents( block_id, strings_to_hashes(&test_block.parents.iter().map(|parent| String::from(*parent)).collect()), ); @@ -409,7 +410,7 @@ async fn header_in_isolation_validation_test() { let config = ConfigBuilder::new(MAINNET_PARAMS).edit_consensus_params(|p| p.skip_proof_of_work = true).build(); let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); - let block = consensus.build_block_with_parents(1.into(), vec![config.genesis.hash]); + let block = consensus.build_header_only_block_with_parents(1.into(), vec![config.genesis.hash]); { let mut block = block.clone(); @@ -445,7 +446,7 @@ async fn header_in_isolation_validation_test() { { let mut block = block.clone(); block.header.hash = 3.into(); - block.header.parents_by_level[0] = vec![]; + block.header.parents_by_level.set_direct_parents(vec![]); match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::NoParents) => {} res => { @@ -457,12 +458,13 @@ async fn header_in_isolation_validation_test() { { let mut block = block.clone(); block.header.hash = 4.into(); - block.header.parents_by_level[0] = - std::iter::repeat_n(config.genesis.hash, config.prior_max_block_parents as usize + 1).collect(); + let max_block_parents = config.max_block_parents().after() as usize; + let parents = std::iter::repeat_n(config.genesis.hash, max_block_parents + 
1).collect(); + block.header.parents_by_level.set_direct_parents(parents); match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::TooManyParents(num_parents, limit)) => { - assert_eq!((config.prior_max_block_parents + 1) as usize, num_parents); - assert_eq!(limit, config.prior_max_block_parents as usize); + assert_eq!(max_block_parents + 1, num_parents); + assert_eq!(limit, max_block_parents); } res => { panic!("Unexpected result: {res:?}") @@ -479,13 +481,14 @@ async fn incest_test() { let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); - let block = consensus.build_block_with_parents(1.into(), vec![config.genesis.hash]); + let block = consensus.build_header_only_block_with_parents(1.into(), vec![config.genesis.hash]); let BlockValidationFutures { block_task, virtual_state_task } = consensus.validate_and_insert_block(block.to_immutable()); block_task.await.unwrap(); // Assert that block task completes as well virtual_state_task.await.unwrap(); - let mut block = consensus.build_block_with_parents(2.into(), vec![config.genesis.hash]); - block.header.parents_by_level[0] = vec![1.into(), config.genesis.hash]; + let mut block = consensus.build_header_only_block_with_parents(2.into(), vec![config.genesis.hash]); + block.header.parents_by_level.set_direct_parents(vec![1.into(), config.genesis.hash]); + let BlockValidationFutures { block_task, virtual_state_task } = consensus.validate_and_insert_block(block.to_immutable()); match virtual_state_task.await { Err(RuleError::InvalidParentsRelation(a, b)) => { @@ -508,8 +511,9 @@ async fn missing_parents_test() { let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); - let mut block = consensus.build_block_with_parents(1.into(), vec![config.genesis.hash]); - block.header.parents_by_level[0] = vec![0.into()]; + let mut block = consensus.build_header_only_block_with_parents(1.into(), vec![config.genesis.hash]); + block.header.parents_by_level.set_direct_parents(vec![0.into()]); + let BlockValidationFutures { block_task, virtual_state_task } = consensus.validate_and_insert_block(block.to_immutable()); match virtual_state_task.await { Err(RuleError::MissingParents(missing)) => { @@ -533,7 +537,7 @@ async fn known_invalid_test() { let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); - let mut block = consensus.build_block_with_parents(1.into(), vec![config.genesis.hash]); + let mut block = consensus.build_header_only_block_with_parents(1.into(), vec![config.genesis.hash]); block.header.timestamp -= 1; match consensus.validate_and_insert_block(block.clone().to_immutable()).virtual_state_task.await { @@ -594,12 +598,12 @@ async fn median_time_test() { let timestamp_deviation_tolerance = test.config.timestamp_deviation_tolerance; for i in 1..(num_blocks + 1) { let parent = if i == 1 { test.config.genesis.hash } else { (i - 1).into() }; - let mut block = consensus.build_block_with_parents(i.into(), vec![parent]); + let mut block = consensus.build_header_only_block_with_parents(i.into(), vec![parent]); block.header.timestamp = test.config.genesis.timestamp + i; consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await.unwrap(); } - let mut block = 
consensus.build_block_with_parents((num_blocks + 2).into(), vec![num_blocks.into()]); + let mut block = consensus.build_header_only_block_with_parents((num_blocks + 2).into(), vec![num_blocks.into()]); // We set the timestamp to be less than the median time and expect the block to be rejected block.header.timestamp = test.config.genesis.timestamp + num_blocks - timestamp_deviation_tolerance - 1; match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { @@ -609,7 +613,7 @@ } } - let mut block = consensus.build_block_with_parents((num_blocks + 3).into(), vec![num_blocks.into()]); + let mut block = consensus.build_header_only_block_with_parents((num_blocks + 3).into(), vec![num_blocks.into()]); // We set the timestamp to be the exact median time and expect the block to be rejected block.header.timestamp = test.config.genesis.timestamp + num_blocks - timestamp_deviation_tolerance; match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { @@ -619,7 +623,7 @@ } } - let mut block = consensus.build_block_with_parents((num_blocks + 4).into(), vec![(num_blocks).into()]); + let mut block = consensus.build_header_only_block_with_parents((num_blocks + 4).into(), vec![(num_blocks).into()]); // We set the timestamp to be bigger than the median time and expect the block to be inserted successfully. block.header.timestamp = test.config.genesis.timestamp + timestamp_deviation_tolerance + 1; consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await.unwrap(); @@ -635,27 +639,27 @@ async fn mergeset_size_limit_test() { let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); - let num_blocks_per_chain = config.prior_mergeset_size_limit + 1; + let num_blocks_per_chain = config.mergeset_size_limit().after() + 1; let mut tip1_hash = config.genesis.hash; for i in 1..(num_blocks_per_chain + 1) { - let block = consensus.build_block_with_parents(i.into(), vec![tip1_hash]); + let block = consensus.build_header_only_block_with_parents(i.into(), vec![tip1_hash]); tip1_hash = block.header.hash; consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await.unwrap(); } let mut tip2_hash = config.genesis.hash; for i in (num_blocks_per_chain + 2)..(2 * num_blocks_per_chain + 1) { - let block = consensus.build_block_with_parents(i.into(), vec![tip2_hash]); + let block = consensus.build_header_only_block_with_parents(i.into(), vec![tip2_hash]); tip2_hash = block.header.hash; consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await.unwrap(); } - let block = consensus.build_block_with_parents((3 * num_blocks_per_chain + 1).into(), vec![tip1_hash, tip2_hash]); + let block = consensus.build_header_only_block_with_parents((3 * num_blocks_per_chain + 1).into(), vec![tip1_hash, tip2_hash]); match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::MergeSetTooBig(a, b)) => { - assert_eq!(a, config.prior_mergeset_size_limit + 1); - assert_eq!(b, config.prior_mergeset_size_limit); + assert_eq!(a, config.mergeset_size_limit().after() + 1); + assert_eq!(b, config.mergeset_size_limit().after()); } res => { panic!("Unexpected result: {res:?}") @@ -668,7 +672,7 @@ #[allow(non_snake_case)] #[derive(Deserialize, Debug)] struct RPCBlock { - Header: RPCBlockHeader, + Header: RpcHeader, Transactions: Vec<RPCTransaction>, VerboseData: RPCBlockVerboseData, } @@
-717,7 +721,7 @@ struct RPCOutpoint { #[allow(non_snake_case)] #[derive(Deserialize, Debug)] -struct RPCBlockHeader { +struct RpcHeader { Version: u16, Parents: Vec<RPCBlockLevelParents>, HashMerkleRoot: String, @@ -981,7 +985,7 @@ async fn json_test(file_path: &str, concurrency: bool) { let proof_lines = gzip_file_lines(&main_path.join("proof.json.gz")); let proof = proof_lines .map(|line| { - let rpc_headers: Vec<RPCBlockHeader> = serde_json::from_str(&line).unwrap(); + let rpc_headers: Vec<RpcHeader> = serde_json::from_str(&line).unwrap(); rpc_headers.iter().map(|rh| Arc::new(rpc_header_to_header(rh))).collect_vec() }) .collect_vec(); @@ -1122,14 +1126,16 @@ fn submit_body_chunk( futures } -fn rpc_header_to_header(rpc_header: &RPCBlockHeader) -> Header { +fn rpc_header_to_header(rpc_header: &RpcHeader) -> Header { Header::new_finalized( rpc_header.Version, rpc_header .Parents .iter() - .map(|item| item.ParentHashes.iter().map(|parent| Hash::from_str(parent).unwrap()).collect()) - .collect(), + .map(|item| item.ParentHashes.iter().map(|parent| Hash::from_str(parent).unwrap()).collect::<Vec<_>>()) + .collect::<Vec<Vec<_>>>() + .try_into() + .unwrap(), Hash::from_str(&rpc_header.HashMerkleRoot).unwrap(), Hash::from_str(&rpc_header.AcceptedIDMerkleRoot).unwrap(), Hash::from_str(&rpc_header.UTXOCommitment).unwrap(), @@ -1280,7 +1286,7 @@ async fn bounded_merge_depth_test() { let mut selected_chain = vec![config.genesis.hash]; for i in 1..(config.prior_merge_depth + 3) { let hash: Hash = (i + 1).into(); - consensus.add_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); + consensus.add_header_only_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); selected_chain.push(hash); } @@ -1288,19 +1294,22 @@ let mut block_chain_2 = vec![config.genesis.hash]; for i in 1..(config.prior_merge_depth + 2) { let hash: Hash = (i + config.prior_merge_depth + 3).into(); - consensus.add_block_with_parents(hash, vec![*block_chain_2.last().unwrap()]).await.unwrap(); + consensus.add_header_only_block_with_parents(hash, vec![*block_chain_2.last().unwrap()]).await.unwrap(); block_chain_2.push(hash); } // The merge depth root belongs to selected_chain, and block_chain_2[1] is red and doesn't have it in its past, and is not in the // past of any kosherizing block, so we expect the next block to be rejected. - match consensus.add_block_with_parents(100.into(), vec![block_chain_2[1], *selected_chain.last().unwrap()]).await { + match consensus.add_header_only_block_with_parents(100.into(), vec![block_chain_2[1], *selected_chain.last().unwrap()]).await { Err(RuleError::ViolatingBoundedMergeDepth) => {} res => panic!("Unexpected result: {res:?}"), } // A block that points to tip of both chains will be rejected for similar reasons (since block_chain_2 tip is also red). - match consensus.add_block_with_parents(101.into(), vec![*block_chain_2.last().unwrap(), *selected_chain.last().unwrap()]).await { + match consensus + .add_header_only_block_with_parents(101.into(), vec![*block_chain_2.last().unwrap(), *selected_chain.last().unwrap()]) + .await + { Err(RuleError::ViolatingBoundedMergeDepth) => {} res => panic!("Unexpected result: {res:?}"), } @@ -1308,7 +1317,7 @@ let kosherizing_hash: Hash = 102.into(); // This will pass since now genesis is the mutual merge depth root.
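// (with genesis as the mutual merge depth root, every merged block trivially remains within the merge depth bound)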
consensus - .add_block_with_parents( + .add_header_only_block_with_parents( kosherizing_hash, vec![block_chain_2[block_chain_2.len() - 3], selected_chain[selected_chain.len() - 3]], ) @@ -1318,25 +1327,28 @@ let point_at_blue_kosherizing: Hash = 103.into(); // We expect it to pass because all of the reds are in the past of a blue kosherizing block. consensus - .add_block_with_parents(point_at_blue_kosherizing, vec![kosherizing_hash, *selected_chain.last().unwrap()]) + .add_header_only_block_with_parents(point_at_blue_kosherizing, vec![kosherizing_hash, *selected_chain.last().unwrap()]) .await .unwrap(); // We extend the selected chain until kosherizing_hash will be red from the virtual POV. for i in 0..config.ghostdag_k().before() { let hash = Hash::from_u64_word((i + 1) as u64 * 1000); - consensus.add_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); + consensus.add_header_only_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); selected_chain.push(hash); } // Since kosherizing_hash is now red, we expect this to fail. - match consensus.add_block_with_parents(1200.into(), vec![kosherizing_hash, *selected_chain.last().unwrap()]).await { + match consensus.add_header_only_block_with_parents(1200.into(), vec![kosherizing_hash, *selected_chain.last().unwrap()]).await { Err(RuleError::ViolatingBoundedMergeDepth) => {} res => panic!("Unexpected result: {res:?}"), } // point_at_blue_kosherizing is kosherizing kosherizing_hash, so this should pass. - consensus.add_block_with_parents(1201.into(), vec![point_at_blue_kosherizing, *selected_chain.last().unwrap()]).await.unwrap(); + consensus + .add_header_only_block_with_parents(1201.into(), vec![point_at_blue_kosherizing, *selected_chain.last().unwrap()]) + .await + .unwrap(); consensus.shutdown(wait_handles); } @@ -1393,7 +1405,7 @@ async fn difficulty_test() { const PMT_SAMPLED_WINDOW_SIZE: u64 = 13; const HIGH_BPS_SAMPLED_WINDOW_SIZE: u64 = 12; const HIGH_BPS: u64 = 4; - let tests = vec![ + let tests = [ Test { name: "MAINNET with full window", enabled: true, @@ -1463,7 +1475,7 @@ let fake_genesis = Header { hash: test.config.genesis.hash, version: 0, - parents_by_level: vec![], + parents_by_level: Vec::<Vec<Hash>>::new().try_into().unwrap(), hash_merkle_root: 0.into(), accepted_id_merkle_root: 0.into(), utxo_commitment: 0.into(), @@ -1775,28 +1787,27 @@ async fn staging_consensus_test() { core.join(joins); } -/// Tests the KIP-10 transaction introspection opcode activation by verifying that: -/// 1. Transactions using these opcodes are rejected before the activation DAA score -/// 2. The same transactions are accepted at and after the activation score -/// Uses OpInputSpk opcode as an example +/// Tests the KIP-10 transaction introspection opcodes by verifying that: +/// 1. Transactions using these opcodes are accepted from genesis (DAA score 0) +/// 2. The introspection opcodes (like OpInputSpk) function correctly in transaction validation +/// +/// KIP-10 is now enabled by default from the genesis block, allowing scripts to access +/// transaction data through introspection opcodes for advanced smart contract capabilities.
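+/// (Activation is modeled with ForkActivation::always(), i.e. the Crescendo rule set, including KIP-10, applies from DAA score 0.)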
#[tokio::test] -async fn run_kip10_activation_test() { +async fn kip10_test() { use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; use kaspa_txscript::opcodes::codes::{Op0, OpTxInputSpk}; use kaspa_txscript::pay_to_script_hash_script; use kaspa_txscript::script_builder::ScriptBuilder; - // KIP-10 activates at DAA score 3 in this test - const KIP10_ACTIVATION_DAA_SCORE: u64 = 3; - init_allocator_with_default_settings(); // Create P2SH script that attempts to use OpInputSpk - this will be our test subject // The script should fail before KIP-10 activation and succeed after let redeem_script = ScriptBuilder::new() - .add_op(Op0).unwrap() // Push 0 for input index - .add_op(OpTxInputSpk).unwrap() // Get the input's script pubkey - .drain(); + .add_op(Op0).unwrap() // Push 0 for input index + .add_op(OpTxInputSpk).unwrap() // Get the input's script pubkey + .drain(); let spk = pay_to_script_hash_script(&redeem_script); // Set up initial UTXO with our test script @@ -1818,7 +1829,7 @@ async fn run_kip10_activation_test() { cfg.params.genesis.hash = genesis_header.hash; }) .edit_consensus_params(|p| { - p.crescendo_activation = ForkActivation::new(KIP10_ACTIVATION_DAA_SCORE); + p.crescendo_activation = ForkActivation::always(); }) .build(); @@ -1828,14 +1839,8 @@ async fn run_kip10_activation_test() { consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap(); consensus.init(); - // Build blockchain up to one block before activation - let mut index = 0; - for _ in 0..KIP10_ACTIVATION_DAA_SCORE - 1 { - let parent = if index == 0 { config.genesis.hash } else { index.into() }; - consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![parent], vec![]).await.unwrap(); - index += 1; - } - assert_eq!(consensus.get_virtual_daa_score(), index); + // Start from genesis block + let index = 0; // Create transaction that attempts to use the KIP-10 opcode let mut tx = Transaction::new( @@ -1859,30 +1864,8 @@ async fn run_kip10_activation_test() { // This triggers storage mass population let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()); let tx = tx.tx.unwrap_or_clone(); - - // Test 1: Build empty block, then manually insert invalid tx and verify consensus rejects it - { - let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); - - // First build block without transactions - let mut block = - consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); - - // Insert our test transaction and recalculate block hashes - block.transactions.push(tx.clone()); - block.header.hash_merkle_root = - calc_hash_merkle_root(block.transactions.iter(), config.crescendo_activation.is_active(block.header.daa_score)); - let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; - assert!(matches!(block_status, Ok(BlockStatus::StatusDisqualifiedFromChain))); - assert_eq!(consensus.lkg_virtual_state.load().daa_score, 2); - index += 1; - } - // // Add one more block to reach activation score - consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap(); - index += 1; - - // Test 2: Verify the same transaction is accepted after activation - let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx.clone()]).await; + // Verify the transaction with KIP-10 opcodes is accepted + let status = 
consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![config.genesis.hash], vec![tx.clone()]).await; assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); } @@ -1941,12 +1924,9 @@ async fn payload_test() { } #[tokio::test] -async fn payload_activation_test() { +async fn payload_for_native_tx_test() { use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; - // Set payload activation at DAA score 3 for this test - const PAYLOAD_ACTIVATION_DAA_SCORE: u64 = 3; - init_allocator_with_default_settings(); // Create initial UTXO to fund our test transactions @@ -1972,9 +1952,6 @@ async fn payload_activation_test() { let genesis_header: Header = (&cfg.params.genesis).into(); cfg.params.genesis.hash = genesis_header.hash; }) - .edit_consensus_params(|p| { - p.crescendo_activation = ForkActivation::new(PAYLOAD_ACTIVATION_DAA_SCORE); - }) .build(); let consensus = TestConsensus::new(&config); @@ -1983,15 +1960,6 @@ async fn payload_activation_test() { consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap(); consensus.init(); - // Build blockchain up to one block before activation - let mut index = 0; - for _ in 0..PAYLOAD_ACTIVATION_DAA_SCORE - 1 { - let parent = if index == 0 { config.genesis.hash } else { index.into() }; - consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![parent], vec![]).await.unwrap(); - index += 1; - } - assert_eq!(consensus.get_virtual_daa_score(), index); - // Create transaction with large payload let large_payload = vec![0u8; (config.params.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize]; let mut tx_with_payload = Transaction::new( @@ -2011,45 +1979,30 @@ async fn payload_activation_test() { tx_with_payload.finalize(); let tx_id = tx_with_payload.id(); - // Test 1: Build empty block, then manually insert invalid tx and verify consensus rejects it - { - let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); - - // First build block without transactions - let mut block = - consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); - - let mut tx = MutableTransaction::from_tx(tx_with_payload.clone()); - // This triggers storage mass population - let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()); - - // Insert our test transaction and recalculate block hashes - block.transactions.push(tx.tx.unwrap_or_clone()); - - block.header.hash_merkle_root = - calc_hash_merkle_root(block.transactions.iter(), config.crescendo_activation.is_active(block.header.daa_score)); - let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; - assert!(matches!(block_status, Err(RuleError::TxInContextFailed(tx, TxRuleError::NonCoinbaseTxHasPayload)) if tx == tx_id)); - assert_eq!(consensus.lkg_virtual_state.load().daa_score, PAYLOAD_ACTIVATION_DAA_SCORE - 1); - index += 1; - } - - // Add one more block to reach activation score - consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap(); - index += 1; - let mut tx = MutableTransaction::from_tx(tx_with_payload.clone()); // This triggers storage mass population let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()); // Test 2: Verify the same transaction is accepted after activation - let status = - 
consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx.tx.unwrap_or_clone()]).await; + let status = consensus.add_utxo_valid_block_with_parents(1.into(), vec![config.genesis.hash], vec![tx.tx.unwrap_or_clone()]).await; assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); } +/// Tests runtime signature operation counting by verifying that: +/// 1. Transactions are validated using runtime (execution-time) sig op counting from genesis (DAA score 0) +/// 2. Only executed signature operations are counted, not all opcodes in the script +/// +/// Runtime sig op counting is now enabled by default from genesis. This allows scripts with +/// multiple signature opcodes to pass validation if only a subset are actually executed +/// (e.g., in if-branches), as opposed to static counting which counts all sig ops regardless +/// of execution path. +/// +/// Test scenario: A P2SH script with sig_op_count=1 contains 3 CheckSig opcodes, but only 1 +/// is executed due to conditional logic. With runtime counting enabled from genesis, this +/// transaction is accepted because only 1 sig op is actually executed, even though static +/// analysis would see 3 sig ops. #[tokio::test] async fn runtime_sig_op_counting_test() { use kaspa_consensus_core::{ @@ -2057,9 +2010,6 @@ async fn runtime_sig_op_counting_test() { }; use kaspa_txscript::{opcodes::codes::*, script_builder::ScriptBuilder}; - // Runtime sig op counting activates at DAA score 3 - const RUNTIME_SIGOP_ACTIVATION_DAA_SCORE: u64 = 3; - init_allocator_with_default_settings(); // Set up signing key for signature verification @@ -2106,7 +2056,7 @@ async fn runtime_sig_op_counting_test() { cfg.params.genesis.hash = genesis_header.hash; }) .edit_consensus_params(|p| { - p.crescendo_activation = ForkActivation::new(RUNTIME_SIGOP_ACTIVATION_DAA_SCORE); + p.crescendo_activation = ForkActivation::always(); }) .build(); @@ -2117,12 +2067,7 @@ async fn runtime_sig_op_counting_test() { consensus.init(); // Build blockchain up to one block before activation - let mut index = 0; - for _ in 0..RUNTIME_SIGOP_ACTIVATION_DAA_SCORE - 1 { - let parent = if index == 0 { config.genesis.hash } else { index.into() }; - consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![parent], vec![]).await.unwrap(); - index += 1; - } + let index = 0; // Create transaction spending P2SH with 1 sig op limit let mut tx = Transaction::new( @@ -2131,7 +2076,7 @@ async fn runtime_sig_op_counting_test() { initial_utxo_collection[0].0, vec![], // Placeholder for signature script 0, - 1, // Only allowing 1 sig op - important for test + 1, // Script declares 1 sig op (will execute only 1 despite having 3 CheckSig opcodes) )], vec![TransactionOutput::new(initial_utxo_collection[0].1.amount - 5000, ScriptPublicKey::from_vec(0, vec![OpTrue]))], 0, @@ -2164,24 +2109,102 @@ async fn runtime_sig_op_counting_test() { let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()); let tx = tx.tx.unwrap_or_clone(); - // Test 1: Before activation, tx should be rejected due to static sig op counting (sees 3 ops) - { - let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); - let mut block = - consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); - block.transactions.push(tx.clone()); - block.header.hash_merkle_root = - 
calc_hash_merkle_root(block.transactions.iter(), config.crescendo_activation.is_active(block.header.daa_score)); - let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; - assert!(matches!(block_status, Ok(BlockStatus::StatusDisqualifiedFromChain))); - index += 1; + // Verify transaction is accepted with runtime sig op counting from genesis + // Runtime counting sees only 1 executed sig op (in the IF branch), not the 3 total CheckSig opcodes + let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![config.genesis.hash], vec![tx]).await; + assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); +} + +// Checks that pruning works and that we do not allow attaching a body to a pruned block +#[tokio::test] +async fn pruning_test() { + init_allocator_with_default_settings(); + let config = ConfigBuilder::new(MAINNET_PARAMS) + .skip_proof_of_work() + .edit_consensus_params(|p| { + p.prior_finality_depth = 2; + p.prior_mergeset_size_limit = 2; + p.prior_ghostdag_k = 2; + p.prior_merge_depth = 3; + p.prior_pruning_depth = 100; + + p.crescendo.finality_depth = 2; + p.crescendo.mergeset_size_limit = 2; + p.crescendo.ghostdag_k = 2; + p.crescendo.merge_depth = 3; + p.crescendo.pruning_depth = 100; + }) + .build(); + + assert!((config.prior_ghostdag_k as u64) < config.prior_merge_depth, "K must be smaller than merge depth for this test to run"); + + let consensus = TestConsensus::new(&config); + let wait_handles = consensus.init(); + + let mut selected_chain = vec![config.genesis.hash]; + + let genesis_child = 1.into(); + consensus.add_empty_utxo_valid_block_with_parents(genesis_child, vec![*selected_chain.last().unwrap()]).await.unwrap(); + selected_chain.push(genesis_child); + let genesis_child_block = consensus.get_block(genesis_child).unwrap(); + + let genesis_child_child = 2.into(); + consensus.add_empty_utxo_valid_block_with_parents(genesis_child_child, vec![*selected_chain.last().unwrap()]).await.unwrap(); + selected_chain.push(genesis_child_child); + let genesis_child_child_block = consensus.get_block(genesis_child_child).unwrap(); + + for i in 3..config.prior_pruning_depth + config.prior_finality_depth + 100 { + let hash: Hash = i.into(); + consensus.add_empty_utxo_valid_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); + selected_chain.push(hash); } - // Add block to reach activation - consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap(); - index += 1; + // Waiting for genesis_child_child to get pruned + let start = Instant::now(); + while consensus.get_block_status(genesis_child_child).unwrap() == BlockStatus::StatusUTXOValid { + if start.elapsed() > Duration::from_secs(10) { + panic!("Timed out waiting 10 seconds for pruning to occur"); + } - // Test 2: After activation, tx should be accepted as runtime counting only sees 1 executed sig op - let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx]).await; - assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); + tokio::time::sleep(Duration::from_millis(100)).await; + } + + // Since pruning happens topologically from older to later blocks, we expect both blocks to be pruned at this point + assert_match!( + consensus.validate_and_insert_block(genesis_child_block).virtual_state_task.await, + Err(RuleError::MissingParents(_)) + ); + + assert_match!( + 
+        consensus.validate_and_insert_block(genesis_child_child_block).virtual_state_task.await,
+        Err(RuleError::MissingParents(_))
+    );
+
+    consensus.shutdown(wait_handles);
+}
+
+// Checks that consensus can handle blocks from multiple levels
+#[tokio::test]
+async fn indirect_parents_test() {
+    init_allocator_with_default_settings();
+    let config = ConfigBuilder::new(DEVNET_PARAMS).skip_proof_of_work().build();
+    let consensus = TestConsensus::new(&config);
+    let wait_handles = consensus.init();
+
+    let mut level_3_count = 3;
+    let mut selected_chain = vec![config.genesis.hash];
+    for i in 1.. {
+        let hash: Hash = i.into();
+        consensus.add_header_only_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap();
+        selected_chain.push(hash);
+        if consensus.get_header(*selected_chain.last().unwrap()).unwrap().parents_by_level.expanded_len() >= 3 {
+            level_3_count += 1;
+        }
+
+        if level_3_count > 5 {
+            break;
+        }
+    }
+
+    consensus.shutdown(wait_handles);
+}
diff --git a/testing/integration/src/consensus_pipeline_tests.rs b/testing/integration/src/consensus_pipeline_tests.rs
index a6dc387133..7bb68d39e0 100644
--- a/testing/integration/src/consensus_pipeline_tests.rs
+++ b/testing/integration/src/consensus_pipeline_tests.rs
@@ -34,7 +34,7 @@ async fn test_concurrent_pipeline() {
     for (hash, parents) in blocks {
         // Submit to consensus twice to make sure duplicates are handled
-        let b: kaspa_consensus_core::block::Block = consensus.build_block_with_parents(hash, parents).to_immutable();
+        let b: kaspa_consensus_core::block::Block = consensus.build_header_only_block_with_parents(hash, parents).to_immutable();
         let results = join!(
             consensus.validate_and_insert_block(b.clone()).virtual_state_task,
             consensus.validate_and_insert_block(b).virtual_state_task
diff --git a/testing/integration/src/daemon_integration_tests.rs b/testing/integration/src/daemon_integration_tests.rs
index 7959c11677..9ada16cbd6 100644
--- a/testing/integration/src/daemon_integration_tests.rs
+++ b/testing/integration/src/daemon_integration_tests.rs
@@ -78,7 +78,7 @@ async fn daemon_mining_test() {
         .get_block_template(Address::new(kaspad1.network.into(), kaspa_addresses::Version::PubKey, &[0; 32]), vec![])
         .await
         .unwrap();
-    let header: Header = (&template.block.header).into();
+    let header: Header = (&template.block.header).try_into().unwrap();
     last_block_hash = Some(header.hash);
     rpc_client1.submit_block(template.block, false).await.unwrap();
@@ -189,7 +189,7 @@ async fn daemon_utxos_propagation_test() {
     let mut last_block_hash = None;
     for i in 0..initial_blocks {
         let template = rpc_client1.get_block_template(miner_address.clone(), vec![]).await.unwrap();
-        let header: Header = (&template.block.header).into();
+        let header: Header = (&template.block.header).try_into().unwrap();
         last_block_hash = Some(header.hash);
         rpc_client1.submit_block(template.block, false).await.unwrap();
diff --git a/testing/integration/src/lib.rs b/testing/integration/src/lib.rs
index a5be1b23d9..158b6ab890 100644
--- a/testing/integration/src/lib.rs
+++ b/testing/integration/src/lib.rs
@@ -23,5 +23,9 @@ pub mod mempool_benchmarks;
 #[cfg(feature = "devnet-prealloc")]
 pub mod subscribe_benchmarks;
 
+#[cfg(test)]
+#[cfg(feature = "devnet-prealloc")]
+pub mod rpc_perf_benchmarks;
+
 #[cfg(test)]
 pub mod rpc_tests;
diff --git a/testing/integration/src/rpc_perf_benchmarks.rs b/testing/integration/src/rpc_perf_benchmarks.rs
new file mode 100644
index 0000000000..b057a9fa7d
--- /dev/null
+++ b/testing/integration/src/rpc_perf_benchmarks.rs
@@ -0,0 +1,176 @@
+use crate::{
+    common::{
+        args::ArgsBuilder,
+        daemon::{ClientManager, Daemon},
+        utils::{generate_tx_dag, verify_tx_dag, CONTRACT_FACTOR},
+    },
+    tasks::{block::group::MinerGroupTask, daemon::DaemonTask, tx::group::TxSenderGroupTask, Stopper, TasksRunner},
+};
+use futures_util::future::join_all;
+use kaspa_addresses::Address;
+use kaspa_alloc::init_allocator_with_default_settings;
+use kaspa_consensus::params::Params;
+use kaspa_consensus_core::network::NetworkType;
+use kaspa_core::info;
+use kaspa_rpc_core::api::rpc::RpcApi;
+use kaspa_txscript::pay_to_address_script;
+use rand::thread_rng;
+use rand::Rng;
+use std::{
+    sync::Arc,
+    time::{Duration, Instant},
+};
+
+const SUBMIT_BLOCK_CLIENTS: usize = 2;
+const BLOCK_COUNT: usize = 100_000;
+
+// Constants for transaction generation and mempool pressure
+const MEMPOOL_TARGET: u64 = 10_000;
+const TX_COUNT: usize = 200_000;
+const TX_LEVEL_WIDTH: usize = 5_000;
+const TPS_PRESSURE: u64 = 40;
+const PREALLOC_AMOUNT_SOMPI: u64 = 1;
+const SUBMIT_TX_CLIENTS: usize = 2;
+
+/// `cargo test --release --package kaspa-testing-integration --lib --features devnet-prealloc -- rpc_perf_benchmarks::bench_rpc_high_load --exact --nocapture --ignored`
+#[tokio::test]
+#[ignore = "bmk"]
+#[cfg(feature = "devnet-prealloc")]
+async fn bench_rpc_high_load() {
+    use tokio::time::sleep;
+
+    init_allocator_with_default_settings();
+    kaspa_core::log::try_init_logger("info,kaspa_core::time=debug,kaspa_mining::monitor=debug");
+    kaspa_core::panic::configure_panic();
+
+    // Setup for pre-allocated UTXOs and transaction generation
+    let (prealloc_sk, prealloc_pk) = secp256k1::generate_keypair(&mut thread_rng());
+    let prealloc_address =
+        Address::new(NetworkType::Simnet.into(), kaspa_addresses::Version::PubKey, &prealloc_pk.x_only_public_key().0.serialize());
+    let schnorr_key = secp256k1::Keypair::from_secret_key(secp256k1::SECP256K1, &prealloc_sk);
+    let spk = pay_to_address_script(&prealloc_address);
+
+    let args = ArgsBuilder::simnet(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR, PREALLOC_AMOUNT_SOMPI) // Use simnet with prealloc args
+        .prealloc_address(prealloc_address.clone()) // Set prealloc address
+        .apply_args(Daemon::fill_args_with_random_ports)
+        .utxoindex(true) // Ensure utxoindex is enabled for transaction validation
+        .build();
+
+    let network = args.network();
+    let params: Params = network.into();
+
+    // Generate UTXOs from args
+    let utxoset = args.generate_prealloc_utxos(args.num_prealloc_utxos.unwrap());
+    let txs = generate_tx_dag(utxoset.clone(), schnorr_key, spk, TX_COUNT / TX_LEVEL_WIDTH, TX_LEVEL_WIDTH);
+    verify_tx_dag(&utxoset, &txs);
+    info!("Generated overall {} txs for mempool pressure.", txs.len());
+
+    let client_manager = Arc::new(ClientManager::new(args));
+
+    let mut tasks = TasksRunner::new(Some(DaemonTask::build(client_manager.clone()))).launch().await;
+
+    // Continuous mining
+    tasks = tasks.task(
+        MinerGroupTask::build(
+            network,
+            client_manager.clone(),
+            SUBMIT_BLOCK_CLIENTS,
+            params.bps().upper_bound(),
+            BLOCK_COUNT,
+            Stopper::Signal,
+        )
+        .await,
+    );
+
+    // Transaction generator/simulator
+    tasks = tasks.task(
+        TxSenderGroupTask::build(client_manager.clone(), SUBMIT_TX_CLIENTS, false, txs, TPS_PRESSURE, MEMPOOL_TARGET, Stopper::Signal)
+            .await,
+    );
+
+    tasks.run().await;
+
+    // todo: ideally rely on the number of blocks required (likely ~20 here) instead of an arbitrary wait
+    sleep(Duration::from_secs(2)).await;
+
+    let main_client = client_manager.new_client().await;
+    let dag_info = main_client.get_block_dag_info().await.unwrap();
+    let sink = dag_info.sink;
+
+    sleep(Duration::from_secs(2)).await;
+
+    let initial_virtual_chain = main_client.get_virtual_chain_from_block(sink, false, None).await.unwrap().added_chain_block_hashes;
+
+    // todo: this waits a fixed number of seconds to accumulate "enough" tx and block data;
+    // it could instead target specific tx and block counts, avoiding an arbitrary wait (which may randomize the results)
+    info!("Waiting 20 seconds before starting...");
+    sleep(Duration::from_secs(20)).await;
+
+    // High load RPC simulation
+    info!("Starting high load RPC simulation...");
+
+    let num_clients = 20;
+    let num_requests_per_client = 20;
+
+    let start_total = Instant::now();
+
+    let mut handles = Vec::new();
+    for _ in 0..num_clients {
+        let client = client_manager.new_client().await;
+        let thread_virtual_chain = initial_virtual_chain.clone();
+        let handle = tokio::spawn(async move {
+            let mut latencies = Vec::with_capacity(num_requests_per_client);
+            // get a random start hash for this client
+            let index = rand::thread_rng().gen_range(0..(thread_virtual_chain.len() - 1));
+
+            for _ in 0..num_requests_per_client {
+                let hash = thread_virtual_chain.get(index).unwrap();
+
+                let start = Instant::now();
+                client.get_virtual_chain_from_block_v2(*hash, Some(kaspa_rpc_core::RpcDataVerbosityLevel::High), None).await.unwrap();
+
+                latencies.push(start.elapsed());
+            }
+            client.disconnect().await.unwrap();
+            latencies
+        });
+        handles.push(handle);
+    }
+
+    let results = join_all(handles).await;
+    let total_duration = start_total.elapsed();
+
+    let mut all_latencies: Vec<_> = results.into_iter().flat_map(|res| res.unwrap()).collect();
+    all_latencies.sort_unstable();
+
+    let total_requests = all_latencies.len();
+    if total_requests == 0 {
+        info!("No requests were made.");
+    } else {
+        let rps = total_requests as f64 / total_duration.as_secs_f64();
+        let avg_latency: Duration = all_latencies.iter().sum::<Duration>() / total_requests as u32;
+        let min_latency = all_latencies.first().unwrap();
+        let max_latency = all_latencies.last().unwrap();
+        let p95_index = ((0.95 * total_requests as f64).ceil() as usize).saturating_sub(1);
+        let p99_index = ((0.99 * total_requests as f64).ceil() as usize).saturating_sub(1);
+        let p95_latency = all_latencies[p95_index];
+        let p99_latency = all_latencies[p99_index];
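+        // Worked example (illustrative, not part of the original patch): with 20 clients
+        // x 20 requests there are 400 samples, so p95_index = ceil(0.95 * 400) - 1 = 379
+        // and p99_index = ceil(0.99 * 400) - 1 = 395, i.e. the 380th and 396th smallest latencies.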
+
+        info!("Finished high load simulation.");
+        info!("Total requests: {}", total_requests);
+        info!("Total duration: {:?}", total_duration);
+        info!("Requests per second: {:.2}", rps);
+        info!("--------------------");
+        info!("Latency metrics:");
+        info!("  Min: {:?}", min_latency);
+        info!("  Max: {:?}", max_latency);
+        info!("  Avg: {:?}", avg_latency);
+        info!("  p95: {:?}", p95_latency);
+        info!("  p99: {:?}", p99_latency);
+    }
+
+    tasks.stop();
+
+    // Wait for tasks to shutdown
+    tasks.join().await;
+}
diff --git a/testing/integration/src/rpc_tests.rs b/testing/integration/src/rpc_tests.rs
index 414dbca2ad..67a898a1fc 100644
--- a/testing/integration/src/rpc_tests.rs
+++ b/testing/integration/src/rpc_tests.rs
@@ -117,7 +117,7 @@ async fn sanity_test() {
     assert!(!is_synced);
 
     // Compute the expected block hash for the received block
-    let header: Header = (&block.header).into();
+    let header: Header = (&block.header).try_into().unwrap();
     let block_hash = header.hash;
 
     // Submit the template (no mining, in simnet PoW is skipped)
@@ -689,6 +689,25 @@ async fn sanity_test() {
                 })
             }
 
+            KaspadPayloadOps::GetVirtualChainFromBlockV2 => {
+                let rpc_client = client.clone();
+                tst!(op, {
+                    let response = rpc_client
+                        .get_virtual_chain_from_block_v2_call(
+                            None,
+                            GetVirtualChainFromBlockV2Request {
+                                start_hash: SIMNET_GENESIS.hash,
+                                data_verbosity_level: None,
+                                min_confirmation_count: None,
+                            },
+                        )
+                        .await
+                        .unwrap();
+                    assert!(response.added_chain_block_hashes.is_empty());
+                    assert!(response.removed_chain_block_hashes.is_empty());
+                })
+            }
+
             KaspadPayloadOps::NotifyBlockAdded => {
                 let rpc_client = client.clone();
                 let id = listener_id;
diff --git a/utils/alloc/Cargo.toml b/utils/alloc/Cargo.toml
index 86d90c19be..d8072bf824 100644
--- a/utils/alloc/Cargo.toml
+++ b/utils/alloc/Cargo.toml
@@ -9,16 +9,14 @@ edition.workspace = true
 include.workspace = true
 repository.workspace = true
 
-# TODO: advance to version > 0.1.46 once released. The following commit includes a fix required for rust 1.87 windows linker error
 [target.'cfg(not(target_os = "macos"))'.dependencies]
-mimalloc = { git = "https://github.com/purpleprotocol/mimalloc_rust", rev = "eff21096d5ee5337ec89e2b7174f1bbb11026c70", default-features = false, features = [
+mimalloc = { version = "0.1.48", default-features = false, features = [
     'override',
 ] }
 
-# TODO: advance to version > 0.1.46 once released. The following commit includes a fix required for rust 1.87 windows linker error
 [target.'cfg(target_os = "macos")'.dependencies]
 # override is unstable in MacOS and is thus excluded
-mimalloc = { git = "https://github.com/purpleprotocol/mimalloc_rust", rev = "eff21096d5ee5337ec89e2b7174f1bbb11026c70", default-features = false }
+mimalloc = { version = "0.1.48", default-features = false }
 
 [features]
 heap = []
diff --git a/utils/src/hex.rs b/utils/src/hex.rs
index acd4458881..ec8f39e284 100644
--- a/utils/src/hex.rs
+++ b/utils/src/hex.rs
@@ -142,13 +142,13 @@ mod tests {
 
     #[test]
     fn test_smallvec_hex_convert() {
-        type TestVec = SmallVec<[u8; 36]>;
+        type TestVec = SmallVec<[u8; 35]>;
 
         let v: TestVec = smallvec![0x0, 0xab, 0x55, 0x30, 0x1f, 0x63];
         let k = "00ab55301f63";
 
         assert_eq!(k.len(), v.len() * 2);
         assert_eq!(k.to_string(), v.to_hex());
-        assert_eq!(SmallVec::<[u8; 36]>::from_hex(k).unwrap(), v);
+        assert_eq!(SmallVec::<[u8; 35]>::from_hex(k).unwrap(), v);
 
         assert!(TestVec::from_hex("not a number").is_err());
         assert!(TestVec::from_hex("ab01").is_ok());
diff --git a/utils/src/iter.rs b/utils/src/iter.rs
index 33216f93a6..7990b3ac9d 100644
--- a/utils/src/iter.rs
+++ b/utils/src/iter.rs
@@ -11,6 +11,47 @@ pub trait IterExtensions: Iterator {
     {
         ReusableIterFormat::new(self.format(sep))
     }
+
+    /// Provides a run-length-encoding iterator that yields the cumulative count
+    /// of elements seen so far, along with the value of the element. Useful for creating
+    /// compressed representations of sequences with repeating elements
+    fn rle_cumulative(self) -> impl Iterator<Item = (usize, Self::Item)>
+    where
+        Self: Sized,
+        Self::Item: PartialEq,
+    {
+        let mut cumulative: usize = 0;
+        self.dedup_with_count().map(move |(count, value)| {
+            cumulative += count;
+            (cumulative, value)
+        })
+    }
+}
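+
+// Illustrative round-trip sketch (not part of the original patch): encodes a short
+// repetitive sequence with rle_cumulative and restores it with expand_rle (defined below).
+#[cfg(test)]
+mod rle_example {
+    use super::*;
+
+    #[test]
+    fn rle_round_trip_example() {
+        let seq = vec![1, 1, 2, 2, 2, 3];
+        // dedup_with_count yields (2, 1), (3, 2), (1, 3); accumulating the counts
+        // produces the cumulative pairs asserted below
+        let encoded: Vec<(usize, i32)> = seq.iter().copied().rle_cumulative().collect();
+        assert_eq!(encoded, vec![(2, 1), (5, 2), (6, 3)]);
+        let decoded: Vec<i32> = encoded.into_iter().expand_rle().collect();
+        assert_eq!(decoded, seq);
+    }
+}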
+
+pub trait IterExtensionsRle<T>: Iterator<Item = (usize, T)>
+where
+    T: Clone,
+{
+    /// Expands a run-length encoded iterator back into its original sequence of elements.
+    /// It takes an iterator of (cumulative_count, item) tuples and yields the repeated items
+    fn expand_rle(self) -> impl Iterator<Item = T>
+    where
+        Self: Sized,
+    {
+        self.scan(0usize, |prev, (cum, item)| {
+            let count = cum.checked_sub(*prev).filter(|&c| c > 0).expect("cumulative counts must be strictly increasing");
+            *prev = cum;
+            Some((count, item))
+        })
+        .flat_map(|(count, item)| std::iter::repeat_n(item, count))
+    }
+}
+
+impl<I, T> IterExtensionsRle<T> for I
+where
+    I: Iterator<Item = (usize, T)>,
+    T: Clone,
+{
+}
 
 impl<T> IterExtensions for T where T: Iterator {}
diff --git a/utils/src/sync/rwlock.rs b/utils/src/sync/rwlock.rs
index f67ad4f0d1..bf2c1cd2ba 100644
--- a/utils/src/sync/rwlock.rs
+++ b/utils/src/sync/rwlock.rs
@@ -119,7 +119,6 @@ mod tests {
     use tokio::{sync::oneshot, time::sleep, time::timeout};
 
     const ACQUIRE_TIMEOUT: Duration = Duration::from_secs(5);
-
     #[tokio::test]
     async fn test_writer_reentrance() {
         for i in 0..16 {
diff --git a/wallet/core/src/account/mod.rs b/wallet/core/src/account/mod.rs
index 37c7ca5998..afcfe257e2 100644
--- a/wallet/core/src/account/mod.rs
+++ b/wallet/core/src/account/mod.rs
@@ -785,6 +785,18 @@ pub trait DerivationCapableAccount: Account {
             self.wallet().notify(Events::AccountUpdate { account_descriptor: self.descriptor()? }).await?;
         }
 
+        // update address manager with the last used index
+        if update_address_indexes {
+            receive_address_manager.set_index(last_receive_address_index)?;
+            change_address_manager.set_index(last_change_address_index)?;
+
+            let metadata = self.metadata()?.expect("derivation accounts must provide metadata");
+            let store = self.wallet().store().as_account_store()?;
+            store.update_metadata(vec![metadata]).await?;
+            self.clone().scan(None, None).await?;
+            self.wallet().notify(Events::AccountUpdate { account_descriptor: self.descriptor()? }).await?;
+        }
+
         if let Ok(legacy_account) = self.as_legacy_account() {
             legacy_account.clear_private_context().await?;
         }
diff --git a/wallet/core/src/api/message.rs b/wallet/core/src/api/message.rs
index b12a383952..88a7f67964 100644
--- a/wallet/core/src/api/message.rs
+++ b/wallet/core/src/api/message.rs
@@ -577,7 +577,7 @@ pub struct UtxoEntryWrapper {
     pub address: Option<Address>,
     pub outpoint: TransactionOutpointWrapper,
     pub amount: u64,
-    pub script_public_key: ScriptPublicKey,
+    pub script_public_key: ScriptPublicKeyWrapper,
     pub block_daa_score: u64,
     pub is_coinbase: bool,
 }
@@ -621,13 +621,32 @@ impl From<TransactionOutpointWrapper> for TransactionOutpoint {
     }
 }
 
+/// Wire-friendly form of a script public key: hex-encoded script bytes plus version.
+#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ScriptPublicKeyWrapper {
+    pub script_public_key: String,
+    pub version: u16,
+}
+
+impl From<ScriptPublicKey> for ScriptPublicKeyWrapper {
+    fn from(script_pub_key: ScriptPublicKey) -> Self {
+        Self { script_public_key: script_pub_key.script().to_hex(), version: script_pub_key.version }
+    }
+}
+
+impl From<ScriptPublicKeyWrapper> for ScriptPublicKey {
+    fn from(script_pub_key: ScriptPublicKeyWrapper) -> Self {
+        Self::from_vec(script_pub_key.version, Vec::from_hex(&script_pub_key.script_public_key).unwrap())
+    }
+}
+
 impl From<UtxoEntryWrapper> for UtxoEntry {
     fn from(entry: UtxoEntryWrapper) -> Self {
         Self {
             address: entry.address,
             outpoint: entry.outpoint.into(),
             amount: entry.amount,
-            script_public_key: entry.script_public_key,
+            script_public_key: entry.script_public_key.into(),
             block_daa_score: entry.block_daa_score,
             is_coinbase: entry.is_coinbase,
         }
@@ -640,7 +659,7 @@ impl From<UtxoEntry> for UtxoEntryWrapper {
             address: entry.address,
             outpoint: entry.outpoint.into(),
             amount: entry.amount,
-            script_public_key: entry.script_public_key,
+            script_public_key: entry.script_public_key.into(),
             block_daa_score: entry.block_daa_score,
             is_coinbase: entry.is_coinbase,
         }
@@ -688,8 +707,8 @@ pub struct AccountsEstimateResponse {
 #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct FeeRateEstimateBucket {
-    feerate: f64,
-    seconds: f64,
+    pub feerate: f64,
+    pub seconds: f64,
 }
 
 impl From for FeeRateEstimateBucket {
diff --git a/wallet/core/src/tests/rpc_core_mock.rs b/wallet/core/src/tests/rpc_core_mock.rs
index 529fffffe8..2382929cac 100644
--- a/wallet/core/src/tests/rpc_core_mock.rs
+++ b/wallet/core/src/tests/rpc_core_mock.rs
@@ -387,6 +387,14 @@ impl RpcApi for RpcCoreMock {
         Err(RpcError::NotImplemented)
     }
 
+    async fn get_virtual_chain_from_block_v2_call(
+        &self,
+        _connection: Option<&DynRpcConnection>,
+        _request: GetVirtualChainFromBlockV2Request,
+    ) -> RpcResult<GetVirtualChainFromBlockV2Response> {
+        Err(RpcError::NotImplemented)
+    }
+
     // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     // Notification API
diff --git a/wallet/native/.gitignore b/wallet/daemon/.gitignore
similarity index 100%
rename from wallet/native/.gitignore
rename to wallet/daemon/.gitignore
diff --git a/wallet/daemon/Cargo.toml b/wallet/daemon/Cargo.toml
new file mode 100644
index 0000000000..be2d5709e1
--- /dev/null
+++ b/wallet/daemon/Cargo.toml
@@ -0,0 +1,33 @@
+[package]
+name = "kaspa-wallet-daemon"
+description = "Kaspa wallet daemon"
+rust-version.workspace = true
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+include.workspace = true
+license.workspace = true
+repository.workspace = true
+
+[features]
+default = []
+
+[dependencies]
+async-std.workspace = true
+async-trait.workspace = true
+kaspa-core.workspace = true
+kaspa-wallet-core.workspace = true
+kaspa-wallet-grpc-core.workspace = true
+kaspa-wallet-grpc-server.workspace = true
+kaspa-consensus-core.workspace = true
+log.workspace = true
+tonic.workspace = true
+tokio.workspace = true
+workflow-core.workspace = true
+workflow-log.workspace = true
+workflow-terminal.workspace = true
+clap.workspace = true
+futures-util.workspace = true
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/wallet/daemon/src/args.rs b/wallet/daemon/src/args.rs
new file mode 100644
index 0000000000..cfea3a558b
--- /dev/null
+++ b/wallet/daemon/src/args.rs
@@ -0,0 +1,75 @@
+use clap::{Arg, Command};
+use kaspa_core::kaspad_env::version;
+use std::net::SocketAddr;
+
+pub struct Args {
+    pub password: String,
+    pub name: Option<String>,
+    pub rpc_server: Option<String>,
+    pub network_id: Option<String>,
+    pub listen_address: SocketAddr,
+    pub ecdsa: bool,
+}
+
+impl Args {
+    pub fn parse() -> Self {
+        let matches = cli().get_matches();
+
+        Args {
+            password: matches.get_one::<String>("password").cloned().expect("Password argument is missing."),
+            name: matches.get_one::<String>("name").cloned(),
+            rpc_server: matches.get_one::<String>("rpc-server").cloned(),
+            network_id: matches.get_one::<String>("network-id").cloned(),
+            listen_address: matches
+                .get_one::<SocketAddr>("listen-address")
+                .cloned()
+                .unwrap_or_else(|| "127.0.0.1:8082".parse().unwrap()),
+            ecdsa: matches.get_one::<bool>("ecdsa").cloned().unwrap_or(false),
+        }
+    }
+}
+
+pub fn cli() -> Command {
+    Command::new("kaspawalletd")
+        .about(format!("{} (kaspawalletd) v{}", env!("CARGO_PKG_DESCRIPTION"), version()))
+        .version(env!("CARGO_PKG_VERSION"))
+        .arg(Arg::new("password").long("password").short('p').value_name("password").help("Path of password file"))
+        .arg(
+            Arg::new("name")
+                .long("name")
+                .short('n')
+                .value_name("name")
+                .value_parser(clap::value_parser!(String))
+                .help("Name of wallet"),
+        )
+        .arg(
+            Arg::new("rpc-server")
+                .long("rpc-server")
+                .short('s')
+                .value_name("rpc-server")
+                .value_parser(clap::value_parser!(String))
+                .help("Private RPC server URL"),
+        )
+        .arg(
+            Arg::new("network-id")
+                .long("network-id")
+                .value_name("network-id")
+                .value_parser(clap::value_parser!(String))
+                .help("Network id to be connected via PNN."),
+        )
+        .arg(
+            Arg::new("listen-address")
+                .long("listen-address")
+                .short('l')
+                .value_name("listen-address")
+                .value_parser(clap::value_parser!(SocketAddr))
+                .help("gRPC listening address with port."),
+        )
+        .arg(
+            Arg::new("ecdsa")
+                .long("ecdsa")
+                .value_name("ecdsa")
+                .value_parser(clap::value_parser!(bool))
+                .help("Use ecdsa for transactions broadcast"),
+        )
+}
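+
+// Illustrative invocation (a sketch; the binary name comes from this crate's
+// Cargo.toml and the flag values are hypothetical):
+//
+//   kaspa-wallet-daemon --password <PASSWORD> --name my-wallet \
+//       --network-id mainnet --listen-address 127.0.0.1:8082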
diff --git a/wallet/daemon/src/main.rs b/wallet/daemon/src/main.rs
new file mode 100644
index 0000000000..6d3d1803b9
--- /dev/null
+++ b/wallet/daemon/src/main.rs
@@ -0,0 +1,72 @@
+mod args;
+
+use crate::args::Args;
+use kaspa_consensus_core::network::NetworkId;
+use kaspa_core::{info, warn};
+use kaspa_wallet_core::{
+    api::WalletApi,
+    rpc::{ConnectOptions, ConnectStrategy, Resolver, WrpcEncoding},
+    wallet::Wallet,
+};
+use kaspa_wallet_grpc_core::kaspawalletd::kaspawalletd_server::KaspawalletdServer;
+use kaspa_wallet_grpc_server::service::Service;
+use std::{error::Error, str::FromStr, sync::Arc};
+use tokio::sync::oneshot;
+use tonic::transport::Server;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn Error>> {
+    kaspa_core::log::init_logger(None, "");
+    let args = Args::parse();
+
+    let wallet = Arc::new(Wallet::try_new(Wallet::local_store()?, Some(Resolver::default()), None)?);
+    wallet.clone().wallet_open(args.password.into(), args.name, false, false).await?;
+    info!("Wallet path: {}", wallet.store().location()?);
+
+    if let Some(wrpc_client) = wallet.try_wrpc_client().as_ref() {
+        let rpc_address = if let Some(address) = args.rpc_server {
+            address
+        } else {
+            let network_id = NetworkId::from_str(&args.network_id.expect("A network id must be specified when using PNN"))?;
+            warn!("Using PNN may expose your data to third parties. For privacy, use a private, self-hosted node.");
+            Resolver::default().get_url(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?
+        };
+
+        info!("Connecting to {}...", rpc_address);
+
+        let options = ConnectOptions {
+            block_async_connect: true,
+            strategy: ConnectStrategy::Fallback,
+            url: Some(rpc_address),
+            ..Default::default()
+        };
+        wrpc_client.connect(Some(options)).await?;
+    }
+
+    let dag_info = wallet.rpc_api().get_block_dag_info().await?;
+    wallet.set_network_id(&dag_info.network)?;
+    info!("Connected to node on {} with DAA score {}.", dag_info.network, dag_info.virtual_daa_score);
+
+    wallet.start().await?;
+
+    let (shutdown_sender, shutdown_receiver) = oneshot::channel();
+    let service = Service::with_notification_pipe_task(wallet.clone(), shutdown_sender, args.ecdsa);
+    service.wallet().accounts_activate(None).await?;
+    wallet.autoselect_default_account_if_single().await?;
+    info!("Activated account {}, synchronizing...", wallet.account().unwrap().id().short());
+
+    let server_handle = tokio::spawn(async move {
+        Server::builder()
+            .add_service(KaspawalletdServer::new(service))
+            .serve_with_shutdown(args.listen_address, async {
+                let _ = shutdown_receiver.await;
+                info!("Shutdown initiated, stopping gRPC server...");
+            })
+            .await
+            .unwrap();
+    });
+    info!("gRPC server is listening on {}:{}.", args.listen_address.ip(), args.listen_address.port());
+    server_handle.await?;
+
+    Ok(())
+}
diff --git a/wallet/grpc/core/Cargo.toml b/wallet/grpc/core/Cargo.toml
new file mode 100644
index 0000000000..9d172ec27f
--- /dev/null
+++ b/wallet/grpc/core/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "kaspa-wallet-grpc-core"
+rust-version.workspace = true
+version.workspace = true
+authors.workspace = true
+license.workspace = true
+repository.workspace = true
+edition.workspace = true
+include.workspace = true
+
+[dependencies]
+kaspa-rpc-core.workspace = true
+kaspa-wallet-core.workspace = true
+kaspa-txscript.workspace = true
+kaspa-bip32.workspace = true
+thiserror.workspace = true
+
+tonic.workspace = true
+prost.workspace = true
+
+[lints]
+workspace = true
+
+[build-dependencies]
+tonic-build = { workspace = true, features = ["prost"] }
diff --git a/wallet/grpc/core/build.rs b/wallet/grpc/core/build.rs
new file mode 100644
index 0000000000..3f3afa8491
--- /dev/null
+++ b/wallet/grpc/core/build.rs
@@ -0,0 +1,14 @@
+use std::{io::Result, path::PathBuf};
+
+fn main() -> Result<()> {
+    let proto_kaspawalletd = "./proto/kaspawalletd.proto";
+    let proto_protoserialization = "./proto/wallet.proto";
+
+    println!("cargo:rerun-if-changed={}", proto_kaspawalletd);
+    println!("cargo:rerun-if-changed={}", proto_protoserialization);
+
+    let proto_dir = PathBuf::from("./proto");
+    tonic_build::configure().build_server(true).build_client(true).compile_protos(&[proto_kaspawalletd], &[&proto_dir])?;
+    tonic_build::configure().compile_protos(&[proto_protoserialization], &[proto_dir])?;
+
+    Ok(())
+}
diff --git a/wallet/grpc/core/proto/kaspawalletd.proto b/wallet/grpc/core/proto/kaspawalletd.proto
new file mode 100644
index 0000000000..918a6d5e1b
--- /dev/null
+++ b/wallet/grpc/core/proto/kaspawalletd.proto
@@ -0,0 +1,151 @@
+syntax = "proto3";
+
+option go_package = "github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb";
+package kaspawalletd;
+
+service kaspawalletd {
+  rpc GetBalance(GetBalanceRequest) returns (GetBalanceResponse) {}
+  rpc GetExternalSpendableUTXOs(GetExternalSpendableUTXOsRequest)
+      returns (GetExternalSpendableUTXOsResponse) {}
+  rpc CreateUnsignedTransactions(CreateUnsignedTransactionsRequest)
+      returns (CreateUnsignedTransactionsResponse) {}
+  rpc ShowAddresses(ShowAddressesRequest) returns (ShowAddressesResponse) {}
+  rpc NewAddress(NewAddressRequest) returns (NewAddressResponse) {}
+  rpc Shutdown(ShutdownRequest) returns (ShutdownResponse) {}
+  rpc Broadcast(BroadcastRequest) returns (BroadcastResponse) {}
+  // BroadcastReplacement assumes that all transactions depend on the first one
+  rpc BroadcastReplacement(BroadcastRequest) returns (BroadcastResponse) {}
+  // Since SendRequest contains a password - this command should only be used on
+  // a trusted or secure connection
+  rpc Send(SendRequest) returns (SendResponse) {}
+  // Since SignRequest contains a password - this command should only be used on
+  // a trusted or secure connection
+  rpc Sign(SignRequest) returns (SignResponse) {}
+  rpc GetVersion(GetVersionRequest) returns (GetVersionResponse) {}
+  rpc BumpFee(BumpFeeRequest) returns (BumpFeeResponse) {}
+}
+
+message GetBalanceRequest {}
+
+message GetBalanceResponse {
+  uint64 available = 1;
+  uint64 pending = 2;
+  repeated AddressBalances addressBalances = 3;
+}
+
+message AddressBalances {
+  string address = 1;
+  uint64 available = 2;
+  uint64 pending = 3;
+}
+
+message FeePolicy {
+  oneof feePolicy {
+    double maxFeeRate = 6;
+    double exactFeeRate = 7;
+    uint64 maxFee = 8;
+  }
+}
+
+message CreateUnsignedTransactionsRequest {
+  string address = 1;
+  uint64 amount = 2;
+  repeated string from = 3;
+  bool useExistingChangeAddress = 4;
+  bool isSendAll = 5;
+  FeePolicy feePolicy = 6;
+}
+
+message CreateUnsignedTransactionsResponse {
+  repeated bytes unsignedTransactions = 1;
+}
+
+message ShowAddressesRequest {}
+
+message ShowAddressesResponse { repeated string address = 1; }
+
+message NewAddressRequest {}
+
+message NewAddressResponse { string address = 1; }
+
+message BroadcastRequest {
+  bool isDomain = 1;
+  repeated bytes transactions = 2;
+}
+
+message BroadcastResponse { repeated string txIds = 1; }
+
+message ShutdownRequest {}
+
+message ShutdownResponse {}
+
+message Outpoint {
+  string transactionId = 1;
+  uint32 index = 2;
+}
+
+message UtxosByAddressesEntry {
+  string address = 1;
+  Outpoint outpoint = 2;
+  UtxoEntry utxoEntry = 3;
+}
+
+message ScriptPublicKey {
+  uint32 version = 1;
+  string scriptPublicKey = 2;
+}
+
+message UtxoEntry {
+  uint64 amount = 1;
+  ScriptPublicKey scriptPublicKey = 2;
+  uint64 blockDaaScore = 3;
+  bool isCoinbase = 4;
+}
+
+message GetExternalSpendableUTXOsRequest { string address = 1; }
+
+message GetExternalSpendableUTXOsResponse {
+  repeated UtxosByAddressesEntry Entries = 1;
+}
+// Since SendRequest contains a password - this command should only be used on a
+// trusted or secure connection
+message SendRequest {
+  string toAddress = 1;
+  uint64 amount = 2;
+  string password = 3;
+  repeated string from = 4;
+  bool useExistingChangeAddress = 5;
+  bool isSendAll = 6;
+  FeePolicy feePolicy = 7;
+}
+
+message SendResponse {
+  repeated string txIds = 1;
+  repeated bytes signedTransactions = 2;
+}
+
+// Since SignRequest contains a password - this command should only be used on a
+// trusted or secure connection
+message SignRequest {
+  repeated bytes unsignedTransactions = 1;
+  string password = 2;
+}
+
+message SignResponse { repeated bytes signedTransactions = 1; }
+
+message GetVersionRequest {}
+
+message GetVersionResponse { string version = 1; }
+
+message BumpFeeRequest {
+  string password = 1;
+  repeated string from = 2;
+  bool useExistingChangeAddress = 3;
+  FeePolicy feePolicy = 4;
+  string txId = 5;
+}
+
+message BumpFeeResponse {
+  repeated bytes transactions = 1;
+  repeated string txIds = 2;
+}
diff --git a/wallet/grpc/core/proto/wallet.proto b/wallet/grpc/core/proto/wallet.proto
new file mode 100644
index 0000000000..72be9a7433
--- /dev/null
+++ b/wallet/grpc/core/proto/wallet.proto
@@ -0,0 +1,61 @@
+syntax = "proto3";
+package protoserialization;
+
+option go_package = "github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization/protoserialization";
+
+message PartiallySignedTransaction{
+  TransactionMessage tx = 1;
+  repeated PartiallySignedInput partiallySignedInputs = 2;
+}
+
+message PartiallySignedInput{
+  bytes redeemScript = 1;
+  TransactionOutput prevOutput = 2;
+  uint32 minimumSignatures = 3;
+  repeated PubKeySignaturePair pubKeySignaturePairs = 4;
+  string derivationPath = 5;
+}
+
+message PubKeySignaturePair{
+  string extendedPubKey = 1;
+  bytes signature = 2;
+}
+
+message SubnetworkId{
+  bytes bytes = 1;
+}
+
+message TransactionMessage{
+  uint32 version = 1;
+  repeated TransactionInput inputs = 2;
+  repeated TransactionOutput outputs = 3;
+  uint64 lockTime = 4;
+  SubnetworkId subnetworkId = 5;
+  uint64 gas = 6;
+  bytes payload = 8;
+}
+
+message TransactionInput{
+  Outpoint previousOutpoint = 1;
+  bytes signatureScript = 2;
+  uint64 sequence = 3;
+  uint32 sigOpCount = 4;
+}
+
+message Outpoint{
+  TransactionId transactionId = 1;
+  uint32 index = 2;
+}
+
+message TransactionId{
+  bytes bytes = 1;
+}
+message ScriptPublicKey {
+  bytes script = 1;
+  uint32 version = 2;
+}
+
+message TransactionOutput{
+  uint64 value = 1;
+  ScriptPublicKey scriptPublicKey = 2;
+}
\ No newline at end of file
diff --git a/wallet/grpc/core/src/convert.rs b/wallet/grpc/core/src/convert.rs
new file mode 100644
index 0000000000..670c8b02ea
--- /dev/null
+++ b/wallet/grpc/core/src/convert.rs
@@ -0,0 +1,320 @@
+use crate::kaspawalletd::{Outpoint, ScriptPublicKey, UtxoEntry, UtxosByAddressesEntry};
+use crate::protoserialization;
+use kaspa_bip32::secp256k1::PublicKey;
+use kaspa_bip32::{DerivationPath, Error, ExtendedKey, ExtendedPublicKey};
+use kaspa_rpc_core::{
+    RpcScriptPublicKey, RpcScriptVec, RpcSubnetworkId, RpcTransaction, RpcTransactionId, RpcTransactionInput, RpcTransactionOutpoint,
+    RpcTransactionOutput,
+};
+use kaspa_txscript::script_builder::ScriptBuilder;
+use kaspa_wallet_core::api::{ScriptPublicKeyWrapper, TransactionOutpointWrapper, UtxoEntryWrapper};
+use kaspa_wallet_core::derivation::ExtendedPublicKeySecp256k1;
+use prost::Message;
+use std::num::TryFromIntError;
+use std::str::FromStr;
+use tonic::Status;
+
+/// Deserializes a vector of transaction byte arrays into RpcTransaction.
+///
+/// # Arguments
+/// * `txs` - Vector of transaction byte arrays to deserialize
+/// * `is_domain` - Boolean flag indicating whether the transactions are domain transactions
+/// * `ecdsa` - Whether ECDSA (rather than Schnorr) multisig redeem scripts are expected
+///
+/// # Returns
+/// * `Result<Vec<RpcTransaction>, Status>` - Vector of deserialized transactions or error status
+pub fn deserialize_txs(txs: Vec<Vec<u8>>, is_domain: bool, ecdsa: bool) -> Result<Vec<RpcTransaction>, Status> {
+    txs.into_iter()
+        .map(|tx| if is_domain { deserialize_domain_tx(tx.as_slice()) } else { extract_tx(tx.as_slice(), ecdsa) })
+        .collect::<Result<Vec<RpcTransaction>, Status>>()
+}
+
+/// Deserializes a domain transaction from bytes into an RpcTransaction.
+///
+/// # Arguments
+/// * `tx` - Byte slice containing the domain transaction data
+///
+/// # Returns
+/// * `Result<RpcTransaction, Status>` - Deserialized transaction or error status
fn deserialize_domain_tx(tx: &[u8]) -> Result<RpcTransaction, Status> {
+    let tx = protoserialization::TransactionMessage::decode(tx).map_err(|err| Status::invalid_argument(err.to_string()))?;
+    RpcTransaction::try_from(tx)
+}
+
+/// Extracts and deserializes a partially signed transaction from bytes into an RpcTransaction.
+///
+/// # Arguments
+/// * `tx` - Byte slice containing the partially signed transaction data
+///
+/// # Returns
+/// * `Result<RpcTransaction, Status>` - Deserialized transaction or error status
+pub fn extract_tx(tx: &[u8], ecdsa: bool) -> Result<RpcTransaction, Status> {
+    let tx = protoserialization::PartiallySignedTransaction::decode(tx).map_err(|err| Status::invalid_argument(err.to_string()))?;
+    let tx_message = extract_tx_deserialized(tx, ecdsa).map_err(|err| Status::invalid_argument(err.to_string()))?;
+    RpcTransaction::try_from(tx_message)
+}
+
+/// Extracts and processes a partially signed transaction into a regular transaction message.
+/// Handles both single-signature and multi-signature inputs, constructing appropriate signature scripts.
+fn extract_tx_deserialized(
+    partially_signed_tx: protoserialization::PartiallySignedTransaction,
+    ecdsa: bool,
+) -> Result<protoserialization::TransactionMessage, Status> {
+    let Some(mut tx) = partially_signed_tx.tx else { return Err(Status::invalid_argument("missing transaction")) };
+    if partially_signed_tx.partially_signed_inputs.len() > tx.inputs.len() {
+        return Err(Status::invalid_argument("unbalanced inputs"));
+    }
+    for (idx, (signed_input, tx_input)) in partially_signed_tx.partially_signed_inputs.iter().zip(&mut tx.inputs).enumerate() {
+        let mut script_builder = ScriptBuilder::new();
+        match signed_input.pub_key_signature_pairs.len() {
+            0 => { /* do nothing */ },
+            1 => {
+                if signed_input.pub_key_signature_pairs[0].signature.is_empty() {
+                    return Err(Status::invalid_argument("missing signature"));
+                }
+                let sig_script = script_builder
+                    .add_data(signed_input.pub_key_signature_pairs[0].signature.as_slice())
+                    .map_err(|err| Status::invalid_argument(err.to_string()))?
+                    .drain();
+                tx_input.signature_script = sig_script;
+            }
+            pairs_len /*multisig*/ => {
+                for pair in signed_input.pub_key_signature_pairs.iter() {
+                    script_builder.add_data(pair.signature.as_slice()).map_err(|err| Status::invalid_argument(err.to_string()))?;
+                }
+                if pairs_len < signed_input.minimum_signatures as usize {
+                    return Err(Status::invalid_argument(format!("missing {} signatures on input: {idx}", signed_input.minimum_signatures as usize - pairs_len)));
+                }
+                let redeem_script = partially_signed_input_multisig_redeem_script(signed_input, ecdsa, "m")?;
+                script_builder.add_data(redeem_script.as_slice()).map_err(|err| Status::invalid_argument(err.to_string()))?;
+                tx_input.signature_script = script_builder.drain();
+            }
+        }
+    }
+    Ok(tx)
+}
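+
+// Shape of the assembled signature scripts (an illustrative sketch; `signature`,
+// `pairs` and `redeem_script` are hypothetical bindings, not code from this file).
+// A single-signature input ends up as one data push:
+//
+//     ScriptBuilder::new().add_data(&signature)?.drain()
+//
+// while a multisig input pushes every collected signature followed by the redeem
+// script itself:
+//
+//     let mut sb = ScriptBuilder::new();
+//     for pair in pairs { sb.add_data(&pair.signature)?; }
+//     sb.add_data(&redeem_script)?;
+//     let signature_script = sb.drain();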
+
+/// Generates a multi-signature redeem script for a partially signed input.
+/// Supports both ECDSA and Schnorr signature schemes based on the ecdsa parameter.
+fn partially_signed_input_multisig_redeem_script(
+    input: &protoserialization::PartiallySignedInput,
+    ecdsa: bool,
+    path: &str,
+) -> Result<Vec<u8>, Status> {
+    let extended_pub_keys: &[ExtendedPublicKey<PublicKey>] = &input
+        .pub_key_signature_pairs
+        .iter()
+        .map(|pair| {
+            let extended_key =
+                ExtendedKey::from_str(pair.extended_pub_key.as_str()).map_err(|err| Status::invalid_argument(err.to_string()))?;
+            let derived_key: ExtendedPublicKeySecp256k1 =
+                extended_key.try_into().map_err(|err: Error| Status::invalid_argument(err.to_string()))?;
+            derived_key
+                .derive_path(&path.parse::<DerivationPath>().map_err(|err| Status::invalid_argument(err.to_string()))?)
+                .map_err(|err| Status::invalid_argument(err.to_string()))
+        })
+        .collect::<Result<Vec<_>, Status>>()?;
+
+    if ecdsa {
+        multisig_redeem_script_ecdsa(extended_pub_keys, input.minimum_signatures as usize)
+    } else {
+        multisig_redeem_script(extended_pub_keys, input.minimum_signatures as usize)
+    }
+}
+
+/// Creates a Schnorr-based multisig redeem script from a list of public keys.
+/// The script requires at least `minimum_signatures` valid signatures to spend.
+fn multisig_redeem_script(extended_pub_keys: &[ExtendedPublicKey<PublicKey>], minimum_signatures: usize) -> Result<Vec<u8>, Status> {
+    let serialized_keys = extended_pub_keys.iter().map(|key| key.public_key.x_only_public_key().0.serialize());
+    let redeem_script = kaspa_txscript::multisig_redeem_script(serialized_keys, minimum_signatures)
+        .map_err(|err| Status::invalid_argument(err.to_string()))?;
+    Ok(redeem_script)
+}
+
+/// Creates an ECDSA-based multisig redeem script from a list of public keys.
+/// The script requires at least `minimum_signatures` valid signatures to spend.
+fn multisig_redeem_script_ecdsa(
+    extended_pub_keys: &[ExtendedPublicKey<PublicKey>],
+    minimum_signatures: usize,
+) -> Result<Vec<u8>, Status> {
+    let serialized_ecdsa_keys = extended_pub_keys.iter().map(|key| key.public_key.serialize());
+    let redeem_script = kaspa_txscript::multisig_redeem_script_ecdsa(serialized_ecdsa_keys, minimum_signatures)
+        .map_err(|err| Status::invalid_argument(err.to_string()))?;
+    Ok(redeem_script)
+}
+
+impl From<TransactionOutpointWrapper> for Outpoint {
+    fn from(wrapper: kaspa_wallet_core::api::TransactionOutpointWrapper) -> Self {
+        Outpoint { transaction_id: wrapper.transaction_id.to_string(), index: wrapper.index }
+    }
+}
+
+impl From<ScriptPublicKeyWrapper> for ScriptPublicKey {
+    fn from(script_pub_key: ScriptPublicKeyWrapper) -> Self {
+        ScriptPublicKey { script_public_key: script_pub_key.script_public_key, version: script_pub_key.version.into() }
+    }
+}
+
+impl From<UtxoEntryWrapper> for UtxosByAddressesEntry {
+    fn from(wrapper: UtxoEntryWrapper) -> Self {
+        UtxosByAddressesEntry {
+            address: wrapper.address.map(|addr| addr.to_string()).unwrap_or_default(),
+            outpoint: Some(wrapper.outpoint.into()),
+            utxo_entry: Some(UtxoEntry {
+                amount: wrapper.amount,
+                script_public_key: Some(wrapper.script_public_key.into()),
+                block_daa_score: wrapper.block_daa_score,
+                is_coinbase: wrapper.is_coinbase,
+            }),
+        }
+    }
+}
+
+impl TryFrom<protoserialization::TransactionMessage> for RpcTransaction {
+    type Error = Status;
+
+    fn try_from(
+        // protoserialization::TransactionMessage { version, inputs, outputs, lock_time, subnetwork_id, gas, payload }: protoserialization::TransactionMessage,
+        value: protoserialization::TransactionMessage,
+    ) -> Result<Self, Self::Error> {
+        let version: u16 = value.version.try_into().map_err(|e: TryFromIntError| Status::invalid_argument(e.to_string()))?;
+        let inputs: Result<Vec<RpcTransactionInput>, Status> = value
+            .inputs
+            .into_iter()
+            .map(|i| RpcTransactionInput::try_from(i).map_err(|e| Status::invalid_argument(e.to_string())))
+            .collect();
+        let outputs: Result<Vec<RpcTransactionOutput>, Status> = value
+            .outputs
+            .into_iter()
+            .map(|i| RpcTransactionOutput::try_from(i).map_err(|e| Status::invalid_argument(e.to_string())))
+            .collect();
+
+        let subnetwork_id =
+            RpcSubnetworkId::try_from(value.subnetwork_id.ok_or(Status::invalid_argument("missing subnetwork_id"))?.bytes.as_slice())
+                .map_err(|e| Status::invalid_argument(e.to_string()))?;
+
+        Ok(RpcTransaction {
+            version,
+            inputs: inputs?,
+            outputs: outputs?,
+            lock_time: value.lock_time,
+            subnetwork_id,
+            gas: value.gas,
+            payload: value.payload,
+            mass: 0,
+            verbose_data: None,
+        })
+    }
+}
+
+impl TryFrom<protoserialization::TransactionInput> for RpcTransactionInput {
+    type Error = Status;
+    fn try_from(value: protoserialization::TransactionInput) -> Result<Self, Self::Error> {
+        let previous_outpoint = value.previous_outpoint.ok_or(Status::invalid_argument("missing previous outpoint"))?.try_into()?;
+        let sig_op_count: u8 = value.sig_op_count.try_into().map_err(|e: TryFromIntError| Status::invalid_argument(e.to_string()))?;
+        Ok(RpcTransactionInput {
+            previous_outpoint,
+            signature_script: value.signature_script,
+            sequence: value.sequence,
+            sig_op_count,
+            verbose_data: None,
+        })
+    }
+}
+
+impl TryFrom<protoserialization::TransactionOutput> for RpcTransactionOutput {
+    type Error = Status;
+
+    fn try_from(value: protoserialization::TransactionOutput) -> Result<Self, Self::Error> {
+        Ok(RpcTransactionOutput {
+            value: value.value,
+            script_public_key: value.script_public_key.ok_or(Status::invalid_argument("missing script public key"))?.try_into()?,
+            verbose_data: None,
+        })
+    }
+}
+
+impl TryFrom<protoserialization::ScriptPublicKey> for RpcScriptPublicKey {
+    type Error = Status;
+
+    fn try_from(value: protoserialization::ScriptPublicKey) -> Result<Self, Self::Error> {
+        let version: u16 = value.version.try_into().map_err(|e: TryFromIntError| Status::invalid_argument(e.to_string()))?;
+        Ok(RpcScriptPublicKey::new(version, RpcScriptVec::from(value.script)))
+    }
+}
+
+impl TryFrom<protoserialization::Outpoint> for RpcTransactionOutpoint {
+    type Error = Status;
+
+    fn try_from(protoserialization::Outpoint { transaction_id, index }: protoserialization::Outpoint) -> Result<Self, Self::Error> {
+        Ok(RpcTransactionOutpoint {
+            transaction_id: RpcTransactionId::try_from_slice(
+                transaction_id.ok_or(Status::invalid_argument("Outpoint is missing"))?.bytes.as_slice(),
+            )
+            .map_err(|err| Status::invalid_argument(err.to_string()))?,
+            index,
+        })
+    }
+}
+
+impl protoserialization::PartiallySignedTransaction {
+    pub fn from_unsigned(value: RpcTransaction) -> Self {
+        protoserialization::PartiallySignedTransaction {
+            partially_signed_inputs: vec![],
+            tx: Some(protoserialization::TransactionMessage::from(value)),
+        }
+    }
+}
+
+impl From<RpcTransaction> for protoserialization::TransactionMessage {
+    fn from(value: RpcTransaction) -> Self {
+        protoserialization::TransactionMessage {
+            version: value.version as u32,
+            inputs: value.inputs.into_iter().map(RpcTransactionInput::into).collect(),
+            lock_time: value.lock_time,
+            gas: value.gas,
+            payload: value.payload,
+
+            outputs: value.outputs.into_iter().map(RpcTransactionOutput::into).collect(),
+            subnetwork_id: Some(value.subnetwork_id.into()),
+        }
+    }
+}
+
+impl From<RpcTransactionInput> for protoserialization::TransactionInput {
+    fn from(value: RpcTransactionInput) -> Self {
+        Self {
+            signature_script: value.signature_script,
+            sequence: value.sequence,
+            sig_op_count: value.sig_op_count as u32,
+            previous_outpoint: Some(value.previous_outpoint.into()),
+        }
+    }
+}
+
+impl From<RpcTransactionOutpoint> for protoserialization::Outpoint {
+    fn from(value: RpcTransactionOutpoint) -> Self {
+        Self {
+            transaction_id: Some(protoserialization::TransactionId { bytes: value.transaction_id.as_bytes().to_vec() }),
+            index: value.index,
+        }
+    }
+}
+
+impl From<RpcTransactionOutput> for protoserialization::TransactionOutput {
+    fn from(value: RpcTransactionOutput) -> Self {
+        Self { value: value.value, script_public_key: Some(value.script_public_key.into()) }
+    }
+}
+
+impl From<RpcScriptPublicKey> for protoserialization::ScriptPublicKey {
+    fn from(value: RpcScriptPublicKey) -> Self {
+        Self { script: value.script().to_vec(), version: value.version as u32 }
+    }
+}
+
+impl From<RpcSubnetworkId> for protoserialization::SubnetworkId {
+    fn from(value: RpcSubnetworkId) -> Self {
+        let bts: &[u8] = value.as_ref();
+        Self { bytes: bts.to_vec() }
+    }
+}
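+
+// Round-trip sketch (illustrative; `rpc_tx` is a hypothetical binding): converting
+// an RpcTransaction to the wire message and back preserves all serialized fields,
+// while mass and verbose_data are not carried by the proto and come back as defaults:
+//
+//     let msg = protoserialization::TransactionMessage::from(rpc_tx.clone());
+//     let restored = RpcTransaction::try_from(msg)?;
+//     assert_eq!(restored.lock_time, rpc_tx.lock_time);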
diff --git a/wallet/grpc/core/src/lib.rs b/wallet/grpc/core/src/lib.rs
new file mode 100644
index 0000000000..66485f8b31
--- /dev/null
+++ b/wallet/grpc/core/src/lib.rs
@@ -0,0 +1,20 @@
+pub mod convert;
+pub mod kaspawalletd {
+    include!(concat!(env!("OUT_DIR"), "/kaspawalletd.rs"));
+}
+
+pub mod protoserialization {
+    include!(concat!(env!("OUT_DIR"), "/protoserialization.rs"));
+
+    impl PartiallySignedTransaction {
+        pub fn encode_to_vec(&self) -> Vec<u8> {
+            prost::Message::encode_to_vec(self)
+        }
+    }
+
+    impl TransactionMessage {
+        pub fn encode_to_vec(&self) -> Vec<u8> {
+            prost::Message::encode_to_vec(self)
+        }
+    }
+}
diff --git a/wallet/grpc/server/Cargo.toml b/wallet/grpc/server/Cargo.toml
new file mode 100644
index 0000000000..cde5fa079a
--- /dev/null
+++ b/wallet/grpc/server/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "kaspa-wallet-grpc-server"
+rust-version.workspace = true
+version.workspace = true
+authors.workspace = true
+license.workspace = true
+repository.workspace = true
+edition.workspace = true
+include.workspace = true
+
+[dependencies]
+futures-util.workspace = true
+kaspa-addresses.workspace = true
+kaspa-consensus-core.workspace = true
+kaspa-rpc-core.workspace = true
+kaspa-wallet-core.workspace = true
+kaspa-wallet-grpc-core.workspace = true
+
+tokio.workspace = true
+tonic.workspace = true
+log.workspace = true
+
+[dev-dependencies]
+tokio = { workspace = true, features = ["macros", "test-util"] }
+
+[lints]
+workspace = true
diff --git a/wallet/grpc/server/src/lib.rs b/wallet/grpc/server/src/lib.rs
new file mode 100644
index 0000000000..f1ac0bedd6
--- /dev/null
+++ b/wallet/grpc/server/src/lib.rs
@@ -0,0 +1,208 @@
+pub mod service;
+
+use kaspa_addresses::Version;
+use kaspa_consensus_core::tx::Transaction;
+use kaspa_wallet_core::api::WalletApi;
+use kaspa_wallet_core::{
+    api::{AccountsGetUtxosRequest, AccountsSendRequest, NewAddressKind},
+    prelude::Address,
+    tx::{Fees, PaymentDestination, PaymentOutputs},
+};
+use kaspa_wallet_grpc_core::convert::{deserialize_txs, extract_tx};
+use kaspa_wallet_grpc_core::kaspawalletd::{
+    fee_policy::FeePolicy, kaspawalletd_server::Kaspawalletd, BroadcastRequest, BroadcastResponse, BumpFeeRequest, BumpFeeResponse,
+    CreateUnsignedTransactionsRequest, CreateUnsignedTransactionsResponse, GetBalanceRequest, GetBalanceResponse,
+    GetExternalSpendableUtxOsRequest, GetExternalSpendableUtxOsResponse, GetVersionRequest, GetVersionResponse, NewAddressRequest,
+    NewAddressResponse, SendRequest, SendResponse, ShowAddressesRequest, ShowAddressesResponse, ShutdownRequest, ShutdownResponse,
+    SignRequest, SignResponse,
+};
+use kaspa_wallet_grpc_core::protoserialization::{PartiallySignedTransaction, TransactionMessage};
+use service::Service;
+use tonic::{Code, Request, Response, Status};
+
+#[tonic::async_trait]
+impl Kaspawalletd for Service {
+    async fn get_balance(&self, _request: Request<GetBalanceRequest>) -> Result<Response<GetBalanceResponse>, Status> {
+        let balances = self.descriptor().balance.unwrap();
+        let response = GetBalanceResponse { available: balances.mature, pending: balances.pending, address_balances: vec![] };
+        Ok(Response::new(response))
+    }
+
+    async fn get_external_spendable_utx_os(
+        &self,
+        _request: Request<GetExternalSpendableUtxOsRequest>,
+    ) -> Result<Response<GetExternalSpendableUtxOsResponse>, Status> {
+        let address = Address::try_from(_request.get_ref().address.clone())
+            .map_err(|_| Status::new(tonic::Code::InvalidArgument, "Invalid address provided"))?;
+        let request = AccountsGetUtxosRequest {
+            account_id: self.descriptor().account_id,
+            addresses: Some(vec![address]),
+            min_amount_sompi: None,
+        };
+        let utxos = self.wallet().accounts_get_utxos(request).await.unwrap().utxos;
+        let response = GetExternalSpendableUtxOsResponse { entries: utxos.into_iter().map(Into::into).collect() };
+        Ok(Response::new(response))
+    }
+
+    async fn create_unsigned_transactions(
+        &self,
+        request: Request<CreateUnsignedTransactionsRequest>,
+    ) -> Result<Response<CreateUnsignedTransactionsResponse>, Status> {
+        let CreateUnsignedTransactionsRequest { address, amount, from, use_existing_change_address, is_send_all, fee_policy } =
+            request.into_inner();
+        let to_address = Address::try_from(address).map_err(|err| Status::invalid_argument(err.to_string()))?;
+        let (fee_rate, max_fee) = self.calculate_fee_limits(fee_policy).await?;
+        let from_addresses = from
+            .iter()
+            .map(|a| Address::try_from(a.as_str()))
+            .collect::<Result<Vec<Address>, _>>()
+            .map_err(|err| Status::invalid_argument(err.to_string()))?;
+        let transactions =
+            self.unsigned_txs(to_address, amount, use_existing_change_address, is_send_all, fee_rate, max_fee, from_addresses).await?;
+        let unsigned_transactions =
+            transactions.into_iter().map(|tx| PartiallySignedTransaction::from_unsigned(tx).encode_to_vec()).collect();
+        Ok(Response::new(CreateUnsignedTransactionsResponse { unsigned_transactions }))
+    }
+
+    async fn show_addresses(&self, _request: Request<ShowAddressesRequest>) -> Result<Response<ShowAddressesResponse>, Status> {
+        let addresses = self.receive_addresses().iter().map(|addr| addr.to_string()).collect::<Vec<String>>();
+        let response = ShowAddressesResponse { address: addresses };
+        Ok(Response::new(response))
+    }
+
+    async fn new_address(&self, _request: Request<NewAddressRequest>) -> Result<Response<NewAddressResponse>, Status> {
+        let address = self
+            .wallet()
+            .accounts_create_new_address(self.descriptor().account_id, NewAddressKind::Receive)
+            .await
+            .map_err(|err| Status::internal(err.to_string()))?
+            .address;
+        let response = NewAddressResponse { address: address.to_string() };
+        Ok(Response::new(response))
+    }
+
+    async fn shutdown(&self, _request: Request<ShutdownRequest>) -> Result<Response<ShutdownResponse>, Status> {
+        self.initiate_shutdown();
+        Ok(Response::new(ShutdownResponse {}))
+    }
+
+    // TODO: Consider implementing parallel transaction processing in the future:
+    // - The server currently processes messages sequentially
+    // - It might be possible to start processing a new message before writing the response to the socket
+    // - New parameters like allow_parallel should be introduced
+    // - Client behavior should be considered, as clients may expect sequential processing up to the first error when sending batches
+    async fn broadcast(&self, request: Request<BroadcastRequest>) -> Result<Response<BroadcastResponse>, Status> {
+        let request = request.into_inner();
+        let txs = deserialize_txs(request.transactions, request.is_domain, self.use_ecdsa())?;
+        let mut tx_ids: Vec<String> = Vec::with_capacity(txs.len());
+        for tx in txs {
+            let tx_id =
+                self.wallet().rpc_api().submit_transaction(tx, false).await.map_err(|e| Status::new(Code::Internal, e.to_string()))?;
+            tx_ids.push(tx_id.to_string());
+        }
+        Ok(Response::new(BroadcastResponse { tx_ids }))
+    }
+
+    async fn broadcast_replacement(&self, request: Request<BroadcastRequest>) -> Result<Response<BroadcastResponse>, Status> {
+        let request = request.into_inner();
+        let txs = deserialize_txs(request.transactions, request.is_domain, self.use_ecdsa())?;
+        let mut tx_ids: Vec<String> = Vec::with_capacity(txs.len());
+        for (i, tx) in txs.into_iter().enumerate() {
+            // Once the first transaction is added to the mempool, the transactions that depend
+            // on the replaced transaction will be removed, so there's no need to submit them
+            // as RBF transactions.
+            let tx_id = if i == 0 {
+                let submit_transaction_replacement_response = self
+                    .wallet()
+                    .rpc_api()
+                    .submit_transaction_replacement(tx)
+                    .await
+                    .map_err(|e| Status::new(Code::Internal, e.to_string()))?;
+                submit_transaction_replacement_response.transaction_id
+            } else {
+                self.wallet().rpc_api().submit_transaction(tx, false).await.map_err(|e| Status::new(Code::Internal, e.to_string()))?
+            };
+            tx_ids.push(tx_id.to_string());
+        }
+        Ok(Response::new(BroadcastResponse { tx_ids }))
+    }
+
+    async fn send(&self, _request: Request<SendRequest>) -> Result<Response<SendResponse>, Status> {
+        let acc = self.wallet().account().map_err(|err| Status::internal(err.to_string()))?;
+        if acc.minimum_signatures() != 1 {
+            return Err(Status::unimplemented("Only single signature wallets are supported"));
+        }
+        if acc.receive_address().map_err(|err| Status::internal(err.to_string()))?.version == Version::PubKeyECDSA {
+            return Err(Status::unimplemented("ECDSA wallets are not supported yet"));
+        }
+
+        // todo: create an unsigned tx and sign it, for consistency with create_unsigned_transactions + sign
+
+        let data = _request.get_ref();
+        let fee_rate_estimate = self.wallet().fee_rate_estimate().await.unwrap();
+        let fee_rate = data.fee_policy.and_then(|policy| match policy.fee_policy.unwrap() {
+            FeePolicy::MaxFeeRate(rate) => Some(fee_rate_estimate.normal.feerate.min(rate)),
+            FeePolicy::ExactFeeRate(rate) => Some(rate),
+            _ => None, // TODO: the MaxFee policy is not supported here; decide whether to supply the default fee_rate_estimate or just 1 in this case...
+ }); + let request = AccountsSendRequest { + account_id: self.descriptor().account_id, + wallet_secret: data.password.clone().into(), + payment_secret: None, + destination: PaymentDestination::PaymentOutputs(PaymentOutputs::from(( + Address::try_from(data.to_address.clone()).unwrap(), + data.amount, + ))), + fee_rate, + priority_fee_sompi: Fees::SenderPays(0), + payload: None, + }; + let result = self + .wallet() + .accounts_send(request) + .await + .map_err(|err| Status::new(tonic::Code::Internal, format!("Generator: {}", err)))?; + let final_transaction = result.final_transaction_id.unwrap().to_string(); + // todo return all transactions + let response = SendResponse { tx_ids: vec![final_transaction], signed_transactions: vec![] }; + Ok(Response::new(response)) + } + + async fn sign(&self, request: Request) -> Result, Status> { + let SignRequest { unsigned_transactions, password } = request.into_inner(); + + // Deserialization + let unsigned_transactions = unsigned_transactions + .into_iter() + .map(|tx| extract_tx(tx.as_slice(), self.use_ecdsa())) + // todo convert directly to consensus::transaction + .map(|r| r + .and_then(|rtx| Transaction::try_from(rtx) + .map_err(|err| Status::internal(err.to_string())))) + .collect::, _>>()?; + + // Sign and convert to RpcTransaction + let signed_transactions = self.sign_transactions(unsigned_transactions, password).await?; + + // Serialization - convert RpcTransaction directly to TransactionMessage + let signed_transactions = signed_transactions + .into_iter() + .map(|rpc_tx| { + // Convert RpcTransaction directly to TransactionMessage using existing From implementation + let msg = TransactionMessage::from(rpc_tx); + Ok(msg.encode_to_vec()) + }) + .collect::>, Status>>()?; + Ok(Response::new(SignResponse { signed_transactions })) + } + + async fn get_version(&self, _request: Request) -> Result, Status> { + let response = GetVersionResponse { version: env!("CARGO_PKG_VERSION").to_string() }; + Ok(Response::new(response)) + } + + async fn bump_fee(&self, _request: Request) -> Result, Status> { + // wallet api doesnt support RBF, requires manual implementation + Err(Status::unimplemented("Bump fee is not implemented yet")) + } +} diff --git a/wallet/grpc/server/src/service.rs b/wallet/grpc/server/src/service.rs new file mode 100644 index 0000000000..0e15dabea5 --- /dev/null +++ b/wallet/grpc/server/src/service.rs @@ -0,0 +1,276 @@ +use fee_policy::FeePolicy; +use futures_util::{select, FutureExt, TryStreamExt}; +use kaspa_addresses::Prefix; +use kaspa_consensus_core::constants::SOMPI_PER_KASPA; +use kaspa_consensus_core::tx::{SignableTransaction, Transaction, UtxoEntry}; +use kaspa_rpc_core::RpcTransaction; +use kaspa_wallet_core::api::NewAddressKind; +use kaspa_wallet_core::prelude::{PaymentDestination, PaymentOutput, PaymentOutputs}; +use kaspa_wallet_core::tx::{Fees, Generator, GeneratorSettings, Signer, SignerT}; +use kaspa_wallet_core::utxo::UtxoEntryReference; +use kaspa_wallet_core::{ + api::WalletApi, + events::Events, + prelude::{AccountDescriptor, Address}, + wallet::Wallet, +}; +use kaspa_wallet_grpc_core::kaspawalletd; +use kaspa_wallet_grpc_core::kaspawalletd::fee_policy; +use log::info; +use std::cmp::Reverse; +use std::sync::{Arc, Mutex}; +use tokio::sync::oneshot; +use tonic::Status; + +pub struct Service { + wallet: Arc, + shutdown_sender: Arc>>>, + // TODO: Extend the partially serialized transaction or transaction structure with a boolean field 'ecdsa' + ecdsa: bool, +} + +impl Service { + pub fn 
+    pub fn with_notification_pipe_task(wallet: Arc<Wallet>, shutdown_sender: oneshot::Sender<()>, ecdsa: bool) -> Self {
+        let channel = wallet.multiplexer().channel();
+
+        tokio::spawn({
+            let wallet = wallet.clone();
+
+            async move {
+                loop {
+                    select! {
+                        msg = channel.receiver.recv().fuse() => {
+                            if let Ok(msg) = msg {
+                                match *msg {
+                                    Events::SyncState { sync_state } => {
+                                        if sync_state.is_synced() {
+                                            if let Err(err) = wallet.clone().wallet_reload(false).await {
+                                                panic!("Wallet reloading failed: {}", err)
+                                            }
+                                        }
+                                    },
+                                    Events::Balance { balance: _new_balance, .. } => {
+                                        // TBD: index balance per address for the GetBalance call
+                                    },
+                                    _ => {}
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        });
+
+        Service { wallet, shutdown_sender: Arc::new(Mutex::new(Some(shutdown_sender))), ecdsa }
+    }
+
+    pub async fn calculate_fee_limits(&self, fee_policy: Option<kaspawalletd::FeePolicy>) -> Result<(f64, u64), Status> {
+        let fee_policy = fee_policy.and_then(|fee_policy| fee_policy.fee_policy);
+        const MIN_FEE_RATE: f64 = 1.0;
+        let fees: (f64, u64) = if let Some(policy) = fee_policy {
+            match policy {
+                FeePolicy::MaxFeeRate(max_fee_rate) => {
+                    if max_fee_rate < MIN_FEE_RATE {
+                        return Err(Status::invalid_argument(format!(
+                            "requested max fee rate {} is too low, minimum fee rate is {}",
+                            max_fee_rate, MIN_FEE_RATE
+                        )));
+                    };
+                    let estimate = self.wallet.rpc_api().get_fee_estimate().await.unwrap();
+                    let fee_rate = max_fee_rate.min(estimate.normal_buckets[0].feerate);
+                    (fee_rate, u64::MAX)
+                }
+                FeePolicy::ExactFeeRate(exact_fee_rate) => {
+                    if exact_fee_rate < MIN_FEE_RATE {
+                        return Err(Status::invalid_argument(format!(
+                            "requested fee rate {} is too low, minimum fee rate is {}",
+                            exact_fee_rate, MIN_FEE_RATE
+                        )));
+                    }
+                    (exact_fee_rate, u64::MAX)
+                }
+                FeePolicy::MaxFee(max_fee) => {
+                    let estimate = self.wallet.rpc_api().get_fee_estimate().await.unwrap();
+                    (estimate.normal_buckets[0].feerate, max_fee)
+                }
+            }
+        } else {
+            let estimate = self.wallet.rpc_api().get_fee_estimate().await.unwrap();
+            (estimate.normal_buckets[0].feerate, SOMPI_PER_KASPA)
+        };
+        Ok(fees)
+    }
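+
+    // How the policies above resolve to (fee_rate, max_fee) pairs (illustrative,
+    // assuming a hypothetical normal-bucket estimate of 2.0):
+    //   MaxFeeRate(5.0)   -> (2.0, u64::MAX)        (capped by the estimate)
+    //   ExactFeeRate(3.0) -> (3.0, u64::MAX)
+    //   MaxFee(10_000)    -> (2.0, 10_000)
+    //   no policy         -> (2.0, SOMPI_PER_KASPA)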
+    pub fn receive_addresses(&self) -> Vec<Address> {
+        // TODO: move into WalletApi
+        let manager = self.wallet.account().unwrap().as_derivation_capable().unwrap().derivation().receive_address_manager();
+        manager.get_range_with_args(0..manager.index() + 1, false).unwrap()
+    }
+
+    pub fn wallet(&self) -> Arc<Wallet> {
+        self.wallet.clone()
+    }
+
+    pub fn descriptor(&self) -> AccountDescriptor {
+        self.wallet.account().unwrap().descriptor().unwrap()
+    }
+
+    pub fn initiate_shutdown(&self) {
+        let mut sender = self.shutdown_sender.lock().unwrap();
+        if let Some(shutdown_sender) = sender.take() {
+            let _ = shutdown_sender.send(());
+        }
+    }
+
+    /// Returns whether the service should use ECDSA signatures instead of Schnorr signatures.
+    /// This flag is used when processing transactions to determine the appropriate signature scheme.
+    /// Currently set via command-line arguments, but this is temporary - the signature scheme
+    /// should be determined per transaction by extending the partially serialized transaction
+    /// or transaction structure with this field.
+    pub fn use_ecdsa(&self) -> bool {
+        self.ecdsa
+    }
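The shutdown path above relies on a take-once pattern: the oneshot sender sits behind a mutex and is consumed on the first call, so repeated shutdown requests become silent no-ops. A minimal standalone sketch:

use std::sync::{Arc, Mutex};
use tokio::sync::oneshot;

// Take-once shutdown trigger, as used by initiate_shutdown above.
struct Shutdown(Arc<Mutex<Option<oneshot::Sender<()>>>>);

impl Shutdown {
    fn trigger(&self) {
        if let Some(tx) = self.0.lock().unwrap().take() {
            let _ = tx.send(()); // receiver may already be gone; ignore the error
        }
    }
}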
+    pub async fn unsigned_txs(
+        &self,
+        to: Address,
+        amount: u64,
+        use_existing_change_address: bool,
+        is_send_all: bool,
+        fee_rate: f64,
+        max_fee: u64,
+        from_addresses: Vec<Address>,
+    ) -> Result<Vec<RpcTransaction>, Status> {
+        let current_network = self.wallet().network_id().map_err(|err| Status::internal(err.to_string()))?;
+        if to.prefix != Prefix::from(current_network) {
+            return Err(Status::invalid_argument(format!(
+                "decoded address is of wrong network. Expected {} but got {}",
+                Prefix::from(current_network),
+                to.prefix
+            )));
+        }
+
+        let account = self.wallet().account().map_err(|err| Status::internal(err.to_string()))?;
+
+        info!("Processing request for account_id: {}", self.descriptor().account_id);
+
+        let addresses = account.account_addresses().map_err(|err| Status::internal(err.to_string()))?;
+        if let Some(non_existent_address) = from_addresses.iter().find(|from| addresses.iter().all(|address| &address != from)) {
+            return Err(Status::invalid_argument(format!("specified from address {non_existent_address} does not exist")));
+        }
+
+        // If specific addresses for sending are specified, use them.
+        // Otherwise, use all wallet addresses to search for UTXOs.
+        let search_addresses = if from_addresses.is_empty() {
+            info!("No specific addresses specified, searching UTXOs in all wallet addresses");
+            None // Search UTXOs from all addresses in the wallet
+        } else {
+            info!("Searching UTXOs in specified addresses: {:?}", from_addresses);
+            Some(from_addresses)
+        };
+
+        let utxos = account.clone().get_utxos(search_addresses, None).await.map_err(|err| Status::internal(err.to_string()))?;
+
+        // Sort UTXOs by amount, descending, to optimize transaction weight:
+        // spending large UTXOs first minimizes the number of inputs
+        let mut sorted_utxos = utxos;
+        sorted_utxos.sort_unstable_by_key(|a| Reverse(a.amount));
+
+        let change_address = if !use_existing_change_address {
+            self.wallet()
+                .accounts_create_new_address(self.descriptor().account_id, NewAddressKind::Change)
+                .await
+                .map_err(|err| Status::internal(err.to_string()))?
+                .address
+        } else {
+            self.descriptor().change_address.ok_or(Status::internal("change address doesn't exist"))?.clone()
+        };
+
+        let total_balance: u64 = sorted_utxos.iter().map(|utxo| utxo.amount).sum();
+        let output_amount = if is_send_all { total_balance } else { amount };
+
+        info!("Found {} UTXOs with total value {} sompi", sorted_utxos.len(), total_balance);
+
+        let settings = GeneratorSettings::try_new_with_iterator(
+            current_network,
+            Box::new(sorted_utxos.into_iter().map(|utxo| UtxoEntryReference { utxo: Arc::new(utxo) })),
+            None,
+            change_address,
+            account.sig_op_count(),
+            account.minimum_signatures(),
+            PaymentDestination::PaymentOutputs(PaymentOutputs { outputs: vec![PaymentOutput { address: to, amount: output_amount }] }),
+            Some(fee_rate),
+            Fees::SenderPays(0), // FIXME: @zelenevn
+            None,
+            None,
+        )
+        .map_err(|err| Status::internal(err.to_string()))?;
+
+        let generator = Generator::try_new(settings, None, None).map_err(|err| Status::internal(err.to_string()))?;
+
+        let mut stream = generator.stream();
+        let mut txs = vec![];
+        while let Some(transaction) = stream.try_next().await.map_err(|err| Status::internal(err.to_string()))? {
+            txs.push(transaction.rpc_transaction());
+        }
+        if generator.summary().aggregate_fees > max_fee {
+            return Err(Status::failed_precondition(format!(
+                "aggregate fees {} exceeds requested max {}",
+                generator.summary().aggregate_fees,
+                max_fee
+            )));
+        }
+        Ok(txs)
+    }
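The largest-first ordering used above is a one-liner with `Reverse`; spending big UTXOs first keeps the input count, and therefore the transaction mass, low. A runnable illustration:

use std::cmp::Reverse;

fn main() {
    // Stand-in for the UTXO amounts sorted in unsigned_txs.
    let mut utxo_amounts: Vec<u64> = vec![5, 100, 42];
    utxo_amounts.sort_unstable_by_key(|a| Reverse(*a));
    assert_eq!(utxo_amounts, vec![100, 42, 5]);
}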
+    pub async fn sign_transactions(
+        &self,
+        unsigned_transactions: Vec<Transaction>,
+        password: String,
+    ) -> Result<Vec<RpcTransaction>, Status> {
+        if self.use_ecdsa() {
+            return Err(Status::unimplemented("Ecdsa signing is not supported yet"));
+        }
+
+        let account = self.wallet().account().map_err(|e| Status::internal(format!("Account error: {}", e)))?;
+
+        let utxos = account.clone().get_utxos(None, None).await.map_err(|err| Status::internal(err.to_string()))?;
+        let utxo_context = account.utxo_context();
+
+        // Transaction -> SignableTransaction
+        let signable_txs: Vec<SignableTransaction> = unsigned_transactions
+            .into_iter()
+            .map(|tx| {
+                let entries = tx
+                    .inputs
+                    .iter()
+                    .map(|input| {
+                        utxos
+                            .iter()
+                            .find(|utxo| utxo.outpoint == input.previous_outpoint)
+                            .map(UtxoEntry::from)
+                            .ok_or(Status::invalid_argument(format!("Wallet does not have mature utxo for input {input:?}")))
+                    })
+                    .collect::<Result<Vec<_>, Status>>()?;
+                Ok(SignableTransaction::with_entries(tx, entries))
+            })
+            .collect::<Result<Vec<_>, Status>>()?;
+
+        // Get private key data for signing
+        let prv_key_data = account.prv_key_data(password.into()).await.map_err(|err| Status::internal(err.to_string()))?;
+        let addresses: Vec<_> = utxo_context.addresses().iter().map(|addr| addr.as_ref().clone()).collect();
+
+        let signer = Signer::new(account.clone(), prv_key_data, None);
+        let signed_txs = signable_txs
+            .into_iter()
+            .map(|tx| {
+                let signed = signer.try_sign(tx, addresses.as_slice()).map_err(|err| Status::internal(err.to_string()))?;
+                Ok(signed.tx)
+            })
+            .collect::<Result<Vec<_>, Status>>()?;
+
+        // Convert to RpcTransaction
+        let signed_txs = signed_txs.into_iter().map(|tx| RpcTransaction::from(&tx)).collect();
+        Ok(signed_txs)
+    }
+}
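The entry lookup in sign_transactions scans the full UTXO list once per input, which is O(inputs × utxos). A hypothetical sketch of an index keyed by outpoint that would make each probe O(1), using simplified stand-in types rather than the wallet's own:

use std::collections::HashMap;

// Simplified stand-ins for the wallet's outpoint and UTXO entry types.
#[derive(Hash, PartialEq, Eq)]
struct Outpoint {
    txid: [u8; 32],
    index: u32,
}

// Build the index once, then probe it per input instead of re-scanning.
fn index_by_outpoint(utxos: Vec<(Outpoint, u64)>) -> HashMap<Outpoint, u64> {
    utxos.into_iter().collect()
}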
diff --git a/wallet/native/src/main.rs b/wallet/native/src/main.rs
deleted file mode 100644
index 4853a87db9..0000000000
--- a/wallet/native/src/main.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-use kaspa_cli_lib::{kaspa_cli, TerminalOptions};
-
-#[tokio::main]
-async fn main() {
-    let result = kaspa_cli(TerminalOptions::new().with_prompt("$ "), None).await;
-    if let Err(err) = result {
-        println!("{err}");
-    }
-}
diff --git a/wallet/pskt/src/error.rs b/wallet/pskt/src/error.rs
index f3fd835701..9f3a26535d 100644
--- a/wallet/pskt/src/error.rs
+++ b/wallet/pskt/src/error.rs
@@ -42,6 +42,8 @@ pub enum Error {
     PskbPrefixError,
     #[error("PSKT serialization requires 'PSKT' prefix")]
     PsktPrefixError,
+    #[error("Cannot set payload on PSKT version {0}, payload requires version 1 or higher")]
+    PayloadRequiresVersion1(crate::pskt::Version),
 }
 
 #[derive(thiserror::Error, Debug)]
 pub enum ConstructorError {
diff --git a/wallet/pskt/src/global.rs b/wallet/pskt/src/global.rs
index ad98f11d30..23720e0db5 100644
--- a/wallet/pskt/src/global.rs
+++ b/wallet/pskt/src/global.rs
@@ -39,6 +39,8 @@ pub struct Global {
     /// Unknown key-value pairs for this output.
     #[serde(flatten)]
     pub unknowns: BTreeMap<String, serde_value::Value>,
+    #[serde(with = "kaspa_utils::serde_bytes_optional")]
+    pub payload: Option<Vec<u8>>,
 }
 
 impl Add for Global {
@@ -104,6 +106,23 @@ impl Add for Global {
         self.proprietaries = combine_if_no_conflicts(self.proprietaries, rhs.proprietaries).map_err(CombineError::NotCompatibleProprietary)?;
         self.unknowns = combine_if_no_conflicts(self.unknowns, rhs.unknowns).map_err(CombineError::NotCompatibleUnknownField)?;
+
+        // Combine payloads according to the rules:
+        // - Both None -> None
+        // - One has payload -> use that payload
+        // - Both have same payload -> use that payload
+        // - Different payloads -> error
+        // Payload requires version >= 1
+        if (self.payload.is_some() || rhs.payload.is_some()) && self.version < Version::One {
+            return Err(CombineError::PayloadRequiresHigherVersion { version: self.version });
+        }
+        self.payload = match (self.payload.take(), rhs.payload) {
+            (None, None) => None,
+            (Some(p), None) | (None, Some(p)) => Some(p),
+            (Some(lhs), Some(rhs)) if lhs == rhs => Some(lhs),
+            (Some(lhs), Some(rhs)) => return Err(CombineError::PayloadMismatch { this: Some(lhs), that: Some(rhs) }),
+        };
+
         Ok(self)
     }
 }
@@ -122,6 +141,7 @@ impl Default for Global {
             id: None,
             proprietaries: Default::default(),
             unknowns: Default::default(),
+            payload: None,
         }
     }
 }
@@ -167,4 +187,16 @@ pub enum CombineError {
     NotCompatibleUnknownField(crate::utils::Error),
     #[error("Two different proprietary values")]
     NotCompatibleProprietary(crate::utils::Error),
+    #[error("The transaction payloads are not compatible")]
+    PayloadMismatch {
+        /// lhs
+        this: Option<Vec<u8>>,
+        /// rhs
+        that: Option<Vec<u8>>,
+    },
+    #[error("Payload requires PSKT version 1 or higher, but current version is {version}")]
+    PayloadRequiresHigherVersion {
+        /// Current PSKT version
+        version: Version,
+    },
 }
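The combine rules listed in the comment above are a four-way match. A standalone restatement, with String in place of the `Vec<u8>` payload for brevity:

// Payload combine rules from the Add impl above, isolated for clarity.
fn combine_payloads(lhs: Option<String>, rhs: Option<String>) -> Result<Option<String>, String> {
    match (lhs, rhs) {
        (None, None) => Ok(None),
        (Some(p), None) | (None, Some(p)) => Ok(Some(p)),
        (Some(l), Some(r)) if l == r => Ok(Some(l)),
        (Some(l), Some(r)) => Err(format!("payload mismatch: {l:?} vs {r:?}")),
    }
}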
diff --git a/wallet/pskt/src/pskt.rs b/wallet/pskt/src/pskt.rs
index c0af033f8c..a3341e0669 100644
--- a/wallet/pskt/src/pskt.rs
+++ b/wallet/pskt/src/pskt.rs
@@ -33,17 +33,19 @@ pub struct Inner {
     pub outputs: Vec<Output>,
 }
 
-#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, Serialize_repr, Deserialize_repr)]
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize_repr, Deserialize_repr)]
 #[repr(u8)]
 pub enum Version {
     #[default]
     Zero = 0,
+    One = 1,
 }
 
 impl Display for Version {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         match self {
             Version::Zero => write!(f, "{}", Version::Zero as u8),
+            Version::One => write!(f, "{}", Version::One as u8),
         }
     }
 }
@@ -151,7 +153,8 @@ impl PSKT {
             self.determine_lock_time(),
             SUBNETWORK_ID_NATIVE,
             0,
-            vec![],
+            // Only include the payload if the version supports it (Version::One or higher)
+            if self.global.version >= Version::One { self.global.payload.clone().unwrap_or_default() } else { vec![] },
         );
         let entries = self.inputs.iter().filter_map(|Input { utxo_entry, .. }| utxo_entry.clone()).collect();
         SignableTransaction::with_entries(tx, entries)
@@ -191,6 +194,12 @@ impl PSKT {
         self
     }
 
+    /// Sets the PSKT version.
+    pub fn set_version(mut self, version: Version) -> Self {
+        self.inner_pskt.global.version = version;
+        self
+    }
+
     // todo generic const
     /// Sets the inputs modifiable bit in the transaction modifiable flags.
     pub fn inputs_modifiable(mut self) -> Self {
@@ -237,6 +246,15 @@ impl PSKT {
         self
     }
 
+    pub fn payload(mut self, payload: Option<Vec<u8>>) -> Result<Self, Error> {
+        // Only allow setting the payload if the version is One or greater
+        if payload.is_some() && self.inner_pskt.global.version < Version::One {
+            return Err(Error::PayloadRequiresVersion1(self.inner_pskt.global.version));
+        }
+        self.inner_pskt.global.payload = payload;
+        Ok(self)
+    }
+
     /// Returns a PSKT [`Updater`] once construction is completed.
     pub fn updater(self) -> PSKT<Updater> {
         let pskt = self.no_more_inputs().no_more_outputs();
@@ -452,7 +470,7 @@ impl PSKT {
         let reused_values = SigHashReusedValuesUnsync::new();
 
         tx.populated_inputs().enumerate().try_for_each(|(idx, (input, entry))| {
-            TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &reused_values, &cache, false, false).execute()?;
+            TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &reused_values, &cache).execute()?;
             <Result<(), TxScriptError>>::Ok(())
         })?;
     }
diff --git a/wasm/build-release b/wasm/build-release
index 5b98d09650..1777d64fff 100755
--- a/wasm/build-release
+++ b/wasm/build-release
@@ -87,8 +87,3 @@ cp LICENSE release/kaspa-wasm32-sdk/LICENSE
 
 node build/package-sizes.js
 cp package-sizes.js release/kaspa-wasm32-sdk/package-sizes.js
-
-pushd .
-cd release
-zip -q -r kaspa-wasm32-sdk.zip kaspa-wasm32-sdk
-popd
diff --git a/wasm/examples/nodejs/javascript/general/get-vritual-chain-v2.js b/wasm/examples/nodejs/javascript/general/get-vritual-chain-v2.js
new file mode 100644
index 0000000000..3153e35f34
--- /dev/null
+++ b/wasm/examples/nodejs/javascript/general/get-vritual-chain-v2.js
@@ -0,0 +1,69 @@
+// @ts-ignore
+globalThis.WebSocket = require("websocket").w3cwebsocket; // W3C WebSocket module shim
+
+const kaspa = require("../../../../nodejs/kaspa");
+const { RpcClient, Encoding } = kaspa;
+
+kaspa.initConsolePanicHook();
+
+const delay = (ms) => new Promise((res) => setTimeout(res, ms));
+
+(async () => {
+    const rpc = new RpcClient({
+        url: "127.0.0.1",
+        encoding: Encoding.Borsh,
+        // resolver: new Resolver(),
+        networkId: "mainnet",
+    });
+    console.log(`Resolving RPC endpoint...`);
+    await rpc.connect();
+    console.log(`Connected to ${rpc.url}`);
+
+    console.log("Getting known block hash from node...");
+
+    const info = await rpc.getBlockDagInfo();
+    console.info("BlockDagInfo:", info);
+
+    // Start from the node sink
+    let lowHash = info.sink;
+    console.info("Starting lowHash (sink):", lowHash);
+
+    await delay(2000);
+
+    // Main loop - runs forever, polling every 10 seconds
+    while (true) {
+        try {
+            const date = new Date();
+            const vspc = await rpc.getVirtualChainFromBlockV2({
+                startHash: lowHash,
+                minConfirmationCount: 10,
+                dataVerbosityLevel: "None",
+            });
+            console.info("VSPC Info:", vspc);
+
+            for (const hash of vspc.removedChainBlockHashes) {
+                console.info("Removed block hash:", hash);
+            }
+
+            for (const hash of vspc.addedChainBlockHashes) {
+                console.info("Added block hash:", hash);
+                lowHash = hash;
+            }
+
+            for (const cbat of vspc.chainBlockAcceptedTransactions) {
+                // Do something with the chain block header
+                console.info(cbat.chainBlockHeader);
+                // Do something with the accepted transactions
+                console.info(cbat.acceptedTransactions);
+            }
+
+            console.info("Time span:", Date.now() - date.getTime(), "ms");
+        } catch (innerErr) {
+            console.error("Error in loop iteration:", innerErr);
+            // keep running despite errors
+        }
+
+        // wait 10 seconds before next iteration
+        await delay(10000);
+    }
+})();