diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..39b059596f
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+.git
+target
\ No newline at end of file
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 9b1e99a1cb..91ada621c4 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -354,13 +354,12 @@ jobs:
cd wasm
bash build-release
popd
- mv wasm/release/kaspa-wasm32-sdk.zip wasm/release/kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip
- name: Upload WASM build to GitHub
uses: actions/upload-artifact@v4
with:
- name: kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip
- path: wasm/release/kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip
+ name: kaspa-wasm32-sdk-${{ env.SHORT_SHA }}
+ path: wasm/release/
build-release:
name: Build Linux Release
runs-on: ubuntu-latest
@@ -405,4 +404,4 @@ jobs:
# Run build script for musl toolchain
source musl-toolchain/build.sh
# Build for musl
- cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-musl
+ cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet-daemon --release --target x86_64-unknown-linux-musl
diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml
index 537eeef898..4298b1d68f 100644
--- a/.github/workflows/deploy.yaml
+++ b/.github/workflows/deploy.yaml
@@ -1,200 +1,186 @@
-name: Build and upload assets
-on:
- release:
- types: [ published ]
-
-jobs:
- build:
- runs-on: ${{ matrix.os }}
- strategy:
- fail-fast: false
- matrix:
- # Build gnu-linux on ubuntu-18.04 and musl on ubuntu latest
- # os: [ ubuntu-18.04, ubuntu-latest, windows-latest, macos-latest ]
- os: [ ubuntu-latest, windows-latest, macos-latest ]
- name: Building, ${{ matrix.os }}
- steps:
- - name: Fix CRLF on Windows
- if: runner.os == 'Windows'
- run: git config --global core.autocrlf false
-
- - name: Checkout sources
- uses: actions/checkout@v3
-
- - name: Install Protoc
- uses: arduino/setup-protoc@v3
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Install stable toolchain
- uses: dtolnay/rust-toolchain@stable
-
- - name: Cache Cargo Build Outputs
- uses: actions/cache@v3
- with:
- path: |
- ~/.cargo/bin/
- ~/.cargo/registry/index/
- ~/.cargo/registry/cache/
- ~/.cargo/git/db/
- target/
- key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
-
- - name: Cache Toolchain
- uses: actions/cache@v4
- with:
- path: |
- ~/x-tools
- key: ${{ runner.os }}-musl-${{ hashFiles('**/musl-toolchain/preset.sh') }}
- restore-keys: |
- ${{ runner.os }}-musl-
-
- - name: Build on Linux
- if: runner.os == 'Linux'
- # We're using musl to make the binaries statically linked and portable
- run: |
- # Run build script for musl toolchain
- source musl-toolchain/build.sh
-
- # Go back to the workspace
- cd $GITHUB_WORKSPACE
-
- # Build for musl
- cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-musl
- mkdir bin || true
- cp target/x86_64-unknown-linux-musl/release/kaspad bin/
- cp target/x86_64-unknown-linux-musl/release/rothschild bin/
- cp target/x86_64-unknown-linux-musl/release/kaspa-wallet bin/
- archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip"
- asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip"
- zip -r "${archive}" ./bin/*
- echo "archive=${archive}" >> $GITHUB_ENV
- echo "asset_name=${asset_name}" >> $GITHUB_ENV
-
- - name: Build on Windows
- if: runner.os == 'Windows'
- shell: bash
- run: |
- cargo build --bin kaspad --release
- cargo build --bin rothschild --release
- cargo build --bin kaspa-wallet --release
- mkdir bin || true
- cp target/release/kaspad.exe bin/
- cp target/release/rothschild.exe bin/
- cp target/release/kaspa-wallet.exe bin/
- archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-win64.zip"
- asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-win64.zip"
- powershell "Compress-Archive bin/* \"${archive}\""
- echo "archive=${archive}" >> $GITHUB_ENV
- echo "asset_name=${asset_name}" >> $GITHUB_ENV
-
- - name: Build on MacOS
- if: runner.os == 'macOS'
- run: |
- cargo build --bin kaspad --release
- cargo build --bin rothschild --release
- cargo build --bin kaspa-wallet --release
- mkdir bin || true
- cp target/release/kaspad bin/
- cp target/release/rothschild bin/
- cp target/release/kaspa-wallet bin/
- archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-osx.zip"
- asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-osx.zip"
- zip -r "${archive}" ./bin/*
- echo "archive=${archive}" >> $GITHUB_ENV
- echo "asset_name=${asset_name}" >> $GITHUB_ENV
-
- - name: Upload release asset
- uses: actions/upload-release-asset@v1
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- with:
- upload_url: ${{ github.event.release.upload_url }}
- asset_path: "./${{ env.archive }}"
- asset_name: "${{ env.asset_name }}"
- asset_content_type: application/zip
-
- build-wasm:
- runs-on: ubuntu-latest
- name: Building WASM32 SDK
- steps:
- - name: Checkout sources
- uses: actions/checkout@v3
-
- - name: Install Protoc
- uses: arduino/setup-protoc@v3
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Install stable toolchain
- uses: dtolnay/rust-toolchain@stable
-
- - name: Install llvm
- id: install_llvm
- continue-on-error: true
- run: |
- wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
- sudo apt-get install -y clang-15 lldb-15 lld-15 clangd-15 clang-tidy-15 clang-format-15 clang-tools-15 llvm-15-dev lld-15 lldb-15 llvm-15-tools libomp-15-dev libc++-15-dev libc++abi-15-dev libclang-common-15-dev libclang-15-dev libclang-cpp15-dev libunwind-15-dev
- # Make Clang 15 default
- sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/lib/llvm-15/bin/clang++ 100
- sudo update-alternatives --install /usr/bin/clang clang /usr/lib/llvm-15/bin/clang 100
- sudo update-alternatives --install /usr/bin/clang-format clang-format /usr/lib/llvm-15/bin/clang-format 100
- sudo update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/lib/llvm-15/bin/clang-tidy 100
- sudo update-alternatives --install /usr/bin/run-clang-tidy run-clang-tidy /usr/lib/llvm-15/bin/run-clang-tidy 100
- # Alias cc to clang
- sudo update-alternatives --install /usr/bin/cc cc /usr/lib/llvm-15/bin/clang 0
- sudo update-alternatives --install /usr/bin/c++ c++ /usr/lib/llvm-15/bin/clang++ 0
-
- - name: Install gcc-multilib
- # gcc-multilib allows clang to find gnu libraries properly
- run: sudo apt install -y gcc-multilib
-
- - name: Install stable toolchain
- if: steps.install_llvm.outcome == 'success' && steps.install_llvm.conclusion == 'success'
- uses: dtolnay/rust-toolchain@stable
-
- - name: Install wasm-pack
- run: cargo install wasm-pack
-
- - name: Add wasm32 target
- run: rustup target add wasm32-unknown-unknown
-
- - name: Install NodeJS
- uses: actions/setup-node@v4
- with:
- node-version: '20'
-
- - name: Install NodeJS dependencies
- run: npm install --global typedoc typescript
-
- - name: Cache
- uses: actions/cache@v3
- with:
- path: |
- ~/.cargo/bin/
- ~/.cargo/registry/index/
- ~/.cargo/registry/cache/
- ~/.cargo/git/db/
- target/
- key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
-
- - name: Build WASM32 SDK
- run: |
- cd wasm
- bash build-release
- mv release/kaspa-wasm32-sdk.zip ../kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip
-
- archive="kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip"
- asset_name="kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip"
- echo "archive=${archive}" >> $GITHUB_ENV
- echo "asset_name=${asset_name}" >> $GITHUB_ENV
-
- - name: Upload WASM32 SDK
- uses: actions/upload-release-asset@v1
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- with:
- upload_url: ${{ github.event.release.upload_url }}
- asset_path: "./${{ env.archive }}"
- asset_name: "${{ env.asset_name }}"
- asset_content_type: application/zip
+name: Build and upload assets
+on:
+ release:
+ types: [ published ]
+
+jobs:
+ build:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ # Build gnu-linux on ubuntu-18.04 and musl on ubuntu latest
+ # os: [ ubuntu-18.04, ubuntu-latest, windows-latest, macos-latest ]
+ os: [ ubuntu-latest, windows-latest, macos-latest ]
+ name: Building, ${{ matrix.os }}
+ steps:
+ - name: Fix CRLF on Windows
+ if: runner.os == 'Windows'
+ run: git config --global core.autocrlf false
+
+ - name: Checkout sources
+ uses: actions/checkout@v3
+
+ - name: Install Protoc
+ uses: arduino/setup-protoc@v3
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Install stable toolchain
+ uses: dtolnay/rust-toolchain@stable
+
+ - name: Cache Cargo Build Outputs
+ uses: actions/cache@v3
+ with:
+ path: |
+ ~/.cargo/bin/
+ ~/.cargo/registry/index/
+ ~/.cargo/registry/cache/
+ ~/.cargo/git/db/
+ target/
+ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache Toolchain
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/x-tools
+ key: ${{ runner.os }}-musl-${{ hashFiles('**/musl-toolchain/preset.sh') }}
+ restore-keys: |
+ ${{ runner.os }}-musl-
+
+ - name: Build on Linux
+ if: runner.os == 'Linux'
+ # We're using musl to make the binaries statically linked and portable
+ run: |
+ # Run build script for musl toolchain
+ source musl-toolchain/build.sh
+
+ # Go back to the workspace
+ cd $GITHUB_WORKSPACE
+
+ # Build for musl
+ cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet-daemon --release --target x86_64-unknown-linux-musl
+ mkdir bin || true
+ cp target/x86_64-unknown-linux-musl/release/kaspad bin/
+ cp target/x86_64-unknown-linux-musl/release/rothschild bin/
+ cp target/x86_64-unknown-linux-musl/release/kaspa-wallet-daemon bin/
+ archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip"
+ zip -r "${archive}" ./bin/*
+ echo "archive=${archive}" >> $GITHUB_ENV
+
+ - name: Build on Windows
+ if: runner.os == 'Windows'
+ shell: bash
+ run: |
+ cargo build --bin kaspad --release
+ cargo build --bin rothschild --release
+ cargo build --bin kaspa-wallet-daemon --release
+ mkdir bin || true
+ cp target/release/kaspad.exe bin/
+ cp target/release/rothschild.exe bin/
+ cp target/release/kaspa-wallet-daemon.exe bin/
+ archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-win64.zip"
+ powershell "Compress-Archive bin/* \"${archive}\""
+ echo "archive=${archive}" >> $GITHUB_ENV
+
+ - name: Build on MacOS
+ if: runner.os == 'macOS'
+ run: |
+ cargo build --bin kaspad --release
+ cargo build --bin rothschild --release
+ cargo build --bin kaspa-wallet-daemon --release
+ mkdir bin || true
+ cp target/release/kaspad bin/
+ cp target/release/rothschild bin/
+ cp target/release/kaspa-wallet-daemon bin/
+ archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-osx.zip"
+ zip -r "${archive}" ./bin/*
+ echo "archive=${archive}" >> $GITHUB_ENV
+
+ - name: Upload release asset
+ uses: softprops/action-gh-release@v2
+ with:
+ files: ./${{ env.archive }}
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ build-wasm:
+ runs-on: ubuntu-latest
+ name: Building WASM32 SDK
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v3
+
+ - name: Install Protoc
+ uses: arduino/setup-protoc@v3
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Install stable toolchain
+ uses: dtolnay/rust-toolchain@stable
+
+ - name: Install llvm
+ id: install_llvm
+ continue-on-error: true
+ run: |
+ wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
+ sudo apt-get install -y clang-15 lldb-15 lld-15 clangd-15 clang-tidy-15 clang-format-15 clang-tools-15 llvm-15-dev lld-15 lldb-15 llvm-15-tools libomp-15-dev libc++-15-dev libc++abi-15-dev libclang-common-15-dev libclang-15-dev libclang-cpp15-dev libunwind-15-dev
+ # Make Clang 15 default
+ sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/lib/llvm-15/bin/clang++ 100
+ sudo update-alternatives --install /usr/bin/clang clang /usr/lib/llvm-15/bin/clang 100
+ sudo update-alternatives --install /usr/bin/clang-format clang-format /usr/lib/llvm-15/bin/clang-format 100
+ sudo update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/lib/llvm-15/bin/clang-tidy 100
+ sudo update-alternatives --install /usr/bin/run-clang-tidy run-clang-tidy /usr/lib/llvm-15/bin/run-clang-tidy 100
+ # Alias cc to clang
+ sudo update-alternatives --install /usr/bin/cc cc /usr/lib/llvm-15/bin/clang 0
+ sudo update-alternatives --install /usr/bin/c++ c++ /usr/lib/llvm-15/bin/clang++ 0
+
+ - name: Install gcc-multilib
+ # gcc-multilib allows clang to find gnu libraries properly
+ run: sudo apt install -y gcc-multilib
+
+ - name: Install stable toolchain
+ if: steps.install_llvm.outcome == 'success' && steps.install_llvm.conclusion == 'success'
+ uses: dtolnay/rust-toolchain@stable
+
+ - name: Install wasm-pack
+ run: cargo install wasm-pack
+
+ - name: Add wasm32 target
+ run: rustup target add wasm32-unknown-unknown
+
+ - name: Install NodeJS
+ uses: actions/setup-node@v4
+ with:
+ node-version: '20'
+
+ - name: Install NodeJS dependencies
+ run: npm install --global typedoc typescript
+
+ - name: Cache
+ uses: actions/cache@v3
+ with:
+ path: |
+ ~/.cargo/bin/
+ ~/.cargo/registry/index/
+ ~/.cargo/registry/cache/
+ ~/.cargo/git/db/
+ target/
+ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Build WASM32 SDK
+ run: |
+ cd wasm
+ bash build-release
+ cd release
+ zip -q -r kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip kaspa-wasm32-sdk
+ archive="wasm/release/kaspa-wasm32-sdk-${{ github.event.release.tag_name }}.zip"
+ echo "archive=${archive}" >> $GITHUB_ENV
+
+ - name: Upload release asset
+ uses: softprops/action-gh-release@v2
+ with:
+ files: ./${{ env.archive }}
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 0199232fe6..53625dde02 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ web-root
**/.idea/
/rust-toolchain
/.vscode/
+.zed
**/db-*
/testing/integration/testdata/dags_for_json_tests/goref-mainnet
/testing/integration/testdata/dags_for_json_tests/goref-1.6M-tx-10K-blocks
diff --git a/Cargo.lock b/Cargo.lock
index c4e998e45a..e2a2ea78c3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11,7 +11,7 @@ dependencies = [
"macroific",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -57,7 +57,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
dependencies = [
"cfg-if 1.0.0",
- "getrandom",
+ "getrandom 0.2.15",
"once_cell",
"version_check",
"zerocopy",
@@ -324,7 +324,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -335,13 +335,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de"
[[package]]
name = "async-trait"
-version = "0.1.83"
+version = "0.1.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
+checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -486,22 +486,20 @@ dependencies = [
[[package]]
name = "bindgen"
-version = "0.69.4"
+version = "0.72.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0"
+checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
"cexpr",
"clang-sys",
- "itertools 0.12.1",
- "lazy_static",
- "lazycell",
+ "itertools 0.13.0",
"proc-macro2",
"quote",
"regex",
- "rustc-hash 1.1.0",
+ "rustc-hash 2.1.1",
"shlex",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -512,9 +510,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
-version = "2.6.0"
+version = "2.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
[[package]]
name = "blake2"
@@ -578,7 +576,7 @@ dependencies = [
"proc-macro-crate",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
"syn_derive",
]
@@ -829,9 +827,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.5.19"
+version = "4.5.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615"
+checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5"
dependencies = [
"clap_builder",
"clap_derive",
@@ -839,9 +837,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.5.19"
+version = "4.5.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b"
+checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a"
dependencies = [
"anstream",
"anstyle",
@@ -851,21 +849,21 @@ dependencies = [
[[package]]
name = "clap_derive"
-version = "4.5.18"
+version = "4.5.49"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab"
+checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671"
dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
name = "clap_lex"
-version = "0.7.2"
+version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
+checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
[[package]]
name = "colorchoice"
@@ -965,7 +963,7 @@ dependencies = [
"anes",
"cast",
"ciborium",
- "clap 4.5.19",
+ "clap 4.5.51",
"criterion-plot",
"is-terminal",
"itertools 0.10.5",
@@ -1030,7 +1028,7 @@ version = "0.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
"crossterm_winapi",
"libc",
"mio 0.8.11",
@@ -1062,7 +1060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
- "rand_core",
+ "rand_core 0.6.4",
"typenum",
]
@@ -1130,7 +1128,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -1139,8 +1137,18 @@ version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989"
dependencies = [
- "darling_core",
- "darling_macro",
+ "darling_core 0.20.10",
+ "darling_macro 0.20.10",
+]
+
+[[package]]
+name = "darling"
+version = "0.21.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0"
+dependencies = [
+ "darling_core 0.21.3",
+ "darling_macro 0.21.3",
]
[[package]]
@@ -1154,7 +1162,21 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.11.1",
- "syn 2.0.79",
+ "syn 2.0.110",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.21.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim 0.11.1",
+ "syn 2.0.110",
]
[[package]]
@@ -1163,9 +1185,20 @@ version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
dependencies = [
- "darling_core",
+ "darling_core 0.20.10",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.21.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
+dependencies = [
+ "darling_core 0.21.3",
+ "quote",
+ "syn 2.0.110",
]
[[package]]
@@ -1197,7 +1230,7 @@ dependencies = [
"macroific",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -1236,10 +1269,10 @@ version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38"
dependencies = [
- "darling",
+ "darling 0.20.10",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -1249,7 +1282,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc"
dependencies = [
"derive_builder_core",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -1262,7 +1295,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustc_version",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -1390,7 +1423,7 @@ checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c"
dependencies = [
"num-traits",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -1467,7 +1500,7 @@ dependencies = [
"macroific",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -1612,7 +1645,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -1669,6 +1702,18 @@ dependencies = [
"wasm-bindgen",
]
+[[package]]
+name = "getrandom"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
+dependencies = [
+ "cfg-if 1.0.0",
+ "libc",
+ "r-efi",
+ "wasip2",
+]
+
[[package]]
name = "gimli"
version = "0.31.1"
@@ -2081,7 +2126,7 @@ dependencies = [
"http 0.2.12",
"hyper 0.14.30",
"log",
- "rand",
+ "rand 0.8.5",
"tokio",
"url",
"xmltree",
@@ -2098,7 +2143,7 @@ dependencies = [
"delegate-display",
"fancy_constructor",
"js-sys",
- "uuid 1.10.0",
+ "uuid 1.18.1",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
@@ -2211,15 +2256,6 @@ dependencies = [
"either",
]
-[[package]]
-name = "itertools"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
-dependencies = [
- "either",
-]
-
[[package]]
name = "itertools"
version = "0.13.0"
@@ -2285,7 +2321,7 @@ dependencies = [
"local-ip-address",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"rocksdb",
"rv",
"serde",
@@ -2307,15 +2343,15 @@ dependencies = [
"borsh",
"bs58",
"faster-hex",
- "getrandom",
+ "getrandom 0.2.15",
"hmac",
"js-sys",
"kaspa-consensus-core",
"kaspa-utils",
"once_cell",
"pbkdf2",
- "rand",
- "rand_core",
+ "rand 0.8.5",
+ "rand_core 0.6.4",
"ripemd",
"secp256k1",
"serde",
@@ -2387,7 +2423,7 @@ dependencies = [
"kaspa-utils",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"tokio",
]
@@ -2423,7 +2459,7 @@ dependencies = [
"log",
"once_cell",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"rand_distr",
"rayon",
"rocksdb",
@@ -2452,7 +2488,7 @@ dependencies = [
"kaspa-txscript",
"kaspa-utils",
"kaspa-wasm-core",
- "rand",
+ "rand 0.8.5",
"secp256k1",
"serde",
"serde-wasm-bindgen",
@@ -2470,12 +2506,13 @@ dependencies = [
"arc-swap",
"async-trait",
"bincode",
+ "bitflags 2.9.4",
"borsh",
"cfg-if 1.0.0",
"criterion",
"faster-hex",
"futures-util",
- "getrandom",
+ "getrandom 0.2.15",
"itertools 0.13.0",
"js-sys",
"kaspa-addresses",
@@ -2486,7 +2523,7 @@ dependencies = [
"kaspa-muhash",
"kaspa-txscript-errors",
"kaspa-utils",
- "rand",
+ "rand 0.8.5",
"secp256k1",
"serde",
"serde-wasm-bindgen",
@@ -2534,7 +2571,7 @@ dependencies = [
"kaspa-hashes",
"kaspa-txscript",
"kaspa-utils",
- "rand",
+ "rand 0.8.5",
"secp256k1",
"serde",
"serde-wasm-bindgen",
@@ -2560,7 +2597,7 @@ dependencies = [
"kaspa-utils",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"tokio",
]
@@ -2619,7 +2656,7 @@ dependencies = [
"num-traits",
"num_cpus",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"rocksdb",
"serde",
"smallvec",
@@ -2649,7 +2686,7 @@ dependencies = [
"parking_lot",
"paste",
"prost",
- "rand",
+ "rand 0.8.5",
"regex",
"rustls",
"thiserror",
@@ -2669,6 +2706,7 @@ dependencies = [
"faster-hex",
"futures",
"h2 0.4.6",
+ "itertools 0.13.0",
"kaspa-addresses",
"kaspa-consensus-core",
"kaspa-core",
@@ -2678,7 +2716,7 @@ dependencies = [
"log",
"paste",
"prost",
- "rand",
+ "rand 0.8.5",
"regex",
"thiserror",
"tokio",
@@ -2715,14 +2753,24 @@ dependencies = [
"parking_lot",
"paste",
"prost",
- "rand",
+ "rand 0.8.5",
"rustls",
"thiserror",
"tokio",
"tokio-stream",
"tonic",
"triggered",
- "uuid 1.10.0",
+ "uuid 1.18.1",
+]
+
+[[package]]
+name = "kaspa-grpc-simple-client-example"
+version = "1.0.1"
+dependencies = [
+ "futures",
+ "kaspa-grpc-client",
+ "kaspa-rpc-core",
+ "tokio",
]
[[package]]
@@ -2738,7 +2786,7 @@ dependencies = [
"kaspa-utils",
"keccak",
"once_cell",
- "rand",
+ "rand 0.8.5",
"serde",
"sha2",
"sha3",
@@ -2787,7 +2835,7 @@ dependencies = [
"log",
"parking_lot",
"paste",
- "rand",
+ "rand 0.8.5",
"thiserror",
"tokio",
"triggered",
@@ -2804,7 +2852,7 @@ dependencies = [
"kaspa-utils",
"malachite-base",
"malachite-nz",
- "rand_chacha",
+ "rand_chacha 0.3.1",
"serde",
"serde-wasm-bindgen",
"thiserror",
@@ -2855,7 +2903,7 @@ dependencies = [
"kaspa-utils",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"secp256k1",
"serde",
"smallvec",
@@ -2879,8 +2927,8 @@ dependencies = [
"criterion",
"kaspa-hashes",
"kaspa-math",
- "rand",
- "rand_chacha",
+ "rand 0.8.5",
+ "rand_chacha 0.3.1",
"rayon",
"serde",
]
@@ -2910,7 +2958,7 @@ dependencies = [
"log",
"parking_lot",
"paste",
- "rand",
+ "rand 0.8.5",
"serde",
"thiserror",
"tokio",
@@ -2946,11 +2994,11 @@ dependencies = [
"kaspa-utils-tower",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"thiserror",
"tokio",
"tokio-stream",
- "uuid 1.10.0",
+ "uuid 1.18.1",
]
[[package]]
@@ -2973,7 +3021,7 @@ dependencies = [
"log",
"parking_lot",
"prost",
- "rand",
+ "rand 0.8.5",
"seqlock",
"serde",
"thiserror",
@@ -2981,7 +3029,7 @@ dependencies = [
"tokio-stream",
"tonic",
"tonic-build",
- "uuid 1.10.0",
+ "uuid 1.18.1",
]
[[package]]
@@ -3058,13 +3106,14 @@ dependencies = [
"kaspa-utils",
"log",
"paste",
- "rand",
+ "rand 0.8.5",
"serde",
"serde-wasm-bindgen",
"serde_json",
+ "serde_nested_with",
"smallvec",
"thiserror",
- "uuid 1.10.0",
+ "uuid 1.18.1",
"wasm-bindgen",
"workflow-core",
"workflow-serializer",
@@ -3121,7 +3170,7 @@ dependencies = [
"async-trait",
"bincode",
"chrono",
- "clap 4.5.19",
+ "clap 4.5.51",
"criterion",
"crossbeam-channel",
"dhat",
@@ -3159,7 +3208,7 @@ dependencies = [
"kaspad",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"rand_distr",
"rayon",
"rocksdb",
@@ -3193,7 +3242,7 @@ dependencies = [
"kaspa-wasm-core",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"secp256k1",
"serde",
"serde-wasm-bindgen",
@@ -3235,7 +3284,7 @@ dependencies = [
"num_cpus",
"once_cell",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"rlimit",
"serde",
"serde_json",
@@ -3245,7 +3294,7 @@ dependencies = [
"thiserror",
"tokio",
"triggered",
- "uuid 1.10.0",
+ "uuid 1.18.1",
"wasm-bindgen",
]
@@ -3280,24 +3329,12 @@ dependencies = [
"kaspa-utils",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"rocksdb",
"serde",
"thiserror",
]
-[[package]]
-name = "kaspa-wallet"
-version = "1.0.1"
-dependencies = [
- "async-std",
- "async-trait",
- "kaspa-cli",
- "tokio",
- "workflow-log",
- "workflow-terminal",
-]
-
[[package]]
name = "kaspa-wallet-cli-wasm"
version = "1.0.1"
@@ -3365,7 +3402,7 @@ dependencies = [
"md-5",
"pad",
"pbkdf2",
- "rand",
+ "rand 0.8.5",
"regex",
"ripemd",
"secp256k1",
@@ -3393,6 +3430,56 @@ dependencies = [
"zeroize",
]
+[[package]]
+name = "kaspa-wallet-daemon"
+version = "1.0.1"
+dependencies = [
+ "async-std",
+ "async-trait",
+ "clap 4.5.19",
+ "futures-util",
+ "kaspa-consensus-core",
+ "kaspa-core",
+ "kaspa-wallet-core",
+ "kaspa-wallet-grpc-core",
+ "kaspa-wallet-grpc-server",
+ "log",
+ "tokio",
+ "tonic",
+ "workflow-core",
+ "workflow-log",
+ "workflow-terminal",
+]
+
+[[package]]
+name = "kaspa-wallet-grpc-core"
+version = "1.0.1"
+dependencies = [
+ "kaspa-bip32",
+ "kaspa-rpc-core",
+ "kaspa-txscript",
+ "kaspa-wallet-core",
+ "prost",
+ "thiserror",
+ "tonic",
+ "tonic-build",
+]
+
+[[package]]
+name = "kaspa-wallet-grpc-server"
+version = "1.0.1"
+dependencies = [
+ "futures-util",
+ "kaspa-addresses",
+ "kaspa-consensus-core",
+ "kaspa-rpc-core",
+ "kaspa-wallet-core",
+ "kaspa-wallet-grpc-core",
+ "log",
+ "tokio",
+ "tonic",
+]
+
[[package]]
name = "kaspa-wallet-keys"
version = "1.0.1"
@@ -3410,7 +3497,7 @@ dependencies = [
"kaspa-txscript-errors",
"kaspa-utils",
"kaspa-wasm-core",
- "rand",
+ "rand 0.8.5",
"ripemd",
"secp256k1",
"serde",
@@ -3525,7 +3612,7 @@ dependencies = [
"kaspa-rpc-core",
"kaspa-rpc-macros",
"paste",
- "rand",
+ "rand 0.8.5",
"regex",
"rustls",
"serde",
@@ -3564,7 +3651,7 @@ name = "kaspa-wrpc-proxy"
version = "1.0.1"
dependencies = [
"async-trait",
- "clap 4.5.19",
+ "clap 4.5.51",
"kaspa-consensus-core",
"kaspa-grpc-client",
"kaspa-rpc-core",
@@ -3616,6 +3703,17 @@ dependencies = [
"tokio",
]
+[[package]]
+name = "kaspa-wrpc-vcc-v2"
+version = "1.0.1"
+dependencies = [
+ "futures",
+ "kaspa-addresses",
+ "kaspa-rpc-core",
+ "kaspa-wrpc-client",
+ "tokio",
+]
+
[[package]]
name = "kaspa-wrpc-wasm"
version = "1.0.1"
@@ -3652,7 +3750,7 @@ version = "1.0.1"
dependencies = [
"async-channel 2.3.1",
"cfg-if 1.0.0",
- "clap 4.5.19",
+ "clap 4.5.51",
"dhat",
"dirs",
"futures-util",
@@ -3684,10 +3782,11 @@ dependencies = [
"kaspa-wrpc-server",
"log",
"num_cpus",
- "rand",
+ "rand 0.8.5",
"rayon",
"rocksdb",
"serde",
+ "serde_json",
"serde_with",
"tempfile",
"thiserror",
@@ -3750,8 +3849,9 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
[[package]]
name = "libmimalloc-sys"
-version = "0.1.42"
-source = "git+https://github.com/purpleprotocol/mimalloc_rust?rev=eff21096d5ee5337ec89e2b7174f1bbb11026c70#eff21096d5ee5337ec89e2b7174f1bbb11026c70"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "667f4fec20f29dfc6bc7357c582d91796c169ad7e2fce709468aefeb2c099870"
dependencies = [
"cc",
"libc",
@@ -3763,21 +3863,20 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
"libc",
"redox_syscall",
]
[[package]]
name = "librocksdb-sys"
-version = "0.16.0+8.10.0"
+version = "0.17.3+10.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c"
+checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9"
dependencies = [
- "bindgen 0.69.4",
+ "bindgen 0.72.1",
"bzip2-sys",
"cc",
- "glob",
"libc",
"libz-sys",
"lz4-sys",
@@ -3877,7 +3976,7 @@ dependencies = [
"log-mdc",
"once_cell",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"serde",
"serde-value",
"serde_json",
@@ -3946,7 +4045,7 @@ dependencies = [
"cfg-if 1.0.0",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -3957,7 +4056,7 @@ checksum = "13198c120864097a565ccb3ff947672d969932b7975ebd4085732c9f09435e55"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -3970,7 +4069,7 @@ dependencies = [
"macroific_core",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -4051,8 +4150,9 @@ dependencies = [
[[package]]
name = "mimalloc"
-version = "0.1.46"
-source = "git+https://github.com/purpleprotocol/mimalloc_rust?rev=eff21096d5ee5337ec89e2b7174f1bbb11026c70#eff21096d5ee5337ec89e2b7174f1bbb11026c70"
+version = "0.1.48"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1ee66a4b64c74f4ef288bcbb9192ad9c3feaad75193129ac8509af543894fd8"
dependencies = [
"libmimalloc-sys",
]
@@ -4130,7 +4230,7 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8"
dependencies = [
- "rand",
+ "rand 0.8.5",
]
[[package]]
@@ -4164,7 +4264,7 @@ version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
"cfg-if 1.0.0",
"cfg_aliases 0.1.1",
"libc",
@@ -4177,7 +4277,7 @@ version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
"cfg-if 1.0.0",
"cfg_aliases 0.2.1",
"libc",
@@ -4446,7 +4546,7 @@ checksum = "70df726c43c645ef1dde24c7ae14692036ebe5457c92c5f0ec4cfceb99634ff6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -4456,7 +4556,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166"
dependencies = [
"base64ct",
- "rand_core",
+ "rand_core 0.6.4",
"subtle",
]
@@ -4498,7 +4598,7 @@ dependencies = [
"order-stat",
"peroxide-ad",
"puruspe",
- "rand",
+ "rand 0.8.5",
"rand_distr",
]
@@ -4539,7 +4639,7 @@ checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -4625,7 +4725,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba"
dependencies = [
"proc-macro2",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -4660,11 +4760,33 @@ dependencies = [
"version_check",
]
+[[package]]
+name = "proc-macro-error-attr2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+]
+
+[[package]]
+name = "proc-macro-error2"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802"
+dependencies = [
+ "proc-macro-error-attr2",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.110",
+]
+
[[package]]
name = "proc-macro2"
-version = "1.0.86"
+version = "1.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
dependencies = [
"unicode-ident",
]
@@ -4696,7 +4818,7 @@ dependencies = [
"prost",
"prost-types",
"regex",
- "syn 2.0.79",
+ "syn 2.0.110",
"tempfile",
]
@@ -4710,7 +4832,7 @@ dependencies = [
"itertools 0.13.0",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -4738,7 +4860,7 @@ dependencies = [
"pin-project-lite",
"quinn-proto",
"quinn-udp",
- "rustc-hash 2.0.0",
+ "rustc-hash 2.1.1",
"rustls",
"socket2",
"thiserror",
@@ -4753,9 +4875,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6"
dependencies = [
"bytes",
- "rand",
+ "rand 0.8.5",
"ring",
- "rustc-hash 2.0.0",
+ "rustc-hash 2.1.1",
"rustls",
"slab",
"thiserror",
@@ -4778,13 +4900,19 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.37"
+version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
dependencies = [
"proc-macro2",
]
+[[package]]
+name = "r-efi"
+version = "5.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
+
[[package]]
name = "rand"
version = "0.8.5"
@@ -4792,8 +4920,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
- "rand_chacha",
- "rand_core",
+ "rand_chacha 0.3.1",
+ "rand_core 0.6.4",
+]
+
+[[package]]
+name = "rand"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
+dependencies = [
+ "rand_chacha 0.9.0",
+ "rand_core 0.9.3",
]
[[package]]
@@ -4803,7 +4941,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
- "rand_core",
+ "rand_core 0.6.4",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.9.3",
]
[[package]]
@@ -4812,7 +4960,16 @@ version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
- "getrandom",
+ "getrandom 0.2.15",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
+dependencies = [
+ "getrandom 0.3.4",
]
[[package]]
@@ -4822,7 +4979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"
dependencies = [
"num-traits",
- "rand",
+ "rand 0.8.5",
]
[[package]]
@@ -4857,7 +5014,7 @@ version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
]
[[package]]
@@ -4866,7 +5023,7 @@ version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43"
dependencies = [
- "getrandom",
+ "getrandom 0.2.15",
"libredox",
"thiserror",
]
@@ -4953,7 +5110,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
dependencies = [
"cc",
"cfg-if 1.0.0",
- "getrandom",
+ "getrandom 0.2.15",
"libc",
"spin",
"untrusted",
@@ -4980,9 +5137,9 @@ dependencies = [
[[package]]
name = "rocksdb"
-version = "0.22.0"
+version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7"
+checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f"
dependencies = [
"libc",
"librocksdb-sys",
@@ -4993,7 +5150,7 @@ name = "rothschild"
version = "1.0.1"
dependencies = [
"async-channel 2.3.1",
- "clap 4.5.19",
+ "clap 4.5.51",
"criterion",
"faster-hex",
"itertools 0.13.0",
@@ -5007,7 +5164,7 @@ dependencies = [
"kaspa-utils",
"log",
"parking_lot",
- "rand",
+ "rand 0.8.5",
"rayon",
"secp256k1",
"tokio",
@@ -5027,9 +5184,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
[[package]]
name = "rustc-hash"
-version = "2.0.0"
+version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152"
+checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]]
name = "rustc_version"
@@ -5046,7 +5203,7 @@ version = "0.38.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
"errno",
"libc",
"linux-raw-sys",
@@ -5111,7 +5268,7 @@ dependencies = [
"num",
"num-traits",
"peroxide",
- "rand",
+ "rand 0.8.5",
"rand_distr",
"special",
]
@@ -5152,7 +5309,7 @@ version = "0.29.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113"
dependencies = [
- "rand",
+ "rand 0.8.5",
"secp256k1-sys",
"serde",
]
@@ -5228,7 +5385,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -5243,6 +5400,18 @@ dependencies = [
"serde",
]
+[[package]]
+name = "serde_nested_with"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bc84538493ef215370434907a7dca8117778d16ac1acd0482ce88a0f5cf19707"
+dependencies = [
+ "darling 0.21.3",
+ "proc-macro-error2",
+ "quote",
+ "syn 2.0.110",
+]
+
[[package]]
name = "serde_repr"
version = "0.1.19"
@@ -5251,7 +5420,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -5299,10 +5468,10 @@ version = "3.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d"
dependencies = [
- "darling",
+ "darling 0.20.10",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -5402,7 +5571,7 @@ version = "1.0.1"
dependencies = [
"async-channel 2.3.1",
"cfg-if 1.0.0",
- "clap 4.5.19",
+ "clap 4.5.51",
"dhat",
"futures",
"futures-util",
@@ -5419,10 +5588,11 @@ dependencies = [
"kaspa-utils",
"log",
"num_cpus",
- "rand",
+ "rand 0.8.5",
"rand_distr",
"rayon",
"secp256k1",
+ "serde_json",
"tokio",
]
@@ -5534,9 +5704,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.79"
+version = "2.0.110"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590"
+checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea"
dependencies = [
"proc-macro2",
"quote",
@@ -5552,7 +5722,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -5590,7 +5760,7 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
"core-foundation",
"system-configuration-sys",
]
@@ -5664,7 +5834,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -5775,7 +5945,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -5908,7 +6078,7 @@ dependencies = [
"prost-build",
"prost-types",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -5922,7 +6092,7 @@ dependencies = [
"indexmap 1.9.3",
"pin-project",
"pin-project-lite",
- "rand",
+ "rand 0.8.5",
"slab",
"tokio",
"tokio-util",
@@ -5951,7 +6121,7 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"
dependencies = [
- "bitflags 2.6.0",
+ "bitflags 2.9.4",
"bytes",
"http 1.1.0",
"http-body 1.0.1",
@@ -5992,7 +6162,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -6028,7 +6198,7 @@ dependencies = [
"http 1.1.0",
"httparse",
"log",
- "rand",
+ "rand 0.8.5",
"rustls",
"rustls-pki-types",
"sha1",
@@ -6150,17 +6320,18 @@ version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
- "getrandom",
+ "getrandom 0.2.15",
]
[[package]]
name = "uuid"
-version = "1.10.0"
+version = "1.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314"
+checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2"
dependencies = [
- "getrandom",
- "rand",
+ "getrandom 0.3.4",
+ "js-sys",
+ "rand 0.9.2",
"serde",
"wasm-bindgen",
]
@@ -6229,6 +6400,15 @@ version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+[[package]]
+name = "wasip2"
+version = "1.0.1+wasi-0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
+dependencies = [
+ "wit-bindgen",
+]
+
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
@@ -6253,7 +6433,7 @@ dependencies = [
"log",
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
"wasm-bindgen-shared",
]
@@ -6288,7 +6468,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -6323,7 +6503,7 @@ checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -6427,7 +6607,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -6438,7 +6618,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
@@ -6637,6 +6817,12 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "wit-bindgen"
+version = "0.46.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
+
[[package]]
name = "workflow-chrome"
version = "0.18.0"
@@ -6667,10 +6853,10 @@ dependencies = [
"dirs",
"faster-hex",
"futures",
- "getrandom",
+ "getrandom 0.2.15",
"instant",
"js-sys",
- "rand",
+ "rand 0.8.5",
"rlimit",
"serde",
"serde-wasm-bindgen",
@@ -6798,7 +6984,7 @@ dependencies = [
"futures",
"js-sys",
"nw-sys",
- "rand",
+ "rand 0.8.5",
"serde",
"serde-wasm-bindgen",
"thiserror",
@@ -6848,9 +7034,9 @@ dependencies = [
"downcast-rs",
"futures",
"futures-util",
- "getrandom",
+ "getrandom 0.2.15",
"manual_future",
- "rand",
+ "rand 0.8.5",
"serde",
"serde_json",
"thiserror",
@@ -7093,7 +7279,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.79",
+ "syn 2.0.110",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index d6367a9af2..1241093913 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,7 +6,9 @@ members = [
"core",
"wallet/macros",
"wallet/core",
- "wallet/native",
+ "wallet/grpc/core",
+ "wallet/grpc/server",
+ "wallet/daemon",
"wallet/wasm",
"wallet/bip32",
"wallet/keys",
@@ -36,12 +38,14 @@ members = [
"rpc/grpc/core",
"rpc/grpc/client",
"rpc/grpc/server",
+ "rpc/grpc/examples/simple_client",
"rpc/wrpc/server",
"rpc/wrpc/client",
"rpc/wrpc/proxy",
"rpc/wrpc/wasm",
"rpc/wrpc/examples/subscriber",
"rpc/wrpc/examples/simple_client",
+ "rpc/wrpc/examples/vcc_v2",
"mining",
"mining/errors",
"protocol/p2p",
@@ -121,11 +125,13 @@ kaspa-txscript-errors = { version = "1.0.1", path = "crypto/txscript/errors" }
kaspa-utils = { version = "1.0.1", path = "utils" }
kaspa-utils-tower = { version = "1.0.1", path = "utils/tower" }
kaspa-utxoindex = { version = "1.0.1", path = "indexes/utxoindex" }
-kaspa-wallet = { version = "1.0.1", path = "wallet/native" }
+kaspa-wallet-daemon = { version = "1.0.1", path = "wallet/daemon" }
kaspa-wallet-cli-wasm = { version = "1.0.1", path = "wallet/wasm" }
kaspa-wallet-keys = { version = "1.0.1", path = "wallet/keys" }
kaspa-wallet-pskt = { version = "1.0.1", path = "wallet/pskt" }
kaspa-wallet-core = { version = "1.0.1", path = "wallet/core" }
+kaspa-wallet-grpc-core = { version = "1.0.0", path = "wallet/grpc/core" }
+kaspa-wallet-grpc-server = { version = "1.0.0", path = "wallet/grpc/server" }
kaspa-wallet-macros = { version = "1.0.1", path = "wallet/macros" }
kaspa-wasm = { version = "1.0.1", path = "wasm" }
kaspa-wasm-core = { version = "1.0.1", path = "wasm/core" }
@@ -136,6 +142,7 @@ kaspa-wrpc-wasm = { version = "1.0.1", path = "rpc/wrpc/wasm" }
kaspa-wrpc-example-subscriber = { version = "1.0.1", path = "rpc/wrpc/examples/subscriber" }
kaspad = { version = "1.0.1", path = "kaspad" }
kaspa-alloc = { version = "1.0.1", path = "utils/alloc" }
+kaspa-wallet = { version = "1.0.1", path = "wallet/native" }
# external
aes = "0.8.3"
@@ -145,9 +152,10 @@ argon2 = "0.5.2"
async-channel = "2.0.0"
async-std = { version = "1.12.0", features = ['attributes'] }
async-stream = "0.3.5"
-async-trait = "0.1.74"
+async-trait = "0.1.88"
base64 = "0.22.1"
bincode = { version = "1.3.3", default-features = false }
+bitflags = "2.9.4"
blake2b_simd = "1.0.2"
borsh = { version = "1.5.1", features = ["derive", "rc"] }
bs58 = { version = "0.5.0", features = ["check"], default-features = false }
@@ -156,7 +164,7 @@ cc = "1.0.83"
cfb-mode = "0.8.2"
cfg-if = "1.0.0"
chacha20poly1305 = "0.10.1"
-clap = { version = "4.4.7", features = ["derive", "string", "cargo"] }
+clap = { version = "4.5.35", features = ["derive", "string", "cargo"] }
convert_case = "0.6.0"
criterion = { version = "0.5.1", default-features = false }
crossbeam-channel = "0.5.8"
@@ -212,7 +220,7 @@ num-traits = "0.2.17"
once_cell = "1.18.0"
pad = "0.1.6"
parking_lot = "0.12.1"
-paste = "1.0.14"
+paste = "1.0.15"
pbkdf2 = "0.12.2"
portable-atomic = { version = "1.5.1", features = ["float"] }
prost = "0.13.2"
@@ -224,7 +232,7 @@ rayon = "1.8.0"
regex = "1.10.2"
ripemd = { version = "0.1.3", default-features = false }
rlimit = "0.10.1"
-rocksdb = "0.22.0"
+rocksdb = "0.24.0"
rv = "0.16.4"
secp256k1 = { version = "0.29.0", features = [
"global-context",
@@ -235,6 +243,7 @@ separator = "0.4.1"
seqlock = "0.2.0"
serde = { version = "1.0.190", features = ["derive", "rc"] }
serde_bytes = "0.11.12"
+serde_nested_with = "0.2.5" # helper, can be removed when https://github.com/serde-rs/serde/issues/723 is resolved
serde_json = "1.0.107"
serde_repr = "0.1.18"
serde-value = "0.7.0"
@@ -243,7 +252,7 @@ sha1 = "0.10.6"
sha2 = "0.10.8"
sha3 = "0.10.8"
slugify-rs = "0.0.3"
-smallvec = { version = "1.11.1", features = ["serde"] }
+smallvec = { version = "1.11.1", features = ["serde", "const_generics"] }
sorted-insert = "0.2.3"
subtle = { version = "2.5.0", default-features = false }
sysinfo = "0.31.2"
@@ -256,7 +265,7 @@ toml = "0.8.8"
tonic = { version = "0.12.3", features = ["tls-webpki-roots", "gzip", "transport"] }
tonic-build = { version = "0.12.3", features = ["prost"] }
triggered = "0.1.2"
-uuid = { version = "1.5.0", features = ["v4", "fast-rng", "serde"] }
+uuid = { version = "1.16.0", features = ["v4", "fast-rng", "serde", "js"] }
wasm-bindgen = { version = "0.2.100", features = ["serde-serialize"] }
wasm-bindgen-futures = "0.4.43"
wasm-bindgen-test = "0.3.50"
diff --git a/README.md b/README.md
index 05d7a07cff..47a2a1b0b7 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
[](https://deepwiki.com/kaspanet/rusty-kaspa)
-
-# Kaspa On Rust
+# Kaspa on Rust
Welcome to the Rust-based implementation of the Kaspa full-node and its ancillary libraries. The contained node release serves as a drop-in replacement to the established Golang node and to date is the recommended node software for the Kaspa network, introducing developers to the possibilities of Rust in the Kaspa network's context.
@@ -158,6 +158,52 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of
+
+ Building with Docker
+
+ You can build the project using Docker in two ways: a simple single-architecture build, or a multi-architecture build using the provided script.
+
+ #### 1. Simple Docker Build
+
+ To build for your current architecture (e.g., `linux/amd64`):
+
+ ```sh
+ docker build -f docker/Dockerfile.kaspad -t kaspad:latest .
+ ```
+
+ Replace `Dockerfile.kaspad` with the appropriate Dockerfile for your target (`kaspad`, `kaspa-wallet`, `rothschild`, or `simpa`).
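+
+ If the build succeeds, you can try the resulting image directly. A minimal sketch (the entrypoint and the port mappings below are assumptions; adjust them to match your Dockerfile):
+
+ ```sh
+ # Assumes the image entrypoint is the kaspad binary; 16111/16110 are the default mainnet P2P/gRPC ports
+ docker run --rm -p 16111:16111 -p 16110:16110 kaspad:latest --utxoindex
+ ```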
+
+ #### 2. Multi-Architecture Build
+
+ To build images for multiple architectures (e.g., `linux/amd64` and `linux/arm64`) and optionally push them to a registry, use the `build-docker-multi-arch.sh` script:
+
+ ```sh
+ ./build-docker-multi-arch.sh --tag <tag> --artifact <artifact> [--arches "<arches>"] [--push]
+ ```
+
+ - `--tag <tag>`: **(required)** The Docker image tag to use.
+ - `--artifact <artifact>`: The build target/artifact (default: `kaspad`). Must match the Dockerfile name, e.g., `kaspad` for `Dockerfile.kaspad`.
+ - `--arches "<arches>"`: Space-separated list of architectures (default: `"linux/amd64 linux/arm64"`).
+ - `--push`: If specified, the built images will be pushed to your Docker registry.
+
+ **Examples:**
+
+ Build and push a multi-arch image for `kaspad`:
+
+ ```sh
+ ./build-docker-multi-arch.sh --tag myrepo/kaspad:latest --artifact kaspad --push
+ ```
+
+ Build a multi-arch image for `kaspa-wallet` without pushing:
+
+ ```sh
+ ./build-docker-multi-arch.sh --tag kaspa-wallet:test --artifact kaspa-wallet
+ ```
+
+ **Note:**
+ In order to use `build-docker-multi-arch.sh`, you need Docker with Buildx enabled.
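+
+ A quick way to verify that Buildx is available before running the script (these are standard Docker commands; the script itself creates and bootstraps its own `mybuilder` builder):
+
+ ```sh
+ docker buildx version   # prints the Buildx version if the plugin is installed
+ docker buildx ls        # lists existing builders and the platforms they support
+ ```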
+
+
Building WASM32 SDK
@@ -241,6 +287,24 @@ The framework is compatible with all major desktop and mobile browsers.
```bash
cargo run --release --bin kaspad -- --testnet
```
+
+
+
+ Start a devnet node
+
+
+Start the devnet node with the following command:
+
+```bash
+cargo run --bin kaspad -- --devnet --enable-unsynced-mining --rpclisten=127.0.0.1 --rpclisten-borsh=127.0.0.1 --utxoindex
+```
+* `enable-unsynced-mining` is required when the network isn't synchronized, which is the case on the first launch
+* `utxoindex` enables the UTXO index, which is necessary for wallet functionality.
+* `rpclisten` and `rpclisten-borsh` are likely to be required by mining software.
+
+Note: it takes some time for the difficulty to adjust, so you may need to wait a while before blocks are mined consistently.
+
+
@@ -421,4 +485,12 @@ Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.1
```
In this command we set the `loglevel` to `INFO`.
+
+
+
+
+Override consensus parameters
+
+You can experiment with non-standard consensus parameters in non-mainnet environments by supplying a JSON file with `--override-params-file <path>`. See [docs/override-params.md](docs/override-params.md) for a more detailed explanation.
+
\ No newline at end of file
diff --git a/build-docker-multi-arch.sh b/build-docker-multi-arch.sh
new file mode 100755
index 0000000000..ec46b1af2e
--- /dev/null
+++ b/build-docker-multi-arch.sh
@@ -0,0 +1,83 @@
+#!/bin/sh
+
+set -e
+
+# Parse command line arguments (--tag, --artifact, --arches, --push)
+ARCHES="linux/amd64 linux/arm64"
+ARTIFACT="kaspad"
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --tag)
+ shift
+ TAG="$1"
+ ;;
+ --arches)
+ shift
+ ARCHES="$1"
+ ;;
+ --push)
+ PUSH="push"
+ ;;
+ --artifact)
+ shift
+ ARTIFACT="$1"
+ ;;
+ --help|-h)
+ echo "Usage: $0 --tag --artifact [--arches ] [--push]"
+ echo ""
+ echo " --tag Docker image tag (required)"
+ echo " --artifact Build target/artifact (default: \"$ARTIFACT\")"
+ echo " --arches Space-separated list of architectures (default: \"$ARCHES\")"
+ echo " --push Push the built images"
+ echo " --help, -h Show this help message"
+ exit 0
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+done
+
+if [ -z "$TAG" ]; then
+ echo "Error: --tag argument is required"
+ exit 1
+fi
+
+BUILD_DIR="$(dirname $0)"
+docker=docker
+id -nG $USER | grep -qw docker || docker="sudo $docker"
+
+multi_arch_build() {
+ echo
+ echo "===================================================="
+ echo " Running build for $1"
+ echo "===================================================="
+ dockerRepo="${DOCKER_REPO_PREFIX}-$1"
+ dockerRepoArgs=
+
+ if [ "$PUSH" = "push" ]; then
+ dockerRepoArgs="$dockerRepoArgs --push"
+ fi
+
+ dockerRepoArgs="$dockerRepoArgs --tag $TAG"
+ dockerRepoArgs="$dockerRepoArgs -f docker/Dockerfile.$1"
+
+ $docker buildx build --platform=$(echo $ARCHES | sed 's/ /,/g') $dockerRepoArgs \
+ --tag $TAG "$BUILD_DIR"
+ echo "===================================================="
+ echo " Completed build for $1"
+ echo "===================================================="
+}
+
+echo
+echo "===================================================="
+echo " Setup multi arch build ($ARCHES)"
+echo "===================================================="
+$docker buildx create --name mybuilder \
+--driver docker-container \
+--node mybuilder0 \
+--use --bootstrap
+$docker buildx create --name=mybuilder --append --node=mybuilder0 --platform=$(echo $ARCHES | sed 's/ /,/g') --bootstrap --use
+echo "SUCCESS - doing multi arch build"
+multi_arch_build $ARTIFACT
diff --git a/cli/src/modules/rpc.rs b/cli/src/modules/rpc.rs
index 75bc50f421..6e42922b14 100644
--- a/cli/src/modules/rpc.rs
+++ b/cli/src/modules/rpc.rs
@@ -290,6 +290,29 @@ impl Rpc {
self.println(&ctx, result);
}
+ RpcApiOps::GetVirtualChainFromBlockV2 => {
+ if argv.is_empty() {
+ return Err(Error::custom("Missing startHash argument"));
+ };
+
+ let start_hash = RpcHash::from_hex(argv.remove(0).as_str())?;
+
+ let verbosity_level_i32 = argv.pop().and_then(|arg| arg.parse::<i32>().ok()).unwrap_or_default();
+ let verbosity_level = RpcDataVerbosityLevel::try_from(verbosity_level_i32)?;
+
+ let result = rpc
+ .get_virtual_chain_from_block_v2_call(
+ None,
+ GetVirtualChainFromBlockV2Request {
+ start_hash,
+ data_verbosity_level: Some(verbosity_level),
+ min_confirmation_count: None,
+ },
+ )
+ .await;
+
+ self.println(&ctx, result);
+ }
_ => {
tprintln!(ctx, "rpc method exists but is not supported by the cli: '{op_str}'\r\n");
return Ok(());
diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs
index b220a8ab1b..9f630204c3 100644
--- a/components/addressmanager/src/lib.rs
+++ b/components/addressmanager/src/lib.rs
@@ -535,7 +535,11 @@ mod address_store_with_cache {
assert_eq!(iter.count(), 0);
}
+ // This test is non-deterministic (statistically flaky), so it is ignored by default.
+ // Every developer who changes the address manager logic should run this test locally before sending a PR.
+ // TODO: Maybe change statistical parameters to reduce the failure rate?
#[test]
+ #[ignore]
fn test_network_distribution_weighting() {
kaspa_core::log::try_init_logger("info");
diff --git a/components/consensusmanager/src/session.rs b/components/consensusmanager/src/session.rs
index 46782a318b..c77cf730be 100644
--- a/components/consensusmanager/src/session.rs
+++ b/components/consensusmanager/src/session.rs
@@ -3,7 +3,7 @@
//! We use newtypes in order to simplify changing the underlying lock in the future
use kaspa_consensus_core::{
- acceptance_data::AcceptanceData,
+ acceptance_data::{AcceptanceData, MergesetBlockAcceptanceData},
api::{BlockCount, BlockValidationFutures, ConsensusApi, ConsensusStats, DynConsensus},
block::Block,
blockstatus::BlockStatus,
@@ -13,8 +13,7 @@ use kaspa_consensus_core::{
mass::{ContextualMasses, NonContextualMasses},
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList},
trusted::{ExternalGhostdagData, TrustedBlock},
- tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry},
- utxo::utxo_inquirer::UtxoInquirerError,
+ tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, TransactionQueryResult, TransactionType, UtxoEntry},
BlockHashSet, BlueWorkType, ChainPath, Hash,
};
use kaspa_utils::sync::rwlock::*;
@@ -262,7 +261,7 @@ impl ConsensusSessionOwned {
self.clone().spawn_blocking(move |c| c.get_current_block_color(hash)).await
}
- /// retention period root refers to the earliest block from which the current node has full header & block data
+ /// retention period root refers to the earliest block from which the current node has full header & block data
pub async fn async_get_retention_period_root(&self) -> Hash {
self.clone().spawn_blocking(|c| c.get_retention_period_root()).await
}
@@ -316,12 +315,27 @@ impl ConsensusSessionOwned {
self.clone().spawn_blocking(|c| c.get_chain_block_samples()).await
}
- pub async fn async_get_populated_transaction(
+ pub async fn async_get_transactions_by_accepting_daa_score(
&self,
- txid: Hash,
- accepting_block_daa_score: u64,
- ) -> Result<SignableTransaction, UtxoInquirerError> {
- self.clone().spawn_blocking(move |c| c.get_populated_transaction(txid, accepting_block_daa_score)).await
+ accepting_daa_score: u64,
+ tx_ids: Option<Vec<TransactionId>>,
+ tx_type: TransactionType,
+ ) -> ConsensusResult<TransactionQueryResult> {
+ self.clone().spawn_blocking(move |c| c.get_transactions_by_accepting_daa_score(accepting_daa_score, tx_ids, tx_type)).await
+ }
+
+ pub async fn async_get_transactions_by_block_acceptance_data(
+ &self,
+ accepting_block: Hash,
+ block_acceptance_data: MergesetBlockAcceptanceData,
+ tx_ids: Option<Vec<TransactionId>>,
+ tx_type: TransactionType,
+ ) -> ConsensusResult<TransactionQueryResult> {
+ self.clone()
+ .spawn_blocking(move |c| {
+ c.get_transactions_by_block_acceptance_data(accepting_block, block_acceptance_data, tx_ids, tx_type)
+ })
+ .await
}
/// Returns the antipast of block `hash` from the POV of `context`, i.e. `antipast(hash) ∩ past(context)`.
@@ -369,6 +383,10 @@ impl ConsensusSessionOwned {
self.clone().spawn_blocking(move |c| c.get_block(hash)).await
}
+ pub async fn async_get_block_body(&self, hash: Hash) -> ConsensusResult>> {
+ self.clone().spawn_blocking(move |c| c.get_block_body(hash)).await
+ }
+
pub async fn async_get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult {
self.clone().spawn_blocking(move |c| c.get_block_even_if_header_only(hash)).await
}
@@ -424,16 +442,16 @@ impl ConsensusSessionOwned {
self.clone().spawn_blocking(move |c| c.get_missing_block_body_hashes(high)).await
}
- pub async fn async_pruning_point(&self) -> Hash {
- self.clone().spawn_blocking(|c| c.pruning_point()).await
+ pub async fn async_get_body_missing_anticone(&self) -> Vec<Hash> {
+ self.clone().spawn_blocking(move |c| c.get_body_missing_anticone()).await
}
- pub async fn async_get_daa_window(&self, hash: Hash) -> ConsensusResult<Vec<Hash>> {
- self.clone().spawn_blocking(move |c| c.get_daa_window(hash)).await
+ pub async fn async_clear_body_missing_anticone_set(&self) {
+ self.clone().spawn_blocking(move |c| c.clear_body_missing_anticone_set()).await
}
- pub async fn async_get_trusted_block_associated_ghostdag_data_block_hashes(&self, hash: Hash) -> ConsensusResult<Vec<Hash>> {
- self.clone().spawn_blocking(move |c| c.get_trusted_block_associated_ghostdag_data_block_hashes(hash)).await
+ pub async fn async_pruning_point(&self) -> Hash {
+ self.clone().spawn_blocking(|c| c.pruning_point()).await
}
pub async fn async_estimate_network_hashes_per_second(
@@ -459,6 +477,30 @@ impl ConsensusSessionOwned {
pub async fn async_finality_point(&self) -> Hash {
self.clone().spawn_blocking(move |c| c.finality_point()).await
}
+ pub async fn async_clear_pruning_utxo_set(&self) {
+ self.clone().spawn_blocking(move |c| c.clear_pruning_utxo_set()).await
+ }
+ pub async fn async_is_pruning_utxoset_stable(&self) -> bool {
+ self.clone().spawn_blocking(move |c| c.is_pruning_utxoset_stable()).await
+ }
+ pub async fn async_is_pruning_point_anticone_fully_synced(&self) -> bool {
+ self.clone().spawn_blocking(move |c| c.is_pruning_point_anticone_fully_synced()).await
+ }
+ pub async fn async_is_consensus_in_transitional_ibd_state(&self) -> bool {
+ self.clone().spawn_blocking(move |c| c.is_consensus_in_transitional_ibd_state()).await
+ }
+ pub async fn async_set_pruning_utxoset_unstable(&self) {
+ self.clone().spawn_blocking(move |c| c.set_pruning_utxoset_stable_flag(false)).await
+ }
+ pub async fn async_set_pruning_utxoset_stable(&self) {
+ self.clone().spawn_blocking(move |c| c.set_pruning_utxoset_stable_flag(true)).await
+ }
+ pub async fn async_verify_is_pruning_sample(&self, candidate_hash: Hash) -> ConsensusResult<()> {
+ self.clone().spawn_blocking(move |c| c.verify_is_pruning_sample(candidate_hash)).await
+ }
+ pub async fn async_intrusive_pruning_point_update(&self, new_pruning_point: Hash, syncer_sink: Hash) -> ConsensusResult<()> {
+ self.clone().spawn_blocking(move |c| c.intrusive_pruning_point_update(new_pruning_point, syncer_sink)).await
+ }
}
pub type ConsensusProxy = ConsensusSessionOwned;
diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs
index 772d2c9070..42804bc328 100644
--- a/consensus/benches/check_scripts.rs
+++ b/consensus/benches/check_scripts.rs
@@ -89,7 +89,7 @@ fn benchmark_check_scripts(c: &mut Criterion) {
let cache = Cache::new(inputs_count as u64);
b.iter(|| {
cache.clear();
- check_scripts_sequential(black_box(&cache), black_box(&tx.as_verifiable()), false, false).unwrap();
+ check_scripts_sequential(black_box(&cache), black_box(&tx.as_verifiable())).unwrap();
})
});
@@ -98,7 +98,7 @@ fn benchmark_check_scripts(c: &mut Criterion) {
let cache = Cache::new(inputs_count as u64);
b.iter(|| {
cache.clear();
- check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false, false).unwrap();
+ check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable())).unwrap();
})
});
@@ -110,14 +110,7 @@ fn benchmark_check_scripts(c: &mut Criterion) {
let cache = Cache::new(inputs_count as u64);
b.iter(|| {
cache.clear();
- check_scripts_par_iter_pool(
- black_box(&cache),
- black_box(&tx.as_verifiable()),
- black_box(&pool),
- false,
- false,
- )
- .unwrap();
+ check_scripts_par_iter_pool(black_box(&cache), black_box(&tx.as_verifiable()), black_box(&pool)).unwrap();
})
});
}
@@ -153,7 +146,7 @@ fn benchmark_check_scripts_with_payload(c: &mut Criterion) {
let cache = Cache::new(inputs_count as u64);
b.iter(|| {
cache.clear();
- check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false, false).unwrap();
+ check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable())).unwrap();
})
});
}
diff --git a/consensus/client/src/error.rs b/consensus/client/src/error.rs
index e632f517d5..8f4575dd84 100644
--- a/consensus/client/src/error.rs
+++ b/consensus/client/src/error.rs
@@ -42,6 +42,9 @@ pub enum Error {
#[error(transparent)]
NetworkType(#[from] kaspa_consensus_core::network::NetworkTypeError),
+ #[error(transparent)]
+ CompressedParents(#[from] kaspa_consensus_core::errors::header::CompressedParentsError),
+
#[error("Error converting property `{0}`: {1}")]
Convert(&'static str, String),
diff --git a/consensus/client/src/header.rs b/consensus/client/src/header.rs
index 7d2e25b393..5b4392ed4d 100644
--- a/consensus/client/src/header.rs
+++ b/consensus/client/src/header.rs
@@ -233,7 +233,7 @@ impl Header {
#[wasm_bindgen(setter = parentsByLevel)]
pub fn set_parents_by_level_from_js_value(&mut self, js_value: JsValue) {
let array = Array::from(&js_value);
- self.inner_mut().parents_by_level = array
+ let parents = array
.iter()
.map(|jsv| {
Array::from(&jsv)
@@ -246,6 +246,8 @@ impl Header {
.unwrap_or_else(|err| {
panic!("{}", err);
});
+
+ self.inner_mut().parents_by_level = parents.try_into().unwrap();
}
#[wasm_bindgen(getter = blueWork)]
@@ -272,7 +274,7 @@ impl TryCastFromJs for Header {
{
Self::resolve(value, || {
if let Some(object) = Object::try_from(value.as_ref()) {
- let parents_by_level = object
+ let parents_by_level_vec = object
.get_vec("parentsByLevel")?
.iter()
.map(|jsv| {
@@ -284,6 +286,8 @@ impl TryCastFromJs for Header {
})
 .collect::<Result<Vec<Vec<Hash>>, Error>>()?;
+ let parents_by_level = parents_by_level_vec.try_into()?;
+
let header = native::Header {
hash: object.get_value("hash")?.try_into_owned().unwrap_or_default(),
version: object.get_u16("version")?,
diff --git a/consensus/client/src/outpoint.rs b/consensus/client/src/outpoint.rs
index a9b39f5e4f..c2f59092e1 100644
--- a/consensus/client/src/outpoint.rs
+++ b/consensus/client/src/outpoint.rs
@@ -188,6 +188,12 @@ impl From<&TransactionOutpoint> for cctx::TransactionOutpoint {
}
}
+impl PartialEq for TransactionOutpoint {
+ fn eq(&self, other: &cctx::TransactionOutpoint) -> bool {
+ self.inner.transaction_id == other.transaction_id && self.inner.index == other.index
+ }
+}
+
impl TransactionOutpoint {
pub fn simulated() -> Self {
Self::new(TransactionId::from_slice(&rand::random::<[u8; kaspa_hashes::HASH_SIZE]>()), 0)
diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml
index 228b4ac11d..ad493998e3 100644
--- a/consensus/core/Cargo.toml
+++ b/consensus/core/Cargo.toml
@@ -18,6 +18,7 @@ default = []
arc-swap.workspace = true
async-trait.workspace = true
borsh.workspace = true
+bitflags.workspace = true
cfg-if.workspace = true
faster-hex.workspace = true
futures-util.workspace = true
diff --git a/consensus/core/src/acceptance_data.rs b/consensus/core/src/acceptance_data.rs
index 2ab2355839..95fcaa20ea 100644
--- a/consensus/core/src/acceptance_data.rs
+++ b/consensus/core/src/acceptance_data.rs
@@ -3,9 +3,11 @@ use serde::{Deserialize, Serialize};
use crate::tx::TransactionId;
+/// Holds the acceptance data of a mergeset: a list of all its merged blocks with their accepted transactions
 pub type AcceptanceData = Vec<MergesetBlockAcceptanceData>;
#[derive(Debug, Clone, Serialize, Deserialize)]
+/// Holds a merged block with its accepted transactions
pub struct MergesetBlockAcceptanceData {
pub block_hash: Hash,
pub accepted_transactions: Vec,
diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs
index c33a537a76..39c40da352 100644
--- a/consensus/core/src/api/mod.rs
+++ b/consensus/core/src/api/mod.rs
@@ -3,7 +3,7 @@ use kaspa_muhash::MuHash;
use std::sync::Arc;
use crate::{
- acceptance_data::AcceptanceData,
+ acceptance_data::{AcceptanceData, MergesetBlockAcceptanceData},
api::args::{TransactionValidationArgs, TransactionValidationBatchArgs},
block::{Block, BlockTemplate, TemplateBuildMode, TemplateTransactionSelector, VirtualStateApproxId},
blockstatus::BlockStatus,
@@ -20,8 +20,10 @@ use crate::{
mass::{ContextualMasses, NonContextualMasses},
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata},
trusted::{ExternalGhostdagData, TrustedBlock},
- tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry},
- utxo::utxo_inquirer::UtxoInquirerError,
+ tx::{
+ MutableTransaction, Transaction, TransactionId, TransactionIndexType, TransactionOutpoint, TransactionQueryResult,
+ TransactionType, UtxoEntry,
+ },
BlockHashSet, BlueWorkType, ChainPath,
};
use kaspa_hashes::Hash;
@@ -151,7 +153,7 @@ pub trait ConsensusApi: Send + Sync {
unimplemented!()
}
- /// retention period root refers to the earliest block from which the current node has full header & block data
+ /// retention period root refers to the earliest block from which the current node has full header & block data
fn get_retention_period_root(&self) -> Hash {
unimplemented!()
}
@@ -162,7 +164,7 @@ pub trait ConsensusApi: Send + Sync {
/// Gets the virtual chain paths from `low` to the `sink` hash, or until `chain_path_added_limit` is reached
///
- /// Note:
+ /// Note:
/// 1) `chain_path_added_limit` will populate removed fully, and then the added chain path, up to `chain_path_added_limit` amount of hashes.
/// 1.1) use `None to impose no limit with optimized backward chain iteration, for better performance in cases where batching is not required.
 fn get_virtual_chain_from_block(&self, low: Hash, chain_path_added_limit: Option<usize>) -> ConsensusResult<ChainPath> {
@@ -175,7 +177,32 @@ pub trait ConsensusApi: Send + Sync {
/// Returns the fully populated transaction with the given txid which was accepted at the provided accepting_block_daa_score.
/// The argument `accepting_block_daa_score` is expected to be the DAA score of the accepting chain block of `txid`.
- fn get_populated_transaction(&self, txid: Hash, accepting_block_daa_score: u64) -> Result<SignableTransaction, UtxoInquirerError> {
+ /// Note: If the transaction vec is None, the function returns all accepted transactions.
+ fn get_transactions_by_accepting_daa_score(
+ &self,
+ accepting_daa_score: u64,
+ tx_ids: Option<Vec<TransactionId>>,
+ tx_type: TransactionType,
+ ) -> ConsensusResult<TransactionQueryResult> {
+ unimplemented!()
+ }
+
+ fn get_transactions_by_block_acceptance_data(
+ &self,
+ accepting_block: Hash,
+ block_acceptance_data: MergesetBlockAcceptanceData,
+ tx_ids: Option<Vec<TransactionId>>,
+ tx_type: TransactionType,
+ ) -> ConsensusResult<TransactionQueryResult> {
+ unimplemented!()
+ }
+
+ fn get_transactions_by_accepting_block(
+ &self,
+ accepting_block: Hash,
+ tx_ids: Option<Vec<TransactionId>>,
+ tx_type: TransactionType,
+ ) -> ConsensusResult<TransactionQueryResult> {
unimplemented!()
}
@@ -208,7 +235,7 @@ pub trait ConsensusApi: Send + Sync {
unimplemented!()
}
- fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash {
+ fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction]) -> Hash {
unimplemented!()
}
@@ -284,6 +311,14 @@ pub trait ConsensusApi: Send + Sync {
unimplemented!()
}
+ fn get_block_transactions(&self, hash: Hash, indices: Option<Vec<TransactionIndexType>>) -> ConsensusResult<Vec<Transaction>> {
+ unimplemented!()
+ }
+
+ fn get_block_body(&self, hash: Hash) -> ConsensusResult>> {
+ unimplemented!()
+ }
+
fn get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult {
unimplemented!()
}
@@ -336,19 +371,14 @@ pub trait ConsensusApi: Send + Sync {
 fn get_missing_block_body_hashes(&self, high: Hash) -> ConsensusResult<Vec<Hash>> {
unimplemented!()
}
-
- fn pruning_point(&self) -> Hash {
 fn get_body_missing_anticone(&self) -> Vec<Hash> {
unimplemented!()
}
-
- // TODO: Delete this function once there's no need for go-kaspad backward compatibility.
- fn get_daa_window(&self, hash: Hash) -> ConsensusResult<Vec<Hash>> {
+ fn clear_body_missing_anticone_set(&self) {
unimplemented!()
}
- // TODO: Think of a better name.
- // TODO: Delete this function once there's no need for go-kaspad backward compatibility.
- fn get_trusted_block_associated_ghostdag_data_block_hashes(&self, hash: Hash) -> ConsensusResult<Vec<Hash>> {
+ fn pruning_point(&self) -> Hash {
unimplemented!()
}
@@ -371,6 +401,34 @@ pub trait ConsensusApi: Send + Sync {
fn finality_point(&self) -> Hash {
unimplemented!()
}
+
+ fn clear_pruning_utxo_set(&self) {
+ unimplemented!()
+ }
+
+ fn set_pruning_utxoset_stable_flag(&self, val: bool) {
+ unimplemented!()
+ }
+
+ fn is_pruning_utxoset_stable(&self) -> bool {
+ unimplemented!()
+ }
+
+ fn is_pruning_point_anticone_fully_synced(&self) -> bool {
+ unimplemented!()
+ }
+
+ fn is_consensus_in_transitional_ibd_state(&self) -> bool {
+ unimplemented!()
+ }
+
+ fn verify_is_pruning_sample(&self, candidate_hash: Hash) -> ConsensusResult<()> {
+ unimplemented!()
+ }
+
+ fn intrusive_pruning_point_update(&self, new_pruning_point: Hash, syncer_sink: Hash) -> ConsensusResult<()> {
+ unimplemented!()
+ }
}
 pub type DynConsensus = Arc<dyn ConsensusApi>;
diff --git a/consensus/core/src/block.rs b/consensus/core/src/block.rs
index cbd76b42dc..2af0b3dde5 100644
--- a/consensus/core/src/block.rs
+++ b/consensus/core/src/block.rs
@@ -68,6 +68,8 @@ impl Block {
Block::from_header(Header::from_precomputed_hash(hash, parents))
}
+ /// Checks whether the block's in-memory size is small enough for it to be cached as a pending-validation orphan block.
+ /// Returns None if the block is too large.
pub fn asses_for_cache(&self) -> Option<()> {
(self.estimate_mem_bytes() < 1_000_000).then_some(())
}
diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs
index 02eabb7114..4b0e050966 100644
--- a/consensus/core/src/config/constants.rs
+++ b/consensus/core/src/config/constants.rs
@@ -166,7 +166,7 @@ pub mod perf {
impl PerfParams {
pub fn adjust_to_consensus_params(&mut self, consensus_params: &Params) {
// Allow caching up to 10x over the baseline
- self.block_data_cache_size *= consensus_params.bps().upper_bound().clamp(1, 10) as usize;
+ self.block_data_cache_size *= consensus_params.bps().after().clamp(1, 10) as usize;
}
}
}
diff --git a/consensus/core/src/config/genesis.rs b/consensus/core/src/config/genesis.rs
index 06d1431ed2..0c7281ed28 100644
--- a/consensus/core/src/config/genesis.rs
+++ b/consensus/core/src/config/genesis.rs
@@ -1,4 +1,9 @@
-use crate::{block::Block, header::Header, subnets::SUBNETWORK_ID_COINBASE, tx::Transaction};
+use crate::{
+ block::Block,
+ header::{CompressedParents, Header},
+ subnets::SUBNETWORK_ID_COINBASE,
+ tx::Transaction,
+};
use kaspa_hashes::{Hash, ZERO_HASH};
use kaspa_muhash::EMPTY_MUHASH;
@@ -26,7 +31,7 @@ impl From<&GenesisBlock> for Header {
fn from(genesis: &GenesisBlock) -> Self {
Header::new_finalized(
genesis.version,
- Vec::new(),
+ CompressedParents::default(),
genesis.hash_merkle_root,
ZERO_HASH,
genesis.utxo_commitment,
@@ -231,7 +236,7 @@ mod tests {
fn test_genesis_hashes() {
[GENESIS, TESTNET_GENESIS, TESTNET11_GENESIS, SIMNET_GENESIS, DEVNET_GENESIS].into_iter().for_each(|genesis| {
let block: Block = (&genesis).into();
- assert_hashes_eq(calc_hash_merkle_root(block.transactions.iter(), false), block.header.hash_merkle_root);
+ assert_hashes_eq(calc_hash_merkle_root(block.transactions.iter()), block.header.hash_merkle_root);
assert_hashes_eq(block.hash(), genesis.hash);
});
}
diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs
index 0773aeaa43..fcee365a6b 100644
--- a/consensus/core/src/config/params.rs
+++ b/consensus/core/src/config/params.rs
@@ -10,9 +10,10 @@ use crate::{
};
use kaspa_addresses::Prefix;
use kaspa_math::Uint256;
+use serde::{Deserialize, Serialize};
use std::cmp::min;
-#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ForkActivation(u64);
impl ForkActivation {
@@ -139,7 +140,7 @@ impl ForkedParam {
}
/// Fork params for the Crescendo hardfork
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CrescendoParams {
pub past_median_time_sampled_window_size: u64,
pub sampled_difficulty_window_size: u64,
@@ -194,6 +195,93 @@ pub const CRESCENDO: CrescendoParams = CrescendoParams {
max_script_public_key_len: 10_000,
};
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct OverrideParams {
+ pub prior_ghostdag_k: Option,
+
+ /// Timestamp deviation tolerance (in seconds)
+ pub timestamp_deviation_tolerance: Option,
+
+ /// Target time per block (in milliseconds)
+ pub prior_target_time_per_block: Option,
+
+ /// Size of full blocks window that is inspected to calculate the required difficulty of each block
+ pub prior_difficulty_window_size: Option,
+
+ /// The minimum size a difficulty window (full or sampled) must have to trigger a DAA calculation
+ pub min_difficulty_window_size: Option,
+
+ pub prior_max_block_parents: Option,
+ pub prior_mergeset_size_limit: Option,
+ pub prior_merge_depth: Option,
+ pub prior_finality_depth: Option,
+ pub prior_pruning_depth: Option,
+
+ pub coinbase_payload_script_public_key_max_len: Option,
+ pub max_coinbase_payload_len: Option,
+
+ pub prior_max_tx_inputs: Option,
+ pub prior_max_tx_outputs: Option,
+ pub prior_max_signature_script_len: Option,
+ pub prior_max_script_public_key_len: Option,
+
+ pub mass_per_tx_byte: Option,
+ pub mass_per_script_pub_key_byte: Option,
+ pub mass_per_sig_op: Option,
+ pub max_block_mass: Option,
+
+ /// The parameter for scaling inverse KAS value to mass units (KIP-0009)
+ pub storage_mass_parameter: Option,
+
+ /// DAA score after which the pre-deflationary period switches to the deflationary period
+ pub deflationary_phase_daa_score: Option,
+
+ pub pre_deflationary_phase_base_subsidy: Option,
+ pub prior_coinbase_maturity: Option,
+ pub skip_proof_of_work: Option,
+ pub max_block_level: Option,
+ pub pruning_proof_m: Option,
+
+ pub crescendo: Option,
+ pub crescendo_activation: Option,
+}
+
+impl From for OverrideParams {
+ fn from(p: Params) -> Self {
+ Self {
+ prior_ghostdag_k: Some(p.prior_ghostdag_k),
+ timestamp_deviation_tolerance: Some(p.timestamp_deviation_tolerance),
+ prior_target_time_per_block: Some(p.prior_target_time_per_block),
+ prior_difficulty_window_size: Some(p.prior_difficulty_window_size),
+ min_difficulty_window_size: Some(p.min_difficulty_window_size),
+ prior_max_block_parents: Some(p.prior_max_block_parents),
+ prior_mergeset_size_limit: Some(p.prior_mergeset_size_limit),
+ prior_merge_depth: Some(p.prior_merge_depth),
+ prior_finality_depth: Some(p.prior_finality_depth),
+ prior_pruning_depth: Some(p.prior_pruning_depth),
+ coinbase_payload_script_public_key_max_len: Some(p.coinbase_payload_script_public_key_max_len),
+ max_coinbase_payload_len: Some(p.max_coinbase_payload_len),
+ prior_max_tx_inputs: Some(p.prior_max_tx_inputs),
+ prior_max_tx_outputs: Some(p.prior_max_tx_outputs),
+ prior_max_signature_script_len: Some(p.prior_max_signature_script_len),
+ prior_max_script_public_key_len: Some(p.prior_max_script_public_key_len),
+ mass_per_tx_byte: Some(p.mass_per_tx_byte),
+ mass_per_script_pub_key_byte: Some(p.mass_per_script_pub_key_byte),
+ mass_per_sig_op: Some(p.mass_per_sig_op),
+ max_block_mass: Some(p.max_block_mass),
+ storage_mass_parameter: Some(p.storage_mass_parameter),
+ deflationary_phase_daa_score: Some(p.deflationary_phase_daa_score),
+ pre_deflationary_phase_base_subsidy: Some(p.pre_deflationary_phase_base_subsidy),
+ prior_coinbase_maturity: Some(p.prior_coinbase_maturity),
+ skip_proof_of_work: Some(p.skip_proof_of_work),
+ max_block_level: Some(p.max_block_level),
+ pruning_proof_m: Some(p.pruning_proof_m),
+ crescendo: Some(p.crescendo),
+ crescendo_activation: Some(p.crescendo_activation),
+ }
+ }
+}
+
/// Consensus parameters. Contains settings and configurations which are consensus-sensitive.
/// Changing one of these on a network node would exclude and prevent it from reaching consensus
/// with the other unmodified nodes.
@@ -439,6 +527,69 @@ impl Params {
pub fn default_rpc_port(&self) -> u16 {
self.net.default_rpc_port()
}
+
+ pub fn override_params(self, overrides: OverrideParams) -> Self {
+ Self {
+ dns_seeders: self.dns_seeders,
+ net: self.net,
+ genesis: self.genesis.clone(),
+ prior_ghostdag_k: overrides.prior_ghostdag_k.unwrap_or(self.prior_ghostdag_k),
+
+ timestamp_deviation_tolerance: overrides.timestamp_deviation_tolerance.unwrap_or(self.timestamp_deviation_tolerance),
+
+ prior_target_time_per_block: overrides.prior_target_time_per_block.unwrap_or(self.prior_target_time_per_block),
+
+ max_difficulty_target: self.max_difficulty_target,
+ max_difficulty_target_f64: self.max_difficulty_target_f64,
+
+ prior_difficulty_window_size: overrides.prior_difficulty_window_size.unwrap_or(self.prior_difficulty_window_size),
+
+ min_difficulty_window_size: overrides.min_difficulty_window_size.unwrap_or(self.min_difficulty_window_size),
+
+ prior_max_block_parents: overrides.prior_max_block_parents.unwrap_or(self.prior_max_block_parents),
+
+ prior_mergeset_size_limit: overrides.prior_mergeset_size_limit.unwrap_or(self.prior_mergeset_size_limit),
+
+ prior_merge_depth: overrides.prior_merge_depth.unwrap_or(self.prior_merge_depth),
+ prior_finality_depth: overrides.prior_finality_depth.unwrap_or(self.prior_finality_depth),
+ prior_pruning_depth: overrides.prior_pruning_depth.unwrap_or(self.prior_pruning_depth),
+
+ coinbase_payload_script_public_key_max_len: overrides
+ .coinbase_payload_script_public_key_max_len
+ .unwrap_or(self.coinbase_payload_script_public_key_max_len),
+
+ max_coinbase_payload_len: overrides.max_coinbase_payload_len.unwrap_or(self.max_coinbase_payload_len),
+
+ prior_max_tx_inputs: overrides.prior_max_tx_inputs.unwrap_or(self.prior_max_tx_inputs),
+ prior_max_tx_outputs: overrides.prior_max_tx_outputs.unwrap_or(self.prior_max_tx_outputs),
+ prior_max_signature_script_len: overrides.prior_max_signature_script_len.unwrap_or(self.prior_max_signature_script_len),
+ prior_max_script_public_key_len: overrides.prior_max_script_public_key_len.unwrap_or(self.prior_max_script_public_key_len),
+
+ mass_per_tx_byte: overrides.mass_per_tx_byte.unwrap_or(self.mass_per_tx_byte),
+ mass_per_script_pub_key_byte: overrides.mass_per_script_pub_key_byte.unwrap_or(self.mass_per_script_pub_key_byte),
+ mass_per_sig_op: overrides.mass_per_sig_op.unwrap_or(self.mass_per_sig_op),
+ max_block_mass: overrides.max_block_mass.unwrap_or(self.max_block_mass),
+
+ storage_mass_parameter: overrides.storage_mass_parameter.unwrap_or(self.storage_mass_parameter),
+
+ deflationary_phase_daa_score: overrides.deflationary_phase_daa_score.unwrap_or(self.deflationary_phase_daa_score),
+
+ pre_deflationary_phase_base_subsidy: overrides
+ .pre_deflationary_phase_base_subsidy
+ .unwrap_or(self.pre_deflationary_phase_base_subsidy),
+
+ prior_coinbase_maturity: overrides.prior_coinbase_maturity.unwrap_or(self.prior_coinbase_maturity),
+
+ skip_proof_of_work: overrides.skip_proof_of_work.unwrap_or(self.skip_proof_of_work),
+
+ max_block_level: overrides.max_block_level.unwrap_or(self.max_block_level),
+
+ pruning_proof_m: overrides.pruning_proof_m.unwrap_or(self.pruning_proof_m),
+
+ crescendo: overrides.crescendo.clone().unwrap_or(self.crescendo.clone()),
+ crescendo_activation: overrides.crescendo_activation.unwrap_or(self.crescendo_activation),
+ }
+ }
}
impl From for Params {
diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs
index afe4bf11dc..6dba66ddb4 100644
--- a/consensus/core/src/errors/block.rs
+++ b/consensus/core/src/errors/block.rs
@@ -97,9 +97,6 @@ pub enum RuleError {
#[error("coinbase blue score of {0} is not the expected value of {1}")]
BadCoinbasePayloadBlueScore(u64, u64),
- #[error("coinbase mass commitment field is not zero")]
- CoinbaseNonZeroMassCommitment,
-
#[error("transaction in isolation validation failed for tx {0}: {1}")]
TxInIsolationValidationFailed(TransactionId, TxRuleError),
diff --git a/consensus/core/src/errors/consensus.rs b/consensus/core/src/errors/consensus.rs
index 58c5ed35e9..1a2c8a3c76 100644
--- a/consensus/core/src/errors/consensus.rs
+++ b/consensus/core/src/errors/consensus.rs
@@ -1,6 +1,8 @@
use kaspa_hashes::Hash;
use thiserror::Error;
+use crate::{tx::TransactionIndexType, utxo::utxo_inquirer::UtxoInquirerError};
+
use super::{difficulty::DifficultyError, sync::SyncManagerError, traversal::TraversalError};
#[derive(Error, Debug, Clone)]
@@ -11,6 +13,12 @@ pub enum ConsensusError {
#[error("cannot find header {0}")]
HeaderNotFound(Hash),
+ #[error("trying to query {0} txs in block {1}, but the block only holds {2} txs")]
+ TransactionQueryTooLarge(usize, Hash, usize),
+
+ #[error("index {0} out of max {1} in block {2} is out of bounds")]
+ TransactionIndexOutOfBounds(TransactionIndexType, usize, Hash),
+
#[error("block {0} is invalid")]
InvalidBlock(Hash),
@@ -35,6 +43,9 @@ pub enum ConsensusError {
#[error("{0}")]
General(&'static str),
+ #[error("utxo inquirer error: {0}")]
+ UtxoInquirerError(#[from] UtxoInquirerError),
+
#[error("{0}")]
GeneralOwned(String),
}
diff --git a/consensus/core/src/errors/header.rs b/consensus/core/src/errors/header.rs
new file mode 100644
index 0000000000..f3b458a23b
--- /dev/null
+++ b/consensus/core/src/errors/header.rs
@@ -0,0 +1,9 @@
+use thiserror::Error;
+
+#[derive(Error, Debug, Clone)]
+pub enum CompressedParentsError {
+ #[error("Parents by level exceeds maximum levels of 255")]
+ LevelsExceeded,
+}
+
+pub type CompressedParentsResult<T> = std::result::Result<T, CompressedParentsError>;
diff --git a/consensus/core/src/errors/mod.rs b/consensus/core/src/errors/mod.rs
index c65ea57c51..8347a76ee0 100644
--- a/consensus/core/src/errors/mod.rs
+++ b/consensus/core/src/errors/mod.rs
@@ -3,6 +3,7 @@ pub mod coinbase;
pub mod config;
pub mod consensus;
pub mod difficulty;
+pub mod header;
pub mod pruning;
pub mod sync;
pub mod traversal;
diff --git a/consensus/core/src/errors/pruning.rs b/consensus/core/src/errors/pruning.rs
index 5c69eb0142..173ef8a0f6 100644
--- a/consensus/core/src/errors/pruning.rs
+++ b/consensus/core/src/errors/pruning.rs
@@ -39,8 +39,8 @@ pub enum PruningImportError {
#[error("block {0} already appeared in the proof headers for level {1}")]
PruningProofDuplicateHeaderAtLevel(Hash, BlockLevel),
- #[error("got header-only trusted block {0} which is not in pruning point past according to available reachability")]
- PruningPointPastMissingReachability(Hash),
+ #[error("trusted block {0} is in the anticone of the pruning point but does not have block body")]
+ PruningPointAnticoneMissingBody(Hash),
#[error("new pruning point has an invalid transaction {0}: {1}")]
NewPruningPointTxError(Hash, TxRuleError),
@@ -80,6 +80,9 @@ pub enum PruningImportError {
#[error("a past pruning point has not been pointed at")]
UnpointedPruningPoint,
+
+ #[error("got trusted block {0} in the future of the pruning point {1}")]
+ TrustedBlockInPruningPointFuture(Hash, Hash),
}
pub type PruningImportResult = std::result::Result;
diff --git a/consensus/core/src/errors/tx.rs b/consensus/core/src/errors/tx.rs
index f21409857f..5cefa7921d 100644
--- a/consensus/core/src/errors/tx.rs
+++ b/consensus/core/src/errors/tx.rs
@@ -15,9 +15,6 @@ pub enum TxRuleError {
#[error("transaction has non zero gas value")]
TxHasGas,
- #[error("a non coinbase transaction has a payload")]
- NonCoinbaseTxHasPayload,
-
#[error("transaction version {0} is unknown")]
UnknownTxVersion(u16),
@@ -45,6 +42,9 @@ pub enum TxRuleError {
#[error("script public key of coinbase output #{0} is too long")]
CoinbaseScriptPublicKeyTooLong(usize),
+ #[error("coinbase mass commitment field is not zero")]
+ CoinbaseNonZeroMassCommitment,
+
#[error(
"transaction input #{0} tried to spend coinbase outpoint {1} with daa score of {2}
while the merging block daa score is {3} and the coinbase maturity period of {4} hasn't passed yet"
diff --git a/consensus/core/src/hashing/header.rs b/consensus/core/src/hashing/header.rs
index 3ad90fa760..e531f0cd8e 100644
--- a/consensus/core/src/hashing/header.rs
+++ b/consensus/core/src/hashing/header.rs
@@ -6,10 +6,10 @@ use kaspa_hashes::{Hash, HasherBase};
#[inline]
pub fn hash_override_nonce_time(header: &Header, nonce: u64, timestamp: u64) -> Hash {
let mut hasher = kaspa_hashes::BlockHash::new();
- hasher.update(header.version.to_le_bytes()).write_len(header.parents_by_level.len()); // Write the number of parent levels
+ hasher.update(header.version.to_le_bytes()).write_len(header.parents_by_level.expanded_len()); // Write the number of parent levels
// Write parents at each level
- header.parents_by_level.iter().for_each(|level| {
+ header.parents_by_level.expanded_iter().for_each(|level| {
hasher.write_var_array(level);
});
@@ -43,7 +43,7 @@ mod tests {
fn test_header_hashing() {
let header = Header::new_finalized(
1,
- vec![vec![1.into()]],
+ vec![vec![1.into()]].try_into().unwrap(),
Default::default(),
Default::default(),
Default::default(),
diff --git a/consensus/core/src/hashing/tx.rs b/consensus/core/src/hashing/tx.rs
index f9cac0311a..b5d3b966fb 100644
--- a/consensus/core/src/hashing/tx.rs
+++ b/consensus/core/src/hashing/tx.rs
@@ -2,33 +2,47 @@ use super::HasherExtensions;
use crate::tx::{Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput};
use kaspa_hashes::{Hash, Hasher};
-/// A bitmask defining which transaction fields we
-/// want to encode and which to ignore.
-type TxEncodingFlags = u8;
-
-pub const TX_ENCODING_FULL: TxEncodingFlags = 0;
-pub const TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT: TxEncodingFlags = 1;
+bitflags::bitflags! {
+ /// A bitmask defining which transaction fields we want to encode and which to ignore.
+ #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ pub struct TxEncodingFlags: u8 {
+ const FULL = 0;
+ const EXCLUDE_SIGNATURE_SCRIPT = 1 << 0;
+ const EXCLUDE_MASS_COMMIT = 1 << 1;
+ }
+}
/// Returns the transaction hash. Note that this is different than the transaction ID.
-pub fn hash(tx: &Transaction, include_mass_field: bool) -> Hash {
+pub fn hash(tx: &Transaction) -> Hash {
let mut hasher = kaspa_hashes::TransactionHash::new();
- write_transaction(&mut hasher, tx, TX_ENCODING_FULL, include_mass_field);
+ write_transaction(&mut hasher, tx, TxEncodingFlags::FULL);
+ hasher.finalize()
+}
+
+/// Returns the transaction hash pre-crescendo (which excludes the mass commitment)
+pub fn hash_pre_crescendo(tx: &Transaction) -> Hash {
+ let mut hasher = kaspa_hashes::TransactionHash::new();
+ write_transaction(&mut hasher, tx, TxEncodingFlags::EXCLUDE_MASS_COMMIT);
hasher.finalize()
}
/// Not intended for direct use by clients. Instead use `tx.id()`
pub(crate) fn id(tx: &Transaction) -> TransactionId {
// Encode the transaction, replace signature script with an empty array, skip
- // sigop counts and mass and hash the result.
+ // sigop counts and mass commitment and hash the result.
- let encoding_flags = if tx.is_coinbase() { TX_ENCODING_FULL } else { TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT };
+ let encoding_flags = if tx.is_coinbase() {
+ TxEncodingFlags::FULL
+ } else {
+ TxEncodingFlags::EXCLUDE_SIGNATURE_SCRIPT | TxEncodingFlags::EXCLUDE_MASS_COMMIT
+ };
let mut hasher = kaspa_hashes::TransactionID::new();
- write_transaction(&mut hasher, tx, encoding_flags, false);
+ write_transaction(&mut hasher, tx, encoding_flags);
hasher.finalize()
}
/// Write the transaction into the provided hasher according to the encoding flags
-fn write_transaction<T: Hasher>(hasher: &mut T, tx: &Transaction, encoding_flags: TxEncodingFlags, include_mass_field: bool) {
+fn write_transaction<T: Hasher>(hasher: &mut T, tx: &Transaction, encoding_flags: TxEncodingFlags) {
hasher.update(tx.version.to_le_bytes()).write_len(tx.inputs.len());
for input in tx.inputs.iter() {
// Write the tx input
@@ -47,7 +61,7 @@ fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags
Design principles (mostly related to the new mass commitment field; see KIP-0009):
1. The new mass field should not modify tx::id (since it is essentially a commitment by the miner re block space usage
so there is no need to modify the id definition which will require wide-spread changes in ecosystem software).
- 2. Coinbase tx hash and id should ideally remain equal
+ 2. Coinbase tx hash should ideally remain unchanged
Solution:
1. Hash the mass field only for tx::hash
@@ -57,13 +71,10 @@ fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags
This way we have:
- Unique commitment for tx::hash per any possible mass value (with only zero being a no-op)
- tx::id remains unmodified
- - Coinbase tx hash and id remain the same and equal
+ - Coinbase tx hash remains unchanged
*/
- // TODO (post HF):
- // 1. Avoid passing a boolean
- // 2. Use TxEncodingFlags to avoid including the mass for tx ID
- if include_mass_field {
+ if !encoding_flags.contains(TxEncodingFlags::EXCLUDE_MASS_COMMIT) {
let mass = tx.mass();
if mass > 0 {
hasher.update(mass.to_le_bytes());
@@ -74,7 +85,7 @@ fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags
#[inline(always)]
 fn write_input<T: Hasher>(hasher: &mut T, input: &TransactionInput, encoding_flags: TxEncodingFlags) {
write_outpoint(hasher, &input.previous_outpoint);
- if encoding_flags & TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT != TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT {
+ if !encoding_flags.contains(TxEncodingFlags::EXCLUDE_SIGNATURE_SCRIPT) {
hasher.write_var_bytes(input.signature_script.as_slice()).update([input.sig_op_count]);
} else {
hasher.write_var_bytes(&[]);
@@ -183,12 +194,7 @@ mod tests {
for (i, test) in tests.iter().enumerate() {
assert_eq!(test.tx.id(), Hash::from_str(test.expected_id).unwrap(), "transaction id failed for test {}", i + 1);
- assert_eq!(
- hash(&test.tx, false),
- Hash::from_str(test.expected_hash).unwrap(),
- "transaction hash failed for test {}",
- i + 1
- );
+ assert_eq!(hash(&test.tx), Hash::from_str(test.expected_hash).unwrap(), "transaction hash failed for test {}", i + 1);
}
// Avoid compiler warnings on the last clone
diff --git a/consensus/core/src/header.rs b/consensus/core/src/header.rs
index e53de44255..17443ab6e7 100644
--- a/consensus/core/src/header.rs
+++ b/consensus/core/src/header.rs
@@ -1,8 +1,107 @@
use crate::{hashing, BlueWorkType};
use borsh::{BorshDeserialize, BorshSerialize};
use kaspa_hashes::Hash;
-use kaspa_utils::mem_size::MemSizeEstimator;
+use kaspa_utils::{
+ iter::{IterExtensions, IterExtensionsRle},
+ mem_size::MemSizeEstimator,
+};
use serde::{Deserialize, Serialize};
+use std::mem::size_of;
+
+/// An efficient run-length encoding for the parent-by-level vector in the block header.
+/// The i-th run `(cum_count, parents)` indicates that for all levels in the range `prev_cum_count..cum_count`,
+/// the parents are `parents`.
+///
+/// Example: `[(3, [A]), (5, [B])]` means levels 0-2 have parents `[A]`,
+/// and levels 3-4 have parents `[B]`.
+#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+pub struct CompressedParents(Vec<(u8, Vec<Hash>)>);
+
+impl CompressedParents {
+ pub fn expanded_len(&self) -> usize {
+ self.0.last().map(|(cum, _)| *cum as usize).unwrap_or(0)
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ pub fn get(&self, index: usize) -> Option<&[Hash]> {
+ if index >= self.expanded_len() {
+ return None;
+ }
+ if index == 0 {
+ // Fast path for the common case of getting the first level (direct parents)
+ return Some(&self.0[0].1);
+ }
+ // `partition_point` returns the index of the first element for which the predicate is false.
+ // The predicate `cum - 1 < index` checks if a run is before the desired `index`.
+ // The first run for which this is false is the one that contains our index.
+ let i = self.0.partition_point(|(cum, _)| (*cum as usize) - 1 < index);
+ Some(&self.0[i].1)
+ }
+
+ pub fn expanded_iter(&self) -> impl Iterator<Item = &[Hash]> {
+ self.0.iter().map(|(cum, v)| (*cum as usize, v.as_slice())).expand_rle()
+ }
+
+ /// Adds a new level of parents. This extends the last run if parents_at_level
+ /// is identical to the last level, otherwise it starts a new run
+ pub fn push(&mut self, parents_at_level: Vec<Hash>) {
+ match self.0.last_mut() {
+ Some((count, last_parents)) if *last_parents == parents_at_level => {
+ *count = count.checked_add(1).expect("exceeded max levels of 255");
+ }
+ Some((count, _)) => {
+ let next_cum = count.checked_add(1).expect("exceeded max levels of 255");
+ self.0.push((next_cum, parents_at_level));
+ }
+ None => {
+ self.0.push((1, parents_at_level));
+ }
+ }
+ }
+
+ /// Sets the direct parents (level 0) to the given value, preserving all other levels.
+ ///
+ /// NOTE: inefficient implementation, should be used for testing purposes only.
+ pub fn set_direct_parents(&mut self, direct_parents: Vec<Hash>) {
+ if self.0.is_empty() {
+ self.0.push((1, direct_parents));
+ return;
+ }
+ let mut parents: Vec<Vec<Hash>> = std::mem::take(self).into();
+ parents[0] = direct_parents;
+ *self = parents.try_into().unwrap();
+ }
+}
+
+use crate::errors::header::CompressedParentsError;
+
+impl TryFrom<Vec<Vec<Hash>>> for CompressedParents {
+ type Error = CompressedParentsError;
+
+ fn try_from(parents: Vec<Vec<Hash>>) -> Result<Self, Self::Error> {
+ if parents.len() > u8::MAX as usize {
+ return Err(CompressedParentsError::LevelsExceeded);
+ }
+
+ // Casting count from usize to u8 is safe because of the check above
+ Ok(Self(parents.into_iter().rle_cumulative().map(|(count, level)| (count as u8, level)).collect()))
+ }
+}
+
+impl From<CompressedParents> for Vec<Vec<Hash>> {
+ fn from(value: CompressedParents) -> Self {
+ value.0.into_iter().map(|(cum, v)| (cum as usize, v)).expand_rle().collect()
+ }
+}
+
+impl From<&CompressedParents> for Vec<Vec<Hash>> {
+ fn from(value: &CompressedParents) -> Self {
+ value.expanded_iter().map(|x| x.to_vec()).collect()
+ }
+}
/// @category Consensus
#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
@@ -11,7 +110,7 @@ pub struct Header {
/// Cached hash
pub hash: Hash,
pub version: u16,
- pub parents_by_level: Vec<Vec<Hash>>,
+ pub parents_by_level: CompressedParents,
pub hash_merkle_root: Hash,
pub accepted_id_merkle_root: Hash,
pub utxo_commitment: Hash,
@@ -29,7 +128,7 @@ impl Header {
#[allow(clippy::too_many_arguments)]
pub fn new_finalized(
version: u16,
- parents_by_level: Vec<Vec<Hash>>,
+ parents_by_level: CompressedParents,
hash_merkle_root: Hash,
accepted_id_merkle_root: Hash,
utxo_commitment: Hash,
@@ -66,10 +165,9 @@ impl Header {
}
pub fn direct_parents(&self) -> &[Hash] {
- if self.parents_by_level.is_empty() {
- &[]
- } else {
- &self.parents_by_level[0]
+ match self.parents_by_level.get(0) {
+ Some(parents) => parents,
+ None => &[],
}
}
@@ -78,7 +176,7 @@ impl Header {
Header {
version: crate::constants::BLOCK_VERSION,
hash,
- parents_by_level: vec![parents],
+ parents_by_level: vec![parents].try_into().unwrap(),
hash_merkle_root: Default::default(),
accepted_id_merkle_root: Default::default(),
utxo_commitment: Default::default(),
@@ -101,7 +199,9 @@ impl AsRef for Header {
impl MemSizeEstimator for Header {
fn estimate_mem_bytes(&self) -> usize {
- size_of::<Self>() + self.parents_by_level.iter().map(|l| l.len()).sum::<usize>() * size_of::<Hash>()
+ size_of::<Self>()
+ + self.parents_by_level.0.iter().map(|(_, l)| l.len()).sum::<usize>() * size_of::<Hash>()
+ + self.parents_by_level.0.len() * size_of::<(u8, Vec<Hash>)>()
}
}
@@ -111,11 +211,29 @@ mod tests {
use kaspa_math::Uint192;
use serde_json::Value;
+ fn hash(val: u8) -> Hash {
+ Hash::from(val as u64)
+ }
+
+ fn vec_from(slice: &[u8]) -> Vec<Hash> {
+ slice.iter().map(|&v| hash(v)).collect()
+ }
+
+ fn serialize_parents(parents: &[Vec<Hash>]) -> Vec<u8> {
+ let compressed: CompressedParents = (parents.to_vec()).try_into().unwrap();
+ bincode::serialize(&compressed).unwrap()
+ }
+
+ fn deserialize_parents(bytes: &[u8]) -> bincode::Result<Vec<Vec<Hash>>> {
+ let parents: CompressedParents = bincode::deserialize(bytes)?;
+ Ok(parents.into())
+ }
+
#[test]
fn test_header_ser() {
let header = Header::new_finalized(
1,
- vec![vec![1.into()]],
+ vec![vec![1.into()]].try_into().unwrap(),
Default::default(),
Default::default(),
Default::default(),
@@ -141,4 +259,111 @@ mod tests {
 let h = serde_json::from_str::<Header>(&json).unwrap();
assert!(h.blue_score == header.blue_score && h.blue_work == header.blue_work);
}
+
+ #[test]
+ fn parents_vrle_round_trip_multiple_runs() {
+ let parents = vec![
+ vec_from(&[1, 2, 3]),
+ vec_from(&[1, 2, 3]),
+ vec_from(&[1, 2, 3]),
+ vec_from(&[4, 5]),
+ vec_from(&[4, 5]),
+ vec_from(&[6]),
+ ];
+
+ let bytes = serialize_parents(&parents);
+ let decoded = deserialize_parents(&bytes).unwrap();
+ assert_eq!(decoded, parents);
+ }
+
+ #[test]
+ fn parents_vrle_round_trip_single_run() {
+ let repeated = vec_from(&[9, 8, 7]);
+ let parents = vec![repeated.clone(), repeated.clone(), repeated.clone()];
+
+ let bytes = serialize_parents(&parents);
+ let decoded = deserialize_parents(&bytes).unwrap();
+ assert_eq!(decoded, parents);
+ }
+
+ #[test]
+ fn parents_vrle_round_trip_empty() {
+ let bytes = serialize_parents(&[]);
+ let decoded = deserialize_parents(&bytes).unwrap();
+ assert!(decoded.is_empty());
+ }
+
+ #[test]
+ fn compressed_parents_len_and_get() {
+ // Test with multiple runs of different lengths
+ let first = vec_from(&[1]);
+ let second = vec_from(&[2, 3]);
+ let third = vec_from(&[4]);
+ let parents = vec![first.clone(), first.clone(), second.clone(), second.clone(), third.clone()];
+ let compressed = CompressedParents::try_from(parents.clone()).unwrap();
+
+ assert_eq!(compressed.expanded_len(), parents.len());
+ assert!(!compressed.is_empty());
+
+ // Test `get` at various positions
+ assert_eq!(compressed.get(0), Some(first.as_slice()), "get first element");
+ assert_eq!(compressed.get(1), Some(first.as_slice()), "get element in the middle of a run");
+ assert_eq!(compressed.get(2), Some(second.as_slice()), "get first element of a new run");
+ assert_eq!(compressed.get(3), Some(second.as_slice()), "get element in the middle of a new run");
+ assert_eq!(compressed.get(4), Some(third.as_slice()), "get last element");
+ assert_eq!(compressed.get(5), None, "get out of bounds (just over)");
+ assert_eq!(compressed.get(10), None, "get out of bounds (far over)");
+
+ let collected: Vec<&[Hash]> = compressed.expanded_iter().collect();
+ let expected: Vec<&[Hash]> = parents.iter().map(|v| v.as_slice()).collect();
+ assert_eq!(collected, expected);
+
+ // Test with an empty vec
+ let parents_empty: Vec<Vec<Hash>> = vec![];
+ let compressed_empty: CompressedParents = parents_empty.try_into().unwrap();
+ assert_eq!(compressed_empty.expanded_len(), 0);
+ assert!(compressed_empty.is_empty());
+ assert_eq!(compressed_empty.get(0), None);
+
+ // Test with a single run
+ let parents_single_run = vec![first.clone(), first.clone(), first.clone()];
+ let compressed_single_run: CompressedParents = parents_single_run.try_into().unwrap();
+ assert_eq!(compressed_single_run.expanded_len(), 3);
+ assert_eq!(compressed_single_run.get(0), Some(first.as_slice()));
+ assert_eq!(compressed_single_run.get(1), Some(first.as_slice()));
+ assert_eq!(compressed_single_run.get(2), Some(first.as_slice()));
+ assert_eq!(compressed_single_run.get(3), None);
+ }
+
+ #[test]
+ fn test_compressed_parents_push() {
+ let mut compressed = CompressedParents(Vec::new());
+ let level1 = vec_from(&[1, 2]);
+ let level2 = vec_from(&[3, 4]);
+
+ // 1. Push to empty
+ compressed.push(level1.clone());
+ assert_eq!(compressed.expanded_len(), 1);
+ assert_eq!(compressed.0, vec![(1, level1.clone())]);
+
+ // 2. Push same (extend run)
+ compressed.push(level1.clone());
+ assert_eq!(compressed.expanded_len(), 2);
+ assert_eq!(compressed.0, vec![(2, level1.clone())]);
+
+ // 3. Push different (new run)
+ compressed.push(level2.clone());
+ assert_eq!(compressed.expanded_len(), 3);
+ assert_eq!(compressed.0, vec![(2, level1), (3, level2)]);
+ }
+
+ #[test]
+ fn compressed_parents_binary_format_matches_runs() {
+ let parents = vec![vec_from(&[1, 2, 3]), vec_from(&[1, 2, 3]), vec_from(&[4])];
+ let compressed: CompressedParents = parents.try_into().unwrap();
+
+ let encoded = bincode::serialize(&compressed).unwrap();
+ let expected = bincode::serialize(&compressed.0).unwrap();
+ assert_eq!(encoded, expected);
+ }
}
diff --git a/consensus/core/src/mass/mod.rs b/consensus/core/src/mass/mod.rs
index 90099487c2..e349eb47b6 100644
--- a/consensus/core/src/mass/mod.rs
+++ b/consensus/core/src/mass/mod.rs
@@ -433,7 +433,7 @@ mod tests {
*/
for net in NetworkType::iter() {
let params: Params = net.into();
- let max_spk_len = (params.max_script_public_key_len().upper_bound() as u64)
+ let max_spk_len = (params.max_script_public_key_len().after() as u64)
.min(params.max_block_mass.div_ceil(params.mass_per_script_pub_key_byte));
let max_plurality = (UTXO_CONST_STORAGE + max_spk_len).div_ceil(UTXO_UNIT_SIZE); // see utxo_plurality
let product = params.storage_mass_parameter.checked_mul(max_plurality).and_then(|x| x.checked_mul(max_plurality));
diff --git a/consensus/core/src/merkle.rs b/consensus/core/src/merkle.rs
index 59c6ca7c4c..46b2ce6791 100644
--- a/consensus/core/src/merkle.rs
+++ b/consensus/core/src/merkle.rs
@@ -2,13 +2,17 @@ use crate::{hashing, tx::Transaction};
use kaspa_hashes::Hash;
use kaspa_merkle::calc_merkle_root;
-pub fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator<Item = &'a Transaction>, include_mass_field: bool) -> Hash {
- calc_merkle_root(txs.map(|tx| hashing::tx::hash(tx, include_mass_field)))
+pub fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator<Item = &'a Transaction>) -> Hash {
+ calc_merkle_root(txs.map(hashing::tx::hash))
+}
+
+pub fn calc_hash_merkle_root_pre_crescendo<'a>(txs: impl ExactSizeIterator<Item = &'a Transaction>) -> Hash {
+ calc_merkle_root(txs.map(hashing::tx::hash_pre_crescendo))
}
#[cfg(test)]
mod tests {
- use crate::merkle::calc_hash_merkle_root;
+ use crate::merkle::{calc_hash_merkle_root, calc_hash_merkle_root_pre_crescendo};
use crate::{
subnets::{SUBNETWORK_ID_COINBASE, SUBNETWORK_ID_NATIVE},
tx::{scriptvec, ScriptPublicKey, Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput},
@@ -17,7 +21,7 @@ mod tests {
#[test]
fn merkle_root_test() {
- let txs = vec![
+ let txs = [
Transaction::new(
0,
vec![],
@@ -238,7 +242,27 @@ mod tests {
),
];
assert_eq!(
- calc_hash_merkle_root(txs.iter(), false),
+ calc_hash_merkle_root(txs.iter()),
+ Hash::from_slice(&[
+ 0x46, 0xec, 0xf4, 0x5b, 0xe3, 0xba, 0xca, 0x34, 0x9d, 0xfe, 0x8a, 0x78, 0xde, 0xaf, 0x05, 0x3b, 0x0a, 0xa6, 0xd5,
+ 0x38, 0x97, 0x4d, 0xa5, 0x0f, 0xd6, 0xef, 0xb4, 0xd2, 0x66, 0xbc, 0x8d, 0x21,
+ ])
+ );
+
+ // Test a tx with storage mass commitment > 0
+ txs[0].set_mass(7);
+
+ assert_eq!(
+ calc_hash_merkle_root(txs.iter()),
+ Hash::from_slice(&[
+ 0x75, 0x4a, 0x1, 0x59, 0xdc, 0x4b, 0x3d, 0xaa, 0x16, 0x95, 0x28, 0x4d, 0x96, 0xc8, 0x2a, 0xba, 0x27, 0x2a, 0x11, 0x43,
+ 0xe4, 0x2e, 0x60, 0x4, 0xaf, 0x2b, 0xaa, 0x1e, 0x3c, 0xed, 0x23, 0x7,
+ ])
+ );
+
+ // Make sure that pre-crescendo hash is unaffected by the mass set
+ assert_eq!(
+ calc_hash_merkle_root_pre_crescendo(txs.iter()),
Hash::from_slice(&[
0x46, 0xec, 0xf4, 0x5b, 0xe3, 0xba, 0xca, 0x34, 0x9d, 0xfe, 0x8a, 0x78, 0xde, 0xaf, 0x05, 0x3b, 0x0a, 0xa6, 0xd5,
0x38, 0x97, 0x4d, 0xa5, 0x0f, 0xd6, 0xef, 0xb4, 0xd2, 0x66, 0xbc, 0x8d, 0x21,
diff --git a/consensus/core/src/trusted.rs b/consensus/core/src/trusted.rs
index 0e5bd97bfc..8ff0994900 100644
--- a/consensus/core/src/trusted.rs
+++ b/consensus/core/src/trusted.rs
@@ -17,6 +17,7 @@ pub struct ExternalGhostdagData {
/// Represents an externally provided block with associated Ghostdag data which
/// is only partially validated by the consensus layer. Note there is no actual trust
/// but rather these blocks are indirectly validated through the PoW mined over them
+#[derive(Clone)]
pub struct TrustedBlock {
pub block: Block,
pub ghostdag: ExternalGhostdagData,
diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs
index b542738f07..64f8286fe9 100644
--- a/consensus/core/src/tx.rs
+++ b/consensus/core/src/tx.rs
@@ -19,6 +19,7 @@ use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering::SeqCst;
+use std::sync::Arc;
use std::{
fmt::Display,
ops::Range,
@@ -94,10 +95,6 @@ pub struct TransactionInput {
#[serde(with = "serde_bytes")]
pub signature_script: Vec, // TODO: Consider using SmallVec
pub sequence: u64,
-
- // TODO: Since this field is used for calculating mass context free, and we already commit
- // to the mass in a dedicated field (on the tx level), it follows that this field is no longer
- // needed, and can be removed if we ever implement a v2 transaction
pub sig_op_count: u8,
}
@@ -233,8 +230,8 @@ impl Transaction {
self.id
}
- /// Set the storage mass commitment field of this transaction. This field is expected to be activated on mainnet
- /// as part of the Crescendo hardfork. The field has no effect on tx ID so no need to finalize following this call.
+ /// Set the storage mass commitment field of this transaction. This field has been activated on mainnet as part
+ /// of the Crescendo hardfork. The field has no effect on tx ID so no need to finalize following this call.
pub fn set_mass(&self, mass: u64) {
self.mass.0.store(mass, SeqCst)
}
@@ -542,6 +539,18 @@ impl MutableTransaction {
/// and can also be modified internally and signed etc.
pub type SignableTransaction = MutableTransaction;
+#[derive(Debug, Clone)]
+pub enum TransactionType {
+ Transaction,
+ SignableTransaction,
+}
+
+#[derive(Debug, Clone)]
+pub enum TransactionQueryResult {
+ Transaction(Arc<Vec<Transaction>>),
+ SignableTransaction(Arc<Vec<SignableTransaction>>),
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -688,7 +697,7 @@ mod tests {
let vec = (0..SCRIPT_VECTOR_SIZE as u8).collect::>();
let spk = ScriptPublicKey::from_vec(0xc0de, vec.clone());
let hex: String = serde_json::to_string(&spk).unwrap();
- assert_eq!("\"c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223\"", hex);
+ assert_eq!("\"c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122\"", hex);
 let spk = serde_json::from_str::<ScriptPublicKey>(&hex).unwrap();
assert_eq!(spk.version, 0xc0de);
assert_eq!(spk.script.as_slice(), vec.as_slice());
diff --git a/consensus/core/src/tx/script_public_key.rs b/consensus/core/src/tx/script_public_key.rs
index 180cde4f20..92a2151ef0 100644
--- a/consensus/core/src/tx/script_public_key.rs
+++ b/consensus/core/src/tx/script_public_key.rs
@@ -19,7 +19,7 @@ use wasm_bindgen::prelude::*;
use workflow_wasm::prelude::*;
/// Size of the underlying script vector of a script.
-pub const SCRIPT_VECTOR_SIZE: usize = 36;
+pub const SCRIPT_VECTOR_SIZE: usize = 35;
/// Used as the underlying type for script public key data, optimized for the common p2pk script size (34).
pub type ScriptVec = SmallVec<[u8; SCRIPT_VECTOR_SIZE]>;
@@ -415,7 +415,7 @@ mod tests {
let vec = (0..SCRIPT_VECTOR_SIZE as u8).collect::>();
let spk = ScriptPublicKey::from_vec(0xc0de, vec.clone()); // 0xc0de == 49374,
let hex: String = serde_json::to_string(&spk).unwrap();
- assert_eq!("\"c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223\"", hex);
+ assert_eq!("\"c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122\"", hex);
 let spk = serde_json::from_str::<ScriptPublicKey>(&hex).unwrap();
assert_eq!(spk.version, 0xc0de);
assert_eq!(spk.script.as_slice(), vec.as_slice());
@@ -453,7 +453,7 @@ mod tests {
let version = 0xc0de;
 let vec: Vec<u8> = (0..SCRIPT_VECTOR_SIZE as u8).collect();
let spk = ScriptPublicKey::from_vec(version, vec.clone());
- let str = "c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20212223";
+ let str = "c0de000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122";
let js = to_value(&spk).unwrap();
assert_eq!(js.as_string().unwrap(), str);
let script_hex = spk.script_as_hex();
diff --git a/consensus/core/src/utxo/utxo_inquirer.rs b/consensus/core/src/utxo/utxo_inquirer.rs
index 3aa1000295..07dea8fbab 100644
--- a/consensus/core/src/utxo/utxo_inquirer.rs
+++ b/consensus/core/src/utxo/utxo_inquirer.rs
@@ -1,16 +1,18 @@
use kaspa_hashes::Hash;
use thiserror::Error;
+use crate::tx::{TransactionId, TransactionOutpoint};
+
#[derive(Error, Debug, Clone)]
pub enum UtxoInquirerError {
- #[error("Transaction is already pruned")]
- AlreadyPruned,
#[error("Transaction return address is coinbase")]
TxFromCoinbase,
#[error("Transaction not found at given accepting daa score")]
NoTxAtScore,
#[error("Transaction was found but not standard")]
NonStandard,
+ #[error("No transaction specified")]
+ TransactionNotFound,
#[error("Did not find compact header for block hash {0} ")]
MissingCompactHeaderForBlockHash(Hash),
#[error("Did not find containing_acceptance for tx {0} ")]
@@ -19,8 +21,6 @@ pub enum UtxoInquirerError {
MissingBlockFromBlockTxStore(Hash),
#[error("Did not find index {0} in transactions of block {1}")]
MissingTransactionIndexOfBlock(usize, Hash),
- #[error("Expected {0} to match {1} when checking block_transaction_store using array index of transaction")]
- UnexpectedTransactionMismatch(Hash, Hash),
#[error("Did not find a utxo diff for chain block {0} ")]
MissingUtxoDiffForChainBlock(Hash),
#[error("Transaction {0} acceptance data must also be in the same block in this case")]
@@ -33,6 +33,22 @@ pub enum UtxoInquirerError {
MissingHashAtIndex(u64),
#[error("Did not find acceptance data for chain block {0}")]
MissingAcceptanceDataForChainBlock(Hash),
+ #[error("Did not find utxo entry for outpoint {0}")]
+ MissingUtxoEntryForOutpoint(TransactionOutpoint),
+ #[error("Did not find queried transactions in acceptance data: {0:?}")]
+ MissingQueriedTransactions(Vec<TransactionId>),
#[error("Utxo entry is not filled")]
UnfilledUtxoEntry,
+ #[error(transparent)]
+ UtxoInquirerFindTxsFromAcceptanceDataError(#[from] UtxoInquirerFindTxsFromAcceptanceDataError),
}
+
+#[derive(Error, Debug, Clone)]
+pub enum UtxoInquirerFindTxsFromAcceptanceDataError {
+ #[error("Tx ids filter is not allowed to be empty when not None.")]
+ TxIdsFilterIsEmptyError,
+ #[error("More than one tx id filter element is not supported yet.")]
+ TxIdsFilterNeedsLessOrEqualThanOneElementError,
+}
+
+pub type UtxoInquirerResult<T> = std::result::Result<T, UtxoInquirerError>;
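// ---- Illustrative sketch (editor's note, not part of the patch) ----
// How the new result alias and the #[error(transparent)] / #[from] conversion added
// above are expected to compose. Only one inner variant is reproduced here, and u64
// stands in for TransactionId; the `validate_filter` helper is hypothetical.
use thiserror::Error;

#[derive(Error, Debug, Clone)]
enum UtxoInquirerFindTxsFromAcceptanceDataError {
    #[error("Tx ids filter is not allowed to be empty when not None.")]
    TxIdsFilterIsEmptyError,
}

#[derive(Error, Debug, Clone)]
enum UtxoInquirerError {
    #[error(transparent)]
    UtxoInquirerFindTxsFromAcceptanceDataError(#[from] UtxoInquirerFindTxsFromAcceptanceDataError),
}

type UtxoInquirerResult<T> = std::result::Result<T, UtxoInquirerError>;

// A hypothetical caller: the inner error converts into the outer one via `?`.
fn validate_filter(filter: Option<&[u64]>) -> UtxoInquirerResult<()> {
    fn inner(filter: Option<&[u64]>) -> Result<(), UtxoInquirerFindTxsFromAcceptanceDataError> {
        if let Some(ids) = filter {
            if ids.is_empty() {
                return Err(UtxoInquirerFindTxsFromAcceptanceDataError::TxIdsFilterIsEmptyError);
            }
        }
        Ok(())
    }
    inner(filter)?; // #[from] enables this implicit conversion
    Ok(())
}

fn main() {
    let empty: &[u64] = &[];
    assert!(validate_filter(None).is_ok());
    assert!(validate_filter(Some(empty)).is_err());
}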
diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs
index e387329c85..9d163f93ab 100644
--- a/consensus/src/consensus/factory.rs
+++ b/consensus/src/consensus/factory.rs
@@ -3,7 +3,7 @@ use super::utxo_set_override::{set_genesis_utxo_commitment_from_config, set_init
use super::{ctl::Ctl, Consensus};
use crate::{model::stores::U64Key, pipeline::ProcessingCounters};
use itertools::Itertools;
-use kaspa_consensus_core::{config::Config, mining_rules::MiningRules};
+use kaspa_consensus_core::{api::ConsensusApi, config::Config, mining_rules::MiningRules};
use kaspa_consensus_notify::root::ConsensusNotificationRoot;
use kaspa_consensusmanager::{ConsensusFactory, ConsensusInstance, DynConsensusCtl, SessionLock};
use kaspa_core::{debug, time::unix_now, warn};
@@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata {
version: u32,
}
-const LATEST_DB_VERSION: u32 = 4;
+pub const LATEST_DB_VERSION: u32 = 5;
impl Default for MultiConsensusMetadata {
fn default() -> Self {
Self {
@@ -366,6 +366,10 @@ impl ConsensusFactory for Factory {
self.mining_rules.clone(),
));
+ // The default for the body_missing_anticone set is an empty vector, which corresponds precisely to the state before a consensus commit.
+ // The default for the pruning_utxoset_stable_flag, however, is true, whereas a staging consensus does not yet have a utxo set, hence the flag is lowered explicitly.
+ consensus.set_pruning_utxoset_stable_flag(false);
+
(ConsensusInstance::new(session_lock, consensus.clone()), Arc::new(Ctl::new(self.management_store.clone(), db, consensus)))
}
diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs
index 0819da4062..b10e5426c7 100644
--- a/consensus/src/consensus/mod.rs
+++ b/consensus/src/consensus/mod.rs
@@ -21,12 +21,12 @@ use crate::{
headers_selected_tip::HeadersSelectedTipStoreReader,
past_pruning_points::PastPruningPointsStoreReader,
pruning::PruningStoreReader,
- pruning_samples::{PruningSamplesStore, PruningSamplesStoreReader},
- reachability::ReachabilityStoreReader,
relations::RelationsStoreReader,
+ selected_chain::SelectedChainStore,
statuses::StatusesStoreReader,
- tips::TipsStoreReader,
+ tips::{TipsStore, TipsStoreReader},
utxo_set::{UtxoSetStore, UtxoSetStoreReader},
+ virtual_state::VirtualState,
DB,
},
},
@@ -44,7 +44,7 @@ use crate::{
},
};
use kaspa_consensus_core::{
- acceptance_data::AcceptanceData,
+ acceptance_data::{AcceptanceData, MergesetBlockAcceptanceData},
api::{
args::{TransactionValidationArgs, TransactionValidationBatchArgs},
stats::BlockCount,
@@ -70,8 +70,10 @@ use kaspa_consensus_core::{
network::NetworkType,
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata},
trusted::{ExternalGhostdagData, TrustedBlock},
- tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry},
- utxo::utxo_inquirer::UtxoInquirerError,
+ tx::{
+ MutableTransaction, Transaction, TransactionId, TransactionIndexType, TransactionOutpoint, TransactionQueryResult,
+ TransactionType, UtxoEntry,
+ },
BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher,
};
use kaspa_consensus_notify::root::ConsensusNotificationRoot;
@@ -82,22 +84,26 @@ use crossbeam_channel::{
use itertools::Itertools;
use kaspa_consensusmanager::{SessionLock, SessionReadGuard};
-use kaspa_database::prelude::{StoreResultEmptyTuple, StoreResultExtensions};
+use kaspa_core::info;
+use kaspa_database::prelude::StoreResultExtensions;
use kaspa_hashes::Hash;
use kaspa_muhash::MuHash;
use kaspa_txscript::caches::TxScriptCacheCounters;
+use kaspa_utils::arc::ArcExtensions;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
+use rocksdb::WriteBatch;
use std::{
+ cmp,
cmp::Reverse,
- collections::{BinaryHeap, VecDeque},
+ collections::{BinaryHeap, HashSet, VecDeque},
future::Future,
iter::once,
ops::Deref,
- sync::{atomic::Ordering, Arc},
-};
-use std::{
- sync::atomic::AtomicBool,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
thread::{self, JoinHandle},
};
use tokio::sync::oneshot;
@@ -106,8 +112,6 @@ use self::{services::ConsensusServices, storage::ConsensusStorage};
use crate::model::stores::selected_chain::SelectedChainStoreReader;
-use std::cmp;
-
pub struct Consensus {
// DB
db: Arc<DB>,
@@ -320,10 +324,7 @@ impl Consensus {
fn run_database_upgrades(&self) {
// Upgrade to initialize the new retention root field correctly
self.retention_root_database_upgrade();
-
- // TODO (post HF): remove this upgrade
- // Database upgrade to include pruning samples
- self.pruning_samples_database_upgrade();
+ self.consensus_transitional_flags_upgrade();
}
fn retention_root_database_upgrade(&self) {
@@ -336,60 +337,25 @@ impl Consensus {
pruning_point_store.set_retention_period_root(&mut batch, retention_checkpoint).unwrap();
} else {
// For non-archival nodes the retention root was the pruning point
- let pruning_point = pruning_point_store.get().unwrap().pruning_point;
+ let pruning_point = pruning_point_store.pruning_point().unwrap();
pruning_point_store.set_retention_period_root(&mut batch, pruning_point).unwrap();
}
self.db.write(batch).unwrap();
}
}
- fn pruning_samples_database_upgrade(&self) {
- //
- // For the first time this version runs, make sure we populate pruning samples
- // from pov for all qualified chain blocks in the pruning point future
- //
-
- let sink = self.get_sink();
- if self.storage.pruning_samples_store.pruning_sample_from_pov(sink).unwrap_option().is_some() {
- // Sink is populated so we assume the database is upgraded
- return;
+ fn consensus_transitional_flags_upgrade(&self) {
+ // Write the defaults to the internal storage so they will remain in cache
+ // *For a new staging consensus these flags will be updated again explicitly*
+ let mut batch = rocksdb::WriteBatch::default();
+ let mut pruning_meta_write = self.storage.pruning_meta_stores.write();
+ if pruning_meta_write.is_anticone_fully_synced() {
+ pruning_meta_write.set_body_missing_anticone(&mut batch, vec![]).unwrap();
}
-
- // Populate past pruning points (including current one)
- for (p1, p2) in (0..=self.pruning_point_store.read().get().unwrap().index)
- .map(|index| self.past_pruning_points_store.get(index).unwrap())
- .tuple_windows()
- {
- // Set p[i] to point at p[i-1]
- self.pruning_samples_store.insert(p2, p1).unwrap_or_exists();
- }
-
- let pruning_point = self.pruning_point();
- let reachability = self.reachability_store.read();
-
- // We walk up via reachability tree children so that we only iterate blocks B s.t. pruning point ∈ chain(B)
- let mut queue = VecDeque::<Hash>::from_iter(reachability.get_children(pruning_point).unwrap().iter().copied());
- let mut processed = 0;
- kaspa_core::info!("Upgrading database to include and populate the pruning samples store");
- while let Some(current) = queue.pop_front() {
- if !self.get_block_status(current).is_some_and(|s| s == BlockStatus::StatusUTXOValid) {
- // Skip branches of the tree which are not chain qualified.
- // This is sufficient since we will only assume this field exists
- // for such chain qualified blocks
- continue;
- }
- queue.extend(reachability.get_children(current).unwrap().iter());
-
- processed += 1;
-
- // Populate the data
- let ghostdag_data = self.ghostdag_store.get_compact_data(current).unwrap();
- let pruning_sample_from_pov =
- self.services.pruning_point_manager.expected_header_pruning_point_v2(ghostdag_data).pruning_sample;
- self.pruning_samples_store.insert(current, pruning_sample_from_pov).unwrap_or_exists();
+ if pruning_meta_write.pruning_utxoset_stable_flag() {
+ pruning_meta_write.set_pruning_utxoset_stable_flag(&mut batch, true).unwrap();
}
-
- kaspa_core::info!("Done upgrading database (populated {} entries)", processed);
+ self.db.write(batch).unwrap();
}
pub fn run_processors(&self) -> Vec<JoinHandle<()>> {
@@ -479,13 +445,125 @@ impl Consensus {
fn pruning_point_compact_headers(&self) -> Vec<(Hash, CompactHeaderData)> {
// PRUNE SAFETY: index is monotonic and past pruning point headers are expected permanently
- let current_pp_info = self.pruning_point_store.read().get().unwrap();
- (0..current_pp_info.index)
+ let (pruning_point, pruning_index) = self.pruning_point_store.read().pruning_point_and_index().unwrap();
+ (0..pruning_index)
.map(|index| self.past_pruning_points_store.get(index).unwrap())
- .chain(once(current_pp_info.pruning_point))
+ .chain(once(pruning_point))
.map(|hash| (hash, self.headers_store.get_compact_header_data(hash).unwrap()))
.collect_vec()
}
+
+ /// See: intrusive_pruning_point_update implementation below for details
+ pub fn intrusive_pruning_point_store_writes(
+ &self,
+ new_pruning_point: Hash,
+ syncer_sink: Hash,
+ pruning_points_to_add: VecDeque<Hash>,
+ ) -> ConsensusResult<()> {
+ let mut batch = WriteBatch::default();
+ let mut pruning_point_write = self.pruning_point_store.write();
+ let old_pp_index = pruning_point_write.pruning_point_index().unwrap();
+ let retention_period_root = pruning_point_write.retention_period_root().unwrap();
+
+ let new_pp_index = old_pp_index + pruning_points_to_add.len() as u64;
+ pruning_point_write.set_batch(&mut batch, new_pruning_point, new_pp_index).unwrap();
+ for (i, &past_pp) in pruning_points_to_add.iter().rev().enumerate() {
+ self.past_pruning_points_store.insert_batch(&mut batch, old_pp_index + i as u64 + 1, past_pp).unwrap();
+ }
+
+ // For archival nodes, keep the retention root in place
+ if !self.config.is_archival {
+ let adjusted_retention_period_root =
+ self.pruning_processor.advance_retention_period_root(retention_period_root, new_pruning_point);
+ pruning_point_write.set_retention_period_root(&mut batch, adjusted_retention_period_root).unwrap();
+ }
+
+ // Update virtual state based on the new pruning point.
+ // Updating the utxoset is done separately as it requires downloading the new utxoset in its entirety.
+ let virtual_parents = vec![new_pruning_point];
+ let virtual_state = Arc::new(VirtualState {
+ parents: virtual_parents.clone(),
+ ghostdag_data: self.services.ghostdag_manager.ghostdag(&virtual_parents),
+ ..VirtualState::default()
+ });
+ self.virtual_stores.write().state.set_batch(&mut batch, virtual_state).unwrap();
+ // Remove old body tips and insert pruning point as the current tip
+ self.body_tips_store.write().delete_all_tips(&mut batch).unwrap();
+ self.body_tips_store.write().init_batch(&mut batch, &virtual_parents).unwrap();
+ // Update selected_chain
+ self.selected_chain_store.write().init_with_pruning_point(&mut batch, new_pruning_point).unwrap();
+ // It is important to set this flag to false together with writing the batch, in case the node crashes suddenly before syncing of the new utxoset starts
+ self.pruning_meta_stores.write().set_pruning_utxoset_stable_flag(&mut batch, false).unwrap();
+ // Store the currently bodyless anticone from the POV of the syncer, for trusted body validation at a later stage.
+ let mut anticone = self.services.dag_traversal_manager.anticone(new_pruning_point, [syncer_sink].into_iter(), None)?;
+ // Add the pruning point itself which is also missing a body
+ anticone.push(new_pruning_point);
+ self.pruning_meta_stores.write().set_body_missing_anticone(&mut batch, anticone).unwrap();
+ self.db.write(batch).unwrap();
+ drop(pruning_point_write);
+ Ok(())
+ }
+
+ /// Verify that the new pruning point can be safely imported
+ /// and return all new pruning points on the path to it that need to be updated in consensus
+ fn get_and_verify_path_to_new_pruning_point(&self, new_pruning_point: Hash, syncer_sink: Hash) -> ConsensusResult<VecDeque<Hash>> {
+ // Let B.sp denote the selected parent of a block B, let f be the finality depth, and let p be the pruning depth.
+ // The new pruning point P can be "finalized" into consensus if:
+ // 1) P satisfies P.blue_score > N*f and selected_parent(P).blue_score <= N*f
+ // where N is some integer (i.e. it is a valid pruning point based on score)
+ // *this condition is assumed to have already been checked externally and we do not repeat it here*.
+
+ // 2) There are sufficient headers built on top of it, specifically,
+ // a header is validated whose blue_score is greater than P.blue_score + p:
+ let syncer_pp_bscore = self.get_header(new_pruning_point).unwrap().blue_score;
+ let syncer_virtual_bscore = self.get_header(syncer_sink).unwrap().blue_score;
+ // [Crescendo]: Remove after()
+ if syncer_virtual_bscore < syncer_pp_bscore + self.config.pruning_depth().after() {
+ return Err(ConsensusError::General("declared pruning point is not of sufficient depth"));
+ }
+ // 3) The syncer pruning point is on the selected chain from that header.
+ if !self.services.reachability_service.is_chain_ancestor_of(new_pruning_point, syncer_sink) {
+ return Err(ConsensusError::General("new pruning point is not in the past of syncer sink"));
+ }
+ info!("Setting {new_pruning_point} as the pruning point");
+ // 4) The pruning points declared on headers on that path must be consistent with those already known by the node:
+ let pruning_point_read = self.pruning_point_store.read();
+ let old_pruning_point = pruning_point_read.pruning_point().unwrap();
+
+ // Note that the function below also updates the pruning samples,
+ // and implicitly confirms any pruning point pointed at en route to virtual is a pruning sample.
+ // It is emphasized that updating pruning samples for individual blocks is not harmful
+ // even if the verification ultimately does not succeed.
+ let mut pruning_points_to_add =
+ self.services.pruning_point_manager.pruning_points_on_path_to_syncer_sink(old_pruning_point, syncer_sink).map_err(
+ |e: PruningImportError| {
+ ConsensusError::GeneralOwned(format!("pruning points en route to syncer sink do not form a valid chain: {}", e))
+ },
+ )?;
+ // Next we filter the returned list so it contains only the pruning points that must be introduced to consensus
+
+ // Remove the excess pruning points before the old pruning point
+ while let Some(past_pp) = pruning_points_to_add.pop_back() {
+ if past_pp == old_pruning_point {
+ break;
+ }
+ }
+ if pruning_points_to_add.is_empty() {
+ return Err(ConsensusError::General("old pruning points is inconsistent with synced headers"));
+ }
+ // Remove the excess pruning points beyond the new pruning_point
+ while let Some(&future_pp) = pruning_points_to_add.front() {
+ if future_pp == new_pruning_point {
+ break;
+ }
+ // Here we only pop_front after checking as we want the new pruning_point to stay in the list
+ pruning_points_to_add.pop_front();
+ }
+ if pruning_points_to_add.is_empty() {
+ return Err(ConsensusError::General("new pruning point is inconsistent with synced headers"));
+ }
+ Ok(pruning_points_to_add)
+ }
}
impl ConsensusApi for Consensus {
@@ -667,7 +745,7 @@ impl ConsensusApi for Consensus {
/// Estimates the number of blocks and headers stored in the node database.
///
/// This is an estimation based on the DAA score difference between the node's `retention root` and `virtual`'s DAA score,
- /// as such, it does not include non-daa blocks, and does not include headers stored as part of the pruning proof.
+ /// as such, it does not include non-daa blocks, and does not include headers stored as part of the pruning proof.
fn estimate_block_count(&self) -> BlockCount {
// PRUNE SAFETY: retention root is always a current or past pruning point which its header is kept permanently
let retention_period_root_score = self.headers_store.get_daa_score(self.get_retention_period_root()).unwrap();
@@ -771,11 +849,133 @@ impl ConsensusApi for Consensus {
sample_headers
}
+ fn get_transactions_by_accepting_daa_score(
+ &self,
+ accepting_daa_score: u64,
+ tx_ids: Option<Vec<TransactionId>>,
+ tx_type: TransactionType,
+ ) -> ConsensusResult<TransactionQueryResult> {
+ // We need consistency between the acceptance store and the block transaction store.
+ let _guard = self.pruning_lock.blocking_read();
+ let accepting_block = self
+ .virtual_processor
+ .find_accepting_chain_block_hash_at_daa_score(accepting_daa_score, self.get_retention_period_root())?;
+ self.get_transactions_by_accepting_block(accepting_block, tx_ids, tx_type)
+ }
- fn get_populated_transaction(&self, txid: Hash, accepting_block_daa_score: u64) -> Result<SignableTransaction, UtxoInquirerError> {
- // We need consistency between the pruning_point_store, utxo_diffs_store, block_transactions_store, selected chain and headers store reads
+ fn get_transactions_by_block_acceptance_data(
+ &self,
+ accepting_block: Hash,
+ block_acceptance_data: MergesetBlockAcceptanceData,
+ tx_ids: Option<Vec<TransactionId>>,
+ tx_type: TransactionType,
+ ) -> ConsensusResult<TransactionQueryResult> {
+ // Need consistency between the acceptance store and the block transaction store.
let _guard = self.pruning_lock.blocking_read();
- self.virtual_processor.get_populated_transaction(txid, accepting_block_daa_score, self.get_retention_period_root())
+
+ match tx_type {
+ TransactionType::Transaction => {
+ if let Some(tx_ids) = tx_ids {
+ let mut tx_ids_filter = HashSet::with_capacity(tx_ids.len());
+ tx_ids_filter.extend(tx_ids);
+
+ Ok(TransactionQueryResult::Transaction(Arc::new(
+ self.get_block_transactions(
+ block_acceptance_data.block_hash,
+ Some(
+ block_acceptance_data
+ .accepted_transactions
+ .into_iter()
+ .filter_map(|atx| {
+ if tx_ids_filter.contains(&atx.transaction_id) {
+ Some(atx.index_within_block)
+ } else {
+ None
+ }
+ })
+ .collect(),
+ ),
+ )?,
+ )))
+ } else {
+ Ok(TransactionQueryResult::Transaction(Arc::new(self.get_block_transactions(
+ block_acceptance_data.block_hash,
+ Some(block_acceptance_data.accepted_transactions.iter().map(|atx| atx.index_within_block).collect()),
+ )?)))
+ }
+ }
+ TransactionType::SignableTransaction => Ok(TransactionQueryResult::SignableTransaction(Arc::new(
+ self.virtual_processor.get_populated_transactions_by_block_acceptance_data(
+ tx_ids,
+ block_acceptance_data,
+ accepting_block,
+ )?,
+ ))),
+ }
+ }
+
+ fn get_transactions_by_accepting_block(
+ &self,
+ accepting_block: Hash,
+ tx_ids: Option<Vec<TransactionId>>,
+ tx_type: TransactionType,
+ ) -> ConsensusResult<TransactionQueryResult> {
+ // Need consistency between the acceptance store and the block transaction store.
+ let _guard = self.pruning_lock.blocking_read();
+
+ match tx_type {
+ TransactionType::Transaction => {
+ let accepting_block_mergeset_acceptance_data_iter = self
+ .acceptance_data_store
+ .get(accepting_block)
+ .map_err(|_| ConsensusError::MissingData(accepting_block))?
+ .unwrap_or_clone()
+ .into_iter();
+
+ if let Some(tx_ids) = tx_ids {
+ let mut tx_ids_filter = HashSet::with_capacity(tx_ids.len());
+ tx_ids_filter.extend(tx_ids);
+
+ Ok(TransactionQueryResult::Transaction(Arc::new(
+ accepting_block_mergeset_acceptance_data_iter
+ .flat_map(|mbad| {
+ self.get_block_transactions(
+ mbad.block_hash,
+ Some(
+ mbad.accepted_transactions
+ .into_iter()
+ .filter_map(|atx| {
+ if tx_ids_filter.contains(&atx.transaction_id) {
+ Some(atx.index_within_block)
+ } else {
+ None
+ }
+ })
+ .collect(),
+ ),
+ )
+ })
+ .flatten()
+ .collect::<Vec<_>>(),
+ )))
+ } else {
+ Ok(TransactionQueryResult::Transaction(Arc::new(
+ accepting_block_mergeset_acceptance_data_iter
+ .flat_map(|mbad| {
+ self.get_block_transactions(
+ mbad.block_hash,
+ Some(mbad.accepted_transactions.iter().map(|atx| atx.index_within_block).collect()),
+ )
+ })
+ .flatten()
+ .collect::<Vec<_>>(),
+ )))
+ }
+ }
+ TransactionType::SignableTransaction => Ok(TransactionQueryResult::SignableTransaction(Arc::new(
+ self.virtual_processor.get_populated_transactions_by_accepting_block(tx_ids, accepting_block)?,
+ ))),
+ }
}
fn get_virtual_parents(&self) -> BlockHashSet {
@@ -815,10 +1015,10 @@ impl ConsensusApi for Consensus {
if self.pruning_point_store.read().pruning_point().unwrap() != expected_pruning_point {
return Err(ConsensusError::UnexpectedPruningPoint);
}
- let pruning_utxoset_read = self.pruning_utxoset_stores.read();
- let iter = pruning_utxoset_read.utxo_set.seek_iterator(from_outpoint, chunk_size, skip_first);
+ let pruning_meta_read = self.pruning_meta_stores.read();
+ let iter = pruning_meta_read.utxo_set.seek_iterator(from_outpoint, chunk_size, skip_first);
let utxos = iter.map(|item| item.unwrap()).collect();
- drop(pruning_utxoset_read);
+ drop(pruning_meta_read);
// We recheck the expected pruning point in case it was switched just before the utxo set read.
// NOTE: we rely on order of operations by pruning processor. See extended comment therein.
@@ -833,9 +1033,8 @@ impl ConsensusApi for Consensus {
self.services.coinbase_manager.modify_coinbase_payload(payload, miner_data)
}
- fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash {
- let storage_mass_activated = self.config.crescendo_activation.is_active(pov_daa_score);
- calc_hash_merkle_root(txs.iter(), storage_mass_activated)
+ fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction]) -> Hash {
+ calc_hash_merkle_root(txs.iter())
}
fn validate_pruning_proof(
@@ -855,8 +1054,8 @@ impl ConsensusApi for Consensus {
}
fn append_imported_pruning_point_utxos(&self, utxoset_chunk: &[(TransactionOutpoint, UtxoEntry)], current_multiset: &mut MuHash) {
- let mut pruning_utxoset_write = self.pruning_utxoset_stores.write();
- pruning_utxoset_write.utxo_set.write_many(utxoset_chunk).unwrap();
+ let mut pruning_meta_write = self.pruning_meta_stores.write();
+ pruning_meta_write.utxo_set.write_many(utxoset_chunk).unwrap();
// Parallelize processing using the context of an existing thread pool.
let inner_multiset = self.virtual_processor.install(|| {
@@ -875,16 +1074,16 @@ impl ConsensusApi for Consensus {
fn validate_pruning_points(&self, syncer_virtual_selected_parent: Hash) -> ConsensusResult<()> {
let hst = self.storage.headers_selected_tip_store.read().get().unwrap().hash;
- let pp_info = self.pruning_point_store.read().get().unwrap();
- if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point, hst) {
+ let (synced_pruning_point, synced_pp_index) = self.pruning_point_store.read().pruning_point_and_index().unwrap();
+ if !self.services.pruning_point_manager.is_valid_pruning_point(synced_pruning_point, hst) {
return Err(ConsensusError::General("pruning point does not coincide with the synced header selected tip"));
}
- if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point, syncer_virtual_selected_parent) {
+ if !self.services.pruning_point_manager.is_valid_pruning_point(synced_pruning_point, syncer_virtual_selected_parent) {
return Err(ConsensusError::General("pruning point does not coincide with the syncer's sink (virtual selected parent)"));
}
self.services
.pruning_point_manager
- .are_pruning_points_in_valid_chain(pp_info, syncer_virtual_selected_parent)
+ .are_pruning_points_in_valid_chain(synced_pruning_point, synced_pp_index, syncer_virtual_selected_parent)
.map_err(|e| ConsensusError::GeneralOwned(format!("past pruning points do not form a valid chain: {}", e)))
}
@@ -898,7 +1097,7 @@ impl ConsensusApi for Consensus {
// max_blocks has to be greater than the merge set size limit
fn get_hashes_between(&self, low: Hash, high: Hash, max_blocks: usize) -> ConsensusResult<(Vec<Hash>, Hash)> {
let _guard = self.pruning_lock.blocking_read();
- assert!(max_blocks as u64 > self.config.mergeset_size_limit().upper_bound());
+ assert!(max_blocks as u64 > self.config.mergeset_size_limit().after());
self.validate_block_exists(low)?;
self.validate_block_exists(high)?;
@@ -948,10 +1147,10 @@ impl ConsensusApi for Consensus {
fn pruning_point_headers(&self) -> Vec<Arc<Header>> {
// PRUNE SAFETY: index is monotonic and past pruning point headers are expected permanently
- let current_pp_info = self.pruning_point_store.read().get().unwrap();
- (0..current_pp_info.index)
+ let (pruning_point, pruning_index) = self.pruning_point_store.read().pruning_point_and_index().unwrap();
+ (0..pruning_index)
.map(|index| self.past_pruning_points_store.get(index).unwrap())
- .chain(once(current_pp_info.pruning_point))
+ .chain(once(pruning_point))
.map(|hash| self.headers_store.get_header(hash).unwrap())
.collect_vec()
}
@@ -976,6 +1175,44 @@ impl ConsensusApi for Consensus {
})
}
+ fn get_block_transactions(&self, hash: Hash, indices: Option<Vec<TransactionIndexType>>) -> ConsensusResult<Vec<Transaction>> {
+ let transactions = self.block_transactions_store.get(hash).unwrap_option().ok_or(ConsensusError::BlockNotFound(hash))?;
+ let tx_len = transactions.len();
+
+ if let Some(indices) = indices {
+ if tx_len < indices.len() {
+ return Err(ConsensusError::TransactionQueryTooLarge(indices.len(), hash, transactions.len()));
+ }
+
+ let res = transactions
+ .unwrap_or_clone()
+ .into_iter()
+ .enumerate()
+ .filter(|(index, _tx)| indices.contains(&(*index as TransactionIndexType)))
+ .map(|(_, tx)| tx)
+ .collect::<Vec<_>>();
+
+ if res.len() != indices.len() {
+ Err(ConsensusError::TransactionIndexOutOfBounds(*indices.iter().max().unwrap(), tx_len, hash))
+ } else {
+ Ok(res)
+ }
+ } else {
+ Ok(transactions.unwrap_or_clone())
+ }
+ }
+
+ fn get_block_body(&self, hash: Hash) -> ConsensusResult<Arc<Vec<Transaction>>> {
+ if match self.statuses_store.read().get(hash).unwrap_option() {
+ Some(status) => !status.has_block_body(),
+ None => true,
+ } {
+ return Err(ConsensusError::BlockNotFound(hash));
+ }
+
+ self.block_transactions_store.get(hash).unwrap_option().ok_or(ConsensusError::BlockNotFound(hash))
+ }
+
fn get_block_even_if_header_only(&self, hash: Hash) -> ConsensusResult<Block> {
let Some(status) = self.statuses_store.read().get(hash).unwrap_option().filter(|&status| status.has_block_header()) else {
return Err(ConsensusError::HeaderNotFound(hash));
@@ -1061,45 +1298,22 @@ impl ConsensusApi for Consensus {
self.validate_block_exists(high)?;
Ok(self.services.sync_manager.get_missing_block_body_hashes(high)?)
}
-
- fn pruning_point(&self) -> Hash {
- self.pruning_point_store.read().pruning_point().unwrap()
+ /// Returns the set of blocks in the anticone of the current pruning point
+ /// which may lack a block body due to being in a transitional state.
+ /// If not in a transitional state, this list is expected to be empty.
+ fn get_body_missing_anticone(&self) -> Vec<Hash> {
+ self.pruning_meta_stores.read().get_body_missing_anticone()
}
- fn get_daa_window(&self, hash: Hash) -> ConsensusResult<Vec<Hash>> {
- let _guard = self.pruning_lock.blocking_read();
- self.validate_block_exists(hash)?;
- Ok(self
- .services
- .window_manager
- .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::DifficultyWindow)
- .unwrap()
- .deref()
- .iter()
- .map(|block| block.0.hash)
- .collect())
+ fn clear_body_missing_anticone_set(&self) {
+ let mut pruning_meta_write = self.pruning_meta_stores.write();
+ let mut batch = rocksdb::WriteBatch::default();
+ pruning_meta_write.set_body_missing_anticone(&mut batch, vec![]).unwrap();
+ self.db.write(batch).unwrap();
}
- fn get_trusted_block_associated_ghostdag_data_block_hashes(&self, hash: Hash) -> ConsensusResult<Vec<Hash>> {
- let _guard = self.pruning_lock.blocking_read();
- self.validate_block_exists(hash)?;
-
- // In order to guarantee the chain height is at least k, we check that the pruning point is not genesis.
- let pruning_point = self.pruning_point();
- if pruning_point == self.config.genesis.hash {
- return Err(ConsensusError::UnexpectedPruningPoint);
- }
-
- // [Crescendo]: get ghostdag k based on the pruning point's DAA score. The off-by-one of not going by selected parent
- // DAA score is not important here as we simply increase K one block earlier which is more conservative (saving/sending more data)
- let ghostdag_k = self.config.ghostdag_k().get(self.headers_store.get_daa_score(pruning_point).unwrap());
-
- // Note: the method `get_ghostdag_chain_k_depth` might return a partial chain if data is missing.
- // Ideally this node when synced would validate it got all of the associated data up to k blocks
- // back and then we would be able to assert we actually got `k + 1` blocks, however we choose to
- // simply ignore, since if the data was truly missing we wouldn't accept the staging consensus in
- // the first place
- Ok(self.services.pruning_proof_manager.get_ghostdag_chain_k_depth(hash, ghostdag_k))
+ fn pruning_point(&self) -> Hash {
+ self.pruning_point_store.read().pruning_point().unwrap()
}
fn create_block_locator_from_pruning_point(&self, high: Hash, limit: usize) -> ConsensusResult<Vec<Hash>> {
@@ -1141,4 +1355,74 @@ impl ConsensusApi for Consensus {
fn finality_point(&self) -> Hash {
self.virtual_processor.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, self.pruning_point())
}
+
+ /// The utxoset is an additive structure;
+ /// to make room for the gradual aggregation of a new utxoset,
+ /// the old one must first be cleared.
+ /// Clearing the old utxoset is likewise a gradual process.
+ /// The utxo stable flag guarantees that a full utxoset is never mistaken for
+ /// an incomplete or partially deleted one.
+ fn clear_pruning_utxo_set(&self) {
+ let mut pruning_meta_write = self.pruning_meta_stores.write();
+ let mut batch = rocksdb::WriteBatch::default();
+ // Under the conditions in which this function is currently called, this flag should already be false.
+ // We lower it regardless, as it is conceptually correct to do so.
+ pruning_meta_write.set_pruning_utxoset_stable_flag(&mut batch, false).unwrap();
+ self.db.write(batch).unwrap();
+ pruning_meta_write.utxo_set.clear().unwrap();
+ }
+
+ fn verify_is_pruning_sample(&self, pruning_candidate: Hash) -> ConsensusResult<()> {
+ if pruning_candidate == self.config.genesis.hash {
+ return Ok(());
+ }
+ let Ok(candidate_ghostdag_data) = self.get_ghostdag_data(pruning_candidate) else {
+ return Err(ConsensusError::General("pruning candidate missing ghostdag data"));
+ };
+ let Ok(selected_parent_ghostdag_data) = self.get_ghostdag_data(candidate_ghostdag_data.selected_parent) else {
+ return Err(ConsensusError::General("pruning candidate selected parent missing ghostdag data"));
+ };
+ self.services
+ .pruning_point_manager
+ .is_pruning_sample(
+ candidate_ghostdag_data.blue_score,
+ selected_parent_ghostdag_data.blue_score,
+ self.config.params.finality_depth().after(),
+ )
+ .then_some(())
+ .ok_or(ConsensusError::General("pruning candidate is not a pruning sample"))
+ }
+
+ /// The usual flow consists of the pruning point naturally updating during pruning, which maintains consistency by default.
+ /// During pruning catchup, we need to manually update the pruning point and
+ /// make sure that consensus looks "as if" it has just moved to a new pruning point.
+ fn intrusive_pruning_point_update(&self, new_pruning_point: Hash, syncer_sink: Hash) -> ConsensusResult<()> {
+ let pruning_points_to_add = self.get_and_verify_path_to_new_pruning_point(new_pruning_point, syncer_sink)?;
+
+ // If all has gone well, we can finally update pruning point and other stores.
+ self.intrusive_pruning_point_store_writes(new_pruning_point, syncer_sink, pruning_points_to_add)
+ }
+
+ fn set_pruning_utxoset_stable_flag(&self, val: bool) {
+ let mut pruning_meta_write = self.pruning_meta_stores.write();
+ let mut batch = rocksdb::WriteBatch::default();
+
+ pruning_meta_write.set_pruning_utxoset_stable_flag(&mut batch, val).unwrap();
+ self.db.write(batch).unwrap();
+ }
+
+ fn is_pruning_utxoset_stable(&self) -> bool {
+ let pruning_meta_read = self.pruning_meta_stores.read();
+ pruning_meta_read.pruning_utxoset_stable_flag()
+ }
+
+ fn is_pruning_point_anticone_fully_synced(&self) -> bool {
+ let pruning_meta_read = self.pruning_meta_stores.read();
+ pruning_meta_read.is_anticone_fully_synced()
+ }
+
+ fn is_consensus_in_transitional_ibd_state(&self) -> bool {
+ let pruning_meta_read = self.pruning_meta_stores.read();
+ pruning_meta_read.is_in_transitional_ibd_state()
+ }
}
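// ---- Illustrative sketch (editor's note, not part of the patch) ----
// The VecDeque trimming performed by get_and_verify_path_to_new_pruning_point above,
// demonstrated on plain integers instead of hashes. The list arrives ordered from the
// newest candidate at the front to the oldest at the back; `trim_path` is hypothetical.
use std::collections::VecDeque;

fn trim_path(mut path: VecDeque<u64>, old_pp: u64, new_pp: u64) -> Option<VecDeque<u64>> {
    // Drop everything at or before the old pruning point (the old point itself is popped too).
    while let Some(past) = path.pop_back() {
        if past == old_pp {
            break;
        }
    }
    if path.is_empty() {
        return None; // old pruning point was not found on the path
    }
    // Drop everything strictly newer than the new pruning point, keeping the new point itself.
    while let Some(&front) = path.front() {
        if front == new_pp {
            break;
        }
        path.pop_front();
    }
    if path.is_empty() {
        return None; // new pruning point was not found on the path
    }
    Some(path)
}

fn main() {
    // Newest-first path: 9 lies beyond the new point 8, while 3 and 1 precede the old point 5.
    let path: VecDeque<u64> = VecDeque::from(vec![9, 8, 7, 6, 5, 3, 1]);
    assert_eq!(trim_path(path, 5, 8), Some(VecDeque::from(vec![8, 7, 6])));
}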
diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs
index d608938cd7..5e2b92fd8f 100644
--- a/consensus/src/consensus/services.rs
+++ b/consensus/src/consensus/services.rs
@@ -150,6 +150,7 @@ impl ConsensusServices {
params.max_script_public_key_len(),
params.coinbase_payload_script_public_key_max_len,
params.coinbase_maturity(),
+ params.ghostdag_k().after(),
tx_script_cache_counters,
mass_calculator.clone(),
params.crescendo_activation,
diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs
index c943f233db..f6d7e88508 100644
--- a/consensus/src/consensus/storage.rs
+++ b/consensus/src/consensus/storage.rs
@@ -11,8 +11,8 @@ use crate::{
headers_selected_tip::DbHeadersSelectedTipStore,
past_pruning_points::DbPastPruningPointsStore,
pruning::DbPruningStore,
+ pruning_meta::PruningMetaStores,
pruning_samples::DbPruningSamplesStore,
- pruning_utxoset::PruningUtxosetStores,
reachability::{DbReachabilityStore, ReachabilityData},
relations::DbRelationsStore,
selected_chain::DbSelectedChainStore,
@@ -46,7 +46,7 @@ pub struct ConsensusStorage {
pub pruning_point_store: Arc<RwLock<DbPruningStore>>,
pub headers_selected_tip_store: Arc<RwLock<DbHeadersSelectedTipStore>>,
pub body_tips_store: Arc<RwLock<DbTipsStore>>,
- pub pruning_utxoset_stores: Arc<RwLock<PruningUtxosetStores>>,
+ pub pruning_meta_stores: Arc<RwLock<PruningMetaStores>>,
pub virtual_stores: Arc<RwLock<VirtualStores>>,
pub selected_chain_store: Arc<RwLock<DbSelectedChainStore>>,
@@ -83,9 +83,8 @@ impl ConsensusStorage {
let perf_params = &config.perf;
// Lower and upper bounds
- // [Crescendo]: all usages of pruning upper bounds also bound by actual memory bytes, so we can safely use the larger values
- let pruning_depth = params.pruning_depth().upper_bound() as usize;
- let pruning_size_for_caches = pruning_depth + params.finality_depth().upper_bound() as usize; // Upper bound for any block/header related data
+ let pruning_depth = params.pruning_depth().after() as usize;
+ let pruning_size_for_caches = pruning_depth + params.finality_depth().after() as usize; // Upper bound for any block/header related data
let level_lower_bound = 2 * params.pruning_proof_m as usize; // Number of items lower bound for level-related caches
// Budgets in bytes. All byte budgets overall sum up to ~1GB of memory (which obviously takes more low level alloc space)
@@ -211,9 +210,8 @@ impl ConsensusStorage {
// Pruning
let pruning_point_store = Arc::new(RwLock::new(DbPruningStore::new(db.clone())));
let past_pruning_points_store = Arc::new(DbPastPruningPointsStore::new(db.clone(), past_pruning_points_builder.build()));
- let pruning_utxoset_stores = Arc::new(RwLock::new(PruningUtxosetStores::new(db.clone(), utxo_set_builder.build())));
+ let pruning_meta_stores = Arc::new(RwLock::new(PruningMetaStores::new(db.clone(), utxo_set_builder.build())));
let pruning_samples_store = Arc::new(DbPruningSamplesStore::new(db.clone(), header_data_builder.build()));
-
// Txs
let block_transactions_store = Arc::new(DbBlockTransactionsStore::new(db.clone(), transactions_builder.build()));
let utxo_diffs_store = Arc::new(DbUtxoDiffsStore::new(db.clone(), utxo_diffs_builder.build()));
@@ -249,7 +247,7 @@ impl ConsensusStorage {
body_tips_store,
headers_store,
block_transactions_store,
- pruning_utxoset_stores,
+ pruning_meta_stores,
virtual_stores,
selected_chain_store,
acceptance_data_store,
diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs
index 69ec7170c2..091b7440c1 100644
--- a/consensus/src/consensus/test_consensus.rs
+++ b/consensus/src/consensus/test_consensus.rs
@@ -25,8 +25,7 @@ use crate::{
model::{
services::reachability::MTReachabilityService,
stores::{
- ghostdag::DbGhostdagStore, headers::HeaderStoreReader, pruning::PruningStoreReader, reachability::DbReachabilityStore,
- virtual_state::VirtualStores, DB,
+ ghostdag::DbGhostdagStore, headers::HeaderStoreReader, reachability::DbReachabilityStore, virtual_state::VirtualStores, DB,
},
},
params::Params,
@@ -119,13 +118,10 @@ impl TestConsensus {
}
pub fn build_header_with_parents(&self, hash: Hash, parents: Vec<Hash>) -> Header {
- let mut header = header_from_precomputed_hash(hash, parents);
+ let mut header = header_from_precomputed_hash(hash, parents.clone());
+ let parents_by_level = self.consensus.services.parents_manager.calc_block_parents(self.pruning_point(), &parents);
+ header.parents_by_level = parents_by_level;
let ghostdag_data = self.consensus.services.ghostdag_manager.ghostdag(header.direct_parents());
- header.pruning_point = self
- .consensus
- .services
- .pruning_point_manager
- .expected_header_pruning_point_v1(ghostdag_data.to_compact(), self.consensus.pruning_point_store.read().get().unwrap());
let daa_window = self.consensus.services.window_manager.block_daa_window(&ghostdag_data).unwrap();
header.bits = self.consensus.services.window_manager.calculate_difficulty_bits(&ghostdag_data, &daa_window);
header.daa_score = daa_window.daa_score;
@@ -136,8 +132,12 @@ impl TestConsensus {
header
}
- pub fn add_block_with_parents(&self, hash: Hash, parents: Vec) -> impl Future