diff --git a/Cargo.lock b/Cargo.lock index f622d477bb..6229878949 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "arc-swap" @@ -2256,7 +2256,7 @@ dependencies = [ [[package]] name = "kaspa-addresses" -version = "0.16.1" +version = "0.17.1" dependencies = [ "borsh", "criterion", @@ -2273,7 +2273,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.16.1" +version = "0.17.1" dependencies = [ "borsh", "igd-next", @@ -2295,14 +2295,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.16.1" +version = "0.17.1" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.16.1" +version = "0.17.1" dependencies = [ "borsh", "bs58", @@ -2329,7 +2329,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "borsh", @@ -2376,7 +2376,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.16.1" +version = "0.17.1" dependencies = [ "duration-string", "futures-util", @@ -2393,7 +2393,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.16.1" +version = "0.17.1" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -2437,7 +2437,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.16.1" +version = "0.17.1" dependencies = [ "ahash", "cfg-if 1.0.0", @@ -2465,7 +2465,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.16.1" +version = "0.17.1" dependencies = [ "arc-swap", "async-trait", @@ -2504,7 +2504,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.16.1" +version = "0.17.1" dependencies = [ 
"async-channel 2.3.1", "cfg-if 1.0.0", @@ -2523,7 +2523,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.16.1" +version = "0.17.1" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2547,7 +2547,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.16.1" +version = "0.17.1" dependencies = [ "duration-string", "futures", @@ -2566,8 +2566,9 @@ dependencies = [ [[package]] name = "kaspa-core" -version = "0.16.1" +version = "0.17.1" dependencies = [ + "anyhow", "cfg-if 1.0.0", "ctrlc", "futures-util", @@ -2584,7 +2585,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "borsh", @@ -2606,7 +2607,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.16.1" +version = "0.17.1" dependencies = [ "bincode", "enum-primitive-derive", @@ -2628,7 +2629,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2660,7 +2661,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2690,7 +2691,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2726,7 +2727,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.16.1" +version = "0.17.1" dependencies = [ "blake2b_simd", "borsh", @@ -2747,7 +2748,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2766,7 +2767,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2794,7 +2795,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.16.1" 
+version = "0.17.1" dependencies = [ "borsh", "criterion", @@ -2815,14 +2816,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.16.1" +version = "0.17.1" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "borsh", @@ -2838,7 +2839,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.16.1" +version = "0.17.1" dependencies = [ "criterion", "futures-util", @@ -2865,7 +2866,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.16.1" +version = "0.17.1" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -2873,7 +2874,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.16.1" +version = "0.17.1" dependencies = [ "criterion", "kaspa-hashes", @@ -2886,7 +2887,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2922,7 +2923,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "chrono", @@ -2940,6 +2941,7 @@ dependencies = [ "kaspa-muhash", "kaspa-notify", "kaspa-p2p-lib", + "kaspa-p2p-mining", "kaspa-utils", "kaspa-utils-tower", "log", @@ -2953,7 +2955,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.16.1" +version = "0.17.1" dependencies = [ "borsh", "ctrlc", @@ -2982,9 +2984,26 @@ dependencies = [ "uuid 1.10.0", ] +[[package]] +name = "kaspa-p2p-mining" +version = "0.17.1" +dependencies = [ + "kaspa-consensus-core", + "kaspa-consensusmanager", + "kaspa-core", + "kaspa-hashes", + "kaspa-math", + "kaspa-mining-errors", + "kaspa-p2p-lib", + "kaspa-utils", + "kaspa-utils-tower", + "log", + "tokio", +] + [[package]] name = "kaspa-perf-monitor" -version = "0.16.1" +version = "0.17.1" dependencies = [ "kaspa-core", "log", @@ -2996,7 +3015,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = 
"0.16.1" +version = "0.17.1" dependencies = [ "criterion", "js-sys", @@ -3012,7 +3031,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3054,7 +3073,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.16.1" +version = "0.17.1" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3066,7 +3085,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "kaspa-addresses", @@ -3081,6 +3100,7 @@ dependencies = [ "kaspa-notify", "kaspa-p2p-flows", "kaspa-p2p-lib", + "kaspa-p2p-mining", "kaspa-perf-monitor", "kaspa-rpc-core", "kaspa-txscript", @@ -3095,7 +3115,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3155,7 +3175,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.16.1" +version = "0.17.1" dependencies = [ "blake2b_simd", "borsh", @@ -3187,7 +3207,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.16.1" +version = "0.17.1" dependencies = [ "secp256k1", "thiserror", @@ -3195,7 +3215,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.16.1" +version = "0.17.1" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3231,7 +3251,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.16.1" +version = "0.17.1" dependencies = [ "bytes", "cfg-if 1.0.0", @@ -3247,7 +3267,7 @@ dependencies = [ [[package]] name = "kaspa-utxoindex" -version = "0.16.1" +version = "0.17.1" dependencies = [ "futures", "kaspa-consensus", @@ -3268,7 +3288,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-std", "async-trait", @@ -3280,7 +3300,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = 
"0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "js-sys", @@ -3294,7 +3314,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.16.1" +version = "0.17.1" dependencies = [ "aes", "ahash", @@ -3375,7 +3395,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "borsh", @@ -3408,7 +3428,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.16.1" +version = "0.17.1" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3421,7 +3441,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.16.1" +version = "0.17.1" dependencies = [ "bincode", "derive_builder", @@ -3448,7 +3468,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.16.1" +version = "0.17.1" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3476,7 +3496,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.16.1" +version = "0.17.1" dependencies = [ "faster-hex", "hexplay", @@ -3487,7 +3507,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-std", "async-trait", @@ -3523,7 +3543,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.16.1" +version = "0.17.1" dependencies = [ "ctrlc", "futures", @@ -3538,7 +3558,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "clap 4.5.19", @@ -3557,7 +3577,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-trait", "borsh", @@ -3585,7 +3605,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-simple-client-example" -version = "0.16.1" +version = "0.17.1" dependencies = [ "futures", "kaspa-rpc-core", @@ -3595,7 +3615,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.16.1" +version = "0.17.1" dependencies = 
[ "ahash", "async-std", @@ -3625,7 +3645,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -3649,6 +3669,8 @@ dependencies = [ "kaspa-mining", "kaspa-notify", "kaspa-p2p-flows", + "kaspa-p2p-lib", + "kaspa-p2p-mining", "kaspa-perf-monitor", "kaspa-rpc-core", "kaspa-rpc-service", @@ -4967,7 +4989,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "clap 4.5.19", @@ -5374,7 +5396,7 @@ dependencies = [ [[package]] name = "simpa" -version = "0.16.1" +version = "0.17.1" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index c1955f268a..d37f8f5abd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,7 +63,7 @@ members = [ [workspace.package] rust-version = "1.82.0" -version = "0.16.1" +version = "0.17.1" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -80,61 +80,62 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.16.1", path = "testing/integration" } -kaspa-addresses = { version = "0.16.1", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.16.1", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.16.1", path = "wallet/bip32" } -kaspa-cli = { version = "0.16.1", path = "cli" } -kaspa-connectionmanager = { version = "0.16.1", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.16.1", path = "consensus" } -kaspa-consensus-core = { version = "0.16.1", path = "consensus/core" } -kaspa-consensus-client = { version = "0.16.1", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.16.1", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.16.1", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.16.1", path = "components/consensusmanager" } -kaspa-core = { version = 
"0.16.1", path = "core" } -kaspa-daemon = { version = "0.16.1", path = "daemon" } -kaspa-database = { version = "0.16.1", path = "database" } -kaspa-grpc-client = { version = "0.16.1", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.16.1", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.16.1", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.16.1", path = "crypto/hashes" } -kaspa-index-core = { version = "0.16.1", path = "indexes/core" } -kaspa-index-processor = { version = "0.16.1", path = "indexes/processor" } -kaspa-math = { version = "0.16.1", path = "math" } -kaspa-merkle = { version = "0.16.1", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.16.1", path = "metrics/core" } -kaspa-mining = { version = "0.16.1", path = "mining" } -kaspa-mining-errors = { version = "0.16.1", path = "mining/errors" } -kaspa-muhash = { version = "0.16.1", path = "crypto/muhash" } -kaspa-notify = { version = "0.16.1", path = "notify" } -kaspa-p2p-flows = { version = "0.16.1", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.16.1", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.16.1", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.16.1", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.16.1", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.16.1", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.16.1", path = "rpc/service" } -kaspa-txscript = { version = "0.16.1", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.16.1", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.16.1", path = "utils" } -kaspa-utils-tower = { version = "0.16.1", path = "utils/tower" } -kaspa-utxoindex = { version = "0.16.1", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.16.1", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.16.1", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.16.1", path = "wallet/keys" } -kaspa-wallet-pskt = { 
version = "0.16.1", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.16.1", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.16.1", path = "wallet/macros" } -kaspa-wasm = { version = "0.16.1", path = "wasm" } -kaspa-wasm-core = { version = "0.16.1", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.16.1", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.16.1", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.16.1", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.16.1", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.16.1", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.16.1", path = "kaspad" } -kaspa-alloc = { version = "0.16.1", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.17.1", path = "testing/integration" } +kaspa-addresses = { version = "0.17.1", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.17.1", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.17.1", path = "wallet/bip32" } +kaspa-cli = { version = "0.17.1", path = "cli" } +kaspa-connectionmanager = { version = "0.17.1", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.17.1", path = "consensus" } +kaspa-consensus-core = { version = "0.17.1", path = "consensus/core" } +kaspa-consensus-client = { version = "0.17.1", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.17.1", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.17.1", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.17.1", path = "components/consensusmanager" } +kaspa-core = { version = "0.17.1", path = "core" } +kaspa-daemon = { version = "0.17.1", path = "daemon" } +kaspa-database = { version = "0.17.1", path = "database" } +kaspa-grpc-client = { version = "0.17.1", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.17.1", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.17.1", 
path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.17.1", path = "crypto/hashes" } +kaspa-index-core = { version = "0.17.1", path = "indexes/core" } +kaspa-index-processor = { version = "0.17.1", path = "indexes/processor" } +kaspa-math = { version = "0.17.1", path = "math" } +kaspa-merkle = { version = "0.17.1", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.17.1", path = "metrics/core" } +kaspa-mining = { version = "0.17.1", path = "mining" } +kaspa-mining-errors = { version = "0.17.1", path = "mining/errors" } +kaspa-muhash = { version = "0.17.1", path = "crypto/muhash" } +kaspa-notify = { version = "0.17.1", path = "notify" } +kaspa-p2p-flows = { version = "0.17.1", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.17.1", path = "protocol/p2p" } +kaspa-p2p-mining = { version = "0.17.1", path = "protocol/mining" } +kaspa-perf-monitor = { version = "0.17.1", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.17.1", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.17.1", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.17.1", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.17.1", path = "rpc/service" } +kaspa-txscript = { version = "0.17.1", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.17.1", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.17.1", path = "utils" } +kaspa-utils-tower = { version = "0.17.1", path = "utils/tower" } +kaspa-utxoindex = { version = "0.17.1", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.17.1", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.17.1", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.17.1", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.17.1", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.17.1", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.17.1", path = "wallet/macros" } +kaspa-wasm = { version = "0.17.1", path = "wasm" } +kaspa-wasm-core = { 
version = "0.17.1", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.17.1", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.17.1", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.17.1", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.17.1", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.17.1", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.17.1", path = "kaspad" } +kaspa-alloc = { version = "0.17.1", path = "utils/alloc" } # external aes = "0.8.3" diff --git a/components/consensusmanager/src/session.rs b/components/consensusmanager/src/session.rs index c67caf07d4..2d4fdef1bb 100644 --- a/components/consensusmanager/src/session.rs +++ b/components/consensusmanager/src/session.rs @@ -10,6 +10,7 @@ use kaspa_consensus_core::{ daa_score_timestamp::DaaScoreTimestamp, errors::consensus::ConsensusResult, header::Header, + mass::{ContextualMasses, NonContextualMasses}, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, @@ -191,14 +192,14 @@ impl ConsensusSessionOwned { self.consensus.validate_and_insert_trusted_block(tb) } - pub fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { + pub fn calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> NonContextualMasses { // This method performs pure calculations so no need for an async wrapper - self.consensus.calculate_transaction_compute_mass(transaction) + self.consensus.calculate_transaction_non_contextual_masses(transaction) } - pub fn calculate_transaction_storage_mass(&self, transaction: &MutableTransaction) -> Option { + pub fn calculate_transaction_contextual_masses(&self, transaction: &MutableTransaction) -> Option { // This method performs pure calculations so no need for an async wrapper - 
self.consensus.calculate_transaction_storage_mass(transaction) + self.consensus.calculate_transaction_contextual_masses(transaction) } pub fn get_virtual_daa_score(&self) -> u64 { @@ -249,26 +250,23 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(|c| c.get_sink_timestamp()).await } + pub async fn async_get_sink_daa_score_timestamp(&self) -> DaaScoreTimestamp { + self.clone().spawn_blocking(|c| c.get_sink_daa_score_timestamp()).await + } + pub async fn async_get_current_block_color(&self, hash: Hash) -> Option { self.clone().spawn_blocking(move |c| c.get_current_block_color(hash)).await } - /// source refers to the earliest block from which the current node has full header & block data - pub async fn async_get_source(&self) -> Hash { - self.clone().spawn_blocking(|c| c.get_source()).await + /// retention period root refers to the earliest block from which the current node has full header & block data + pub async fn async_get_retention_period_root(&self) -> Hash { + self.clone().spawn_blocking(|c| c.get_retention_period_root()).await } pub async fn async_estimate_block_count(&self) -> BlockCount { self.clone().spawn_blocking(|c| c.estimate_block_count()).await } - /// Returns whether this consensus is considered synced or close to being synced. - /// - /// This info is used to determine if it's ok to use a block template from this node for mining purposes. 
- pub async fn async_is_nearly_synced(&self) -> bool { - self.clone().spawn_blocking(|c| c.is_nearly_synced()).await - } - pub async fn async_get_virtual_chain_from_block( &self, low: Hash, @@ -442,8 +440,8 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(move |c| c.estimate_network_hashes_per_second(start_hash, window_size)).await } - pub async fn async_validate_pruning_points(&self) -> ConsensusResult<()> { - self.clone().spawn_blocking(move |c| c.validate_pruning_points()).await + pub async fn async_validate_pruning_points(&self, syncer_virtual_selected_parent: Hash) -> ConsensusResult<()> { + self.clone().spawn_blocking(move |c| c.validate_pruning_points(syncer_virtual_selected_parent)).await } pub async fn async_are_pruning_points_violating_finality(&self, pp_list: PruningPointsList) -> bool { diff --git a/consensus/client/src/utxo.rs b/consensus/client/src/utxo.rs index 99a663fd05..13edade76f 100644 --- a/consensus/client/src/utxo.rs +++ b/consensus/client/src/utxo.rs @@ -12,6 +12,7 @@ use crate::imports::*; use crate::outpoint::{TransactionOutpoint, TransactionOutpointInner}; use crate::result::Result; use kaspa_addresses::Address; +use kaspa_consensus_core::mass::{UtxoCell, UtxoPlurality}; #[wasm_bindgen(typescript_custom_section)] const TS_UTXO_ENTRY: &'static str = r#" @@ -249,6 +250,12 @@ impl From for UtxoEntryReference { } } +impl From<&UtxoEntryReference> for UtxoCell { + fn from(entry: &UtxoEntryReference) -> Self { + Self::new(entry.utxo.script_public_key.plurality(), entry.amount()) + } +} + impl Eq for UtxoEntryReference {} impl PartialEq for UtxoEntryReference { diff --git a/consensus/core/src/api/counters.rs b/consensus/core/src/api/counters.rs index 0297dab265..cc22d02795 100644 --- a/consensus/core/src/api/counters.rs +++ b/consensus/core/src/api/counters.rs @@ -11,6 +11,10 @@ pub struct ProcessingCounters { pub chain_block_counts: AtomicU64, pub chain_disqualified_counts: AtomicU64, pub mass_counts: AtomicU64, + pub 
build_block_template_above_threshold: AtomicU64, + pub build_block_template_within_threshold: AtomicU64, + pub submit_block_bad_merkle_root_count: AtomicU64, + pub submit_block_success_count: AtomicU64, } impl ProcessingCounters { @@ -25,6 +29,10 @@ impl ProcessingCounters { chain_block_counts: self.chain_block_counts.load(Ordering::Relaxed), chain_disqualified_counts: self.chain_disqualified_counts.load(Ordering::Relaxed), mass_counts: self.mass_counts.load(Ordering::Relaxed), + build_block_template_above_threshold: self.build_block_template_above_threshold.load(Ordering::Relaxed), + build_block_template_within_threshold: self.build_block_template_within_threshold.load(Ordering::Relaxed), + submit_block_bad_merkle_root_count: self.submit_block_bad_merkle_root_count.load(Ordering::Relaxed), + submit_block_success_count: self.submit_block_success_count.load(Ordering::Relaxed), } } } @@ -40,6 +48,10 @@ pub struct ProcessingCountersSnapshot { pub chain_block_counts: u64, pub chain_disqualified_counts: u64, pub mass_counts: u64, + pub build_block_template_above_threshold: u64, + pub build_block_template_within_threshold: u64, + pub submit_block_bad_merkle_root_count: u64, + pub submit_block_success_count: u64, } impl core::ops::Sub for &ProcessingCountersSnapshot { @@ -56,6 +68,16 @@ impl core::ops::Sub for &ProcessingCountersSnapshot { chain_block_counts: self.chain_block_counts.saturating_sub(rhs.chain_block_counts), chain_disqualified_counts: self.chain_disqualified_counts.saturating_sub(rhs.chain_disqualified_counts), mass_counts: self.mass_counts.saturating_sub(rhs.mass_counts), + build_block_template_above_threshold: self + .build_block_template_above_threshold + .saturating_sub(rhs.build_block_template_above_threshold), + build_block_template_within_threshold: self + .build_block_template_within_threshold + .saturating_sub(rhs.build_block_template_within_threshold), + submit_block_bad_merkle_root_count: self + .submit_block_bad_merkle_root_count + 
.saturating_sub(rhs.submit_block_bad_merkle_root_count), + submit_block_success_count: self.submit_block_success_count.saturating_sub(rhs.submit_block_success_count), } } } diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index f8df0c0e14..e611327ba6 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -17,6 +17,7 @@ use crate::{ tx::TxResult, }, header::Header, + mass::{ContextualMasses, NonContextualMasses}, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, @@ -90,11 +91,11 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { + fn calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> NonContextualMasses { unimplemented!() } - fn calculate_transaction_storage_mass(&self, transaction: &MutableTransaction) -> Option { + fn calculate_transaction_contextual_masses(&self, transaction: &MutableTransaction) -> Option { unimplemented!() } @@ -134,27 +135,24 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn get_current_block_color(&self, hash: Hash) -> Option { + fn get_sink_daa_score_timestamp(&self) -> DaaScoreTimestamp { unimplemented!() } - fn get_virtual_state_approx_id(&self) -> VirtualStateApproxId { + fn get_current_block_color(&self, hash: Hash) -> Option { unimplemented!() } - /// source refers to the earliest block from which the current node has full header & block data - fn get_source(&self) -> Hash { + fn get_virtual_state_approx_id(&self) -> VirtualStateApproxId { unimplemented!() } - fn estimate_block_count(&self) -> BlockCount { + /// retention period root refers to the earliest block from which the current node has full header & block data + fn get_retention_period_root(&self) -> Hash { 
unimplemented!() } - /// Returns whether this consensus is considered synced or close to being synced. - /// - /// This info is used to determine if it's ok to use a block template from this node for mining purposes. - fn is_nearly_synced(&self) -> bool { + fn estimate_block_count(&self) -> BlockCount { unimplemented!() } @@ -218,7 +216,7 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn import_pruning_points(&self, pruning_points: PruningPointsList) { + fn import_pruning_points(&self, pruning_points: PruningPointsList) -> PruningImportResult<()> { unimplemented!() } @@ -354,7 +352,7 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn validate_pruning_points(&self) -> ConsensusResult<()> { + fn validate_pruning_points(&self, syncer_virtual_selected_parent: Hash) -> ConsensusResult<()> { unimplemented!() } diff --git a/consensus/core/src/config/bps.rs b/consensus/core/src/config/bps.rs index 5e98aac5df..ef5da0c4e6 100644 --- a/consensus/core/src/config/bps.rs +++ b/consensus/core/src/config/bps.rs @@ -20,8 +20,8 @@ pub fn calculate_ghostdag_k(x: f64, delta: f64) -> u64 { } } -/// Bps-related constants generator for testnet 11 -pub type Testnet11Bps = Bps<10>; +/// Bps-related constants generator for 10-bps networks +pub type TenBps = Bps<10>; /// Struct representing network blocks-per-second. Provides a bunch of const functions /// computing various constants which are functions of the BPS value @@ -93,29 +93,21 @@ impl Bps { BPS * NEW_FINALITY_DURATION } - /// Limit used to previously calculate the pruning depth. 
- const fn prev_mergeset_size_limit() -> u64 { - Self::ghostdag_k() as u64 * 10 - } - pub const fn pruning_depth() -> u64 { // Based on the analysis at https://github.com/kaspanet/docs/blob/main/Reference/prunality/Prunality.pdf // and on the decomposition of merge depth (rule R-I therein) from finality depth (φ) // We add an additional merge depth unit as a safety margin for anticone finalization - Self::finality_depth() + let lower_bound = Self::finality_depth() + Self::merge_depth_bound() * 2 - + 4 * Self::prev_mergeset_size_limit() * Self::ghostdag_k() as u64 + + 4 * Self::mergeset_size_limit() * Self::ghostdag_k() as u64 + 2 * Self::ghostdag_k() as u64 - + 2 - - // TODO (HF or restart of TN11): - // Return `Self::finality_depth() * 3` and assert that this value is equal or larger than the above expression. - // This will give us a round easy number to track which is not sensitive to minor changes in other related params. - } + + 2; - pub const fn pruning_proof_m() -> u64 { - // No need to scale this constant with BPS since the important block levels (higher) remain logarithmically short - PRUNING_PROOF_M + if lower_bound > BPS * NEW_PRUNING_DURATION { + lower_bound + } else { + BPS * NEW_PRUNING_DURATION + } } /// Sample rate for sampling blocks to the median time window (in block units, hence dependent on BPS) diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index 7818789339..02eabb7114 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -23,20 +23,15 @@ pub mod consensus { // ~~~~~~~~~~~~~~~~~~ Timestamp deviation & Median time ~~~~~~~~~~~~~~~~~~ // - /// **Legacy** timestamp deviation tolerance (seconds) - pub const LEGACY_TIMESTAMP_DEVIATION_TOLERANCE: u64 = 132; - - /// **New** timestamp deviation tolerance (seconds). 
- /// TODO: KIP-0004: 605 (~10 minutes) - pub const NEW_TIMESTAMP_DEVIATION_TOLERANCE: u64 = 132; + /// Timestamp deviation tolerance (seconds) + pub const TIMESTAMP_DEVIATION_TOLERANCE: u64 = 132; /// The desired interval between samples of the median time window (seconds). - /// KIP-0004: 10 seconds pub const PAST_MEDIAN_TIME_SAMPLE_INTERVAL: u64 = 10; /// Size of the **sampled** median time window (independent of BPS) pub const MEDIAN_TIME_SAMPLED_WINDOW_SIZE: u64 = - (2 * NEW_TIMESTAMP_DEVIATION_TOLERANCE - 1).div_ceil(PAST_MEDIAN_TIME_SAMPLE_INTERVAL); + (2 * TIMESTAMP_DEVIATION_TOLERANCE - 1).div_ceil(PAST_MEDIAN_TIME_SAMPLE_INTERVAL); // // ~~~~~~~~~~~~~~~~~~~~~~~~~ Max difficulty target ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -56,18 +51,21 @@ pub mod consensus { // ~~~~~~~~~~~~~~~~~~~ Difficulty Adjustment Algorithm (DAA) ~~~~~~~~~~~~~~~~~~~ // - /// Minimal size of the difficulty window. Affects the DA algorithm only at the starting period of a new net - pub const MIN_DIFFICULTY_WINDOW_LEN: usize = 10; + /// Minimal size of the difficulty window. Affects the DA algorithm at the starting period of a new net. + /// Also used during BPS fork transitions to stabilize the new rate before applying DA (see KIP-14). + /// With 4 seconds sampling interval, a value of 150 indicates 10 minutes of fixed + /// difficulty until the window grows large enough. + /// + /// TODO (crescendo): finalize + pub const MIN_DIFFICULTY_WINDOW_SIZE: usize = 150; /// **Legacy** difficulty adjustment window size corresponding to ~44 minutes with 1 BPS pub const LEGACY_DIFFICULTY_WINDOW_SIZE: usize = 2641; /// **New** difficulty window duration expressed in time units (seconds). - /// TODO: KIP-0004: 30,000 (500 minutes) pub const NEW_DIFFICULTY_WINDOW_DURATION: u64 = 2641; /// The desired interval between samples of the difficulty window (seconds). 
- /// TODO: KIP-0004: 30 seconds pub const DIFFICULTY_WINDOW_SAMPLE_INTERVAL: u64 = 4; /// Size of the **sampled** difficulty window (independent of BPS) @@ -81,9 +79,11 @@ pub mod consensus { pub const LEGACY_FINALITY_DEPTH: u64 = 86_400; /// **New** finality duration expressed in time units (seconds). - /// TODO: finalize this value (consider 6-24 hours) pub const NEW_FINALITY_DURATION: u64 = 43_200; // 12 hours + /// **New** pruning duration expressed in time units (seconds). + pub const NEW_PRUNING_DURATION: u64 = 108_000; // 30 hours + /// Merge depth bound duration (in seconds). For 1 BPS networks this equals the legacy depth /// bound in block units. For higher BPS networks this should be scaled up. /// @@ -166,7 +166,7 @@ pub mod perf { impl PerfParams { pub fn adjust_to_consensus_params(&mut self, consensus_params: &Params) { // Allow caching up to 10x over the baseline - self.block_data_cache_size *= consensus_params.bps().clamp(1, 10) as usize; + self.block_data_cache_size *= consensus_params.bps().upper_bound().clamp(1, 10) as usize; } } } diff --git a/consensus/core/src/config/genesis.rs b/consensus/core/src/config/genesis.rs index 9f9ea21e54..06d1431ed2 100644 --- a/consensus/core/src/config/genesis.rs +++ b/consensus/core/src/config/genesis.rs @@ -225,7 +225,7 @@ pub const DEVNET_GENESIS: GenesisBlock = GenesisBlock { #[cfg(test)] mod tests { use super::*; - use crate::{config::bps::Testnet11Bps, merkle::calc_hash_merkle_root}; + use crate::{config::bps::TenBps, merkle::calc_hash_merkle_root}; #[test] fn test_genesis_hashes() { @@ -238,7 +238,7 @@ mod tests { #[test] fn gen_testnet11_genesis() { - let bps = Testnet11Bps::bps(); + let bps = TenBps::bps(); let mut genesis = TESTNET_GENESIS; let target = kaspa_math::Uint256::from_compact_target_bits(genesis.bits); let scaled_target = target * bps / 100; diff --git a/consensus/core/src/config/mod.rs b/consensus/core/src/config/mod.rs index d62bf15a94..bc41cde562 100644 --- 
a/consensus/core/src/config/mod.rs +++ b/consensus/core/src/config/mod.rs @@ -68,6 +68,9 @@ pub struct Config { /// A scale factor to apply to memory allocation bounds pub ram_scale: f64, + + /// The number of days to keep data for + pub retention_period_days: Option, } impl Config { @@ -95,6 +98,7 @@ impl Config { initial_utxo_set: Default::default(), disable_upnp: false, ram_scale: 1.0, + retention_period_days: None, } } diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index b0ab02e98e..02d3de5e42 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -1,5 +1,5 @@ pub use super::{ - bps::{Bps, Testnet11Bps}, + bps::{Bps, TenBps}, constants::consensus::*, genesis::{GenesisBlock, DEVNET_GENESIS, GENESIS, SIMNET_GENESIS, TESTNET11_GENESIS, TESTNET_GENESIS}, }; @@ -10,25 +10,32 @@ use crate::{ }; use kaspa_addresses::Prefix; use kaspa_math::Uint256; -use std::{ - cmp::min, - time::{SystemTime, UNIX_EPOCH}, -}; +use std::cmp::min; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] pub struct ForkActivation(u64); impl ForkActivation { + const NEVER: u64 = u64::MAX; + const ALWAYS: u64 = 0; + pub const fn new(daa_score: u64) -> Self { Self(daa_score) } pub const fn never() -> Self { - Self(u64::MAX) + Self(Self::NEVER) } pub const fn always() -> Self { - Self(0) + Self(Self::ALWAYS) + } + + /// Returns the actual DAA score triggering the activation. Should be used only + /// for cases where the explicit value is required for computations (e.g., coinbase subsidy). 
+ /// Otherwise, **activation checks should always go through `self.is_active(..)`** + pub fn daa_score(self) -> u64 { + self.0 } pub fn is_active(self, current_daa_score: u64) -> bool { @@ -40,8 +47,153 @@ impl ForkActivation { pub fn is_within_range_from_activation(self, current_daa_score: u64, range: u64) -> bool { self != Self::always() && self.is_active(current_daa_score) && current_daa_score < self.0 + range } + + /// Checks if the fork is expected to be activated "soon", i.e., in the time frame of the provided range. + /// Returns the distance from activation if so, or `None` otherwise. + pub fn is_within_range_before_activation(self, current_daa_score: u64, range: u64) -> Option { + if !self.is_active(current_daa_score) && current_daa_score + range > self.0 { + Some(self.0 - current_daa_score) + } else { + None + } + } +} + +/// A consensus parameter which depends on forking activation +#[derive(Clone, Copy, Debug)] +pub struct ForkedParam { + pre: T, + post: T, + activation: ForkActivation, +} + +impl ForkedParam { + const fn new(pre: T, post: T, activation: ForkActivation) -> Self { + Self { pre, post, activation } + } + + pub const fn new_const(val: T) -> Self { + Self { pre: val, post: val, activation: ForkActivation::never() } + } + + pub fn activation(&self) -> ForkActivation { + self.activation + } + + pub fn get(&self, daa_score: u64) -> T { + if self.activation.is_active(daa_score) { + self.post + } else { + self.pre + } + } + + /// Returns the value before activation (=pre unless activation = always) + pub fn before(&self) -> T { + match self.activation.0 { + ForkActivation::ALWAYS => self.post, + _ => self.pre, + } + } + + /// Returns the permanent long-term value after activation (=post unless the activation is never scheduled) + pub fn after(&self) -> T { + match self.activation.0 { + ForkActivation::NEVER => self.pre, + _ => self.post, + } + } + + /// Maps the ForkedParam to a new ForkedParam by applying a map function on both pre and post + 
pub fn map U>(&self, f: F) -> ForkedParam { + ForkedParam::new(f(self.pre), f(self.post), self.activation) + } +} + +impl ForkedParam { + /// Returns the min of `pre` and `post` values. Useful for non-consensus initializations + /// which require knowledge of the value bounds. + /// + /// Note that if activation is not scheduled (set to never) then pre is always returned, + /// and if activation is set to always (since inception), post will be returned. + pub fn lower_bound(&self) -> T { + match self.activation.0 { + ForkActivation::NEVER => self.pre, + ForkActivation::ALWAYS => self.post, + _ => self.pre.min(self.post), + } + } + + /// Returns the max of `pre` and `post` values. Useful for non-consensus initializations + /// which require knowledge of the value bounds. + /// + /// Note that if activation is not scheduled (set to never) then pre is always returned, + /// and if activation is set to always (since inception), post will be returned. + pub fn upper_bound(&self) -> T { + match self.activation.0 { + ForkActivation::NEVER => self.pre, + ForkActivation::ALWAYS => self.post, + _ => self.pre.max(self.post), + } + } } +/// Fork params for the Crescendo hardfork +#[derive(Clone, Debug)] +pub struct CrescendoParams { + pub past_median_time_sampled_window_size: u64, + pub sampled_difficulty_window_size: u64, + + /// Target time per block (in milliseconds) + pub target_time_per_block: u64, + pub ghostdag_k: KType, + + pub past_median_time_sample_rate: u64, + pub difficulty_sample_rate: u64, + + pub max_block_parents: u8, + pub mergeset_size_limit: u64, + pub merge_depth: u64, + pub finality_depth: u64, + pub pruning_depth: u64, + + pub max_tx_inputs: usize, + pub max_tx_outputs: usize, + pub max_signature_script_len: usize, + pub max_script_public_key_len: usize, + + pub coinbase_maturity: u64, +} + +pub const CRESCENDO: CrescendoParams = CrescendoParams { + past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, + sampled_difficulty_window_size: 
DIFFICULTY_SAMPLED_WINDOW_SIZE, + + // + // ~~~~~~~~~~~~~~~~~~ BPS dependent constants ~~~~~~~~~~~~~~~~~~ + // + target_time_per_block: TenBps::target_time_per_block(), + ghostdag_k: TenBps::ghostdag_k(), + past_median_time_sample_rate: TenBps::past_median_time_sample_rate(), + difficulty_sample_rate: TenBps::difficulty_adjustment_sample_rate(), + max_block_parents: TenBps::max_block_parents(), + mergeset_size_limit: TenBps::mergeset_size_limit(), + merge_depth: TenBps::merge_depth_bound(), + finality_depth: TenBps::finality_depth(), + pruning_depth: TenBps::pruning_depth(), + + coinbase_maturity: TenBps::coinbase_maturity(), + + // Limit the cost of calculating compute/transient/storage masses + max_tx_inputs: 1000, + max_tx_outputs: 1000, + // Transient mass enforces a limit of 125Kb, however script engine max scripts size is 10Kb so there's no point in surpassing that. + max_signature_script_len: 10_000, + // Compute mass enforces a limit of ~45.5Kb, however script engine max scripts size is 10Kb so there's no point in surpassing that. + // Note that storage mass will kick in and gradually penalize also for lower lengths (generalized KIP-0009, plurality will be high). + max_script_public_key_len: 10_000, +}; + /// Consensus parameters. Contains settings and configurations which are consensus-sensitive. /// Changing one of these on a network node would exclude and prevent it from reaching consensus /// with the other unmodified nodes. 
@@ -50,25 +202,13 @@ pub struct Params { pub dns_seeders: &'static [&'static str], pub net: NetworkId, pub genesis: GenesisBlock, - pub ghostdag_k: KType, - - /// Legacy timestamp deviation tolerance (in seconds) - pub legacy_timestamp_deviation_tolerance: u64, + pub prior_ghostdag_k: KType, - /// New timestamp deviation tolerance (in seconds, activated with sampling) - pub new_timestamp_deviation_tolerance: u64, - - /// Block sample rate for filling the past median time window (selects one every N blocks) - pub past_median_time_sample_rate: u64, - - /// Size of sampled blocks window that is inspected to calculate the past median time of each block - pub past_median_time_sampled_window_size: u64, + /// Timestamp deviation tolerance (in seconds) + pub timestamp_deviation_tolerance: u64, /// Target time per block (in milliseconds) - pub target_time_per_block: u64, - - /// DAA score from which the window sampling starts for difficulty and past median time calculation - pub sampling_activation: ForkActivation, + pub prior_target_time_per_block: u64, /// Defines the highest allowed proof of work difficulty value for a block as a [`Uint256`] pub max_difficulty_target: Uint256, @@ -76,184 +216,184 @@ pub struct Params { /// Highest allowed proof of work difficulty as a floating number pub max_difficulty_target_f64: f64, - /// Block sample rate for filling the difficulty window (selects one every N blocks) - pub difficulty_sample_rate: u64, - - /// Size of sampled blocks window that is inspected to calculate the required difficulty of each block - pub sampled_difficulty_window_size: usize, - /// Size of full blocks window that is inspected to calculate the required difficulty of each block - pub legacy_difficulty_window_size: usize, + pub prior_difficulty_window_size: usize, - /// The minimum length a difficulty window (full or sampled) must have to trigger a DAA calculation - pub min_difficulty_window_len: usize, + /// The minimum size a difficulty window (full or 
sampled) must have to trigger a DAA calculation + pub min_difficulty_window_size: usize, + + pub prior_max_block_parents: u8, + pub prior_mergeset_size_limit: u64, + pub prior_merge_depth: u64, + pub prior_finality_depth: u64, + pub prior_pruning_depth: u64, - pub max_block_parents: u8, - pub mergeset_size_limit: u64, - pub merge_depth: u64, - pub finality_depth: u64, - pub pruning_depth: u64, pub coinbase_payload_script_public_key_max_len: u8, pub max_coinbase_payload_len: usize, - pub max_tx_inputs: usize, - pub max_tx_outputs: usize, - pub max_signature_script_len: usize, - pub max_script_public_key_len: usize, + + pub prior_max_tx_inputs: usize, + pub prior_max_tx_outputs: usize, + pub prior_max_signature_script_len: usize, + pub prior_max_script_public_key_len: usize, + pub mass_per_tx_byte: u64, pub mass_per_script_pub_key_byte: u64, pub mass_per_sig_op: u64, pub max_block_mass: u64, - /// The parameter for scaling inverse KAS value to mass units (unpublished KIP-0009) + /// The parameter for scaling inverse KAS value to mass units (KIP-0009) pub storage_mass_parameter: u64, - /// DAA score from which storage mass calculation and transaction mass field are activated as a consensus rule - pub storage_mass_activation: ForkActivation, - - /// DAA score from which tx engine: - /// 1. Supports 8-byte integer arithmetic operations (previously limited to 4 bytes) - /// 2. 
Supports transaction introspection opcodes: - /// - OpTxInputCount (0xb3): Get number of inputs - /// - OpTxOutputCount (0xb4): Get number of outputs - /// - OpTxInputIndex (0xb9): Get current input index - /// - OpTxInputAmount (0xbe): Get input amount - /// - OpTxInputSpk (0xbf): Get input script public key - /// - OpTxOutputAmount (0xc2): Get output amount - /// - OpTxOutputSpk (0xc3): Get output script public key - pub kip10_activation: ForkActivation, - /// DAA score after which the pre-deflationary period switches to the deflationary period pub deflationary_phase_daa_score: u64, pub pre_deflationary_phase_base_subsidy: u64, - pub coinbase_maturity: u64, + pub prior_coinbase_maturity: u64, pub skip_proof_of_work: bool, pub max_block_level: BlockLevel, pub pruning_proof_m: u64, - /// Activation rules for when to enable using the payload field in transactions - pub payload_activation: ForkActivation, - pub runtime_sig_op_counting: ForkActivation, -} - -fn unix_now() -> u64 { - SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64 + pub crescendo: CrescendoParams, + pub crescendo_activation: ForkActivation, } impl Params { /// Returns the size of the full blocks window that is inspected to calculate the past median time (legacy) #[inline] #[must_use] - pub fn legacy_past_median_time_window_size(&self) -> usize { - (2 * self.legacy_timestamp_deviation_tolerance - 1) as usize + pub fn prior_past_median_time_window_size(&self) -> usize { + (2 * self.timestamp_deviation_tolerance - 1) as usize } /// Returns the size of the sampled blocks window that is inspected to calculate the past median time #[inline] #[must_use] pub fn sampled_past_median_time_window_size(&self) -> usize { - self.past_median_time_sampled_window_size as usize + self.crescendo.past_median_time_sampled_window_size as usize } - /// Returns the size of the blocks window that is inspected to calculate the past median time, - /// depending on a selected parent DAA score + /// Returns 
the size of the blocks window that is inspected to calculate the past median time. #[inline] #[must_use] - pub fn past_median_time_window_size(&self, selected_parent_daa_score: u64) -> usize { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.sampled_past_median_time_window_size() - } else { - self.legacy_past_median_time_window_size() - } + pub fn past_median_time_window_size(&self) -> ForkedParam { + ForkedParam::new( + self.prior_past_median_time_window_size(), + self.sampled_past_median_time_window_size(), + self.crescendo_activation, + ) } - /// Returns the timestamp deviation tolerance, - /// depending on a selected parent DAA score + /// Returns the past median time sample rate #[inline] #[must_use] - pub fn timestamp_deviation_tolerance(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.new_timestamp_deviation_tolerance - } else { - self.legacy_timestamp_deviation_tolerance - } + pub fn past_median_time_sample_rate(&self) -> ForkedParam { + ForkedParam::new(1, self.crescendo.past_median_time_sample_rate, self.crescendo_activation) } - /// Returns the past median time sample rate, - /// depending on a selected parent DAA score + /// Returns the size of the blocks window that is inspected to calculate the difficulty #[inline] #[must_use] - pub fn past_median_time_sample_rate(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.past_median_time_sample_rate - } else { - 1 - } + pub fn difficulty_window_size(&self) -> ForkedParam { + ForkedParam::new( + self.prior_difficulty_window_size, + self.crescendo.sampled_difficulty_window_size as usize, + self.crescendo_activation, + ) } - /// Returns the size of the blocks window that is inspected to calculate the difficulty, - /// depending on a selected parent DAA score + /// Returns the difficulty sample rate #[inline] #[must_use] - pub fn 
difficulty_window_size(&self, selected_parent_daa_score: u64) -> usize { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.sampled_difficulty_window_size - } else { - self.legacy_difficulty_window_size - } + pub fn difficulty_sample_rate(&self) -> ForkedParam { + ForkedParam::new(1, self.crescendo.difficulty_sample_rate, self.crescendo_activation) } - /// Returns the difficulty sample rate, - /// depending on a selected parent DAA score + /// Returns the target time per block #[inline] #[must_use] - pub fn difficulty_sample_rate(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.difficulty_sample_rate - } else { - 1 - } + pub fn target_time_per_block(&self) -> ForkedParam { + ForkedParam::new(self.prior_target_time_per_block, self.crescendo.target_time_per_block, self.crescendo_activation) } - /// Returns the target time per block, - /// depending on a selected parent DAA score + /// Returns the expected number of blocks per second #[inline] #[must_use] - pub fn target_time_per_block(&self, _selected_parent_daa_score: u64) -> u64 { - self.target_time_per_block + pub fn bps(&self) -> ForkedParam { + ForkedParam::new( + 1000 / self.prior_target_time_per_block, + 1000 / self.crescendo.target_time_per_block, + self.crescendo_activation, + ) } - /// Returns the expected number of blocks per second - #[inline] - #[must_use] - pub fn bps(&self) -> u64 { - 1000 / self.target_time_per_block + pub fn ghostdag_k(&self) -> ForkedParam { + ForkedParam::new(self.prior_ghostdag_k, self.crescendo.ghostdag_k, self.crescendo_activation) } - pub fn daa_window_duration_in_blocks(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.difficulty_sample_rate * self.sampled_difficulty_window_size as u64 - } else { - self.legacy_difficulty_window_size as u64 - } + pub fn max_block_parents(&self) -> ForkedParam { + 
ForkedParam::new(self.prior_max_block_parents, self.crescendo.max_block_parents, self.crescendo_activation) } - fn expected_daa_window_duration_in_milliseconds(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.target_time_per_block * self.difficulty_sample_rate * self.sampled_difficulty_window_size as u64 - } else { - self.target_time_per_block * self.legacy_difficulty_window_size as u64 - } + pub fn mergeset_size_limit(&self) -> ForkedParam { + ForkedParam::new(self.prior_mergeset_size_limit, self.crescendo.mergeset_size_limit, self.crescendo_activation) + } + + pub fn merge_depth(&self) -> ForkedParam { + ForkedParam::new(self.prior_merge_depth, self.crescendo.merge_depth, self.crescendo_activation) + } + + pub fn finality_depth(&self) -> ForkedParam { + ForkedParam::new(self.prior_finality_depth, self.crescendo.finality_depth, self.crescendo_activation) + } + + pub fn pruning_depth(&self) -> ForkedParam { + ForkedParam::new(self.prior_pruning_depth, self.crescendo.pruning_depth, self.crescendo_activation) + } + + pub fn coinbase_maturity(&self) -> ForkedParam { + ForkedParam::new(self.prior_coinbase_maturity, self.crescendo.coinbase_maturity, self.crescendo_activation) + } + + pub fn finality_duration_in_milliseconds(&self) -> ForkedParam { + ForkedParam::new( + self.prior_target_time_per_block * self.prior_finality_depth, + self.crescendo.target_time_per_block * self.crescendo.finality_depth, + self.crescendo_activation, + ) + } + + pub fn difficulty_window_duration_in_block_units(&self) -> ForkedParam { + ForkedParam::new( + self.prior_difficulty_window_size as u64, + self.crescendo.difficulty_sample_rate * self.crescendo.sampled_difficulty_window_size, + self.crescendo_activation, + ) + } + + pub fn expected_difficulty_window_duration_in_milliseconds(&self) -> ForkedParam { + ForkedParam::new( + self.prior_target_time_per_block * self.prior_difficulty_window_size as u64, + 
self.crescendo.target_time_per_block + * self.crescendo.difficulty_sample_rate + * self.crescendo.sampled_difficulty_window_size, + self.crescendo_activation, + ) } /// Returns the depth at which the anticone of a chain block is final (i.e., is a permanently closed set). /// Based on the analysis at /// and on the decomposition of merge depth (rule R-I therein) from finality depth (φ) - pub fn anticone_finalization_depth(&self) -> u64 { - let anticone_finalization_depth = self.finality_depth - + self.merge_depth - + 4 * self.mergeset_size_limit * self.ghostdag_k as u64 - + 2 * self.ghostdag_k as u64 + pub fn anticone_finalization_depth(&self) -> ForkedParam { + let prior_anticone_finalization_depth = self.prior_finality_depth + + self.prior_merge_depth + + 4 * self.prior_mergeset_size_limit * self.prior_ghostdag_k as u64 + + 2 * self.prior_ghostdag_k as u64 + + 2; + + let new_anticone_finalization_depth = self.crescendo.finality_depth + + self.crescendo.merge_depth + + 4 * self.crescendo.mergeset_size_limit * self.crescendo.ghostdag_k as u64 + + 2 * self.crescendo.ghostdag_k as u64 + 2; // In mainnet it's guaranteed that `self.pruning_depth` is greater @@ -261,27 +401,27 @@ impl Params { // a smaller (unsafe) pruning depth, so we return the minimum of // the two to avoid a situation where a block can be pruned and // not finalized. - min(self.pruning_depth, anticone_finalization_depth) + ForkedParam::new( + min(self.prior_pruning_depth, prior_anticone_finalization_depth), + min(self.crescendo.pruning_depth, new_anticone_finalization_depth), + self.crescendo_activation, + ) } - /// Returns whether the sink timestamp is recent enough and the node is considered synced or nearly synced. - pub fn is_nearly_synced(&self, sink_timestamp: u64, sink_daa_score: u64) -> bool { - if self.net.is_mainnet() { - // We consider the node close to being synced if the sink (virtual selected parent) block - // timestamp is within DAA window duration far in the past. 
Blocks mined over such DAG state would - // enter the DAA window of fully-synced nodes and thus contribute to overall network difficulty - unix_now() < sink_timestamp + self.expected_daa_window_duration_in_milliseconds(sink_daa_score) - } else { - // For testnets we consider the node to be synced if the sink timestamp is within a time range which - // is overwhelmingly unlikely to pass without mined blocks even if net hashrate decreased dramatically. - // - // This period is smaller than the above mainnet calculation in order to ensure that an IBDing miner - // with significant testnet hashrate does not overwhelm the network with deep side-DAGs. - // - // We use DAA duration as baseline and scale it down with BPS (and divide by 3 for mining only when very close to current time on TN11) - let max_expected_duration_without_blocks_in_milliseconds = self.target_time_per_block * NEW_DIFFICULTY_WINDOW_DURATION / 3; // = DAA duration in milliseconds / bps / 3 - unix_now() < sink_timestamp + max_expected_duration_without_blocks_in_milliseconds - } + pub fn max_tx_inputs(&self) -> ForkedParam { + ForkedParam::new(self.prior_max_tx_inputs, self.crescendo.max_tx_inputs, self.crescendo_activation) + } + + pub fn max_tx_outputs(&self) -> ForkedParam { + ForkedParam::new(self.prior_max_tx_outputs, self.crescendo.max_tx_outputs, self.crescendo_activation) + } + + pub fn max_signature_script_len(&self) -> ForkedParam { + ForkedParam::new(self.prior_max_signature_script_len, self.crescendo.max_signature_script_len, self.crescendo_activation) + } + + pub fn max_script_public_key_len(&self) -> ForkedParam { + ForkedParam::new(self.prior_max_script_public_key_len, self.crescendo.max_script_public_key_len, self.crescendo_activation) } pub fn network_name(&self) -> String { @@ -299,10 +439,6 @@ impl Params { pub fn default_rpc_port(&self) -> u16 { self.net.default_rpc_port() } - - pub fn finality_duration(&self) -> u64 { - self.target_time_per_block * self.finality_depth - } } impl From 
for Params { @@ -322,7 +458,6 @@ impl From for Params { NetworkType::Mainnet => MAINNET_PARAMS, NetworkType::Testnet => match value.suffix { Some(10) => TESTNET_PARAMS, - Some(11) => TESTNET11_PARAMS, Some(x) => panic!("Testnet suffix {} is not supported", x), None => panic!("Testnet suffix not provided"), }, @@ -359,24 +494,18 @@ pub const MAINNET_PARAMS: Params = Params { ], net: NetworkId::new(NetworkType::Mainnet), genesis: GENESIS, - ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - target_time_per_block: 1000, - sampling_activation: ForkActivation::never(), + prior_ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, + timestamp_deviation_tolerance: TIMESTAMP_DEVIATION_TOLERANCE, + prior_target_time_per_block: 1000, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, - max_block_parents: 10, - mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, - merge_depth: 3600, - finality_depth: 86400, - pruning_depth: 185798, + prior_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, + min_difficulty_window_size: MIN_DIFFICULTY_WINDOW_SIZE, + prior_max_block_parents: 10, + prior_mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, + prior_merge_depth: 3600, + prior_finality_depth: 86400, + prior_pruning_depth: 185798, coinbase_payload_script_public_key_max_len: 150, max_coinbase_payload_len: 204, @@ -384,10 +513,10 @@ pub const MAINNET_PARAMS: Params = Params { // 
check these rules, but in practice it's enforced by the network layer that limits the message // size to 1 GB. // These values should be lowered to more reasonable amounts on the next planned HF/SF. - max_tx_inputs: 1_000_000_000, - max_tx_outputs: 1_000_000_000, - max_signature_script_len: 1_000_000_000, - max_script_public_key_len: 1_000_000_000, + prior_max_tx_inputs: 1_000_000_000, + prior_max_tx_outputs: 1_000_000_000, + prior_max_signature_script_len: 1_000_000_000, + prior_max_script_public_key_len: 1_000_000_000, mass_per_tx_byte: 1, mass_per_script_pub_key_byte: 10, @@ -395,8 +524,6 @@ pub const MAINNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: @@ -406,13 +533,13 @@ pub const MAINNET_PARAMS: Params = Params { // Three days in seconds = 3 * 24 * 60 * 60 = 259200 deflationary_phase_daa_score: 15778800 - 259200, pre_deflationary_phase_base_subsidy: 50000000000, - coinbase_maturity: 100, + prior_coinbase_maturity: 100, skip_proof_of_work: false, max_block_level: 225, pruning_proof_m: 1000, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::never(), }; pub const TESTNET_PARAMS: Params = Params { @@ -423,27 +550,23 @@ pub const TESTNET_PARAMS: Params = Params { "dnsseeder-kaspa-testnet.x-con.at", // This DNS seeder is run by H@H "ns-testnet10.kaspa-dnsseeder.net", + // This DNS seeder is run by supertypo + "n-testnet-10.kaspa.ws", ], net: NetworkId::with_suffix(NetworkType::Testnet, 10), genesis: TESTNET_GENESIS, - ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - 
new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - target_time_per_block: 1000, - sampling_activation: ForkActivation::never(), + prior_ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, + timestamp_deviation_tolerance: TIMESTAMP_DEVIATION_TOLERANCE, + prior_target_time_per_block: 1000, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, - max_block_parents: 10, - mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, - merge_depth: 3600, - finality_depth: 86400, - pruning_depth: 185798, + prior_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, + min_difficulty_window_size: MIN_DIFFICULTY_WINDOW_SIZE, + prior_max_block_parents: 10, + prior_mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, + prior_merge_depth: 3600, + prior_finality_depth: 86400, + prior_pruning_depth: 185798, coinbase_payload_script_public_key_max_len: 150, max_coinbase_payload_len: 204, @@ -451,10 +574,10 @@ pub const TESTNET_PARAMS: Params = Params { // check these rules, but in practice it's enforced by the network layer that limits the message // size to 1 GB. // These values should be lowered to more reasonable amounts on the next planned HF/SF. 
- max_tx_inputs: 1_000_000_000, - max_tx_outputs: 1_000_000_000, - max_signature_script_len: 1_000_000_000, - max_script_public_key_len: 1_000_000_000, + prior_max_tx_inputs: 1_000_000_000, + prior_max_tx_outputs: 1_000_000_000, + prior_max_signature_script_len: 1_000_000_000, + prior_max_script_public_key_len: 1_000_000_000, mass_per_tx_byte: 1, mass_per_script_pub_key_byte: 10, @@ -462,8 +585,6 @@ pub const TESTNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: // We define a year as 365.25 days @@ -472,120 +593,49 @@ pub const TESTNET_PARAMS: Params = Params { // Three days in seconds = 3 * 24 * 60 * 60 = 259200 deflationary_phase_daa_score: 15778800 - 259200, pre_deflationary_phase_base_subsidy: 50000000000, - coinbase_maturity: 100, + prior_coinbase_maturity: 100, skip_proof_of_work: false, max_block_level: 250, pruning_proof_m: 1000, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), -}; - -pub const TESTNET11_PARAMS: Params = Params { - dns_seeders: &[ - // This DNS seeder is run by Tiram - "seeder1-testnet-11.kaspad.net", - // This DNS seeder is run by supertypo - "n-testnet-11.kaspa.ws", - // This DNS seeder is run by -gerri- - "dnsseeder-kaspa-testnet11.x-con.at", - // This DNS seeder is run by H@H - "ns-testnet11.kaspa-dnsseeder.net", - ], - net: NetworkId::with_suffix(NetworkType::Testnet, 11), - genesis: TESTNET11_GENESIS, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - sampling_activation: ForkActivation::always(), // 
Sampling is activated from network inception - max_difficulty_target: MAX_DIFFICULTY_TARGET, - max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, - - // - // ~~~~~~~~~~~~~~~~~~ BPS dependent constants ~~~~~~~~~~~~~~~~~~ - // - ghostdag_k: Testnet11Bps::ghostdag_k(), - target_time_per_block: Testnet11Bps::target_time_per_block(), - past_median_time_sample_rate: Testnet11Bps::past_median_time_sample_rate(), - difficulty_sample_rate: Testnet11Bps::difficulty_adjustment_sample_rate(), - max_block_parents: Testnet11Bps::max_block_parents(), - mergeset_size_limit: Testnet11Bps::mergeset_size_limit(), - merge_depth: Testnet11Bps::merge_depth_bound(), - finality_depth: Testnet11Bps::finality_depth(), - pruning_depth: Testnet11Bps::pruning_depth(), - pruning_proof_m: Testnet11Bps::pruning_proof_m(), - deflationary_phase_daa_score: Testnet11Bps::deflationary_phase_daa_score(), - pre_deflationary_phase_base_subsidy: Testnet11Bps::pre_deflationary_phase_base_subsidy(), - coinbase_maturity: Testnet11Bps::coinbase_maturity(), - - coinbase_payload_script_public_key_max_len: 150, - max_coinbase_payload_len: 204, - - max_tx_inputs: 10_000, - max_tx_outputs: 10_000, - max_signature_script_len: 1_000_000, - max_script_public_key_len: 1_000_000, - - mass_per_tx_byte: 1, - mass_per_script_pub_key_byte: 10, - mass_per_sig_op: 1000, - max_block_mass: 500_000, - - storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::always(), - // Roughly at Dec 3, 2024 1800 UTC - kip10_activation: ForkActivation::new(287238000), - payload_activation: ForkActivation::new(287238000), - - skip_proof_of_work: false, - max_block_level: 250, - - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + // 18:30 UTC, March 6, 2025 + crescendo_activation: 
ForkActivation::new(88_657_000), }; pub const SIMNET_PARAMS: Params = Params { dns_seeders: &[], net: NetworkId::new(NetworkType::Simnet), genesis: SIMNET_GENESIS, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - sampling_activation: ForkActivation::always(), // Sampling is activated from network inception + timestamp_deviation_tolerance: TIMESTAMP_DEVIATION_TOLERANCE, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, + prior_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, + min_difficulty_window_size: MIN_DIFFICULTY_WINDOW_SIZE, // // ~~~~~~~~~~~~~~~~~~ BPS dependent constants ~~~~~~~~~~~~~~~~~~ // // Note we use a 10 BPS configuration for simnet - ghostdag_k: Testnet11Bps::ghostdag_k(), - target_time_per_block: Testnet11Bps::target_time_per_block(), - past_median_time_sample_rate: Testnet11Bps::past_median_time_sample_rate(), - difficulty_sample_rate: Testnet11Bps::difficulty_adjustment_sample_rate(), + prior_ghostdag_k: TenBps::ghostdag_k(), + prior_target_time_per_block: TenBps::target_time_per_block(), // For simnet, we deviate from TN11 configuration and allow at least 64 parents in order to support mempool benchmarks out of the box - max_block_parents: if Testnet11Bps::max_block_parents() > 64 { Testnet11Bps::max_block_parents() } else { 64 }, - mergeset_size_limit: Testnet11Bps::mergeset_size_limit(), - merge_depth: Testnet11Bps::merge_depth_bound(), - finality_depth: Testnet11Bps::finality_depth(), - pruning_depth: Testnet11Bps::pruning_depth(), - pruning_proof_m: Testnet11Bps::pruning_proof_m(), - deflationary_phase_daa_score: 
Testnet11Bps::deflationary_phase_daa_score(), - pre_deflationary_phase_base_subsidy: Testnet11Bps::pre_deflationary_phase_base_subsidy(), - coinbase_maturity: Testnet11Bps::coinbase_maturity(), + prior_max_block_parents: if TenBps::max_block_parents() > 64 { TenBps::max_block_parents() } else { 64 }, + prior_mergeset_size_limit: TenBps::mergeset_size_limit(), + prior_merge_depth: TenBps::merge_depth_bound(), + prior_finality_depth: TenBps::finality_depth(), + prior_pruning_depth: TenBps::pruning_depth(), + deflationary_phase_daa_score: TenBps::deflationary_phase_daa_score(), + pre_deflationary_phase_base_subsidy: TenBps::pre_deflationary_phase_base_subsidy(), + prior_coinbase_maturity: TenBps::coinbase_maturity(), coinbase_payload_script_public_key_max_len: 150, max_coinbase_payload_len: 204, - max_tx_inputs: 10_000, - max_tx_outputs: 10_000, - max_signature_script_len: 1_000_000, - max_script_public_key_len: 1_000_000, + prior_max_tx_inputs: 10_000, + prior_max_tx_outputs: 10_000, + prior_max_signature_script_len: 1_000_000, + prior_max_script_public_key_len: 1_000_000, mass_per_tx_byte: 1, mass_per_script_pub_key_byte: 10, @@ -593,38 +643,31 @@ pub const SIMNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::always(), - kip10_activation: ForkActivation::never(), skip_proof_of_work: true, // For simnet only, PoW can be simulated by default max_block_level: 250, + pruning_proof_m: PRUNING_PROOF_M, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::always(), }; pub const DEVNET_PARAMS: Params = Params { dns_seeders: &[], net: NetworkId::new(NetworkType::Devnet), genesis: DEVNET_GENESIS, - ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: 
NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - target_time_per_block: 1000, - sampling_activation: ForkActivation::never(), + prior_ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, + timestamp_deviation_tolerance: TIMESTAMP_DEVIATION_TOLERANCE, + prior_target_time_per_block: 1000, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, - max_block_parents: 10, - mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, - merge_depth: 3600, - finality_depth: 86400, - pruning_depth: 185798, + prior_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, + min_difficulty_window_size: MIN_DIFFICULTY_WINDOW_SIZE, + prior_max_block_parents: 10, + prior_mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, + prior_merge_depth: 3600, + prior_finality_depth: 86400, + prior_pruning_depth: 185798, coinbase_payload_script_public_key_max_len: 150, max_coinbase_payload_len: 204, @@ -632,10 +675,10 @@ pub const DEVNET_PARAMS: Params = Params { // check these rules, but in practice it's enforced by the network layer that limits the message // size to 1 GB. // These values should be lowered to more reasonable amounts on the next planned HF/SF. 
- max_tx_inputs: 1_000_000_000, - max_tx_outputs: 1_000_000_000, - max_signature_script_len: 1_000_000_000, - max_script_public_key_len: 1_000_000_000, + prior_max_tx_inputs: 1_000_000_000, + prior_max_tx_outputs: 1_000_000_000, + prior_max_signature_script_len: 1_000_000_000, + prior_max_script_public_key_len: 1_000_000_000, mass_per_tx_byte: 1, mass_per_script_pub_key_byte: 10, @@ -643,8 +686,6 @@ pub const DEVNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: @@ -654,11 +695,11 @@ pub const DEVNET_PARAMS: Params = Params { // Three days in seconds = 3 * 24 * 60 * 60 = 259200 deflationary_phase_daa_score: 15778800 - 259200, pre_deflationary_phase_base_subsidy: 50000000000, - coinbase_maturity: 100, + prior_coinbase_maturity: 100, skip_proof_of_work: false, max_block_level: 250, pruning_proof_m: 1000, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::never(), }; diff --git a/consensus/core/src/constants.rs b/consensus/core/src/constants.rs index 450c12f678..54bb352011 100644 --- a/consensus/core/src/constants.rs +++ b/consensus/core/src/constants.rs @@ -15,6 +15,11 @@ pub const SOMPI_PER_KASPA: u64 = 100_000_000; /// The parameter for scaling inverse KAS value to mass units (KIP-0009) pub const STORAGE_MASS_PARAMETER: u64 = SOMPI_PER_KASPA * 10_000; +/// The parameter defining how much mass per byte to charge for when calculating +/// transient storage mass. Since normally the block mass limit is 500_000, this limits +/// block body byte size to 125_000 (KIP-0013). 
+pub const TRANSIENT_BYTE_TO_MASS_FACTOR: u64 = 4; + /// MaxSompi is the maximum transaction amount allowed in sompi. pub const MAX_SOMPI: u64 = 29_000_000_000 * SOMPI_PER_KASPA; diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index 132c6619f7..afe4bf11dc 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -97,14 +97,20 @@ pub enum RuleError { #[error("coinbase blue score of {0} is not the expected value of {1}")] BadCoinbasePayloadBlueScore(u64, u64), + #[error("coinbase mass commitment field is not zero")] + CoinbaseNonZeroMassCommitment, + #[error("transaction in isolation validation failed for tx {0}: {1}")] TxInIsolationValidationFailed(TransactionId, TxRuleError), - #[error("block exceeded mass limit of {0}")] - ExceedsMassLimit(u64), + #[error("block compute mass {0} exceeds limit of {1}")] + ExceedsComputeMassLimit(u64, u64), + + #[error("block transient storage mass {0} exceeds limit of {1}")] + ExceedsTransientMassLimit(u64, u64), - #[error("transaction {0} has mass field of {1} but mass should be at least {2}")] - MassFieldTooLow(TransactionId, u64, u64), + #[error("block persistent storage mass {0} exceeds limit of {1}")] + ExceedsStorageMassLimit(u64, u64), #[error("outpoint {0} is spent more than once on the same block")] DoubleSpendInSameBlock(TransactionOutpoint), diff --git a/consensus/core/src/errors/consensus.rs b/consensus/core/src/errors/consensus.rs index 51d8b3f4d0..58c5ed35e9 100644 --- a/consensus/core/src/errors/consensus.rs +++ b/consensus/core/src/errors/consensus.rs @@ -34,6 +34,9 @@ pub enum ConsensusError { #[error("{0}")] General(&'static str), + + #[error("{0}")] + GeneralOwned(String), } pub type ConsensusResult = std::result::Result; diff --git a/consensus/core/src/errors/pruning.rs b/consensus/core/src/errors/pruning.rs index a9686e023a..5c69eb0142 100644 --- a/consensus/core/src/errors/pruning.rs +++ b/consensus/core/src/errors/pruning.rs @@ -62,6 
+62,24 @@ pub enum PruningImportError { #[error("block {0} at level {1} has invalid proof of work for level")] ProofOfWorkFailed(Hash, BlockLevel), + + #[error("past pruning points at indices {0}, {1} have non monotonic blue score {2}, {3}")] + InconsistentPastPruningPoints(usize, usize, u64, u64), + + #[error("past pruning points contains {0} duplications")] + DuplicatedPastPruningPoints(usize), + + #[error("pruning point {0} of header {1} is not consistent with past pruning points")] + WrongHeaderPruningPoint(Hash, Hash), + + #[error("a past pruning point is pointing at a missing point")] + MissingPointedPruningPoint, + + #[error("a past pruning point is pointing at the wrong point")] + WrongPointedPruningPoint, + + #[error("a past pruning point has not been pointed at")] + UnpointedPruningPoint, } pub type PruningImportResult = std::result::Result; diff --git a/consensus/core/src/hashing/tx.rs b/consensus/core/src/hashing/tx.rs index 9216a1c16e..f9cac0311a 100644 --- a/consensus/core/src/hashing/tx.rs +++ b/consensus/core/src/hashing/tx.rs @@ -18,8 +18,8 @@ pub fn hash(tx: &Transaction, include_mass_field: bool) -> Hash { /// Not intended for direct use by clients. Instead use `tx.id()` pub(crate) fn id(tx: &Transaction) -> TransactionId { - // Encode the transaction, replace signature script with zeroes, cut off - // payload and hash the result. + // Encode the transaction, replace signature script with an empty array, skip + // sigop counts and mass and hash the result. let encoding_flags = if tx.is_coinbase() { TX_ENCODING_FULL } else { TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT }; let mut hasher = kaspa_hashes::TransactionID::new(); @@ -43,8 +43,25 @@ fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags hasher.update(tx.lock_time.to_le_bytes()).update(&tx.subnetwork_id).update(tx.gas.to_le_bytes()).write_var_bytes(&tx.payload); - // TODO: - // 1. Avoid passing a boolean and hash the mass only if > 0 (requires setting the mass to 0 on BBT). 
+ /* + Design principles (mostly related to the new mass commitment field; see KIP-0009): + 1. The new mass field should not modify tx::id (since it is essentially a commitment by the miner re block space usage + so there is no need to modify the id definition which will require wide-spread changes in ecosystem software). + 2. Coinbase tx hash and id should ideally remain equal + + Solution: + 1. Hash the mass field only for tx::hash + 2. Hash the mass field only if mass > 0 + 3. Require in consensus that coinbase mass == 0 + + This way we have: + - Unique commitment for tx::hash per any possible mass value (with only zero being a no-op) + - tx::id remains unmodified + - Coinbase tx hash and id remain the same and equal + */ + + // TODO (post HF): + // 1. Avoid passing a boolean // 2. Use TxEncodingFlags to avoid including the mass for tx ID if include_mass_field { let mass = tx.mass(); diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index e4591f2181..4a36a34b63 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -27,6 +27,7 @@ pub mod hashing; pub mod header; pub mod mass; pub mod merkle; +pub mod mining_rules; pub mod muhash; pub mod network; pub mod pruning; diff --git a/consensus/core/src/mass/mod.rs b/consensus/core/src/mass/mod.rs index 67bcc63aef..90099487c2 100644 --- a/consensus/core/src/mass/mod.rs +++ b/consensus/core/src/mass/mod.rs @@ -1,7 +1,8 @@ use crate::{ config::params::Params, + constants::TRANSIENT_BYTE_TO_MASS_FACTOR, subnets::SUBNETWORK_ID_SIZE, - tx::{Transaction, TransactionInput, TransactionOutput, VerifiableTransaction}, + tx::{ScriptPublicKey, Transaction, TransactionInput, TransactionOutput, UtxoEntry, VerifiableTransaction}, }; use kaspa_hashes::HASH_SIZE; @@ -57,6 +58,165 @@ pub fn transaction_output_estimated_serialized_size(output: &TransactionOutput) size } +/// Returns the UTXO storage "plurality" for this script public key. +/// i.e., how many 100-byte "storage units" it occupies. 
+/// The choice of 100 bytes per unit ensures that all standard SPKs have a plurality of 1. +pub fn utxo_plurality(spk: &ScriptPublicKey) -> u64 { + /// A constant representing the number of bytes used by the fixed parts of a UTXO. + const UTXO_CONST_STORAGE: usize = + 32 // outpoint::tx_id + + 4 // outpoint::index + + 8 // entry amount + + 8 // entry DAA score + + 1 // entry is coinbase + + 2 // entry spk version + + 8 // entry spk len + ; + + // The base (63 bytes) plus the max standard public key length (33 bytes) fits into one 100-byte unit. + // Hence, all standard SPKs end up with a plurality of 1. + const UTXO_UNIT_SIZE: usize = 100; + + (UTXO_CONST_STORAGE + spk.script().len()).div_ceil(UTXO_UNIT_SIZE) as u64 +} + +pub trait UtxoPlurality { + /// Returns the UTXO storage plurality for the script public key associated with this object. + fn plurality(&self) -> u64; +} + +impl UtxoPlurality for ScriptPublicKey { + fn plurality(&self) -> u64 { + utxo_plurality(self) + } +} + +impl UtxoPlurality for UtxoEntry { + fn plurality(&self) -> u64 { + utxo_plurality(&self.script_public_key) + } +} + +impl UtxoPlurality for TransactionOutput { + fn plurality(&self) -> u64 { + utxo_plurality(&self.script_public_key) + } +} + +/// An abstract UTXO storage cell. +/// +/// # Plurality +/// +/// Each `UtxoCell` now has a `plurality` field reflecting how many 100-byte "storage units" +/// this UTXO effectively occupies. This generalizes KIP-0009 to support UTXOs with +/// script public keys larger than the standard 33-byte limit. For a UTXO of byte-size +/// `entry.size`, we define: +/// +/// ```ignore +/// p := ceil(entry.size / UTXO_UNIT) +/// ``` +/// +/// Conceptually, we treat a large UTXO as `p` sub-entries each holding `entry.amount / p`, +/// preserving the total locked amount but increasing the "count" proportionally to script size. +/// +/// Refer to the KIP-0009 specification for more details. 
+#[derive(Clone, Copy)] +pub struct UtxoCell { + /// The plurality (number of "storage units") for this UTXO + pub plurality: u64, + /// The amount of KAS (in sompis) locked in this UTXO + pub amount: u64, +} + +impl UtxoCell { + pub fn new(plurality: u64, amount: u64) -> Self { + Self { plurality, amount } + } +} + +impl From<&UtxoEntry> for UtxoCell { + fn from(entry: &UtxoEntry) -> Self { + Self::new(entry.plurality(), entry.amount) + } +} + +impl From<&TransactionOutput> for UtxoCell { + fn from(output: &TransactionOutput) -> Self { + Self::new(output.plurality(), output.value) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct NonContextualMasses { + /// Compute mass + pub compute_mass: u64, + + /// Transient storage mass + pub transient_mass: u64, +} + +impl NonContextualMasses { + pub fn new(compute_mass: u64, transient_mass: u64) -> Self { + Self { compute_mass, transient_mass } + } + + /// Returns the maximum over all non-contextual masses (currently compute and transient). This + /// max value has no consensus meaning and should only be used for mempool-level simplification + /// such as obtaining a one-dimensional mass value when composing blocks templates. + pub fn max(&self) -> u64 { + self.compute_mass.max(self.transient_mass) + } +} + +impl std::fmt::Display for NonContextualMasses { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "compute: {}, transient: {}", self.compute_mass, self.transient_mass) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct ContextualMasses { + /// Persistent storage mass + pub storage_mass: u64, +} + +impl ContextualMasses { + pub fn new(storage_mass: u64) -> Self { + Self { storage_mass } + } + + /// Returns the maximum over *all masses* (currently compute, transient and storage). 
This max + /// value has no consensus meaning and should only be used for mempool-level simplification such + /// as obtaining a one-dimensional mass value when composing blocks templates. + pub fn max(&self, non_contextual_masses: NonContextualMasses) -> u64 { + self.storage_mass.max(non_contextual_masses.max()) + } +} + +impl std::fmt::Display for ContextualMasses { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "storage: {}", self.storage_mass) + } +} + +impl std::cmp::PartialEq for ContextualMasses { + fn eq(&self, other: &u64) -> bool { + self.storage_mass.eq(other) + } +} + +pub type Mass = (NonContextualMasses, ContextualMasses); + +pub trait MassOps { + fn max(&self) -> u64; +} + +impl MassOps for Mass { + fn max(&self) -> u64 { + self.1.max(self.0) + } +} + // Note: consensus mass calculator operates on signed transactions. // To calculate mass for unsigned transactions, please use // `kaspa_wallet_core::tx::mass::MassCalculator` @@ -82,15 +242,16 @@ impl MassCalculator { } } - /// Calculates the compute mass of this transaction. This does not include the storage mass calculation below which - /// requires full UTXO context - pub fn calc_tx_compute_mass(&self, tx: &Transaction) -> u64 { + /// Calculates the non-contextual masses for this transaction (i.e., masses which can be calculated from + /// the transaction alone). These include compute and transient storage masses of this transaction. 
This + /// does not include the persistent storage mass calculation below which requires full UTXO context + pub fn calc_non_contextual_masses(&self, tx: &Transaction) -> NonContextualMasses { if tx.is_coinbase() { - return 0; + return NonContextualMasses::new(0, 0); } let size = transaction_estimated_serialized_size(tx); - let mass_for_size = size * self.mass_per_tx_byte; + let compute_mass_for_size = size * self.mass_per_tx_byte; let total_script_public_key_size: u64 = tx .outputs .iter() @@ -101,93 +262,153 @@ impl MassCalculator { let total_sigops: u64 = tx.inputs.iter().map(|input| input.sig_op_count as u64).sum(); let total_sigops_mass = total_sigops * self.mass_per_sig_op; - mass_for_size + total_script_public_key_mass + total_sigops_mass + let compute_mass = compute_mass_for_size + total_script_public_key_mass + total_sigops_mass; + let transient_mass = size * TRANSIENT_BYTE_TO_MASS_FACTOR; + + NonContextualMasses::new(compute_mass, transient_mass) } - /// Calculates the storage mass for this populated transaction. + /// Calculates the contextual masses for this populated transaction. /// Assumptions which must be verified before this call: /// 1. All output values are non-zero /// 2. At least one input (unless coinbase) /// /// Otherwise this function should never fail. - pub fn calc_tx_storage_mass(&self, tx: &impl VerifiableTransaction) -> Option { + pub fn calc_contextual_masses(&self, tx: &impl VerifiableTransaction) -> Option { calc_storage_mass( tx.is_coinbase(), - tx.populated_inputs().map(|(_, entry)| entry.amount), - tx.outputs().iter().map(|out| out.value), + tx.populated_inputs().map(|(_, entry)| entry.into()), + tx.outputs().iter().map(|out| out.into()), self.storage_mass_parameter, ) - } - - /// Calculates the overall mass of this transaction, combining both compute and storage masses. 
- pub fn calc_tx_overall_mass(&self, tx: &impl VerifiableTransaction, cached_compute_mass: Option) -> Option { - self.calc_tx_storage_mass(tx).map(|mass| mass.max(cached_compute_mass.unwrap_or_else(|| self.calc_tx_compute_mass(tx.tx())))) + .map(ContextualMasses::new) } } -/// Calculates the storage mass for the provided input and output values. +/// Calculates the storage mass (KIP-0009) for a given set of inputs and outputs. +/// +/// This function has been generalized for UTXO entries that may exceed +/// the max standard 33-byte script public key size. Each `UtxoCell::plurality` indicates +/// how many 100-byte "storage units" that UTXO occupies. +/// +/// # Formula Overview +/// +/// The core formula is: +/// +/// ```ignore +/// max(0, C · (|O| / H(O) - |I| / A(I))) +/// ``` +/// +/// where: +/// +/// - `C` is the storage mass parameter (`storm_param`). +/// - `|O|` and `|I|` are the total pluralities of outputs and inputs, respectively. +/// - `H(O)` is the harmonic mean of the outputs' amounts, generalized to account for per-UTXO +/// `plurality`. +/// +/// In standard KIP-0009, one has: +/// +/// ```ignore +/// |O| / H(O) = Σ (1 / o) +/// ``` +/// +/// Here, each UTXO that occupies `p` storage units is treated as `p` sub-entries, +/// each holding `amount / p`. This effectively converts `1 / o` into `p^2 / amount`. +/// Consequently, the code accumulates: +/// +/// ```ignore +/// Σ [C · p(o)^2 / amount(o)] +/// ``` +/// +/// - `A(I)` is the arithmetic mean of the inputs' amounts, similarly scaled by `|I|`, +/// while the sum of amounts remains unchanged. +/// +/// Under the “relaxed formula” conditions (`|O| = 1`, `|I| = 1`, or `|O| = |I| = 2`), +/// we compute the harmonic mean for inputs as well; otherwise, we use the arithmetic +/// approach for inputs. +/// +/// Refer to KIP-0009 for more details. +/// /// Assumptions which must be verified before this call: -/// 1. All output values are non-zero -/// 2. At least one input (unless coinbase) +/// 1. 
All input/output values are non-zero +/// 2. At least one input (unless coinbase) /// -/// Otherwise this function should never fail. +/// If these assumptions hold, this function should never fail. A `None` return +/// indicates that the mass is incomputable and can be considered too high. pub fn calc_storage_mass( is_coinbase: bool, - input_values: impl ExactSizeIterator, - output_values: impl ExactSizeIterator, - storage_mass_parameter: u64, + inputs: impl ExactSizeIterator + Clone, + mut outputs: impl Iterator, + storm_param: u64, ) -> Option { if is_coinbase { return Some(0); } - let outs_len = output_values.len() as u64; - let ins_len = input_values.len() as u64; + /* + In KIP-0009 terms, the canonical formula is: + max(0, C * (|O|/H(O) - |I|/A(I))). - /* The code below computes the following formula: + We first calculate the harmonic portion for outputs in a single pass, + accumulating: + 1) outs_plurality = Σ p(o) + 2) harmonic_outs = Σ [C * p(o)^2 / amount(o)] + */ + let (outs_plurality, harmonic_outs) = outputs.try_fold( + (0u64, 0u64), // (accumulated plurality, accumulated harmonic) + |(acc_plurality, acc_harm), UtxoCell { plurality, amount }| { + Some(( + acc_plurality + plurality, // represents in-memory bytes, cannot overflow + acc_harm.checked_add(storm_param.checked_mul(plurality)?.checked_mul(plurality)? / amount)?, + )) + }, + )?; - max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) + /* + KIP-0009 defines a relaxed formula for the cases: + |O| = 1 or |O| <= |I| <= 2 - where C is the mass storage parameter, O is the set of output values, I is the set of - input values, H(S) := |S|/sum_{s in S} 1 / s is the harmonic mean over the set S and - A(S) := sum_{s in S} / |S| is the arithmetic mean. 
+ The relaxed formula is: + max(0, C · (|O| / H(O) - |I| / H(I))) - See KIP-0009 for more details + If |I| = 1, the harmonic and arithmetic approaches coincide, so the conditions can be expressed as: + |O| = 1 or |I| = 1 or |O| = |I| = 2 */ + let relaxed_formula_path = { + if outs_plurality == 1 { + true // |O| = 1 + } else if inputs.len() > 2 { + false // since element plurality always >= 1 => ins_plurality > 2 => skip harmonic path + } else { + // For <= 2 inputs, we can afford to clone and sum the pluralities + let ins_plurality = inputs.clone().map(|cell| cell.plurality).sum::(); + ins_plurality == 1 || (outs_plurality == 2 && ins_plurality == 2) + } + }; - // Since we are doing integer division, we perform the multiplication with C over the inner - // fractions, otherwise we'll get a sum of zeros or ones. - // - // If sum of fractions overflowed (nearly impossible, requires 10^7 outputs for C = 10^12), - // we return `None` indicating mass is incomputable - // - // Note: in theory this can be tighten by subtracting input mass in the process (possibly avoiding the overflow), - // however the overflow case is so unpractical with current mass limits so we avoid the hassle - let harmonic_outs = - output_values.map(|out| storage_mass_parameter / out).try_fold(0u64, |total, current| total.checked_add(current))?; // C·|O|/H(O) - - /* - KIP-0009 relaxed formula for the cases |O| = 1 OR |O| <= |I| <= 2: - max( 0 , C·( |O|/H(O) - |I|/H(I) ) ) + if relaxed_formula_path { + // Each input i contributes C · p(i)^2 / amount(i) + let harmonic_ins = inputs + .map(|UtxoCell { plurality, amount }| storm_param * plurality * plurality / amount) // we assume no overflow (see verify_utxo_plurality_limits) + .fold(0u64, |total, current| total.saturating_add(current)); - Note: in the case |I| = 1 both formulas are equal, yet the following code (harmonic_ins) is a bit more efficient. 
- Hence, we transform the condition to |O| = 1 OR |I| = 1 OR |O| = |I| = 2 which is equivalent (and faster). - */ - if outs_len == 1 || ins_len == 1 || (outs_len == 2 && ins_len == 2) { - let harmonic_ins = - input_values.map(|value| storage_mass_parameter / value).fold(0u64, |total, current| total.saturating_add(current)); // C·|I|/H(I) - return Some(harmonic_outs.saturating_sub(harmonic_ins)); // max( 0 , C·( |O|/H(O) - |I|/H(I) ) ); + // max(0, harmonic_outs - harmonic_ins) + return Some(harmonic_outs.saturating_sub(harmonic_ins)); } - // Total supply is bounded, so a sum of existing UTXO entries cannot overflow (nor can it be zero) - let sum_ins = input_values.sum::(); // |I|·A(I) - let mean_ins = sum_ins / ins_len; + // Otherwise, we calculate the arithmetic portion for inputs: + // (ins_plurality, sum_ins) => (Σ plurality, Σ amounts) + let (ins_plurality, sum_ins) = + inputs.fold((0u64, 0u64), |(acc_plur, acc_amt), UtxoCell { plurality, amount }| (acc_plur + plurality, acc_amt + amount)); - // Inner fraction must be with C and over the mean value, in order to maximize precision. 
- // We can saturate the overall expression at u64::MAX since we lower-bound the subtraction below by zero anyway - let arithmetic_ins = ins_len.saturating_mul(storage_mass_parameter / mean_ins); // C·|I|/A(I) + // mean_ins = (Σ amounts) / (Σ plurality) + let mean_ins = sum_ins / ins_plurality; - Some(harmonic_outs.saturating_sub(arithmetic_ins)) // max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) + // arithmetic_ins: C · (|I| / A(I)) = |I| · (C / mean_ins) + let arithmetic_ins = ins_plurality.saturating_mul(storm_param / mean_ins); + + // max(0, harmonic_outs - arithmetic_ins) + Some(harmonic_outs.saturating_sub(arithmetic_ins)) } #[cfg(test)] @@ -195,13 +416,221 @@ mod tests { use super::*; use crate::{ constants::{SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER}, + network::NetworkType, subnets::SubnetworkId, tx::*, }; use std::str::FromStr; + const UTXO_CONST_STORAGE: u64 = 63; + const UTXO_UNIT_SIZE: u64 = 100; + + #[test] + fn verify_utxo_plurality_limits() { + /* + Verify that for all networks, existing UTXO entries can never overflow the product C·P^2 used + for harmonic_ins within calc_storage_mass + */ + for net in NetworkType::iter() { + let params: Params = net.into(); + let max_spk_len = (params.max_script_public_key_len().upper_bound() as u64) + .min(params.max_block_mass.div_ceil(params.mass_per_script_pub_key_byte)); + let max_plurality = (UTXO_CONST_STORAGE + max_spk_len).div_ceil(UTXO_UNIT_SIZE); // see utxo_plurality + let product = params.storage_mass_parameter.checked_mul(max_plurality).and_then(|x| x.checked_mul(max_plurality)); + // verify C·P^2 can never overflow + assert!(product.is_some()); + } + + // verify P >= 1 also when the script is empty + assert!(utxo_plurality(&ScriptPublicKey::new(0, ScriptVec::from_slice(&[]))) == 1); + // Assert the UTXO_CONST_STORAGE=63, UTXO_UNIT_SIZE=100 constants + assert!(utxo_plurality(&ScriptPublicKey::from_vec(0, vec![1; (UTXO_UNIT_SIZE - UTXO_CONST_STORAGE) as usize])) == 1); + 
assert!(utxo_plurality(&ScriptPublicKey::from_vec(0, vec![1; (UTXO_UNIT_SIZE - UTXO_CONST_STORAGE + 1) as usize])) == 2); + } + + #[derive(Debug)] + struct PluralityTestCase { + /// Test name + name: &'static str, + + /// Amounts for the first transaction's inputs + inputs_tx1: &'static [u64], + /// Amounts for the first transaction's outputs + outputs_tx1: &'static [u64], + + /// Amounts for the second transaction's inputs + inputs_tx2: &'static [u64], + /// Amounts for the second transaction's outputs + outputs_tx2: &'static [u64], + + /// (Optional) index of the input/output in tx2 whose script we want to override + plurality_index: Option, + /// Desired plurality for that UTXO's script + desired_plurality: Option, + /// Whether to override an output and not an input + override_output: bool, + + /// Mass calculator parameters + storage_mass_parameter: u64, + } + + impl PluralityTestCase { + /// Runs the test and asserts that the masses are equal + fn run(&self) { + // Sanity + assert!( + self.inputs_tx1.iter().sum::() >= self.outputs_tx1.iter().sum::(), + "Test \"{}\": tx1 outs > ins", + self.name + ); + assert!( + self.inputs_tx2.iter().sum::() >= self.outputs_tx2.iter().sum::(), + "Test \"{}\": tx2 outs > ins", + self.name + ); + + // Generate + let tx1 = generate_tx_from_amounts(self.inputs_tx1, self.outputs_tx1); + let mut tx2 = generate_tx_from_amounts(self.inputs_tx2, self.outputs_tx2); + + // If specified, override one of the script public keys in tx2. 
+ if let (Some(index), Some(plur)) = (self.plurality_index, self.desired_plurality) { + if self.override_output { + tx2.tx.outputs[index].script_public_key = generate_script_for_plurality(plur); + } else { + tx2.entries[index].as_mut().unwrap().script_public_key = generate_script_for_plurality(plur); + } + } + + let mc = MassCalculator::new(0, 0, 0, self.storage_mass_parameter); + + let mass1 = mc.calc_contextual_masses(&tx1.as_verifiable()); + let mass2 = mc.calc_contextual_masses(&tx2.as_verifiable()); + + assert_ne!(mass1, Some(ContextualMasses::new(0)), "Test \"{}\": avoid running meaningless test cases", self.name); + assert_eq!(mass1, mass2, "Test \"{}\" failed: mass1 = {:?}, mass2 = {:?}", self.name, mass1, mass2); + } + } + + #[test] + fn test_storage_mass_pluralities() { + /* + Tests pluralities by comparing transactions with all inputs/outputs with plurality 1 + with transactions with a super entry (plurality > 1) which replaces several entries + in the plurality 1 tx (with equal total value and equal total plurality) + */ + let test_cases = vec![ + PluralityTestCase { + name: "3:4; input index=1, plurality=2", + inputs_tx1: &[300, 200, 200], + outputs_tx1: &[200, 200, 200, 100], + inputs_tx2: &[300, 400], + outputs_tx2: &[200, 200, 200, 100], + plurality_index: Some(1), + desired_plurality: Some(2), + override_output: false, + storage_mass_parameter: 10_u64.pow(12), + }, + PluralityTestCase { + name: "2:3; output index=1, plurality=2", + inputs_tx1: &[350, 400], + outputs_tx1: &[300, 200, 200], + inputs_tx2: &[350, 400], + outputs_tx2: &[300, 400], + plurality_index: Some(1), + desired_plurality: Some(2), + override_output: true, + storage_mass_parameter: 10_u64.pow(12), + }, + PluralityTestCase { + name: "1:2; output index=0, plurality=2", + inputs_tx1: &[500], + outputs_tx1: &[200, 200], + inputs_tx2: &[500], + outputs_tx2: &[400], + plurality_index: Some(0), + desired_plurality: Some(2), + override_output: true, + storage_mass_parameter: 
10_u64.pow(12), + }, + PluralityTestCase { + name: "1:3; output index=1, plurality=2", + inputs_tx1: &[1000], + outputs_tx1: &[200, 200, 200], + inputs_tx2: &[1000], + outputs_tx2: &[200, 400], + plurality_index: Some(1), + desired_plurality: Some(2), + override_output: true, + storage_mass_parameter: 10_u64.pow(12), + }, + PluralityTestCase { + name: "1:3; output index=1, plurality=2; kas units", + inputs_tx1: &[1000 * SOMPI_PER_KASPA], + outputs_tx1: &[200 * SOMPI_PER_KASPA, 200 * SOMPI_PER_KASPA, 200 * SOMPI_PER_KASPA], + inputs_tx2: &[1000 * SOMPI_PER_KASPA], + outputs_tx2: &[200 * SOMPI_PER_KASPA, 400 * SOMPI_PER_KASPA], + plurality_index: Some(1), + desired_plurality: Some(2), + override_output: true, + storage_mass_parameter: 10_u64.pow(12), + }, + PluralityTestCase { + name: "1:2; output index=0, plurality=2; kas units", + inputs_tx1: &[1000 * SOMPI_PER_KASPA], + outputs_tx1: &[200 * SOMPI_PER_KASPA, 200 * SOMPI_PER_KASPA], + inputs_tx2: &[1000 * SOMPI_PER_KASPA], + outputs_tx2: &[400 * SOMPI_PER_KASPA], + plurality_index: Some(0), + desired_plurality: Some(2), + override_output: true, + storage_mass_parameter: 10_u64.pow(12), + }, + PluralityTestCase { + name: "2:2; output index=0, plurality=2; kas units", + inputs_tx1: &[350 * SOMPI_PER_KASPA, 500 * SOMPI_PER_KASPA], + outputs_tx1: &[200 * SOMPI_PER_KASPA, 200 * SOMPI_PER_KASPA], + inputs_tx2: &[350 * SOMPI_PER_KASPA, 500 * SOMPI_PER_KASPA], + outputs_tx2: &[400 * SOMPI_PER_KASPA], + plurality_index: Some(0), + desired_plurality: Some(2), + override_output: true, + storage_mass_parameter: 10_u64.pow(12), + }, + PluralityTestCase { + name: "4:6; output index=0, plurality=3; kas units", + inputs_tx1: &[350 * SOMPI_PER_KASPA, 500 * SOMPI_PER_KASPA, 350 * SOMPI_PER_KASPA, 500 * SOMPI_PER_KASPA], + outputs_tx1: &[ + 200 * SOMPI_PER_KASPA, + 200 * SOMPI_PER_KASPA, + 400 * SOMPI_PER_KASPA, + 250 * SOMPI_PER_KASPA, + 250 * SOMPI_PER_KASPA, + 250 * SOMPI_PER_KASPA, + ], + inputs_tx2: &[350 * SOMPI_PER_KASPA, 500 * 
SOMPI_PER_KASPA, 350 * SOMPI_PER_KASPA, 500 * SOMPI_PER_KASPA], + outputs_tx2: &[200 * SOMPI_PER_KASPA, 200 * SOMPI_PER_KASPA, 400 * SOMPI_PER_KASPA, 750 * SOMPI_PER_KASPA], + plurality_index: Some(3), + desired_plurality: Some(3), + override_output: true, + storage_mass_parameter: 10_u64.pow(12), + }, + ]; + + for tc in test_cases { + tc.run(); + } + } + + /// ScriptPublicKey generator that yields a script with length adjusted + /// to match `desired_plurality`. + fn generate_script_for_plurality(desired_plurality: u64) -> ScriptPublicKey { + let required_script_len = ((desired_plurality - 1) * UTXO_UNIT_SIZE) as usize; + ScriptPublicKey::from_vec(0, vec![1; required_script_len]) + } + #[test] - fn test_mass_storage() { + fn test_storage_mass() { // Tx with less outs than ins let mut tx = generate_tx_from_amounts(&[100, 200, 300], &[300, 300]); @@ -209,26 +638,26 @@ mod tests { // Assert the formula: max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) // - let storage_mass = MassCalculator::new(0, 0, 0, 10u64.pow(12)).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, 10u64.pow(12)).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 0); // Compounds from 3 to 2, with symmetric outputs and no fee, should be zero // Create asymmetry tx.tx.outputs[0].value = 50; tx.tx.outputs[1].value = 550; let storage_mass_parameter = 10u64.pow(12); - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, storage_mass_parameter / 50 + storage_mass_parameter / 550 - 3 * (storage_mass_parameter / 200)); // Create a tx with more outs than ins let base_value = 10_000 * SOMPI_PER_KASPA; let mut tx = generate_tx_from_amounts(&[base_value, base_value, base_value * 2], &[base_value; 4]); let 
storage_mass_parameter = STORAGE_MASS_PARAMETER; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 4); // Inputs are above C so they don't contribute negative mass, 4 outputs exactly equal C each charge 1 let mut tx2 = tx.clone(); tx2.tx.outputs[0].value = 10 * SOMPI_PER_KASPA; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx2.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx2.as_verifiable()).unwrap(); assert_eq!(storage_mass, 1003); // Increase values over the lim @@ -236,7 +665,7 @@ mod tests { out.value += 1 } tx.entries[0].as_mut().unwrap().amount += tx.tx.outputs.len() as u64; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 0); // Now create 2:2 transaction @@ -244,19 +673,19 @@ mod tests { let mut tx = generate_tx_from_amounts(&[100, 200], &[50, 250]); let storage_mass_parameter = 10u64.pow(12); - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 9000000000); // Set outputs to be equal to inputs tx.tx.outputs[0].value = 100; tx.tx.outputs[1].value = 200; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, 
storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 0); // Remove an output and make sure the other is small enough to make storage mass greater than zero tx.tx.outputs.pop(); tx.tx.outputs[0].value = 50; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 5000000000); } diff --git a/consensus/core/src/mining_rules.rs b/consensus/core/src/mining_rules.rs new file mode 100644 index 0000000000..b84565c960 --- /dev/null +++ b/consensus/core/src/mining_rules.rs @@ -0,0 +1,19 @@ +use std::sync::{atomic::AtomicBool, Arc}; + +#[derive(Debug)] +pub struct MiningRules { + pub no_transactions: Arc, + pub blue_parents_only: Arc, +} + +impl MiningRules { + pub fn new() -> Self { + Self { no_transactions: Arc::new(AtomicBool::new(false)), blue_parents_only: Arc::new(AtomicBool::new(false)) } + } +} + +impl Default for MiningRules { + fn default() -> Self { + Self::new() + } +} diff --git a/consensus/core/src/network.rs b/consensus/core/src/network.rs index 2f81444b3c..e96a95e8fc 100644 --- a/consensus/core/src/network.rs +++ b/consensus/core/src/network.rs @@ -253,10 +253,9 @@ impl NetworkId { } pub fn iter() -> impl Iterator { - static NETWORK_IDS: [NetworkId; 5] = [ + static NETWORK_IDS: [NetworkId; 4] = [ NetworkId::new(NetworkType::Mainnet), NetworkId::with_suffix(NetworkType::Testnet, 10), - NetworkId::with_suffix(NetworkType::Testnet, 11), NetworkId::new(NetworkType::Devnet), NetworkId::new(NetworkType::Simnet), ]; diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index 769d29452c..b542738f07 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -26,6 +26,7 @@ use std::{ }; use wasm_bindgen::prelude::*; +use crate::mass::{ContextualMasses, NonContextualMasses}; use 
crate::{ hashing, subnets::{self, SubnetworkId}, @@ -174,6 +175,8 @@ pub struct Transaction { #[serde(with = "serde_bytes")] pub payload: Vec, + /// Holds a commitment to the storage mass (KIP-0009) + /// TODO: rename field and related methods to storage_mass #[serde(default)] mass: TransactionMass, @@ -230,16 +233,18 @@ impl Transaction { self.id } - /// Set the mass field of this transaction. The mass field is expected depending on hard-forks which are currently - /// activated only on some testnets. The field has no effect on tx ID so no need to finalize following this call. + /// Set the storage mass commitment field of this transaction. This field is expected to be activated on mainnet + /// as part of the Crescendo hardfork. The field has no effect on tx ID so no need to finalize following this call. pub fn set_mass(&self, mass: u64) { self.mass.0.store(mass, SeqCst) } + /// Read the storage mass commitment pub fn mass(&self) -> u64 { self.mass.0.load(SeqCst) } + /// Set the storage mass commitment of the passed transaction pub fn with_mass(self, mass: u64) -> Self { self.set_mass(mass); self @@ -303,6 +308,12 @@ pub struct PopulatedInputIterator<'a, T: VerifiableTransaction> { r: Range, } +impl Clone for PopulatedInputIterator<'_, T> { + fn clone(&self) -> Self { + Self { tx: self.tx, r: self.r.clone() } + } +} + impl<'a, T: VerifiableTransaction> PopulatedInputIterator<'a, T> { pub fn new(tx: &'a T) -> Self { Self { tx, r: (0..tx.inputs().len()) } @@ -398,14 +409,14 @@ pub struct MutableTransaction = std::sync::Arc>, /// Populated fee pub calculated_fee: Option, - /// Populated compute mass (does not include the storage mass) - pub calculated_compute_mass: Option, + /// Populated non-contextual masses (does not include the storage mass) + pub calculated_non_contextual_masses: Option, } impl> MutableTransaction { pub fn new(tx: T) -> Self { let num_inputs = tx.as_ref().inputs.len(); - Self { tx, entries: vec![None; num_inputs], calculated_fee: None, 
calculated_compute_mass: None } + Self { tx, entries: vec![None; num_inputs], calculated_fee: None, calculated_non_contextual_masses: None } } pub fn id(&self) -> TransactionId { @@ -414,7 +425,7 @@ impl> MutableTransaction { pub fn with_entries(tx: T, entries: Vec) -> Self { assert_eq!(tx.as_ref().inputs.len(), entries.len()); - Self { tx, entries: entries.into_iter().map(Some).collect(), calculated_fee: None, calculated_compute_mass: None } + Self { tx, entries: entries.into_iter().map(Some).collect(), calculated_fee: None, calculated_non_contextual_masses: None } } /// Returns the tx wrapped as a [`VerifiableTransaction`]. Note that this function @@ -430,7 +441,7 @@ impl> MutableTransaction { } pub fn is_fully_populated(&self) -> bool { - self.is_verifiable() && self.calculated_fee.is_some() && self.calculated_compute_mass.is_some() + self.is_verifiable() && self.calculated_fee.is_some() && self.calculated_non_contextual_masses.is_some() } pub fn missing_outpoints(&self) -> impl Iterator + '_ { @@ -450,17 +461,14 @@ impl> MutableTransaction { } } - /// Returns the calculated feerate. The feerate is calculated as the amount of fee - /// this transactions pays per gram of the full contextual (compute & storage) mass. The - /// function returns a value when calculated fee exists and the contextual mass is greater - /// than zero, otherwise `None` is returned. + /// Returns the calculated feerate. The feerate is calculated as the amount of fee this + /// transactions pays per gram of the aggregated contextual mass (max over compute, transient + /// and storage masses). The function returns a value when calculated fee and calculated masses + /// exist, otherwise `None` is returned. 
pub fn calculated_feerate(&self) -> Option { - let contextual_mass = self.tx.as_ref().mass(); - if contextual_mass > 0 { - self.calculated_fee.map(|fee| fee as f64 / contextual_mass as f64) - } else { - None - } + self.calculated_non_contextual_masses + .map(|non_contextual_masses| ContextualMasses::new(self.tx.as_ref().mass()).max(non_contextual_masses)) + .and_then(|max_mass| self.calculated_fee.map(|fee| fee as f64 / max_mass as f64)) } /// A function for estimating the amount of memory bytes used by this transaction (dedicated to mempool usage). diff --git a/consensus/pow/src/matrix.rs b/consensus/pow/src/matrix.rs index 20a9d1c40d..490a587b5c 100644 --- a/consensus/pow/src/matrix.rs +++ b/consensus/pow/src/matrix.rs @@ -44,7 +44,7 @@ impl Matrix { if shift == 0 { val = generator.u64(); } - (val >> (4 * shift) & 0x0F) as u16 + ((val >> (4 * shift)) & 0x0F) as u16 }) })) } diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f8af5fb5a6..e387329c85 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -3,7 +3,7 @@ use super::utxo_set_override::{set_genesis_utxo_commitment_from_config, set_init use super::{ctl::Ctl, Consensus}; use crate::{model::stores::U64Key, pipeline::ProcessingCounters}; use itertools::Itertools; -use kaspa_consensus_core::config::Config; +use kaspa_consensus_core::{config::Config, mining_rules::MiningRules}; use kaspa_consensus_notify::root::ConsensusNotificationRoot; use kaspa_consensusmanager::{ConsensusFactory, ConsensusInstance, DynConsensusCtl, SessionLock}; use kaspa_core::{debug, time::unix_now, warn}; @@ -254,6 +254,7 @@ pub struct Factory { counters: Arc, tx_script_cache_counters: Arc, fd_budget: i32, + mining_rules: Arc, } impl Factory { @@ -266,6 +267,7 @@ impl Factory { counters: Arc, tx_script_cache_counters: Arc, fd_budget: i32, + mining_rules: Arc, ) -> Self { assert!(fd_budget > 0, "fd_budget has to be positive"); let mut config = config.clone(); @@ 
-283,6 +285,7 @@ impl Factory { counters, tx_script_cache_counters, fd_budget, + mining_rules, }; factory.delete_inactive_consensus_entries(); factory @@ -325,6 +328,7 @@ impl ConsensusFactory for Factory { self.counters.clone(), self.tx_script_cache_counters.clone(), entry.creation_timestamp, + self.mining_rules.clone(), )); // We write the new active entry only once the instance was created successfully. @@ -359,6 +363,7 @@ impl ConsensusFactory for Factory { self.counters.clone(), self.tx_script_cache_counters.clone(), entry.creation_timestamp, + self.mining_rules.clone(), )); (ConsensusInstance::new(session_lock, consensus.clone()), Arc::new(Ctl::new(self.management_store.clone(), db, consensus))) diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 21e5bf5573..17637dc20d 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -21,6 +21,8 @@ use crate::{ headers_selected_tip::HeadersSelectedTipStoreReader, past_pruning_points::PastPruningPointsStoreReader, pruning::PruningStoreReader, + pruning_samples::{PruningSamplesStore, PruningSamplesStoreReader}, + reachability::ReachabilityStoreReader, relations::RelationsStoreReader, statuses::StatusesStoreReader, tips::TipsStoreReader, @@ -61,7 +63,9 @@ use kaspa_consensus_core::{ tx::TxResult, }, header::Header, + mass::{ContextualMasses, NonContextualMasses}, merkle::calc_hash_merkle_root, + mining_rules::MiningRules, muhash::MuHashExtensions, network::NetworkType, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, @@ -78,7 +82,7 @@ use crossbeam_channel::{ use itertools::Itertools; use kaspa_consensusmanager::{SessionLock, SessionReadGuard}; -use kaspa_database::prelude::StoreResultExtensions; +use kaspa_database::prelude::{StoreResultEmptyTuple, StoreResultExtensions}; use kaspa_hashes::Hash; use kaspa_muhash::MuHash; use kaspa_txscript::caches::TxScriptCacheCounters; @@ -86,7 +90,7 @@ use 
rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::{ cmp::Reverse, - collections::BinaryHeap, + collections::{BinaryHeap, VecDeque}, future::Future, iter::once, ops::Deref, @@ -159,6 +163,7 @@ impl Consensus { counters: Arc, tx_script_cache_counters: Arc, creation_timestamp: u64, + mining_rules: Arc, ) -> Self { let params = &config.params; let perf_params = &config.perf; @@ -263,6 +268,7 @@ impl Consensus { pruning_lock.clone(), notification_root.clone(), counters.clone(), + mining_rules, )); let pruning_processor = Arc::new(PruningProcessor::new( @@ -287,7 +293,7 @@ impl Consensus { virtual_processor.process_genesis(); } - Self { + let this = Self { db, block_sender: sender, header_processor, @@ -302,9 +308,90 @@ impl Consensus { config, creation_timestamp, is_consensus_exiting, + }; + + // Run database upgrades if any + this.run_database_upgrades(); + + this + } + + /// A procedure for calling database upgrades which are self-contained (i.e., do not require knowing the DB version) + fn run_database_upgrades(&self) { + // Upgrade to initialize the new retention root field correctly + self.retention_root_database_upgrade(); + + // TODO (post HF): remove this upgrade + // Database upgrade to include pruning samples + self.pruning_samples_database_upgrade(); + } + + fn retention_root_database_upgrade(&self) { + let mut pruning_point_store = self.pruning_point_store.write(); + if pruning_point_store.retention_period_root().unwrap_option().is_none() { + let mut batch = rocksdb::WriteBatch::default(); + if self.config.is_archival { + // The retention checkpoint is what was previously known as history root + let retention_checkpoint = pruning_point_store.retention_checkpoint().unwrap(); + pruning_point_store.set_retention_period_root(&mut batch, retention_checkpoint).unwrap(); + } else { + // For non-archival nodes the retention root was the pruning point + let pruning_point = pruning_point_store.get().unwrap().pruning_point; + 
pruning_point_store.set_retention_period_root(&mut batch, pruning_point).unwrap(); + } + self.db.write(batch).unwrap(); } } + fn pruning_samples_database_upgrade(&self) { + // + // For the first time this version runs, make sure we populate pruning samples + // from pov for all qualified chain blocks in the pruning point future + // + + let sink = self.get_sink(); + if self.storage.pruning_samples_store.pruning_sample_from_pov(sink).unwrap_option().is_some() { + // Sink is populated so we assume the database is upgraded + return; + } + + // Populate past pruning points (including current one) + for (p1, p2) in (0..=self.pruning_point_store.read().get().unwrap().index) + .map(|index| self.past_pruning_points_store.get(index).unwrap()) + .tuple_windows() + { + // Set p[i] to point at p[i-1] + self.pruning_samples_store.insert(p2, p1).unwrap_or_exists(); + } + + let pruning_point = self.pruning_point(); + let reachability = self.reachability_store.read(); + + // We walk up via reachability tree children so that we only iterate blocks B s.t. pruning point ∈ chain(B) + let mut queue = VecDeque::::from_iter(reachability.get_children(pruning_point).unwrap().iter().copied()); + let mut processed = 0; + kaspa_core::info!("Upgrading database to include and populate the pruning samples store"); + while let Some(current) = queue.pop_front() { + if !self.get_block_status(current).is_some_and(|s| s == BlockStatus::StatusUTXOValid) { + // Skip branches of the tree which are not chain qualified. 
+ // This is sufficient since we will only assume this field exists + // for such chain qualified blocks + continue; + } + queue.extend(reachability.get_children(current).unwrap().iter()); + + processed += 1; + + // Populate the data + let ghostdag_data = self.ghostdag_store.get_compact_data(current).unwrap(); + let pruning_sample_from_pov = + self.services.pruning_point_manager.expected_header_pruning_point_v2(ghostdag_data).pruning_sample; + self.pruning_samples_store.insert(current, pruning_sample_from_pov).unwrap_or_exists(); + } + + kaspa_core::info!("Done upgrading database (populated {} entries)", processed); + } + pub fn run_processors(&self) -> Vec> { // Spawn the asynchronous processors. let header_processor = self.header_processor.clone(); @@ -443,13 +530,12 @@ impl ConsensusApi for Consensus { self.virtual_processor.populate_mempool_transactions_in_parallel(transactions) } - fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { - self.services.mass_calculator.calc_tx_compute_mass(transaction) + fn calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> NonContextualMasses { + self.services.mass_calculator.calc_non_contextual_masses(transaction) } - fn calculate_transaction_storage_mass(&self, _transaction: &MutableTransaction) -> Option { - // self.services.mass_calculator.calc_tx_storage_mass(&transaction.as_verifiable()) - unimplemented!("unsupported at the API level until KIP9 is finalized") + fn calculate_transaction_contextual_masses(&self, transaction: &MutableTransaction) -> Option { + self.services.mass_calculator.calc_contextual_masses(&transaction.as_verifiable()) } fn get_stats(&self) -> ConsensusStats { @@ -502,14 +588,20 @@ impl ConsensusApi for Consensus { self.headers_store.get_timestamp(self.get_sink()).unwrap() } + fn get_sink_daa_score_timestamp(&self) -> DaaScoreTimestamp { + let sink = self.get_sink(); + let compact = self.headers_store.get_compact_header_data(sink).unwrap(); + 
DaaScoreTimestamp { daa_score: compact.daa_score, timestamp: compact.timestamp } + } + fn get_current_block_color(&self, hash: Hash) -> Option { let _guard = self.pruning_lock.blocking_read(); // Verify the block exists and can be assumed to have relations and reachability data self.validate_block_exists(hash).ok()?; - // Verify that the block is in future(source), where Ghostdag data is complete - self.services.reachability_service.is_dag_ancestor_of(self.get_source(), hash).then_some(())?; + // Verify that the block is in future(retention root), where Ghostdag data is complete + self.services.reachability_service.is_dag_ancestor_of(self.get_retention_period_root(), hash).then_some(())?; let sink = self.get_sink(); @@ -564,41 +656,29 @@ impl ConsensusApi for Consensus { self.lkg_virtual_state.load().to_virtual_state_approx_id() } - fn get_source(&self) -> Hash { - if self.config.is_archival { - // we use the history root in archival cases. - return self.pruning_point_store.read().history_root().unwrap(); - } - self.pruning_point_store.read().pruning_point().unwrap() + fn get_retention_period_root(&self) -> Hash { + self.pruning_point_store.read().retention_period_root().unwrap() } - /// Estimates number of blocks and headers stored in the node + /// Estimates the number of blocks and headers stored in the node database. /// - /// This is an estimation based on the daa score difference between the node's `source` and `sink`'s daa score, + /// This is an estimation based on the DAA score difference between the node's `retention root` and `virtual`'s DAA score, /// as such, it does not include non-daa blocks, and does not include headers stored as part of the pruning proof. 
fn estimate_block_count(&self) -> BlockCount { - // PRUNE SAFETY: node is either archival or source is the pruning point which its header is kept permanently - let source_score = self.headers_store.get_compact_header_data(self.get_source()).unwrap().daa_score; + // PRUNE SAFETY: retention root is always a current or past pruning point which its header is kept permanently + let retention_period_root_score = self.headers_store.get_daa_score(self.get_retention_period_root()).unwrap(); let virtual_score = self.get_virtual_daa_score(); let header_count = self .headers_store - .get_compact_header_data(self.get_headers_selected_tip()) + .get_daa_score(self.get_headers_selected_tip()) .unwrap_option() - .map(|h| h.daa_score) .unwrap_or(virtual_score) .max(virtual_score) - - source_score; - let block_count = virtual_score - source_score; + - retention_period_root_score; + let block_count = virtual_score - retention_period_root_score; BlockCount { header_count, block_count } } - fn is_nearly_synced(&self) -> bool { - // See comment within `config.is_nearly_synced` - let sink = self.get_sink(); - let compact = self.headers_store.get_compact_header_data(sink).unwrap(); - self.config.is_nearly_synced(compact.timestamp, compact.daa_score) - } - fn get_virtual_chain_from_block(&self, low: Hash, chain_path_added_limit: Option) -> ConsensusResult { // Calculate chain changes between the given `low` and the current sink hash (up to `limit` amount of block hashes). 
// Note: @@ -611,12 +691,12 @@ impl ConsensusApi for Consensus { // Verify that the block exists self.validate_block_exists(low)?; - // Verify that source is on chain(block) + // Verify that retention root is on chain(block) self.services .reachability_service - .is_chain_ancestor_of(self.get_source(), low) + .is_chain_ancestor_of(self.get_retention_period_root(), low) .then_some(()) - .ok_or(ConsensusError::General("the queried hash does not have source on its chain"))?; + .ok_or(ConsensusError::General("the queried hash does not have retention root on its chain"))?; Ok(self.services.dag_traversal_manager.calculate_chain_path(low, self.get_sink(), chain_path_added_limit)) } @@ -691,7 +771,7 @@ impl ConsensusApi for Consensus { fn get_populated_transaction(&self, txid: Hash, accepting_block_daa_score: u64) -> Result { // We need consistency between the pruning_point_store, utxo_diffs_store, block_transactions_store, selected chain and headers store reads let _guard = self.pruning_lock.blocking_read(); - self.virtual_processor.get_populated_transaction(txid, accepting_block_daa_score, self.get_source()) + self.virtual_processor.get_populated_transaction(txid, accepting_block_daa_score, self.get_retention_period_root()) } fn get_virtual_parents(&self) -> BlockHashSet { @@ -750,7 +830,7 @@ impl ConsensusApi for Consensus { } fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash { - let storage_mass_activated = self.config.storage_mass_activation.is_active(pov_daa_score); + let storage_mass_activated = self.config.crescendo_activation.is_active(pov_daa_score); calc_hash_merkle_root(txs.iter(), storage_mass_activated) } @@ -766,7 +846,7 @@ impl ConsensusApi for Consensus { self.services.pruning_proof_manager.apply_proof(proof, trusted_set) } - fn import_pruning_points(&self, pruning_points: PruningPointsList) { + fn import_pruning_points(&self, pruning_points: PruningPointsList) -> PruningImportResult<()> { 
self.services.pruning_proof_manager.import_pruning_points(&pruning_points) } @@ -789,18 +869,19 @@ impl ConsensusApi for Consensus { self.virtual_processor.import_pruning_point_utxo_set(new_pruning_point, imported_utxo_multiset) } - fn validate_pruning_points(&self) -> ConsensusResult<()> { + fn validate_pruning_points(&self, syncer_virtual_selected_parent: Hash) -> ConsensusResult<()> { let hst = self.storage.headers_selected_tip_store.read().get().unwrap().hash; let pp_info = self.pruning_point_store.read().get().unwrap(); if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point, hst) { - return Err(ConsensusError::General("invalid pruning point candidate")); + return Err(ConsensusError::General("pruning point does not coincide with the synced header selected tip")); } - - if !self.services.pruning_point_manager.are_pruning_points_in_valid_chain(pp_info, hst) { - return Err(ConsensusError::General("past pruning points do not form a valid chain")); + if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point, syncer_virtual_selected_parent) { + return Err(ConsensusError::General("pruning point does not coincide with the syncer's sink (virtual selected parent)")); } - - Ok(()) + self.services + .pruning_point_manager + .are_pruning_points_in_valid_chain(pp_info, syncer_virtual_selected_parent) + .map_err(|e| ConsensusError::GeneralOwned(format!("past pruning points do not form a valid chain: {}", e))) } fn is_chain_ancestor_of(&self, low: Hash, high: Hash) -> ConsensusResult { @@ -813,7 +894,7 @@ impl ConsensusApi for Consensus { // max_blocks has to be greater than the merge set size limit fn get_hashes_between(&self, low: Hash, high: Hash, max_blocks: usize) -> ConsensusResult<(Vec, Hash)> { let _guard = self.pruning_lock.blocking_read(); - assert!(max_blocks as u64 > self.config.mergeset_size_limit); + assert!(max_blocks as u64 > self.config.mergeset_size_limit().upper_bound()); 
self.validate_block_exists(low)?; self.validate_block_exists(high)?; @@ -1000,16 +1081,21 @@ impl ConsensusApi for Consensus { self.validate_block_exists(hash)?; // In order to guarantee the chain height is at least k, we check that the pruning point is not genesis. - if self.pruning_point() == self.config.genesis.hash { + let pruning_point = self.pruning_point(); + if pruning_point == self.config.genesis.hash { return Err(ConsensusError::UnexpectedPruningPoint); } + // [Crescendo]: get ghostdag k based on the pruning point's DAA score. The off-by-one of not going by selected parent + // DAA score is not important here as we simply increase K one block earlier which is more conservative (saving/sending more data) + let ghostdag_k = self.config.ghostdag_k().get(self.headers_store.get_daa_score(pruning_point).unwrap()); + // Note: the method `get_ghostdag_chain_k_depth` might return a partial chain if data is missing. // Ideally this node when synced would validate it got all of the associated data up to k blocks // back and then we would be able to assert we actually got `k + 1` blocks, however we choose to // simply ignore, since if the data was truly missing we wouldn't accept the staging consensus in // the first place - Ok(self.services.pruning_proof_manager.get_ghostdag_chain_k_depth(hash)) + Ok(self.services.pruning_proof_manager.get_ghostdag_chain_k_depth(hash, ghostdag_k)) } fn create_block_locator_from_pruning_point(&self, high: Hash, limit: usize) -> ConsensusResult> { diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 2ff7578e1d..d608938cd7 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -6,8 +6,8 @@ use crate::{ stores::{ block_window_cache::BlockWindowCacheStore, daa::DbDaaStore, depth::DbDepthStore, ghostdag::DbGhostdagStore, headers::DbHeadersStore, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::DbPastPruningPointsStore, - 
pruning::DbPruningStore, reachability::DbReachabilityStore, relations::DbRelationsStore, - selected_chain::DbSelectedChainStore, statuses::DbStatusesStore, DB, + pruning::DbPruningStore, pruning_samples::DbPruningSamplesStore, reachability::DbReachabilityStore, + relations::DbRelationsStore, selected_chain::DbSelectedChainStore, statuses::DbStatusesStore, DB, }, }, processes::{ @@ -38,9 +38,15 @@ pub type DbSyncManager = SyncManager< DbStatusesStore, >; -pub type DbPruningPointManager = - PruningPointManager; -pub type DbBlockDepthManager = BlockDepthManager; +pub type DbPruningPointManager = PruningPointManager< + DbGhostdagStore, + DbReachabilityStore, + DbHeadersStore, + DbPastPruningPointsStore, + DbHeadersSelectedTipStore, + DbPruningSamplesStore, +>; +pub type DbBlockDepthManager = BlockDepthManager; pub type DbParentsManager = ParentsManager>; pub struct ConsensusServices { @@ -93,27 +99,29 @@ impl ConsensusServices { storage.block_window_cache_for_difficulty.clone(), storage.block_window_cache_for_past_median_time.clone(), params.max_difficulty_target, - params.target_time_per_block, - params.sampling_activation, - params.legacy_difficulty_window_size, - params.sampled_difficulty_window_size, - params.min_difficulty_window_len, - params.difficulty_sample_rate, - params.legacy_past_median_time_window_size(), + params.prior_target_time_per_block, + params.crescendo.target_time_per_block, + params.crescendo_activation, + params.prior_difficulty_window_size, + params.crescendo.sampled_difficulty_window_size as usize, + params.min_difficulty_window_size, + params.crescendo.difficulty_sample_rate, + params.prior_past_median_time_window_size(), params.sampled_past_median_time_window_size(), - params.past_median_time_sample_rate, + params.crescendo.past_median_time_sample_rate, ); let depth_manager = BlockDepthManager::new( - params.merge_depth, - params.finality_depth, + params.merge_depth(), + params.finality_depth(), params.genesis.hash, 
storage.depth_store.clone(), reachability_service.clone(), storage.ghostdag_store.clone(), + storage.headers_store.clone(), ); let ghostdag_manager = GhostdagManager::new( params.genesis.hash, - params.ghostdag_k, + params.ghostdag_k(), storage.ghostdag_store.clone(), relations_services[0].clone(), storage.headers_store.clone(), @@ -125,7 +133,7 @@ impl ConsensusServices { params.max_coinbase_payload_len, params.deflationary_phase_daa_score, params.pre_deflationary_phase_base_subsidy, - params.target_time_per_block, + params.bps(), ); let mass_calculator = MassCalculator::new( @@ -136,30 +144,27 @@ impl ConsensusServices { ); let transaction_validator = TransactionValidator::new( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.max_tx_inputs(), + params.max_tx_outputs(), + params.max_signature_script_len(), + params.max_script_public_key_len(), params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.coinbase_maturity(), tx_script_cache_counters, mass_calculator.clone(), - params.storage_mass_activation, - params.kip10_activation, - params.payload_activation, - params.runtime_sig_op_counting, + params.crescendo_activation, ); let pruning_point_manager = PruningPointManager::new( - params.pruning_depth, - params.finality_depth, + params.pruning_depth(), + params.finality_depth(), params.genesis.hash, reachability_service.clone(), storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.past_pruning_points_store.clone(), storage.headers_selected_tip_store.clone(), + storage.pruning_samples_store.clone(), ); let parents_manager = ParentsManager::new( @@ -182,12 +187,12 @@ impl ConsensusServices { params.genesis.hash, params.pruning_proof_m, params.anticone_finalization_depth(), - params.ghostdag_k, + params.ghostdag_k(), is_consensus_exiting, )); let sync_manager = SyncManager::new( - params.mergeset_size_limit as usize, + 
params.mergeset_size_limit(), reachability_service.clone(), dag_traversal_manager.clone(), storage.ghostdag_store.clone(), diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 5e2ff8fcde..c943f233db 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -11,6 +11,7 @@ use crate::{ headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::DbPastPruningPointsStore, pruning::DbPruningStore, + pruning_samples::DbPruningSamplesStore, pruning_utxoset::PruningUtxosetStores, reachability::{DbReachabilityStore, ReachabilityData}, relations::DbRelationsStore, @@ -56,6 +57,7 @@ pub struct ConsensusStorage { pub past_pruning_points_store: Arc, pub daa_excluded_store: Arc, pub depth_store: Arc, + pub pruning_samples_store: Arc, // Utxo-related stores pub utxo_diffs_store: Arc, @@ -81,8 +83,9 @@ impl ConsensusStorage { let perf_params = &config.perf; // Lower and upper bounds - let pruning_depth = params.pruning_depth as usize; - let pruning_size_for_caches = (params.pruning_depth + params.finality_depth) as usize; // Upper bound for any block/header related data + // [Crescendo]: all usages of pruning upper bounds also bound by actual memory bytes, so we can safely use the larger values + let pruning_depth = params.pruning_depth().upper_bound() as usize; + let pruning_size_for_caches = pruning_depth + params.finality_depth().upper_bound() as usize; // Upper bound for any block/header related data let level_lower_bound = 2 * params.pruning_proof_m as usize; // Number of items lower bound for level-related caches // Budgets in bytes. 
All byte budgets overall sum up to ~1GB of memory (which obviously takes more low level alloc space) @@ -107,8 +110,10 @@ impl ConsensusStorage { let reachability_data_bytes = size_of::() + size_of::(); let ghostdag_compact_bytes = size_of::() + size_of::(); let headers_compact_bytes = size_of::() + size_of::(); - let difficulty_window_bytes = params.difficulty_window_size(0) * size_of::(); - let median_window_bytes = params.past_median_time_window_size(0) * size_of::(); + + // If the fork is already scheduled, prefer the long-term, permanent values + let difficulty_window_bytes = params.difficulty_window_size().after() * size_of::(); + let median_window_bytes = params.past_median_time_window_size().after() * size_of::(); // Cache policy builders let daa_excluded_builder = @@ -207,6 +212,7 @@ impl ConsensusStorage { let pruning_point_store = Arc::new(RwLock::new(DbPruningStore::new(db.clone()))); let past_pruning_points_store = Arc::new(DbPastPruningPointsStore::new(db.clone(), past_pruning_points_builder.build())); let pruning_utxoset_stores = Arc::new(RwLock::new(PruningUtxosetStores::new(db.clone(), utxo_set_builder.build()))); + let pruning_samples_store = Arc::new(DbPruningSamplesStore::new(db.clone(), header_data_builder.build())); // Txs let block_transactions_store = Arc::new(DbBlockTransactionsStore::new(db.clone(), transactions_builder.build())); @@ -250,6 +256,7 @@ impl ConsensusStorage { past_pruning_points_store, daa_excluded_store, depth_store, + pruning_samples_store, utxo_diffs_store, utxo_multisets_store, block_window_cache_for_difficulty, diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index 87790d093f..69ec7170c2 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -1,5 +1,6 @@ use async_channel::Sender; use kaspa_consensus_core::coinbase::MinerData; +use kaspa_consensus_core::mining_rules::MiningRules; use 
kaspa_consensus_core::tx::ScriptPublicKey; use kaspa_consensus_core::{ api::ConsensusApi, block::MutableBlock, blockstatus::BlockStatus, header::Header, merkle::calc_hash_merkle_root, @@ -58,6 +59,7 @@ impl TestConsensus { counters, tx_script_cache_counters, 0, + Arc::new(MiningRules::default()), )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); @@ -78,6 +80,7 @@ impl TestConsensus { counters, tx_script_cache_counters, 0, + Arc::new(MiningRules::default()), )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); @@ -99,6 +102,7 @@ impl TestConsensus { counters, tx_script_cache_counters, 0, + Arc::new(MiningRules::default()), )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); @@ -121,7 +125,7 @@ impl TestConsensus { .consensus .services .pruning_point_manager - .expected_header_pruning_point(ghostdag_data.to_compact(), self.consensus.pruning_point_store.read().get().unwrap()); + .expected_header_pruning_point_v1(ghostdag_data.to_compact(), self.consensus.pruning_point_store.read().get().unwrap()); let daa_window = self.consensus.services.window_manager.block_daa_window(&ghostdag_data).unwrap(); header.bits = self.consensus.services.window_manager.calculate_difficulty_bits(&ghostdag_data, &daa_window); header.daa_score = daa_window.daa_score; diff --git a/consensus/src/model/stores/mod.rs b/consensus/src/model/stores/mod.rs index 9fda332960..02e2824a98 100644 --- a/consensus/src/model/stores/mod.rs +++ b/consensus/src/model/stores/mod.rs @@ -9,6 +9,7 @@ pub mod headers; pub mod headers_selected_tip; pub mod past_pruning_points; pub mod pruning; +pub mod pruning_samples; pub mod pruning_utxoset; pub mod reachability; pub mod relations; diff --git a/consensus/src/model/stores/pruning.rs b/consensus/src/model/stores/pruning.rs index 167a47001a..14636bed64 100644 --- a/consensus/src/model/stores/pruning.rs +++ b/consensus/src/model/stores/pruning.rs @@ -39,9 +39,16 @@ pub trait 
PruningStoreReader { fn get(&self) -> StoreResult; /// Represent the point after which data is fully held (i.e., history is consecutive from this point and up to virtual). - /// This is usually the pruning point, though it might lag a bit behind until data prune completes (and for archival - /// nodes it will remain the initial syncing point or the last pruning point before turning to an archive) - fn history_root(&self) -> StoreResult; + /// This is usually a pruning point that is at or below the retention period requirement (and for archival + /// nodes it will remain the initial syncing point or the last pruning point before turning to an archive). + /// At every pruning point movement, this is adjusted to the next pruning point sample that satisfies the required + /// retention period. + fn retention_period_root(&self) -> StoreResult; + + // During pruning, this is a reference to the retention root before the pruning point move. + // After pruning, this is updated to point to the retention period root. + // This checkpoint is used to determine if pruning has successfully completed. 
+ fn retention_checkpoint(&self) -> StoreResult; } pub trait PruningStore: PruningStoreReader { @@ -53,7 +60,8 @@ pub trait PruningStore: PruningStoreReader { pub struct DbPruningStore { db: Arc, access: CachedDbItem, - history_root_access: CachedDbItem, + retention_checkpoint_access: CachedDbItem, + retention_period_root_access: CachedDbItem, } impl DbPruningStore { @@ -61,7 +69,8 @@ impl DbPruningStore { Self { db: Arc::clone(&db), access: CachedDbItem::new(db.clone(), DatabaseStorePrefixes::PruningPoint.into()), - history_root_access: CachedDbItem::new(db, DatabaseStorePrefixes::HistoryRoot.into()), + retention_checkpoint_access: CachedDbItem::new(db.clone(), DatabaseStorePrefixes::RetentionCheckpoint.into()), + retention_period_root_access: CachedDbItem::new(db, DatabaseStorePrefixes::RetentionPeriodRoot.into()), } } @@ -73,8 +82,12 @@ impl DbPruningStore { self.access.write(BatchDbWriter::new(batch), &PruningPointInfo { pruning_point, candidate, index }) } - pub fn set_history_root(&mut self, batch: &mut WriteBatch, history_root: Hash) -> StoreResult<()> { - self.history_root_access.write(BatchDbWriter::new(batch), &history_root) + pub fn set_retention_checkpoint(&mut self, batch: &mut WriteBatch, retention_checkpoint: Hash) -> StoreResult<()> { + self.retention_checkpoint_access.write(BatchDbWriter::new(batch), &retention_checkpoint) + } + + pub fn set_retention_period_root(&mut self, batch: &mut WriteBatch, retention_period_root: Hash) -> StoreResult<()> { + self.retention_period_root_access.write(BatchDbWriter::new(batch), &retention_period_root) } } @@ -95,8 +108,12 @@ impl PruningStoreReader for DbPruningStore { self.access.read() } - fn history_root(&self) -> StoreResult { - self.history_root_access.read() + fn retention_checkpoint(&self) -> StoreResult { + self.retention_checkpoint_access.read() + } + + fn retention_period_root(&self) -> StoreResult { + self.retention_period_root_access.read() } } diff --git 
a/consensus/src/model/stores/pruning_samples.rs b/consensus/src/model/stores/pruning_samples.rs new file mode 100644 index 0000000000..b89acfb6a0 --- /dev/null +++ b/consensus/src/model/stores/pruning_samples.rs @@ -0,0 +1,69 @@ +use std::sync::Arc; + +use kaspa_consensus_core::BlockHasher; +use kaspa_database::prelude::CachePolicy; +use kaspa_database::prelude::StoreError; +use kaspa_database::prelude::DB; +use kaspa_database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}; +use kaspa_database::registry::DatabaseStorePrefixes; +use kaspa_hashes::Hash; +use rocksdb::WriteBatch; + +pub trait PruningSamplesStoreReader { + fn pruning_sample_from_pov(&self, hash: Hash) -> Result; +} + +pub trait PruningSamplesStore: PruningSamplesStoreReader { + // This is append only + fn insert(&self, hash: Hash, pruning_sample_from_pov: Hash) -> Result<(), StoreError>; + fn delete(&self, hash: Hash) -> Result<(), StoreError>; +} + +/// A DB + cache implementation of `PruningSamplesStore` trait, with concurrency support. +#[derive(Clone)] +pub struct DbPruningSamplesStore { + db: Arc, + access: CachedDbAccess, +} + +impl DbPruningSamplesStore { + pub fn new(db: Arc, cache_policy: CachePolicy) -> Self { + Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_policy, DatabaseStorePrefixes::PruningSamples.into()) } + } + + pub fn clone_with_new_cache(&self, cache_policy: CachePolicy) -> Self { + Self::new(Arc::clone(&self.db), cache_policy) + } + + pub fn insert_batch(&self, batch: &mut WriteBatch, hash: Hash, pruning_sample_from_pov: Hash) -> Result<(), StoreError> { + if self.access.has(hash)? 
{ + return Err(StoreError::HashAlreadyExists(hash)); + } + self.access.write(BatchDbWriter::new(batch), hash, pruning_sample_from_pov)?; + Ok(()) + } + + pub fn delete_batch(&self, batch: &mut WriteBatch, hash: Hash) -> Result<(), StoreError> { + self.access.delete(BatchDbWriter::new(batch), hash) + } +} + +impl PruningSamplesStoreReader for DbPruningSamplesStore { + fn pruning_sample_from_pov(&self, hash: Hash) -> Result { + self.access.read(hash) + } +} + +impl PruningSamplesStore for DbPruningSamplesStore { + fn insert(&self, hash: Hash, pruning_sample_from_pov: Hash) -> Result<(), StoreError> { + if self.access.has(hash)? { + return Err(StoreError::HashAlreadyExists(hash)); + } + self.access.write(DirectDbWriter::new(&self.db), hash, pruning_sample_from_pov)?; + Ok(()) + } + + fn delete(&self, hash: Hash) -> Result<(), StoreError> { + self.access.delete(DirectDbWriter::new(&self.db), hash) + } +} diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 08eb49f63b..44ed9f453d 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -1,7 +1,7 @@ use super::BlockBodyProcessor; use crate::{ errors::{BlockProcessResult, RuleError}, - model::stores::statuses::StatusesStoreReader, + model::stores::{ghostdag::GhostdagStoreReader, headers::HeaderStoreReader, statuses::StatusesStoreReader}, processes::{ transaction_validator::{ tx_validation_in_header_context::{LockTimeArg, LockTimeType}, @@ -10,7 +10,7 @@ use crate::{ window::WindowManager, }, }; -use kaspa_consensus_core::block::Block; +use kaspa_consensus_core::{block::Block, errors::tx::TxRuleError}; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use once_cell::unsync::Lazy; @@ -19,6 +19,7 @@ use std::sync::Arc; impl BlockBodyProcessor { pub fn validate_body_in_context(self: &Arc, block: 
&Block) -> BlockProcessResult<()> { self.check_parent_bodies_exist(block)?; + self.check_coinbase_outputs_limit(block)?; self.check_coinbase_blue_score_and_subsidy(block)?; self.check_block_transactions_in_context(block) } @@ -60,6 +61,32 @@ impl BlockBodyProcessor { Ok(()) } + fn check_coinbase_outputs_limit(&self, block: &Block) -> BlockProcessResult<()> { + // [Crescendo]: coinbase_outputs_limit depends on ghostdag k and thus depends on fork activation + // which makes it header contextual. + // + // TODO (post HF): move this check back to transaction in isolation validation + + // [Crescendo]: Ghostdag k activation is decided based on selected parent DAA score + // so we follow the same methodology for coinbase output limit (which is driven from the + // actual bound on the number of blue blocks in the mergeset). + // + // Note that body validation in context is not called for trusted blocks, so we can safely assume + // the selected parent exists and its daa score is accessible + let selected_parent = self.ghostdag_store.get_selected_parent(block.hash()).unwrap(); + let selected_parent_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + let coinbase_outputs_limit = self.ghostdag_k.get(selected_parent_daa_score) as u64 + 2; + + let tx = &block.transactions[0]; + if tx.outputs.len() as u64 > coinbase_outputs_limit { + return Err(RuleError::TxInIsolationValidationFailed( + tx.id(), + TxRuleError::CoinbaseTooManyOutputs(tx.outputs.len(), coinbase_outputs_limit), + )); + } + Ok(()) + } + fn check_coinbase_blue_score_and_subsidy(self: &Arc, block: &Block) -> BlockProcessResult<()> { match self.coinbase_manager.deserialize_coinbase_payload(&block.transactions[0].payload) { Ok(data) => { diff --git a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs index 4c6139846b..2afd80421b 100644 --- 
a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs @@ -2,17 +2,23 @@ use std::{collections::HashSet, sync::Arc}; use super::BlockBodyProcessor; use crate::errors::{BlockProcessResult, RuleError}; -use kaspa_consensus_core::{block::Block, merkle::calc_hash_merkle_root, tx::TransactionOutpoint}; +use kaspa_consensus_core::{ + block::Block, + mass::{ContextualMasses, Mass, NonContextualMasses}, + merkle::calc_hash_merkle_root, + tx::TransactionOutpoint, +}; impl BlockBodyProcessor { - pub fn validate_body_in_isolation(self: &Arc, block: &Block) -> BlockProcessResult { - let storage_mass_activated = self.storage_mass_activation.is_active(block.header.daa_score); + pub fn validate_body_in_isolation(self: &Arc, block: &Block) -> BlockProcessResult { + let crescendo_activated = self.crescendo_activation.is_active(block.header.daa_score); Self::check_has_transactions(block)?; - Self::check_hash_merkle_root(block, storage_mass_activated)?; + Self::check_hash_merkle_root(block, crescendo_activated)?; Self::check_only_one_coinbase(block)?; self.check_transactions_in_isolation(block)?; - let mass = self.check_block_mass(block, storage_mass_activated)?; + self.check_coinbase_has_zero_mass(block, crescendo_activated)?; + let mass = self.check_block_mass(block, crescendo_activated)?; self.check_duplicate_transactions(block)?; self.check_block_double_spends(block)?; self.check_no_chained_transactions(block)?; @@ -28,8 +34,8 @@ impl BlockBodyProcessor { Ok(()) } - fn check_hash_merkle_root(block: &Block, storage_mass_activated: bool) -> BlockProcessResult<()> { - let calculated = calc_hash_merkle_root(block.transactions.iter(), storage_mass_activated); + fn check_hash_merkle_root(block: &Block, crescendo_activated: bool) -> BlockProcessResult<()> { + let calculated = calc_hash_merkle_root(block.transactions.iter(), crescendo_activated); if calculated != block.header.hash_merkle_root { 
return Err(RuleError::BadMerkleRoot(block.header.hash_merkle_root, calculated)); } @@ -57,34 +63,56 @@ impl BlockBodyProcessor { Ok(()) } - fn check_block_mass(self: &Arc, block: &Block, storage_mass_activated: bool) -> BlockProcessResult { - let mut total_mass: u64 = 0; - if storage_mass_activated { + fn check_coinbase_has_zero_mass(&self, block: &Block, crescendo_activated: bool) -> BlockProcessResult<()> { + // TODO (post HF): move to check_coinbase_in_isolation + if crescendo_activated && block.transactions[0].mass() > 0 { + return Err(RuleError::CoinbaseNonZeroMassCommitment); + } + Ok(()) + } + + fn check_block_mass(self: &Arc, block: &Block, crescendo_activated: bool) -> BlockProcessResult { + if crescendo_activated { + let mut total_compute_mass: u64 = 0; + let mut total_transient_mass: u64 = 0; + let mut total_storage_mass: u64 = 0; for tx in block.transactions.iter() { - // This is only the compute part of the mass, the storage part cannot be computed here - let calculated_tx_compute_mass = self.mass_calculator.calc_tx_compute_mass(tx); - let committed_contextual_mass = tx.mass(); - // We only check the lower-bound here, a precise check of the mass commitment - // is done when validating the tx in context - if committed_contextual_mass < calculated_tx_compute_mass { - return Err(RuleError::MassFieldTooLow(tx.id(), committed_contextual_mass, calculated_tx_compute_mass)); + // Calculate the non-contextual masses + let NonContextualMasses { compute_mass, transient_mass } = self.mass_calculator.calc_non_contextual_masses(tx); + + // Read the storage mass commitment. This value cannot be computed here w/o UTXO context + // so we use the commitment. 
Later on, when the transaction is verified in context, we use + // the context to calculate the expected storage mass and verify it matches this commitment + let storage_mass_commitment = tx.mass(); + + // Sum over the various masses separately + total_compute_mass = total_compute_mass.saturating_add(compute_mass); + total_transient_mass = total_transient_mass.saturating_add(transient_mass); + total_storage_mass = total_storage_mass.saturating_add(storage_mass_commitment); + + // Verify all limits + if total_compute_mass > self.max_block_mass { + return Err(RuleError::ExceedsComputeMassLimit(total_compute_mass, self.max_block_mass)); } - // Sum over the committed masses - total_mass = total_mass.saturating_add(committed_contextual_mass); - if total_mass > self.max_block_mass { - return Err(RuleError::ExceedsMassLimit(self.max_block_mass)); + if total_transient_mass > self.max_block_mass { + return Err(RuleError::ExceedsTransientMassLimit(total_transient_mass, self.max_block_mass)); + } + if total_storage_mass > self.max_block_mass { + return Err(RuleError::ExceedsStorageMassLimit(total_storage_mass, self.max_block_mass)); } } + Ok((NonContextualMasses::new(total_compute_mass, total_transient_mass), ContextualMasses::new(total_storage_mass))) } else { + let mut total_mass: u64 = 0; for tx in block.transactions.iter() { - let calculated_tx_mass = self.mass_calculator.calc_tx_compute_mass(tx); - total_mass = total_mass.saturating_add(calculated_tx_mass); + let compute_mass = self.mass_calculator.calc_non_contextual_masses(tx).compute_mass; + total_mass = total_mass.saturating_add(compute_mass); if total_mass > self.max_block_mass { - return Err(RuleError::ExceedsMassLimit(self.max_block_mass)); + return Err(RuleError::ExceedsComputeMassLimit(total_mass, self.max_block_mass)); } } + Ok((NonContextualMasses::new(total_mass, 0), ContextualMasses::new(0))) } - Ok(total_mass) } fn check_block_double_spends(self: &Arc, block: &Block) -> BlockProcessResult<()> { @@ -415,7 
+443,7 @@ mod tests { txs[1].inputs[0].sig_op_count = 255; txs[1].inputs[1].sig_op_count = 255; block.header.hash_merkle_root = calc_hash_merkle_root(txs.iter()); - assert_match!(body_processor.validate_body_in_isolation(&block.to_immutable()), Err(RuleError::ExceedsMassLimit(_))); + assert_match!(body_processor.validate_body_in_isolation(&block.to_immutable()), Err(RuleError::ExceedsComputeMassLimit(_, _))); let mut block = example_block.clone(); let txs = &mut block.transactions; diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 7bad12ce3f..4229ea263c 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -28,10 +28,11 @@ use kaspa_consensus_core::{ blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, config::{ genesis::GenesisBlock, - params::{ForkActivation, Params}, + params::{ForkActivation, ForkedParam, Params}, }, - mass::MassCalculator, + mass::{Mass, MassCalculator, MassOps}, tx::Transaction, + KType, }; use kaspa_consensus_notify::{ notification::{BlockAddedNotification, Notification}, @@ -59,6 +60,7 @@ pub struct BlockBodyProcessor { // Config pub(super) max_block_mass: u64, pub(super) genesis: GenesisBlock, + pub(super) ghostdag_k: ForkedParam, // Stores pub(super) statuses_store: Arc>, @@ -87,7 +89,7 @@ pub struct BlockBodyProcessor { counters: Arc, /// Storage mass hardfork DAA score - pub(crate) storage_mass_activation: ForkActivation, + pub(crate) crescendo_activation: ForkActivation, } impl BlockBodyProcessor { @@ -113,6 +115,7 @@ impl BlockBodyProcessor { max_block_mass: params.max_block_mass, genesis: params.genesis.clone(), + ghostdag_k: params.ghostdag_k(), statuses_store: storage.statuses_store.clone(), ghostdag_store: storage.ghostdag_store.clone(), @@ -130,7 +133,7 @@ impl BlockBodyProcessor { task_manager: BlockTaskDependencyManager::new(), notification_root, counters, - 
storage_mass_activation: params.storage_mass_activation, + crescendo_activation: params.crescendo_activation, } } @@ -217,11 +220,11 @@ impl BlockBodyProcessor { // Report counters self.counters.body_counts.fetch_add(1, Ordering::Relaxed); self.counters.txs_counts.fetch_add(block.transactions.len() as u64, Ordering::Relaxed); - self.counters.mass_counts.fetch_add(mass, Ordering::Relaxed); + self.counters.mass_counts.fetch_add(mass.max(), Ordering::Relaxed); Ok(BlockStatus::StatusUTXOPendingVerification) } - fn validate_body(self: &Arc, block: &Block, is_trusted: bool) -> BlockProcessResult { + fn validate_body(self: &Arc, block: &Block, is_trusted: bool) -> BlockProcessResult { let mass = self.validate_body_in_isolation(block)?; if !is_trusted { self.validate_body_in_context(block)?; diff --git a/consensus/src/pipeline/header_processor/post_pow_validation.rs b/consensus/src/pipeline/header_processor/post_pow_validation.rs index 6b12b4729c..c3feb9aa92 100644 --- a/consensus/src/pipeline/header_processor/post_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/post_pow_validation.rs @@ -11,7 +11,7 @@ impl HeaderProcessor { self.check_blue_score(ctx, header)?; self.check_blue_work(ctx, header)?; self.check_median_timestamp(ctx, header)?; - self.check_merge_size_limit(ctx)?; + self.check_mergeset_size_limit(ctx)?; self.check_bounded_merge_depth(ctx)?; self.check_pruning_point(ctx, header)?; self.check_indirect_parents(ctx, header) @@ -28,10 +28,11 @@ impl HeaderProcessor { Ok(()) } - pub fn check_merge_size_limit(&self, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { + pub fn check_mergeset_size_limit(&self, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { let mergeset_size = ctx.ghostdag_data().mergeset_size() as u64; - if mergeset_size > self.mergeset_size_limit { - return Err(RuleError::MergeSetTooBig(mergeset_size, self.mergeset_size_limit)); + let mergeset_size_limit = 
self.mergeset_size_limit.get(ctx.selected_parent_daa_score()); + if mergeset_size > mergeset_size_limit { + return Err(RuleError::MergeSetTooBig(mergeset_size, mergeset_size_limit)); } Ok(()) } @@ -54,15 +55,23 @@ impl HeaderProcessor { pub fn check_indirect_parents(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { let expected_block_parents = self.parents_manager.calc_block_parents(ctx.pruning_point(), header.direct_parents()); + let crescendo_activated = self.crescendo_activation.is_active(ctx.selected_parent_daa_score()); if header.parents_by_level.len() != expected_block_parents.len() || !expected_block_parents.iter().enumerate().all(|(block_level, expected_level_parents)| { let header_level_parents = &header.parents_by_level[block_level]; if header_level_parents.len() != expected_level_parents.len() { return false; } - - let expected_set = HashSet::<&Hash>::from_iter(expected_level_parents); - header_level_parents.iter().all(|header_parent| expected_set.contains(header_parent)) + // Optimistic path where both arrays are identical also in terms of order + if header_level_parents == expected_level_parents { + return true; + } + if crescendo_activated { + HashSet::<&Hash>::from_iter(header_level_parents) == HashSet::<&Hash>::from_iter(expected_level_parents) + } else { + let expected_set = HashSet::<&Hash>::from_iter(expected_level_parents); + header_level_parents.iter().all(|header_parent| expected_set.contains(header_parent)) + } }) { return Err(RuleError::UnexpectedIndirectParents( @@ -74,9 +83,13 @@ impl HeaderProcessor { } pub fn check_pruning_point(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { - let expected = self.pruning_point_manager.expected_header_pruning_point(ctx.ghostdag_data().to_compact(), ctx.pruning_info); - if expected != header.pruning_point { - return Err(RuleError::WrongHeaderPruningPoint(expected, header.pruning_point)); + // [Crescendo]: changing expected pruning 
point check from header validity to chain qualification + if !self.crescendo_activation.is_active(ctx.selected_parent_daa_score()) { + let expected = + self.pruning_point_manager.expected_header_pruning_point_v1(ctx.ghostdag_data().to_compact(), ctx.pruning_info); + if expected != header.pruning_point { + return Err(RuleError::WrongHeaderPruningPoint(expected, header.pruning_point)); + } } Ok(()) } diff --git a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs index cce6411054..e153c18274 100644 --- a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs @@ -17,7 +17,7 @@ impl HeaderProcessor { pub(super) fn validate_header_in_isolation(&self, header: &Header) -> BlockProcessResult { self.check_header_version(header)?; self.check_block_timestamp_in_isolation(header)?; - self.check_parents_limit(header)?; + self.check_parents_limit_upper_bound(header)?; Self::check_parents_not_origin(header)?; self.check_pow_and_calc_block_level(header) } @@ -44,13 +44,16 @@ impl HeaderProcessor { Ok(()) } - fn check_parents_limit(&self, header: &Header) -> BlockProcessResult<()> { + fn check_parents_limit_upper_bound(&self, header: &Header) -> BlockProcessResult<()> { if header.direct_parents().is_empty() { return Err(RuleError::NoParents); } - if header.direct_parents().len() > self.max_block_parents as usize { - return Err(RuleError::TooManyParents(header.direct_parents().len(), self.max_block_parents as usize)); + // [Crescendo]: moved the tight parents limit check to pre_pow_validation since it requires selected parent DAA score info + // which is available only post ghostdag. We keep this upper bound check here since this method is applied to trusted blocks + // as well. 
+ if header.direct_parents().len() > self.max_block_parents.upper_bound() as usize { + return Err(RuleError::TooManyParents(header.direct_parents().len(), self.max_block_parents.upper_bound() as usize)); } Ok(()) diff --git a/consensus/src/pipeline/header_processor/pre_pow_validation.rs b/consensus/src/pipeline/header_processor/pre_pow_validation.rs index 7764e1c150..e896e4f5a3 100644 --- a/consensus/src/pipeline/header_processor/pre_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_pow_validation.rs @@ -6,11 +6,26 @@ use kaspa_consensus_core::header::Header; impl HeaderProcessor { pub(super) fn pre_pow_validation(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { + self.check_parents_limit(ctx, header)?; self.check_pruning_violation(ctx)?; self.check_difficulty_and_daa_score(ctx, header)?; Ok(()) } + // TODO (post HF): move back to pre_ghostdag_validation (substitute for check_parents_limit_upper_bound) + fn check_parents_limit(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { + if header.direct_parents().is_empty() { + return Err(RuleError::NoParents); + } + + let max_block_parents = self.max_block_parents.get(ctx.selected_parent_daa_score()) as usize; + if header.direct_parents().len() > max_block_parents { + return Err(RuleError::TooManyParents(header.direct_parents().len(), max_block_parents)); + } + + Ok(()) + } + fn check_pruning_violation(&self, ctx: &HeaderProcessingContext) -> BlockProcessResult<()> { let known_parents = ctx.direct_known_parents(); diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index f467b6d975..8166754dbd 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -14,7 +14,7 @@ use crate::{ daa::DbDaaStore, depth::DbDepthStore, ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, - 
headers::DbHeadersStore, + headers::{DbHeadersStore, HeaderStoreReader}, headers_selected_tip::{DbHeadersSelectedTipStore, HeadersSelectedTipStoreReader}, pruning::{DbPruningStore, PruningPointInfo, PruningStoreReader}, reachability::{DbReachabilityStore, StagingReachabilityStore}, @@ -32,7 +32,10 @@ use itertools::Itertools; use kaspa_consensus_core::{ blockhash::{BlockHashes, ORIGIN}, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - config::genesis::GenesisBlock, + config::{ + genesis::GenesisBlock, + params::{ForkActivation, ForkedParam}, + }, header::Header, BlockHashSet, BlockLevel, }; @@ -56,6 +59,7 @@ pub struct HeaderProcessingContext { // Staging data pub ghostdag_data: Option>, + pub selected_parent_daa_score: Option, // [Crescendo] pub block_window_for_difficulty: Option>, pub block_window_for_past_median_time: Option>, pub mergeset_non_daa: Option, @@ -78,6 +82,7 @@ impl HeaderProcessingContext { pruning_info, known_parents, ghostdag_data: None, + selected_parent_daa_score: None, block_window_for_difficulty: None, mergeset_non_daa: None, block_window_for_past_median_time: None, @@ -101,6 +106,10 @@ impl HeaderProcessingContext { pub fn ghostdag_data(&self) -> &Arc { self.ghostdag_data.as_ref().unwrap() } + + pub fn selected_parent_daa_score(&self) -> u64 { + self.selected_parent_daa_score.unwrap() + } } pub struct HeaderProcessor { @@ -114,11 +123,11 @@ pub struct HeaderProcessor { // Config pub(super) genesis: GenesisBlock, pub(super) timestamp_deviation_tolerance: u64, - pub(super) target_time_per_block: u64, - pub(super) max_block_parents: u8, - pub(super) mergeset_size_limit: u64, + pub(super) max_block_parents: ForkedParam, + pub(super) mergeset_size_limit: ForkedParam, pub(super) skip_proof_of_work: bool, pub(super) max_block_level: BlockLevel, + pub(super) crescendo_activation: ForkActivation, // DB db: Arc, @@ -199,13 +208,13 @@ impl HeaderProcessor { task_manager: BlockTaskDependencyManager::new(), pruning_lock, counters, - 
// TODO (HF): make sure to also pass `new_timestamp_deviation_tolerance` and use according to HF activation score - timestamp_deviation_tolerance: params.timestamp_deviation_tolerance(0), - target_time_per_block: params.target_time_per_block, - max_block_parents: params.max_block_parents, - mergeset_size_limit: params.mergeset_size_limit, + + timestamp_deviation_tolerance: params.timestamp_deviation_tolerance, + max_block_parents: params.max_block_parents(), + mergeset_size_limit: params.mergeset_size_limit(), skip_proof_of_work: params.skip_proof_of_work, max_block_level: params.max_block_level, + crescendo_activation: params.crescendo_activation, } } @@ -298,6 +307,8 @@ impl HeaderProcessor { self.validate_parent_relations(header)?; let mut ctx = self.build_processing_context(header, block_level); self.ghostdag(&mut ctx); + // [Crescendo]: persist the selected parent DAA score to be used for activation checks + ctx.selected_parent_daa_score = Some(self.headers_store.get_daa_score(ctx.ghostdag_data().selected_parent).unwrap()); self.pre_pow_validation(&mut ctx, header)?; if let Err(e) = self.post_pow_validation(&mut ctx, header) { self.statuses_store.write().set(ctx.hash, StatusInvalid).unwrap(); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 2de19c265d..923df58c5e 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -12,6 +12,7 @@ use crate::{ headers::HeaderStoreReader, past_pruning_points::PastPruningPointsStoreReader, pruning::{PruningStore, PruningStoreReader}, + pruning_samples::PruningSamplesStoreReader, reachability::{DbReachabilityStore, ReachabilityStoreReader, StagingReachabilityStore}, relations::StagingRelationsStore, selected_chain::SelectedChainStore, @@ -34,7 +35,7 @@ use kaspa_consensus_core::{ BlockHashMap, BlockHashSet, BlockLevel, }; use kaspa_consensusmanager::SessionLock; -use 
kaspa_core::{debug, info, warn}; +use kaspa_core::{debug, info, time::unix_now, trace, warn}; use kaspa_database::prelude::{BatchDbWriter, MemoryWriter, StoreResultExtensions, DB}; use kaspa_hashes::Hash; use kaspa_muhash::MuHash; @@ -133,48 +134,51 @@ impl PruningProcessor { fn recover_pruning_workflows_if_needed(&self) { let pruning_point_read = self.pruning_point_store.read(); let pruning_point = pruning_point_read.pruning_point().unwrap(); - let history_root = pruning_point_read.history_root().unwrap_option(); - let pruning_utxoset_position = self.pruning_utxoset_stores.read().utxoset_position().unwrap_option(); + let retention_checkpoint = pruning_point_read.retention_checkpoint().unwrap(); + let retention_period_root = pruning_point_read.retention_period_root().unwrap(); + let pruning_utxoset_position = self.pruning_utxoset_stores.read().utxoset_position().unwrap(); drop(pruning_point_read); debug!( - "[PRUNING PROCESSOR] recovery check: current pruning point: {}, history root: {:?}, pruning utxoset position: {:?}", - pruning_point, history_root, pruning_utxoset_position + "[PRUNING PROCESSOR] recovery check: current pruning point: {}, retention checkpoint: {:?}, pruning utxoset position: {:?}", + pruning_point, retention_checkpoint, pruning_utxoset_position ); - if let Some(pruning_utxoset_position) = pruning_utxoset_position { - // This indicates the node crashed during a former pruning point move and we need to recover - if pruning_utxoset_position != pruning_point { - info!("Recovering pruning utxo-set from {} to the pruning point {}", pruning_utxoset_position, pruning_point); - if !self.advance_pruning_utxoset(pruning_utxoset_position, pruning_point) { - info!("Interrupted while advancing the pruning point UTXO set: Process is exiting"); - return; - } + // This indicates the node crashed during a former pruning point move and we need to recover + if pruning_utxoset_position != pruning_point { + info!("Recovering pruning utxo-set from {} to the pruning 
point {}", pruning_utxoset_position, pruning_point); + if !self.advance_pruning_utxoset(pruning_utxoset_position, pruning_point) { + info!("Interrupted while advancing the pruning point UTXO set: Process is exiting"); + return; } } - if let Some(history_root) = history_root { - // This indicates the node crashed or was forced to stop during a former data prune operation hence - // we need to complete it - if history_root != pruning_point { - self.prune(pruning_point); - } - } + trace!( + "retention_checkpoint: {:?} | retention_period_root: {} | pruning_point: {}", + retention_checkpoint, + retention_period_root, + pruning_point + ); - // TODO: both `pruning_utxoset_position` and `history_root` are new DB keys so for now we assume correct state if the keys are missing + // This indicates the node crashed or was forced to stop during a former data prune operation hence + // we need to complete it + if retention_checkpoint != retention_period_root { + self.prune(pruning_point, retention_period_root); + } } fn advance_pruning_point_and_candidate_if_possible(&self, sink_ghostdag_data: CompactGhostdagData) { let pruning_point_read = self.pruning_point_store.upgradable_read(); let current_pruning_info = pruning_point_read.get().unwrap(); - let (new_pruning_points, new_candidate) = self.pruning_point_manager.next_pruning_points_and_candidate_by_ghostdag_data( + let (new_pruning_points, new_candidate) = self.pruning_point_manager.next_pruning_points( sink_ghostdag_data, - None, current_pruning_info.candidate, current_pruning_info.pruning_point, ); - if !new_pruning_points.is_empty() { + if let Some(new_pruning_point) = new_pruning_points.last().copied() { + let retention_period_root = pruning_point_read.retention_period_root().unwrap(); + // Update past pruning points and pruning point stores let mut batch = WriteBatch::default(); let mut pruning_point_write = RwLockUpgradableReadGuard::upgrade(pruning_point_read); @@ -182,11 +186,22 @@ impl PruningProcessor { 
self.past_pruning_points_store.insert_batch(&mut batch, current_pruning_info.index + i as u64 + 1, past_pp).unwrap(); } let new_pp_index = current_pruning_info.index + new_pruning_points.len() as u64; - let new_pruning_point = *new_pruning_points.last().unwrap(); pruning_point_write.set_batch(&mut batch, new_pruning_point, new_candidate, new_pp_index).unwrap(); + + // For archival nodes, keep the retention root in place + let adjusted_retention_period_root = if self.config.is_archival { + retention_period_root + } else { + let adjusted_retention_period_root = self.advance_retention_period_root(retention_period_root, new_pruning_point); + pruning_point_write.set_retention_period_root(&mut batch, adjusted_retention_period_root).unwrap(); + adjusted_retention_period_root + }; + self.db.write(batch).unwrap(); drop(pruning_point_write); + trace!("New Pruning Point: {} | New Retention Period Root: {}", new_pruning_point, adjusted_retention_period_root); + // Inform the user info!("Periodic pruning point movement: advancing from {} to {}", current_pruning_info.pruning_point, new_pruning_point); @@ -198,7 +213,7 @@ impl PruningProcessor { info!("Updated the pruning point UTXO set"); // Finally, prune data in the new pruning point past - self.prune(new_pruning_point); + self.prune(new_pruning_point, adjusted_retention_period_root); } else if new_candidate != current_pruning_info.candidate { let mut pruning_point_write = RwLockUpgradableReadGuard::upgrade(pruning_point_read); pruning_point_write.set(current_pruning_info.pruning_point, new_candidate, current_pruning_info.index).unwrap(); @@ -238,7 +253,7 @@ impl PruningProcessor { info!("Pruning point UTXO commitment was verified correctly (sanity test)"); } - fn prune(&self, new_pruning_point: Hash) { + fn prune(&self, new_pruning_point: Hash, retention_period_root: Hash) { if self.config.is_archival { warn!("The node is configured as an archival node -- avoiding data pruning. 
Note this might lead to heavy disk usage."); return; @@ -384,7 +399,7 @@ impl PruningProcessor { let (mut counter, mut traversed) = (0, 0); info!("Header and Block pruning: starting traversal from: {} (genesis: {})", queue.iter().reusable_format(", "), genesis); while let Some(current) = queue.pop_front() { - if reachability_read.is_dag_ancestor_of_result(new_pruning_point, current).unwrap() { + if reachability_read.is_dag_ancestor_of_result(retention_period_root, current).unwrap() { continue; } traversed += 1; @@ -477,6 +492,11 @@ impl PruningProcessor { if !keep_headers.contains(¤t) { // Prune the actual headers self.headers_store.delete_batch(&mut batch, current).unwrap(); + + // We want to keep the pruning sample from POV for past pruning points + // so that pruning point queries keep working for blocks right after the current + // pruning point (keep_headers contains the past pruning points) + self.pruning_samples_store.delete_batch(&mut batch, current).unwrap(); } } @@ -514,15 +534,65 @@ impl PruningProcessor { } { - // Set the history root to the new pruning point only after we successfully pruned its past + // Set the retention checkpoint to the new retention root only after we successfully pruned its past let mut pruning_point_write = self.pruning_point_store.write(); let mut batch = WriteBatch::default(); - pruning_point_write.set_history_root(&mut batch, new_pruning_point).unwrap(); + pruning_point_write.set_retention_checkpoint(&mut batch, retention_period_root).unwrap(); self.db.write(batch).unwrap(); drop(pruning_point_write); } } + /// Adjusts the retention period root to latest pruning point sample that covers the retention period. + /// This is the pruning point sample B such that B.timestamp <= retention_period_days_ago. This may return the old hash if + /// the retention period cannot be covered yet with the node's current history. 
+ /// + /// This function is expected to be called only when a new pruning point is determined and right before + /// doing any pruning. Pruning point must be the new pruning point this node is advancing to. + /// + /// The returned retention_period_root is guaranteed to be in past(pruning_point) or the pruning point itself. + fn advance_retention_period_root(&self, retention_period_root: Hash, pruning_point: Hash) -> Hash { + match self.config.retention_period_days { + // If the retention period wasn't set, immediately default to the pruning point. + None => pruning_point, + Some(retention_period_days) => { + // The retention period in milliseconds we need to cover + // Note: If retention period is set to an amount lower than what the new pruning point would cover + // this function will simply return the new pruning point. The new pruning point passed as an argument + // to this function serves as a clamp. + let retention_period_ms = (retention_period_days * 86400.0 * 1000.0).ceil() as u64; + + // The target timestamp we would like to find a point below + let retention_period_root_ts_target = unix_now().saturating_sub(retention_period_ms); + + // Iterate from the new pruning point to the prev retention root and search for the first point with enough days above it. + // Note that prev retention root is always a past pruning point, so we can iterate via pruning samples until we reach it. + let mut new_retention_period_root = pruning_point; + + trace!( + "Adjusting the retention period root to cover the required retention period. 
Target timestamp: {}", + retention_period_root_ts_target, + ); + + while new_retention_period_root != retention_period_root { + let block = new_retention_period_root; + + let timestamp = self.headers_store.get_timestamp(block).unwrap(); + trace!("block | timestamp = {} | {}", block, timestamp); + if timestamp <= retention_period_root_ts_target { + trace!("block {} timestamp {} >= {}", block, timestamp, retention_period_root_ts_target); + // We are now at a pruning point that is at or below our retention period target + break; + } + + new_retention_period_root = self.pruning_samples_store.pruning_sample_from_pov(block).unwrap(); + } + + new_retention_period_root + } + } + } + fn past_pruning_points(&self) -> BlockHashSet { (0..self.pruning_point_store.read().get().unwrap().index) .map(|index| self.past_pruning_points_store.get(index).unwrap()) diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 21c347c4df..6367bfba0a 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -23,6 +23,7 @@ use crate::{ headers::{DbHeadersStore, HeaderStoreReader}, past_pruning_points::DbPastPruningPointsStore, pruning::{DbPruningStore, PruningStoreReader}, + pruning_samples::DbPruningSamplesStore, pruning_utxoset::PruningUtxosetStores, reachability::DbReachabilityStore, relations::{DbRelationsStore, RelationsStoreReader}, @@ -53,9 +54,13 @@ use kaspa_consensus_core::{ block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockstatus::BlockStatus::{StatusDisqualifiedFromChain, StatusUTXOValid}, coinbase::MinerData, - config::{genesis::GenesisBlock, params::ForkActivation}, + config::{ + genesis::GenesisBlock, + params::{ForkActivation, ForkedParam}, + }, header::Header, merkle::calc_hash_merkle_root, + mining_rules::MiningRules, pruning::PruningPointsList, tx::{MutableTransaction, Transaction}, utxo::{ @@ 
-72,14 +77,21 @@ use kaspa_consensus_notify::{ root::ConsensusNotificationRoot, }; use kaspa_consensusmanager::SessionLock; -use kaspa_core::{debug, info, time::unix_now, trace, warn}; +use kaspa_core::{ + debug, info, + time::{unix_now, Stopwatch}, + trace, warn, +}; use kaspa_database::prelude::{StoreError, StoreResultEmptyTuple, StoreResultExtensions}; -use kaspa_hashes::Hash; +use kaspa_hashes::{Hash, ZERO_HASH}; use kaspa_muhash::MuHash; use kaspa_notify::{events::EventType, notifier::Notify}; use once_cell::unsync::Lazy; -use super::errors::{PruningImportError, PruningImportResult}; +use super::{ + errors::{PruningImportError, PruningImportResult}, + utxo_validation::crescendo::CrescendoLogger, +}; use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use itertools::Itertools; use kaspa_consensus_core::tx::ValidatedTransaction; @@ -98,6 +110,10 @@ use std::{ sync::{atomic::Ordering, Arc}, }; +// 100ms - since at 10BPS, average block time is 100ms and so must expect +// the block template to build faster than that +pub const BUILD_BLOCK_TEMPLATE_SPEED_THRESHOLD: u128 = 100; + pub struct VirtualStateProcessor { // Channels receiver: CrossbeamReceiver, @@ -112,9 +128,8 @@ pub struct VirtualStateProcessor { // Config pub(super) genesis: GenesisBlock, - pub(super) max_block_parents: u8, - pub(super) mergeset_size_limit: u64, - pub(super) pruning_depth: u64, + pub(super) max_block_parents: ForkedParam, + pub(super) mergeset_size_limit: ForkedParam, // Stores pub(super) statuses_store: Arc>, @@ -127,6 +142,7 @@ pub struct VirtualStateProcessor { pub(super) body_tips_store: Arc>, pub(super) depth_store: Arc, pub(super) selected_chain_store: Arc>, + pub(super) pruning_samples_store: Arc, // Utxo-related stores pub(super) utxo_diffs_store: Arc, @@ -164,9 +180,13 @@ pub struct VirtualStateProcessor { // Counters counters: Arc, - // Storage mass hardfork DAA score - pub(crate) storage_mass_activation: ForkActivation, - pub(crate) 
kip10_activation: ForkActivation, + pub(super) crescendo_logger: CrescendoLogger, + + // Crescendo hardfork activation score (used here for activating KIPs 9,10) + pub(crate) crescendo_activation: ForkActivation, + + // Mining Rule + mining_rules: Arc, } impl VirtualStateProcessor { @@ -183,6 +203,7 @@ impl VirtualStateProcessor { pruning_lock: SessionLock, notification_root: Arc, counters: Arc, + mining_rules: Arc, ) -> Self { Self { receiver, @@ -191,9 +212,8 @@ impl VirtualStateProcessor { thread_pool, genesis: params.genesis.clone(), - max_block_parents: params.max_block_parents, - mergeset_size_limit: params.mergeset_size_limit, - pruning_depth: params.pruning_depth, + max_block_parents: params.max_block_parents(), + mergeset_size_limit: params.mergeset_size_limit(), db, statuses_store: storage.statuses_store.clone(), @@ -206,6 +226,7 @@ impl VirtualStateProcessor { body_tips_store: storage.body_tips_store.clone(), depth_store: storage.depth_store.clone(), selected_chain_store: storage.selected_chain_store.clone(), + pruning_samples_store: storage.pruning_samples_store.clone(), utxo_diffs_store: storage.utxo_diffs_store.clone(), utxo_multisets_store: storage.utxo_multisets_store.clone(), acceptance_data_store: storage.acceptance_data_store.clone(), @@ -230,8 +251,9 @@ impl VirtualStateProcessor { pruning_lock, notification_root, counters, - storage_mass_activation: params.storage_mass_activation, - kip10_activation: params.kip10_activation, + crescendo_logger: CrescendoLogger::new(), + crescendo_activation: params.crescendo_activation, + mining_rules, } } @@ -452,7 +474,13 @@ impl VirtualStateProcessor { // Update the diff point diff_point = current; // Commit UTXO data for current chain block - self.commit_utxo_state(current, ctx.mergeset_diff, ctx.multiset_hash, ctx.mergeset_acceptance_data); + self.commit_utxo_state( + current, + ctx.mergeset_diff, + ctx.multiset_hash, + ctx.mergeset_acceptance_data, + ctx.pruning_sample_from_pov.expect("verified"), + ); // 
Count the number of UTXO-processed chain blocks chain_block_counter += 1; } @@ -469,11 +497,20 @@ impl VirtualStateProcessor { diff_point } - fn commit_utxo_state(&self, current: Hash, mergeset_diff: UtxoDiff, multiset: MuHash, acceptance_data: AcceptanceData) { + fn commit_utxo_state( + &self, + current: Hash, + mergeset_diff: UtxoDiff, + multiset: MuHash, + acceptance_data: AcceptanceData, + pruning_sample_from_pov: Hash, + ) { let mut batch = WriteBatch::default(); self.utxo_diffs_store.insert_batch(&mut batch, current, Arc::new(mergeset_diff)).unwrap(); self.utxo_multisets_store.insert_batch(&mut batch, current, multiset).unwrap(); self.acceptance_data_store.insert_batch(&mut batch, current, Arc::new(acceptance_data)).unwrap(); + // Note we call unwrap_or_exists since this field can be populated during IBD with headers proof + self.pruning_samples_store.insert_batch(&mut batch, current, pruning_sample_from_pov).unwrap_or_exists(); let write_guard = self.statuses_store.set_batch(&mut batch, current, StatusUTXOValid).unwrap(); self.db.write(batch).unwrap(); // Calling the drops explicitly after the batch is written in order to avoid possible errors. @@ -588,11 +625,11 @@ impl VirtualStateProcessor { /// Returns the max number of tips to consider as virtual parents in a single virtual resolve operation. /// /// Guaranteed to be `>= self.max_block_parents` - fn max_virtual_parent_candidates(&self) -> usize { + fn max_virtual_parent_candidates(&self, max_block_parents: usize) -> usize { // Limit to max_block_parents x 3 candidates. This way we avoid going over thousands of tips when the network isn't healthy. // There's no specific reason for a factor of 3, and its not a consensus rule, just an estimation for reducing the amount // of candidates considered. - self.max_block_parents as usize * 3 + max_block_parents * 3 } /// Searches for the next valid sink block (SINK = Virtual selected parent). 
The search is performed @@ -680,11 +717,16 @@ impl VirtualStateProcessor { // we might touch such data prior to validating the bounded merge rule. All in all, this function is short // enough so we avoid making further optimizations let _prune_guard = self.pruning_lock.blocking_read(); - let max_block_parents = self.max_block_parents as usize; - let max_candidates = self.max_virtual_parent_candidates(); + let selected_parent_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + let max_block_parents = self.max_block_parents.get(selected_parent_daa_score) as usize; + let mergeset_size_limit = self.mergeset_size_limit.get(selected_parent_daa_score); + let max_candidates = self.max_virtual_parent_candidates(max_block_parents); // Prioritize half the blocks with highest blue work and pick the rest randomly to ensure diversity between nodes - if candidates.len() > max_candidates { + if self.mining_rules.blue_parents_only.load(Ordering::Relaxed) { + // pick 100% of the top blue work blocks + candidates.truncate(max_candidates); + } else if candidates.len() > max_candidates { // make_contiguous should be a no op since the deque was just built let slice = candidates.make_contiguous(); @@ -710,10 +752,10 @@ impl VirtualStateProcessor { // Try adding parents as long as mergeset size and number of parents limits are not reached while let Some(candidate) = candidates.pop_front() { - if mergeset_size >= self.mergeset_size_limit || virtual_parents.len() >= max_block_parents { + if mergeset_size >= mergeset_size_limit || virtual_parents.len() >= max_block_parents { break; } - match self.mergeset_increase(&virtual_parents, candidate, self.mergeset_size_limit - mergeset_size) { + match self.mergeset_increase(&virtual_parents, candidate, mergeset_size_limit - mergeset_size) { MergesetIncreaseResult::Accepted { increase_size } => { mergeset_size += increase_size; virtual_parents.push(candidate); @@ -729,7 +771,7 @@ impl VirtualStateProcessor { } } } - 
assert!(mergeset_size <= self.mergeset_size_limit); + assert!(mergeset_size <= mergeset_size_limit); assert!(virtual_parents.len() <= max_block_parents); self.remove_bounded_merge_breaking_parents(virtual_parents, pruning_point) } @@ -773,25 +815,32 @@ impl VirtualStateProcessor { let mut ghostdag_data = self.ghostdag_manager.ghostdag(&virtual_parents); let merge_depth_root = self.depth_manager.calc_merge_depth_root(&ghostdag_data, current_pruning_point); let mut kosherizing_blues: Option> = None; - let mut bad_reds = Vec::new(); + let bad_reds = if self.mining_rules.blue_parents_only.load(Ordering::Relaxed) { + // Treat all reds as bad reds when this rule is triggered + ghostdag_data.mergeset_reds.as_ref().to_vec() + } else { + let mut inner_bad_reds = Vec::new(); - // - // Note that the code below optimizes for the usual case where there are no merge-bound-violating blocks. - // + // + // Note that the code below optimizes for the usual case where there are no merge-bound-violating blocks. 
+ // - // Find red blocks violating the merge bound and which are not kosherized by any blue - for red in ghostdag_data.mergeset_reds.iter().copied() { - if self.reachability_service.is_dag_ancestor_of(merge_depth_root, red) { - continue; - } - // Lazy load the kosherizing blocks since this case is extremely rare - if kosherizing_blues.is_none() { - kosherizing_blues = Some(self.depth_manager.kosherizing_blues(&ghostdag_data, merge_depth_root).collect()); - } - if !self.reachability_service.is_dag_ancestor_of_any(red, &mut kosherizing_blues.as_ref().unwrap().iter().copied()) { - bad_reds.push(red); + // Find red blocks violating the merge bound and which are not kosherized by any blue + for red in ghostdag_data.mergeset_reds.iter().copied() { + if self.reachability_service.is_dag_ancestor_of(merge_depth_root, red) { + continue; + } + // Lazy load the kosherizing blocks since this case is extremely rare + if kosherizing_blues.is_none() { + kosherizing_blues = Some(self.depth_manager.kosherizing_blues(&ghostdag_data, merge_depth_root).collect()); + } + if !self.reachability_service.is_dag_ancestor_of_any(red, &mut kosherizing_blues.as_ref().unwrap().iter().copied()) { + inner_bad_reds.push(red); + } } - } + + inner_bad_reds + }; if !bad_reds.is_empty() { // Remove all parents which lead to merging a bad red @@ -910,8 +959,13 @@ impl VirtualStateProcessor { virtual_state.daa_score, virtual_state.past_median_time, )?; - let ValidatedTransaction { calculated_fee, .. } = - self.validate_transaction_in_utxo_context(tx, utxo_view, virtual_state.daa_score, TxValidationFlags::Full)?; + let ValidatedTransaction { calculated_fee, .. 
} = self.validate_transaction_in_utxo_context( + tx, + utxo_view, + virtual_state.daa_score, + virtual_state.daa_score, + TxValidationFlags::Full, + )?; Ok(calculated_fee) } @@ -928,7 +982,9 @@ impl VirtualStateProcessor { // We call for the initial tx batch before acquiring the virtual read lock, // optimizing for the common case where all txs are valid. Following selection calls // are called within the lock in order to preserve validness of already validated txs - let mut txs = tx_selector.select_transactions(); + let mut txs = + if self.mining_rules.no_transactions.load(Ordering::Relaxed) { vec![] } else { tx_selector.select_transactions() }; + let mut calculated_fees = Vec::with_capacity(txs.len()); let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); @@ -1015,12 +1071,13 @@ impl VirtualStateProcessor { mut txs: Vec, calculated_fees: Vec, ) -> Result { + let swo = Stopwatch::new("virtual_processor.resolve_virtual"); // [`calc_block_parents`] can use deep blocks below the pruning point for this calculation, so we // need to hold the pruning lock. 
let _prune_guard = self.pruning_lock.blocking_read(); let pruning_info = self.pruning_point_store.read().get().unwrap(); let header_pruning_point = - self.pruning_point_manager.expected_header_pruning_point(virtual_state.ghostdag_data.to_compact(), pruning_info); + self.pruning_point_manager.expected_header_pruning_point_v2(virtual_state.ghostdag_data.to_compact()).pruning_point; let coinbase = self .coinbase_manager .expected_coinbase_transaction( @@ -1036,10 +1093,14 @@ impl VirtualStateProcessor { let parents_by_level = self.parents_manager.calc_block_parents(pruning_info.pruning_point, &virtual_state.parents); // Hash according to hardfork activation - let storage_mass_activated = self.storage_mass_activation.is_active(virtual_state.daa_score); + let storage_mass_activated = self.crescendo_activation.is_active(virtual_state.daa_score); let hash_merkle_root = calc_hash_merkle_root(txs.iter(), storage_mass_activated); - let accepted_id_merkle_root = kaspa_merkle::calc_merkle_root(virtual_state.accepted_tx_ids.iter().copied()); + let accepted_id_merkle_root = self.calc_accepted_id_merkle_root( + virtual_state.daa_score, + virtual_state.accepted_tx_ids.iter().copied(), + virtual_state.ghostdag_data.selected_parent, + ); let utxo_commitment = virtual_state.multiset.clone().finalize(); // Past median time is the exclusive lower bound for valid block time, so we increase by 1 to get the valid min let min_block_time = virtual_state.past_median_time + 1; @@ -1060,6 +1121,13 @@ impl VirtualStateProcessor { let selected_parent_hash = virtual_state.ghostdag_data.selected_parent; let selected_parent_timestamp = self.headers_store.get_timestamp(selected_parent_hash).unwrap(); let selected_parent_daa_score = self.headers_store.get_daa_score(selected_parent_hash).unwrap(); + + if swo.elapsed().as_millis() <= BUILD_BLOCK_TEMPLATE_SPEED_THRESHOLD { + self.counters.build_block_template_within_threshold.fetch_add(1, Ordering::SeqCst); + } else { + 
self.counters.build_block_template_above_threshold.fetch_add(1, Ordering::SeqCst); + } + Ok(BlockTemplate::new( MutableBlock::new(header, txs), miner_data, @@ -1080,7 +1148,8 @@ impl VirtualStateProcessor { let mut batch = WriteBatch::default(); self.past_pruning_points_store.insert_batch(&mut batch, 0, self.genesis.hash).unwrap_or_exists(); pruning_point_write.set_batch(&mut batch, self.genesis.hash, self.genesis.hash, 0).unwrap(); - pruning_point_write.set_history_root(&mut batch, self.genesis.hash).unwrap(); + pruning_point_write.set_retention_checkpoint(&mut batch, self.genesis.hash).unwrap(); + pruning_point_write.set_retention_period_root(&mut batch, self.genesis.hash).unwrap(); pruning_utxoset_write.set_utxoset_position(&mut batch, self.genesis.hash).unwrap(); self.db.write(batch).unwrap(); drop(pruning_point_write); @@ -1092,7 +1161,7 @@ impl VirtualStateProcessor { /// Note that pruning point-related stores are initialized by `init` pub fn process_genesis(self: &Arc) { // Write the UTXO state of genesis - self.commit_utxo_state(self.genesis.hash, UtxoDiff::default(), MuHash::new(), AcceptanceData::default()); + self.commit_utxo_state(self.genesis.hash, UtxoDiff::default(), MuHash::new(), AcceptanceData::default(), ZERO_HASH); // Init the virtual selected chain store let mut batch = WriteBatch::default(); @@ -1154,6 +1223,7 @@ impl VirtualStateProcessor { &new_pruning_point_transactions, &virtual_read.utxo_set, new_pruning_point_header.daa_score, + new_pruning_point_header.daa_score, TxValidationFlags::Full, ); if validated_transactions.len() < new_pruning_point_transactions.len() - 1 { diff --git a/consensus/src/pipeline/virtual_processor/tests.rs b/consensus/src/pipeline/virtual_processor/tests.rs index 20eea9e576..7972a90534 100644 --- a/consensus/src/pipeline/virtual_processor/tests.rs +++ b/consensus/src/pipeline/virtual_processor/tests.rs @@ -68,7 +68,7 @@ impl TestContext { pub fn build_block_template_row(&mut self, nonces: impl Iterator) -> &mut 
Self { for nonce in nonces { - self.simulated_time += self.consensus.params().target_time_per_block; + self.simulated_time += self.consensus.params().prior_target_time_per_block; self.current_templates.push_back(self.build_block_template(nonce as u64, self.simulated_time)); } self @@ -93,7 +93,7 @@ impl TestContext { pub async fn build_and_insert_disqualified_chain(&mut self, mut parents: Vec, len: usize) -> Hash { // The chain will be disqualified since build_block_with_parents builds utxo-invalid blocks for _ in 0..len { - self.simulated_time += self.consensus.params().target_time_per_block; + self.simulated_time += self.consensus.params().prior_target_time_per_block; let b = self.build_block_with_parents(parents, 0, self.simulated_time); parents = vec![b.header.hash]; self.validate_and_insert_block(b.to_immutable()).await; @@ -174,8 +174,8 @@ async fn antichain_merge_test() { let config = ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.max_block_parents = 4; - p.mergeset_size_limit = 10; + p.prior_max_block_parents = 4; + p.prior_mergeset_size_limit = 10; }) .build(); @@ -202,8 +202,8 @@ async fn basic_utxo_disqualified_test() { let config = ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.max_block_parents = 4; - p.mergeset_size_limit = 10; + p.prior_max_block_parents = 4; + p.prior_mergeset_size_limit = 10; }) .build(); @@ -234,9 +234,9 @@ async fn double_search_disqualified_test() { let config = ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.max_block_parents = 4; - p.mergeset_size_limit = 10; - p.min_difficulty_window_len = p.legacy_difficulty_window_size; + p.prior_max_block_parents = 4; + p.prior_mergeset_size_limit = 10; + p.min_difficulty_window_size = p.prior_difficulty_window_size; }) .build(); let mut ctx = TestContext::new(TestConsensus::new(&config)); diff --git a/consensus/src/pipeline/virtual_processor/utxo_inquirer.rs 
b/consensus/src/pipeline/virtual_processor/utxo_inquirer.rs index 617c1026bd..c917ec393e 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_inquirer.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_inquirer.rs @@ -24,21 +24,21 @@ impl VirtualStateProcessor { &self, txid: Hash, accepting_block_daa_score: u64, - source_hash: Hash, + retention_period_root_hash: Hash, ) -> Result { - let source_daa_score = self + let retention_period_root_daa_score = self .headers_store - .get_compact_header_data(source_hash) + .get_compact_header_data(retention_period_root_hash) .map(|compact_header| compact_header.daa_score) - .map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(source_hash))?; + .map_err(|_| UtxoInquirerError::MissingCompactHeaderForBlockHash(retention_period_root_hash))?; - if accepting_block_daa_score < source_daa_score { + if accepting_block_daa_score < retention_period_root_daa_score { // Early exit if target daa score is lower than that of pruning point's daa score: return Err(UtxoInquirerError::AlreadyPruned); } let (matching_chain_block_hash, acceptance_data) = - self.find_accepting_chain_block_hash_at_daa_score(accepting_block_daa_score, source_hash)?; + self.find_accepting_chain_block_hash_at_daa_score(accepting_block_daa_score, retention_period_root_hash)?; // Expected to never fail, since we found the acceptance data and therefore there must be matching diff let utxo_diff = self @@ -86,11 +86,13 @@ impl VirtualStateProcessor { fn find_accepting_chain_block_hash_at_daa_score( &self, target_daa_score: u64, - source_hash: Hash, + retention_period_root_hash: Hash, ) -> Result<(Hash, Arc), UtxoInquirerError> { let sc_read = self.selected_chain_store.read(); - let source_index = sc_read.get_by_hash(source_hash).map_err(|_| UtxoInquirerError::MissingIndexForHash(source_hash))?; + let retention_period_root_index = sc_read + .get_by_hash(retention_period_root_hash) + .map_err(|_| 
UtxoInquirerError::MissingIndexForHash(retention_period_root_hash))?; let (tip_index, tip_hash) = sc_read.get_tip().map_err(|_| UtxoInquirerError::MissingTipData)?; let tip_daa_score = self .headers_store @@ -101,7 +103,7 @@ impl VirtualStateProcessor { // For a chain segment it holds that len(segment) <= daa_score(segment end) - daa_score(segment start). This is true // because each chain block increases the daa score by at least one. Hence we can lower bound our search by high index // minus the daa score gap as done below - let mut low_index = tip_index.saturating_sub(tip_daa_score.saturating_sub(target_daa_score)).max(source_index); + let mut low_index = tip_index.saturating_sub(tip_daa_score.saturating_sub(target_daa_score)).max(retention_period_root_index); let mut high_index = tip_index; let matching_chain_block_hash = loop { diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index f0da0535ed..201a634746 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -2,12 +2,23 @@ use super::VirtualStateProcessor; use crate::{ errors::{ BlockProcessResult, - RuleError::{BadAcceptedIDMerkleRoot, BadCoinbaseTransaction, BadUTXOCommitment, InvalidTransactionsInUtxoContext}, + RuleError::{ + BadAcceptedIDMerkleRoot, BadCoinbaseTransaction, BadUTXOCommitment, InvalidTransactionsInUtxoContext, + WrongHeaderPruningPoint, + }, }, - model::stores::{block_transactions::BlockTransactionsStoreReader, daa::DaaStoreReader, ghostdag::GhostdagData}, - processes::transaction_validator::{ - errors::{TxResult, TxRuleError}, - tx_validation_in_utxo_context::TxValidationFlags, + model::stores::{ + block_transactions::BlockTransactionsStoreReader, + daa::DaaStoreReader, + ghostdag::{CompactGhostdagData, GhostdagData}, + headers::HeaderStoreReader, + }, + processes::{ + pruning::PruningPointReply, + 
transaction_validator::{ + errors::{TxResult, TxRuleError}, + tx_validation_in_utxo_context::TxValidationFlags, + }, }, }; use kaspa_consensus_core::{ @@ -33,6 +44,36 @@ use rayon::prelude::*; use smallvec::{smallvec, SmallVec}; use std::{iter::once, ops::Deref}; +pub(crate) mod crescendo { + use kaspa_core::{info, log::CRESCENDO_KEYWORD}; + use std::sync::{ + atomic::{AtomicU8, Ordering}, + Arc, + }; + + #[derive(Clone)] + pub(crate) struct CrescendoLogger { + steps: Arc, + } + + impl CrescendoLogger { + pub fn new() -> Self { + Self { steps: Arc::new(AtomicU8::new(Self::ACTIVATE)) } + } + + const ACTIVATE: u8 = 0; + + pub fn report_activation(&self) -> bool { + if self.steps.compare_exchange(Self::ACTIVATE, Self::ACTIVATE + 1, Ordering::SeqCst, Ordering::SeqCst).is_ok() { + info!(target: CRESCENDO_KEYWORD, "[Crescendo] [--------- Crescendo activated for UTXO state processing rules ---------]"); + true + } else { + false + } + } + } +} + /// A context for processing the UTXO state of a block with respect to its selected parent. /// Note this can also be the virtual block. pub(super) struct UtxoProcessingContext<'a> { @@ -42,6 +83,7 @@ pub(super) struct UtxoProcessingContext<'a> { pub accepted_tx_ids: Vec, pub mergeset_acceptance_data: Vec, pub mergeset_rewards: BlockHashMap, + pub pruning_sample_from_pov: Option, } impl<'a> UtxoProcessingContext<'a> { @@ -54,6 +96,7 @@ impl<'a> UtxoProcessingContext<'a> { accepted_tx_ids: Vec::with_capacity(1), // We expect at least the selected parent coinbase tx mergeset_rewards: BlockHashMap::with_capacity(mergeset_size), mergeset_acceptance_data: Vec::with_capacity(mergeset_size), + pruning_sample_from_pov: Default::default(), } } @@ -95,8 +138,13 @@ impl VirtualStateProcessor { // No need to fully validate selected parent transactions since selected parent txs were already validated // as part of selected parent UTXO state verification with the exact same UTXO context. 
let validation_flags = if is_selected_parent { TxValidationFlags::SkipScriptChecks } else { TxValidationFlags::Full }; - let (validated_transactions, inner_multiset) = - self.validate_transactions_with_muhash_in_parallel(&txs, &composed_view, pov_daa_score, validation_flags); + let (validated_transactions, inner_multiset) = self.validate_transactions_with_muhash_in_parallel( + &txs, + &composed_view, + pov_daa_score, + self.headers_store.get_daa_score(merged_block).unwrap(), + validation_flags, + ); ctx.multiset_hash.combine(&inner_multiset); @@ -107,27 +155,19 @@ impl VirtualStateProcessor { block_fee += validated_tx.calculated_fee; } - if is_selected_parent { + ctx.mergeset_acceptance_data.push(MergesetBlockAcceptanceData { + block_hash: merged_block, // For the selected parent, we prepend the coinbase tx - ctx.mergeset_acceptance_data.push(MergesetBlockAcceptanceData { - block_hash: merged_block, - accepted_transactions: once(AcceptedTxEntry { transaction_id: validated_coinbase_id, index_within_block: 0 }) - .chain( - validated_transactions - .into_iter() - .map(|(tx, tx_idx)| AcceptedTxEntry { transaction_id: tx.id(), index_within_block: tx_idx }), - ) - .collect(), - }); - } else { - ctx.mergeset_acceptance_data.push(MergesetBlockAcceptanceData { - block_hash: merged_block, - accepted_transactions: validated_transactions - .into_iter() - .map(|(tx, tx_idx)| AcceptedTxEntry { transaction_id: tx.id(), index_within_block: tx_idx }) - .collect(), - }); - } + accepted_transactions: is_selected_parent + .then_some(AcceptedTxEntry { transaction_id: validated_coinbase_id, index_within_block: 0 }) + .into_iter() + .chain( + validated_transactions + .into_iter() + .map(|(tx, tx_idx)| AcceptedTxEntry { transaction_id: tx.id(), index_within_block: tx_idx }), + ) + .collect(), + }); let coinbase_data = self.coinbase_manager.deserialize_coinbase_payload(&txs[0].payload).unwrap(); ctx.mergeset_rewards.insert( @@ -136,17 +176,24 @@ impl VirtualStateProcessor { ); } - // Make 
sure accepted tx ids are sorted before building the merkle root - // NOTE: when subnetworks will be enabled, the sort should consider them in order to allow grouping under a merkle subtree - ctx.accepted_tx_ids.sort(); + // Before crescendo HF: + // - Make sure accepted tx ids are sorted before building the merkle root + // After crescendo HF: + // - Preserve canonical order of accepted transactions after hard-fork + if !self.crescendo_activation.is_active(pov_daa_score) { + // Note that pov_daa_score is the score of the header which will have its accepted_id_merkle_root + // set according to accepted_tx_ids, so we are consistent in activating via the correct score + ctx.accepted_tx_ids.sort(); + } } /// Verify that the current block fully respects its own UTXO view. We define a block as /// UTXO valid if all the following conditions hold: /// 1. The block header includes the expected `utxo_commitment`. /// 2. The block header includes the expected `accepted_id_merkle_root`. - /// 3. The block coinbase transaction rewards the mergeset blocks correctly. - /// 4. All non-coinbase block transactions are valid against its own UTXO view. + /// 3. The block header includes the expected `pruning_point`. + /// 4. The block coinbase transaction rewards the mergeset blocks correctly. + /// 5. All non-coinbase block transactions are valid against its own UTXO view. 
pub(super) fn verify_expected_utxo_state( &self, ctx: &mut UtxoProcessingContext, @@ -161,7 +208,9 @@ impl VirtualStateProcessor { trace!("correct commitment: {}, {}", header.hash, expected_commitment); // Verify header accepted_id_merkle_root - let expected_accepted_id_merkle_root = kaspa_merkle::calc_merkle_root(ctx.accepted_tx_ids.iter().copied()); + let expected_accepted_id_merkle_root = + self.calc_accepted_id_merkle_root(header.daa_score, ctx.accepted_tx_ids.iter().copied(), ctx.selected_parent()); + if expected_accepted_id_merkle_root != header.accepted_id_merkle_root { return Err(BadAcceptedIDMerkleRoot(header.hash, header.accepted_id_merkle_root, expected_accepted_id_merkle_root)); } @@ -177,10 +226,19 @@ impl VirtualStateProcessor { &self.daa_excluded_store.get_mergeset_non_daa(header.hash).unwrap(), )?; + // Verify the header pruning point + let reply = self.verify_header_pruning_point(header, ctx.ghostdag_data.to_compact())?; + ctx.pruning_sample_from_pov = Some(reply.pruning_sample); + // Verify all transactions are valid in context let current_utxo_view = selected_parent_utxo_view.compose(&ctx.mergeset_diff); - let validated_transactions = - self.validate_transactions_in_parallel(&txs, ¤t_utxo_view, header.daa_score, TxValidationFlags::Full); + let validated_transactions = self.validate_transactions_in_parallel( + &txs, + ¤t_utxo_view, + header.daa_score, + header.daa_score, + TxValidationFlags::Full, + ); if validated_transactions.len() < txs.len() - 1 { // Some non-coinbase transactions are invalid return Err(InvalidTransactionsInUtxoContext(txs.len() - 1 - validated_transactions.len(), txs.len() - 1)); @@ -189,6 +247,32 @@ impl VirtualStateProcessor { Ok(()) } + fn verify_header_pruning_point( + &self, + header: &Header, + ghostdag_data: CompactGhostdagData, + ) -> BlockProcessResult { + // [Crescendo]: changing expected pruning point check from header validity to chain qualification. 
+ // Note that we activate here based on the selected parent DAA score thus complementing the deactivation + // in header processor which is based on selected parent DAA score as well. + + if self.crescendo_activation.is_within_range_from_activation(header.daa_score, 1000) { + self.crescendo_logger.report_activation(); + } + + let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + // [Crescendo]: we need to save reply.pruning_sample to the database also prior to activation + let reply = self.pruning_point_manager.expected_header_pruning_point_v2(ghostdag_data); + if self.crescendo_activation.is_active(selected_parent_daa_score) { + if reply.pruning_point != header.pruning_point { + return Err(WrongHeaderPruningPoint(reply.pruning_point, header.pruning_point)); + } + } else { + assert_eq!(reply.pruning_point, header.pruning_point, "verified by header validation (v1 = v2 pre activation)"); + } + Ok(reply) + } + fn verify_coinbase_transaction( &self, coinbase: &Transaction, @@ -204,6 +288,10 @@ impl VirtualStateProcessor { .expected_coinbase_transaction(daa_score, miner_data, ghostdag_data, mergeset_rewards, mergeset_non_daa) .unwrap() .tx; + // [Crescendo]: we can pass include_mass_field = false here since post activation coinbase mass field + // is guaranteed to be zero (see check_coinbase_has_zero_mass), so after the fork we will be able to + // safely remove the include_mass_field parameter. This is because internally include_mass_field = false + // and mass = 0 are treated the same. 
if hashing::tx::hash(coinbase, false) != hashing::tx::hash(&expected_coinbase, false) { Err(BadCoinbaseTransaction) } else { @@ -218,6 +306,7 @@ impl VirtualStateProcessor { txs: &'a Vec, utxo_view: &V, pov_daa_score: u64, + block_daa_score: u64, flags: TxValidationFlags, ) -> Vec<(ValidatedTransaction<'a>, u32)> { self.thread_pool.install(|| { @@ -226,7 +315,7 @@ impl VirtualStateProcessor { // that all txs within each block are independent .enumerate() .skip(1) // Skip the coinbase tx. - .filter_map(|(i, tx)| self.validate_transaction_in_utxo_context(tx, &utxo_view, pov_daa_score, flags).ok().map(|vtx| (vtx, i as u32))) + .filter_map(|(i, tx)| self.validate_transaction_in_utxo_context(tx, &utxo_view, pov_daa_score, block_daa_score, flags).ok().map(|vtx| (vtx, i as u32))) .collect() }) } @@ -238,6 +327,7 @@ impl VirtualStateProcessor { txs: &'a Vec, utxo_view: &V, pov_daa_score: u64, + block_daa_score: u64, flags: TxValidationFlags, ) -> (SmallVec<[(ValidatedTransaction<'a>, u32); 2]>, MuHash) { self.thread_pool.install(|| { @@ -246,7 +336,7 @@ impl VirtualStateProcessor { // that all txs within each block are independent .enumerate() .skip(1) // Skip the coinbase tx. 
- .filter_map(|(i, tx)| self.validate_transaction_in_utxo_context(tx, &utxo_view, pov_daa_score, flags).ok().map(|vtx| { + .filter_map(|(i, tx)| self.validate_transaction_in_utxo_context(tx, &utxo_view, pov_daa_score, block_daa_score, flags).ok().map(|vtx| { let mh = MuHash::from_transaction(&vtx, pov_daa_score); (smallvec![(vtx, i as u32)], mh) } @@ -268,6 +358,7 @@ impl VirtualStateProcessor { transaction: &'a Transaction, utxo_view: &impl UtxoView, pov_daa_score: u64, + block_daa_score: u64, flags: TxValidationFlags, ) -> TxResult> { let mut entries = Vec::with_capacity(transaction.inputs.len()); @@ -280,10 +371,19 @@ impl VirtualStateProcessor { } } let populated_tx = PopulatedTransaction::new(transaction, entries); - let res = self.transaction_validator.validate_populated_transaction_and_get_fee(&populated_tx, pov_daa_score, flags, None); + let res = self.transaction_validator.validate_populated_transaction_and_get_fee( + &populated_tx, + pov_daa_score, + block_daa_score, + flags, + None, + ); match res { Ok(calculated_fee) => Ok(ValidatedTransaction::new(populated_tx, calculated_fee)), Err(tx_rule_error) => { + // TODO (relaxed): aggregate by error types and log through the monitor (in order to not flood the logs) + // [Crescendo]: the above suggested aggregate seems not crucial for crescendo since unupdated miners + // will mine invalid blocks (due to difficulty, coinbase etc) info!("Rejecting transaction {} due to transaction rule error: {}", transaction.id(), tx_rule_error); Err(tx_rule_error) } @@ -325,27 +425,48 @@ impl VirtualStateProcessor { ) -> TxResult<()> { self.populate_mempool_transaction_in_utxo_context(mutable_tx, utxo_view)?; - // Calc the full contextual mass including storage mass + // Calc the contextual storage mass let contextual_mass = self .transaction_validator .mass_calculator - .calc_tx_overall_mass(&mutable_tx.as_verifiable(), mutable_tx.calculated_compute_mass) + .calc_contextual_masses(&mutable_tx.as_verifiable()) 
.ok_or(TxRuleError::MassIncomputable)?; // Set the inner mass field - mutable_tx.tx.set_mass(contextual_mass); + mutable_tx.tx.set_mass(contextual_mass.storage_mass); // At this point we know all UTXO entries are populated, so we can safely pass the tx as verifiable - let mass_and_feerate_threshold = args.feerate_threshold.map(|threshold| (contextual_mass, threshold)); + let mass_and_feerate_threshold = args + .feerate_threshold + .map(|threshold| (contextual_mass.max(mutable_tx.calculated_non_contextual_masses.unwrap()), threshold)); let calculated_fee = self.transaction_validator.validate_populated_transaction_and_get_fee( &mutable_tx.as_verifiable(), pov_daa_score, + pov_daa_score, TxValidationFlags::SkipMassCheck, // we can skip the mass check since we just set it mass_and_feerate_threshold, )?; mutable_tx.calculated_fee = Some(calculated_fee); Ok(()) } + + /// Calculates the accepted_id_merkle_root based on the current DAA score and the accepted tx ids + /// refer KIP-15 for more details + pub(super) fn calc_accepted_id_merkle_root( + &self, + daa_score: u64, + accepted_tx_ids: impl ExactSizeIterator, + selected_parent: Hash, + ) -> Hash { + if self.crescendo_activation.is_active(daa_score) { + kaspa_merkle::merkle_hash( + self.headers_store.get_header(selected_parent).unwrap().accepted_id_merkle_root, + kaspa_merkle::calc_merkle_root(accepted_tx_ids), + ) + } else { + kaspa_merkle::calc_merkle_root(accepted_tx_ids) + } + } } #[cfg(test)] diff --git a/consensus/src/processes/block_depth.rs b/consensus/src/processes/block_depth.rs index 24d948f708..2f6c47ee39 100644 --- a/consensus/src/processes/block_depth.rs +++ b/consensus/src/processes/block_depth.rs @@ -1,4 +1,7 @@ -use kaspa_consensus_core::blockhash::ORIGIN; +use kaspa_consensus_core::{ + blockhash::{BlockHashExtensions, ORIGIN}, + config::params::ForkedParam, +}; use kaspa_hashes::Hash; use std::sync::Arc; @@ -7,42 +10,56 @@ use crate::model::{ stores::{ depth::DepthStoreReader, ghostdag::{GhostdagData, 
GhostdagStoreReader}, + headers::HeaderStoreReader, reachability::ReachabilityStoreReader, }, }; +enum BlockDepthType { + MergeRoot, + Finality, +} + #[derive(Clone)] -pub struct BlockDepthManager { - merge_depth: u64, - finality_depth: u64, +pub struct BlockDepthManager { + merge_depth: ForkedParam, + finality_depth: ForkedParam, genesis_hash: Hash, depth_store: Arc, reachability_service: MTReachabilityService, ghostdag_store: Arc, + headers_store: Arc, } -impl BlockDepthManager { +impl BlockDepthManager { pub fn new( - merge_depth: u64, - finality_depth: u64, + merge_depth: ForkedParam, + finality_depth: ForkedParam, genesis_hash: Hash, depth_store: Arc, reachability_service: MTReachabilityService, ghostdag_store: Arc, + headers_store: Arc, ) -> Self { - Self { merge_depth, finality_depth, genesis_hash, depth_store, reachability_service, ghostdag_store } + Self { merge_depth, finality_depth, genesis_hash, depth_store, reachability_service, ghostdag_store, headers_store } } pub fn calc_merge_depth_root(&self, ghostdag_data: &GhostdagData, pruning_point: Hash) -> Hash { - self.calculate_block_at_depth(ghostdag_data, self.merge_depth, pruning_point) + self.calculate_block_at_depth(ghostdag_data, BlockDepthType::MergeRoot, pruning_point) } pub fn calc_finality_point(&self, ghostdag_data: &GhostdagData, pruning_point: Hash) -> Hash { - self.calculate_block_at_depth(ghostdag_data, self.finality_depth, pruning_point) + self.calculate_block_at_depth(ghostdag_data, BlockDepthType::Finality, pruning_point) } - fn calculate_block_at_depth(&self, ghostdag_data: &GhostdagData, depth: u64, pruning_point: Hash) -> Hash { - assert!(depth == self.merge_depth || depth == self.finality_depth); - + fn calculate_block_at_depth(&self, ghostdag_data: &GhostdagData, depth_type: BlockDepthType, pruning_point: Hash) -> Hash { + if ghostdag_data.selected_parent.is_origin() { + return ORIGIN; + } + let selected_parent_daa_score = 
self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + let depth = match depth_type { + BlockDepthType::MergeRoot => self.merge_depth.get(selected_parent_daa_score), + BlockDepthType::Finality => self.finality_depth.get(selected_parent_daa_score), + }; if ghostdag_data.blue_score < depth { return self.genesis_hash; } @@ -57,10 +74,12 @@ impl Bl return ORIGIN; } - let mut current = if depth == self.merge_depth { - self.depth_store.merge_depth_root(ghostdag_data.selected_parent).unwrap() - } else { - self.depth_store.finality_point(ghostdag_data.selected_parent).unwrap() + // [Crescendo]: we start from the depth/finality point of the selected parent. This makes the selection monotonic + // also when the depth increases in the fork activation point. The loop below will simply not progress for a while, + // until a new block above the previous point reaches the *new increased depth*. + let mut current = match depth_type { + BlockDepthType::MergeRoot => self.depth_store.merge_depth_root(ghostdag_data.selected_parent).unwrap(), + BlockDepthType::Finality => self.depth_store.finality_point(ghostdag_data.selected_parent).unwrap(), }; // In this case we expect the pruning point or a block above it to be the block at depth. 
diff --git a/consensus/src/processes/coinbase.rs b/consensus/src/processes/coinbase.rs index d67f922c81..63f656270a 100644 --- a/consensus/src/processes/coinbase.rs +++ b/consensus/src/processes/coinbase.rs @@ -1,5 +1,6 @@ use kaspa_consensus_core::{ coinbase::*, + config::params::ForkedParam, errors::coinbase::{CoinbaseError, CoinbaseResult}, subnets, tx::{ScriptPublicKey, ScriptVec, Transaction, TransactionOutput}, @@ -30,13 +31,16 @@ pub struct CoinbaseManager { max_coinbase_payload_len: usize, deflationary_phase_daa_score: u64, pre_deflationary_phase_base_subsidy: u64, - target_time_per_block: u64, + bps: ForkedParam, - /// Precomputed number of blocks per month - blocks_per_month: u64, + /// Precomputed subsidy by month tables (for before and after the Crescendo hardfork) + subsidy_by_month_table_before: SubsidyByMonthTable, + subsidy_by_month_table_after: SubsidyByMonthTable, - /// Precomputed subsidy by month table - subsidy_by_month_table: SubsidyByMonthTable, + /// The crescendo activation DAA score where BPS increased from 1 to 10. + /// This score is required here long-term (and not only for the actual forking), in + /// order to correctly determine the subsidy month from the live DAA score of the network + crescendo_activation_daa_score: u64, } /// Struct used to streamline payload parsing @@ -63,31 +67,31 @@ impl CoinbaseManager { max_coinbase_payload_len: usize, deflationary_phase_daa_score: u64, pre_deflationary_phase_base_subsidy: u64, - target_time_per_block: u64, + bps: ForkedParam, ) -> Self { - assert!(1000 % target_time_per_block == 0); - let bps = 1000 / target_time_per_block; - let blocks_per_month = SECONDS_PER_MONTH * bps; - // Precomputed subsidy by month table for the actual block per second rate // Here values are rounded up so that we keep the same number of rewarding months as in the original 1 BPS table. 
// In a 10 BPS network, the induced increase in total rewards is 51 KAS (see tests::calc_high_bps_total_rewards_delta()) - let subsidy_by_month_table: SubsidyByMonthTable = core::array::from_fn(|i| SUBSIDY_BY_MONTH_TABLE[i].div_ceil(bps)); + let subsidy_by_month_table_before: SubsidyByMonthTable = + core::array::from_fn(|i| SUBSIDY_BY_MONTH_TABLE[i].div_ceil(bps.before())); + let subsidy_by_month_table_after: SubsidyByMonthTable = + core::array::from_fn(|i| SUBSIDY_BY_MONTH_TABLE[i].div_ceil(bps.after())); Self { coinbase_payload_script_public_key_max_len, max_coinbase_payload_len, deflationary_phase_daa_score, pre_deflationary_phase_base_subsidy, - target_time_per_block, - blocks_per_month, - subsidy_by_month_table, + bps, + subsidy_by_month_table_before, + subsidy_by_month_table_after, + crescendo_activation_daa_score: bps.activation().daa_score(), } } #[cfg(test)] #[inline] - pub fn bps(&self) -> u64 { - 1000 / self.target_time_per_block + pub fn bps(&self) -> ForkedParam { + self.bps } pub fn expected_coinbase_transaction>( @@ -113,10 +117,24 @@ impl CoinbaseManager { // Collect all rewards from mergeset reds ∩ DAA window and create a // single output rewarding all to the current block (the "merging" block) let mut red_reward = 0u64; - for red in ghostdag_data.mergeset_reds.iter().filter(|h| !mergeset_non_daa.contains(h)) { - let reward_data = mergeset_rewards.get(red).unwrap(); - red_reward += reward_data.subsidy + reward_data.total_fees; + + // bps activation = crescendo activation + if self.bps.activation().is_active(daa_score) { + for red in ghostdag_data.mergeset_reds.iter() { + let reward_data = mergeset_rewards.get(red).unwrap(); + if mergeset_non_daa.contains(red) { + red_reward += reward_data.total_fees; + } else { + red_reward += reward_data.subsidy + reward_data.total_fees; + } + } + } else { + for red in ghostdag_data.mergeset_reds.iter().filter(|h| !mergeset_non_daa.contains(h)) { + let reward_data = mergeset_rewards.get(red).unwrap(); + red_reward 
+= reward_data.subsidy + reward_data.total_fees; + } } + if red_reward > 0 { outputs.push(TransactionOutput::new(red_reward, miner_data.script_public_key.clone())); } @@ -214,13 +232,33 @@ impl CoinbaseManager { return self.pre_deflationary_phase_base_subsidy; } - let months_since_deflationary_phase_started = - ((daa_score - self.deflationary_phase_daa_score) / self.blocks_per_month) as usize; - if months_since_deflationary_phase_started >= self.subsidy_by_month_table.len() { - *(self.subsidy_by_month_table).last().unwrap() + let subsidy_month = self.subsidy_month(daa_score) as usize; + let subsidy_table = if self.bps.activation().is_active(daa_score) { + &self.subsidy_by_month_table_after } else { - self.subsidy_by_month_table[months_since_deflationary_phase_started] - } + &self.subsidy_by_month_table_before + }; + subsidy_table[subsidy_month.min(subsidy_table.len() - 1)] + } + + /// Get the subsidy month as function of the current DAA score. + /// + /// Note that this function is called only if daa_score >= self.deflationary_phase_daa_score + fn subsidy_month(&self, daa_score: u64) -> u64 { + let seconds_since_deflationary_phase_started = if self.crescendo_activation_daa_score < self.deflationary_phase_daa_score { + // crescendo_activation < deflationary_phase <= daa_score (activated before deflation) + (daa_score - self.deflationary_phase_daa_score) / self.bps.after() + } else if daa_score < self.crescendo_activation_daa_score { + // deflationary_phase <= daa_score < crescendo_activation (pre activation) + (daa_score - self.deflationary_phase_daa_score) / self.bps.before() + } else { + // Else - deflationary_phase <= crescendo_activation <= daa_score. 
+ // Count seconds differently before and after Crescendo activation + (self.crescendo_activation_daa_score - self.deflationary_phase_daa_score) / self.bps.before() + + (daa_score - self.crescendo_activation_daa_score) / self.bps.after() + }; + + seconds_since_deflationary_phase_started / SECONDS_PER_MONTH } #[cfg(test)] @@ -244,7 +282,7 @@ impl CoinbaseManager { /* This table was pre-calculated by calling `calcDeflationaryPeriodBlockSubsidyFloatCalc` (in kaspad-go) for all months until reaching 0 subsidy. To regenerate this table, run `TestBuildSubsidyTable` in coinbasemanager_test.go (note the `deflationaryPhaseBaseSubsidy` therein). - These values apply to 1 block per second. + These values represent the reward per second for each month (= reward per block for 1 BPS). */ #[rustfmt::skip] const SUBSIDY_BY_MONTH_TABLE: [u64; 426] = [ @@ -273,26 +311,24 @@ mod tests { use super::*; use crate::params::MAINNET_PARAMS; use kaspa_consensus_core::{ - config::params::{Params, TESTNET11_PARAMS}, + config::params::{ForkActivation, Params, SIMNET_PARAMS}, constants::SOMPI_PER_KASPA, - network::NetworkId, + network::{NetworkId, NetworkType}, tx::scriptvec, }; #[test] fn calc_high_bps_total_rewards_delta() { - const SECONDS_PER_MONTH: u64 = 2629800; - let legacy_cbm = create_legacy_manager(); let pre_deflationary_rewards = legacy_cbm.pre_deflationary_phase_base_subsidy * legacy_cbm.deflationary_phase_daa_score; let total_rewards: u64 = pre_deflationary_rewards + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| x * SECONDS_PER_MONTH).sum::(); - let testnet_11_bps = TESTNET11_PARAMS.bps(); + let testnet_11_bps = SIMNET_PARAMS.bps().upper_bound(); let total_high_bps_rewards_rounded_up: u64 = pre_deflationary_rewards + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| (x.div_ceil(testnet_11_bps) * testnet_11_bps) * SECONDS_PER_MONTH).sum::(); - let cbm = create_manager(&TESTNET11_PARAMS); - let total_high_bps_rewards: u64 = - pre_deflationary_rewards + cbm.subsidy_by_month_table.iter().map(|x| x * 
cbm.blocks_per_month).sum::(); + let cbm = create_manager(&SIMNET_PARAMS); + let total_high_bps_rewards: u64 = pre_deflationary_rewards + + cbm.subsidy_by_month_table_before.iter().map(|x| x * SECONDS_PER_MONTH * cbm.bps().before()).sum::(); assert_eq!(total_high_bps_rewards_rounded_up, total_high_bps_rewards, "subsidy adjusted to bps must be rounded up"); let delta = total_high_bps_rewards as i64 - total_rewards as i64; @@ -305,15 +341,23 @@ mod tests { #[test] fn subsidy_by_month_table_test() { let cbm = create_legacy_manager(); - cbm.subsidy_by_month_table.iter().enumerate().for_each(|(i, x)| { + cbm.subsidy_by_month_table_before.iter().enumerate().for_each(|(i, x)| { assert_eq!(SUBSIDY_BY_MONTH_TABLE[i], *x, "for 1 BPS, const table and precomputed values must match"); }); for network_id in NetworkId::iter() { let cbm = create_manager(&network_id.into()); - cbm.subsidy_by_month_table.iter().enumerate().for_each(|(i, x)| { + cbm.subsidy_by_month_table_before.iter().enumerate().for_each(|(i, x)| { + assert_eq!( + SUBSIDY_BY_MONTH_TABLE[i].div_ceil(cbm.bps().before()), + *x, + "{}: locally computed and precomputed values must match", + network_id + ); + }); + cbm.subsidy_by_month_table_after.iter().enumerate().for_each(|(i, x)| { assert_eq!( - SUBSIDY_BY_MONTH_TABLE[i].div_ceil(cbm.bps()), + SUBSIDY_BY_MONTH_TABLE[i].div_ceil(cbm.bps().after()), *x, "{}: locally computed and precomputed values must match", network_id @@ -322,6 +366,71 @@ mod tests { } } + /// Takes over 60 seconds, run with the following command line: + /// `cargo test --release --package kaspa-consensus --lib -- processes::coinbase::tests::verify_crescendo_emission_schedule --exact --nocapture --ignored` + #[test] + #[ignore = "long"] + fn verify_crescendo_emission_schedule() { + // No need to loop over all nets since the relevant params are only + // deflation and activation DAA scores (and the test is long anyway) + for network_id in [NetworkId::new(NetworkType::Mainnet)] { + let mut params: 
Params = network_id.into(); + params.crescendo_activation = ForkActivation::never(); + let cbm = create_manager(¶ms); + let (baseline_epochs, baseline_total) = calculate_emission(cbm); + + let mut activations = vec![10000, 33444444, 120727479]; + for network_id in NetworkId::iter() { + let activation = Params::from(network_id).crescendo_activation; + if activation != ForkActivation::never() && activation != ForkActivation::always() { + activations.push(activation.daa_score()); + } + } + + // Loop over a few random activation points + specified activation points for all nets + for activation in activations { + params.crescendo_activation = ForkActivation::new(activation); + let cbm = create_manager(¶ms); + let (new_epochs, new_total) = calculate_emission(cbm); + + // Epochs only represents the number of times the subsidy changed (lower after activation due to rounding) + println!("BASELINE:\t{}\tepochs, total emission: {}", baseline_epochs, baseline_total); + println!("CRESCENDO:\t{}\tepochs, total emission: {}, activation: {}", new_epochs, new_total, activation); + + let diff = (new_total as i64 - baseline_total as i64) / SOMPI_PER_KASPA as i64; + assert!(diff.abs() <= 51, "activation: {}", activation); + println!("DIFF (KAS): {}", diff); + } + } + } + + fn calculate_emission(cbm: CoinbaseManager) -> (u64, u64) { + let activation = cbm.bps().activation().daa_score(); + let mut current = 0; + let mut total = 0; + let mut epoch = 0u64; + let mut prev = cbm.calc_block_subsidy(0); + loop { + let subsidy = cbm.calc_block_subsidy(current); + // Pre activation we expect the legacy calc (1bps) + if current < activation { + assert_eq!(cbm.legacy_calc_block_subsidy(current), subsidy); + } + if subsidy == 0 { + break; + } + total += subsidy; + if subsidy != prev { + println!("epoch: {}, subsidy: {}", epoch, subsidy); + prev = subsidy; + epoch += 1; + } + current += 1; + } + + (epoch, total) + } + #[test] fn subsidy_test() { const PRE_DEFLATIONARY_PHASE_BASE_SUBSIDY: u64 = 
50000000000; @@ -330,12 +439,17 @@ mod tests { const SECONDS_PER_HALVING: u64 = SECONDS_PER_MONTH * 12; for network_id in NetworkId::iter() { - let params = &network_id.into(); - let cbm = create_manager(params); + let mut params: Params = network_id.into(); + if params.crescendo_activation != ForkActivation::always() { + // We test activation scenarios in verify_crescendo_emission_schedule + params.crescendo_activation = ForkActivation::never(); + } + let cbm = create_manager(¶ms); + let bps = params.bps().before(); - let pre_deflationary_phase_base_subsidy = PRE_DEFLATIONARY_PHASE_BASE_SUBSIDY / params.bps(); - let deflationary_phase_initial_subsidy = DEFLATIONARY_PHASE_INITIAL_SUBSIDY / params.bps(); - let blocks_per_halving = SECONDS_PER_HALVING * params.bps(); + let pre_deflationary_phase_base_subsidy = PRE_DEFLATIONARY_PHASE_BASE_SUBSIDY / bps; + let deflationary_phase_initial_subsidy = DEFLATIONARY_PHASE_INITIAL_SUBSIDY / bps; + let blocks_per_halving = SECONDS_PER_HALVING * bps; struct Test { name: &'static str, @@ -373,7 +487,7 @@ mod tests { Test { name: "after 32 halvings", daa_score: params.deflationary_phase_daa_score + 32 * blocks_per_halving, - expected: (DEFLATIONARY_PHASE_INITIAL_SUBSIDY / 2_u64.pow(32)).div_ceil(cbm.bps()), + expected: (DEFLATIONARY_PHASE_INITIAL_SUBSIDY / 2_u64.pow(32)).div_ceil(bps), }, Test { name: "just before subsidy depleted", @@ -389,7 +503,7 @@ mod tests { for t in tests { assert_eq!(cbm.calc_block_subsidy(t.daa_score), t.expected, "{} test '{}' failed", network_id, t.name); - if params.bps() == 1 { + if bps == 1 { assert_eq!(cbm.legacy_calc_block_subsidy(t.daa_score), t.expected, "{} test '{}' failed", network_id, t.name); } } @@ -478,12 +592,12 @@ mod tests { params.max_coinbase_payload_len, params.deflationary_phase_daa_score, params.pre_deflationary_phase_base_subsidy, - params.target_time_per_block, + params.bps(), ) } /// Return a CoinbaseManager with legacy golang 1 BPS properties fn create_legacy_manager() -> 
CoinbaseManager { - CoinbaseManager::new(150, 204, 15778800 - 259200, 50000000000, 1000) + CoinbaseManager::new(150, 204, 15778800 - 259200, 50000000000, ForkedParam::new_const(1)) } } diff --git a/consensus/src/processes/difficulty.rs b/consensus/src/processes/difficulty.rs index a27da68a25..c14ee35cb6 100644 --- a/consensus/src/processes/difficulty.rs +++ b/consensus/src/processes/difficulty.rs @@ -4,19 +4,24 @@ use crate::model::stores::{ headers::HeaderStoreReader, }; use kaspa_consensus_core::{ - config::params::MIN_DIFFICULTY_WINDOW_LEN, + config::params::{ForkActivation, MAX_DIFFICULTY_TARGET_AS_F64}, errors::difficulty::{DifficultyError, DifficultyResult}, BlockHashSet, BlueWorkType, MAX_WORK_LEVEL, }; +use kaspa_core::{info, log::CRESCENDO_KEYWORD}; +use kaspa_hashes::Hash; use kaspa_math::{Uint256, Uint320}; use std::{ cmp::{max, Ordering}, iter::once_with, ops::Deref, - sync::Arc, + sync::{ + atomic::{AtomicU8, Ordering as AtomicOrdering}, + Arc, + }, }; -use super::ghostdag::ordering::SortableBlock; +use super::{ghostdag::ordering::SortableBlock, utils::CoinFlip}; use itertools::Itertools; trait DifficultyManagerExtension { @@ -63,12 +68,11 @@ trait DifficultyManagerExtension { } #[inline] - fn check_min_difficulty_window_len(difficulty_window_size: usize, min_difficulty_window_len: usize) { + fn check_min_difficulty_window_size(difficulty_window_size: usize, min_difficulty_window_size: usize) { assert!( - MIN_DIFFICULTY_WINDOW_LEN <= min_difficulty_window_len && min_difficulty_window_len <= difficulty_window_size, - "min_difficulty_window_len {} is expected to fit within {}..={}", - min_difficulty_window_len, - MIN_DIFFICULTY_WINDOW_LEN, + min_difficulty_window_size <= difficulty_window_size, + "min_difficulty_window_size {} is expected to be <= difficulty_window_size {}", + min_difficulty_window_size, difficulty_window_size ); } @@ -82,7 +86,7 @@ pub struct FullDifficultyManager { genesis_bits: u32, max_difficulty_target: Uint320, 
difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, target_time_per_block: u64, } @@ -92,16 +96,16 @@ impl FullDifficultyManager { genesis_bits: u32, max_difficulty_target: Uint256, difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, target_time_per_block: u64, ) -> Self { - Self::check_min_difficulty_window_len(difficulty_window_size, min_difficulty_window_len); + Self::check_min_difficulty_window_size(difficulty_window_size, min_difficulty_window_size); Self { headers_store, genesis_bits, max_difficulty_target: max_difficulty_target.into(), difficulty_window_size, - min_difficulty_window_len, + min_difficulty_window_size, target_time_per_block, } } @@ -132,7 +136,7 @@ impl FullDifficultyManager { let mut difficulty_blocks = self.get_difficulty_blocks(window); // Until there are enough blocks for a valid calculation the difficulty should remain constant. - if difficulty_blocks.len() < self.min_difficulty_window_len { + if difficulty_blocks.len() < self.min_difficulty_window_size { return self.genesis_bits; } @@ -164,38 +168,119 @@ impl DifficultyManagerExtension for FullDifficultyManager< } } +#[derive(Clone)] +struct CrescendoLogger { + steps: Arc, +} + +impl CrescendoLogger { + fn new() -> Self { + Self { steps: Arc::new(AtomicU8::new(Self::ACTIVATE)) } + } + + const ACTIVATE: u8 = 0; + const DYNAMIC: u8 = 1; + const FULL: u8 = 2; + + pub fn report_activation_progress(&self, step: u8) -> bool { + if self.steps.compare_exchange(step, step + 1, AtomicOrdering::SeqCst, AtomicOrdering::SeqCst).is_ok() { + match step { + Self::ACTIVATE => { + // TODO (Crescendo): finalize mainnet ascii art + info!(target: CRESCENDO_KEYWORD, + r#" + ____ _ + / ___|_ __ ___ ___ ___ ___ _ __ __| | ___ + | | | '__/ _ \/ __|/ __/ _ \ '_ \ / _` |/ _ \ + | |___| | | __/\__ \ (_| __/ | | | (_| | (_) | + \____|_| \___||___/\___\___|_| |_|\__,_|\___/ + _ _ __ _ ___ _ + / | |__ _ __ ___ \ \ / 
|/ _ \| |__ _ __ ___ + | | '_ \| '_ \/ __| _____\ \ | | | | | '_ \| '_ \/ __| + | | |_) | |_) \__ \ |_____/ / | | |_| | |_) | |_) \__ \ + |_|_.__/| .__/|___/ /_/ |_|\___/|_.__/| .__/|___/ + |_| |_| +"# + ); + info!(target: CRESCENDO_KEYWORD, "[Crescendo] Accelerating block rate 10 fold") + } + Self::DYNAMIC => {} + Self::FULL => {} + _ => {} + } + true + } else { + false + } + } +} + +fn hash_suffix(n: f64) -> (f64, &'static str) { + match n { + n if n < 1_000.0 => (n, "hash/block"), + n if n < 1_000_000.0 => (n / 1_000.0, "Khash/block"), + n if n < 1_000_000_000.0 => (n / 1_000_000.0, "Mhash/block"), + n if n < 1_000_000_000_000.0 => (n / 1_000_000_000.0, "Ghash/block"), + n if n < 1_000_000_000_000_000.0 => (n / 1_000_000_000_000.0, "Thash/block"), + n if n < 1_000_000_000_000_000_000.0 => (n / 1_000_000_000_000_000.0, "Phash/block"), + n => (n / 1_000_000_000_000_000_000.0, "Ehash/block"), + } +} + +fn difficulty_desc(target: Uint320) -> String { + let difficulty = MAX_DIFFICULTY_TARGET_AS_F64 / target.as_f64(); + let hashrate = difficulty * 2.0; + let (rate, suffix) = hash_suffix(hashrate); + format!("{:.2} {}", rate, suffix) +} + /// A difficulty manager implementing [KIP-0004](https://github.com/kaspanet/kips/blob/master/kip-0004.md), /// so based on sampled windows #[derive(Clone)] -pub struct SampledDifficultyManager { +pub struct SampledDifficultyManager { headers_store: Arc, + ghostdag_store: Arc, + genesis_hash: Hash, genesis_bits: u32, max_difficulty_target: Uint320, difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, difficulty_sample_rate: u64, + prior_target_time_per_block: u64, target_time_per_block: u64, + crescendo_activation: ForkActivation, + crescendo_logger: CrescendoLogger, } -impl SampledDifficultyManager { +impl SampledDifficultyManager { + #[allow(clippy::too_many_arguments)] pub fn new( headers_store: Arc, + ghostdag_store: Arc, + genesis_hash: Hash, genesis_bits: u32, 
max_difficulty_target: Uint256, difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, difficulty_sample_rate: u64, + prior_target_time_per_block: u64, target_time_per_block: u64, + crescendo_activation: ForkActivation, ) -> Self { - Self::check_min_difficulty_window_len(difficulty_window_size, min_difficulty_window_len); + Self::check_min_difficulty_window_size(difficulty_window_size, min_difficulty_window_size); Self { headers_store, + ghostdag_store, + genesis_hash, genesis_bits, max_difficulty_target: max_difficulty_target.into(), difficulty_window_size, - min_difficulty_window_len, + min_difficulty_window_size, difficulty_sample_rate, + prior_target_time_per_block, target_time_per_block, + crescendo_activation, + crescendo_logger: CrescendoLogger::new(), } } @@ -230,14 +315,46 @@ impl SampledDifficultyManager { (self.internal_calc_daa_score(ghostdag_data, &mergeset_non_daa), mergeset_non_daa) } - pub fn calculate_difficulty_bits(&self, window: &BlockWindowHeap) -> u32 { - // Note: this fn is duplicated (almost, see `* self.difficulty_sample_rate`) in Full and Sampled structs - // so some alternate calculation can be investigated here. + pub(crate) fn crescendo_activated(&self, selected_parent: Hash) -> bool { + let sp_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + self.crescendo_activation.is_active(sp_daa_score) + } + + pub fn calculate_difficulty_bits(&self, window: &BlockWindowHeap, ghostdag_data: &GhostdagData) -> u32 { let mut difficulty_blocks = self.get_difficulty_blocks(window); // Until there are enough blocks for a valid calculation the difficulty should remain constant. 
- if difficulty_blocks.len() < self.min_difficulty_window_len { - return self.genesis_bits; + // + // [Crescendo]: post activation special case -- first activated blocks which do not have + // enough activated samples in their past + if difficulty_blocks.len() < self.min_difficulty_window_size { + let selected_parent = ghostdag_data.selected_parent; + if selected_parent == self.genesis_hash { + return self.genesis_bits; + } + + // We will use the selected parent as a source for the difficulty bits + let bits = self.headers_store.get_bits(selected_parent).unwrap(); + + // Check if the selected parent itself is already post crescendo activation (by checking the DAA score + // of its selected parent). We ruled out genesis, so we can safely assume the grandparent exists + if self.crescendo_activated(self.ghostdag_store.get_selected_parent(selected_parent).unwrap()) { + // In this case we simply take the selected parent bits as is + return bits; + } else { + // This indicates we are at the first blocks post activation (i.e., the selected parent was not activated). 
+ // We use the selected parent target difficulty as baseline and scale it by the target_time_per_block ratio change + let target = Uint320::from(Uint256::from_compact_target_bits(bits)); + let scaled_target = target * self.prior_target_time_per_block / self.target_time_per_block; + let scaled_bits = Uint256::try_from(scaled_target.min(self.max_difficulty_target)).unwrap().compact_target_bits(); + + if self.crescendo_logger.report_activation_progress(CrescendoLogger::ACTIVATE) { + info!(target: CRESCENDO_KEYWORD, "[Crescendo] Block target time change: {} -> {} milliseconds", self.prior_target_time_per_block, self.target_time_per_block); + info!(target: CRESCENDO_KEYWORD, "[Crescendo] Difficulty change: {} -> {} ", difficulty_desc(target), difficulty_desc(scaled_target)); + } + + return scaled_bits; + } } let (min_ts_index, max_ts_index) = difficulty_blocks.iter().position_minmax().into_option().unwrap(); @@ -256,6 +373,27 @@ impl SampledDifficultyManager { let measured_duration = max(max_ts - min_ts, 1); let expected_duration = self.target_time_per_block * self.difficulty_sample_rate * difficulty_blocks_len; // This does differ from FullDifficultyManager version let new_target = average_target * measured_duration / expected_duration; + + if difficulty_blocks_len + 1 < self.difficulty_window_size as u64 { + if self.crescendo_logger.report_activation_progress(CrescendoLogger::DYNAMIC) { + info!(target: CRESCENDO_KEYWORD, + "[Crescendo] Dynamic DAA reactivated, scaling the difficulty by the measured/expected duration ratio: \n\t\t\t\t\t\t {} -> {} (measured duration: {}, expected duration: {}, ratio {:.4})", + difficulty_desc(average_target), + difficulty_desc(new_target), + measured_duration, + expected_duration, + measured_duration as f64 / expected_duration as f64 + ); + } + if CoinFlip::default().flip() { + info!(target: CRESCENDO_KEYWORD, + "[Crescendo] DAA window increasing post activation: {} (target: {})", + difficulty_blocks_len + 1, + 
self.difficulty_window_size + ); + } + } + Uint256::try_from(new_target.min(self.max_difficulty_target)).expect("max target < Uint256::MAX").compact_target_bits() } @@ -264,7 +402,7 @@ impl SampledDifficultyManager { } } -impl DifficultyManagerExtension for SampledDifficultyManager { +impl DifficultyManagerExtension for SampledDifficultyManager { fn headers_store(&self) -> &dyn HeaderStoreReader { self.headers_store.deref() } diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 1032868ee0..999c43de0e 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use kaspa_consensus_core::{ blockhash::{self, BlockHashExtensions, BlockHashes}, + config::params::ForkedParam, BlockHashMap, BlockLevel, BlueWorkType, HashMapCustomHasher, }; use kaspa_hashes::Hash; @@ -24,7 +25,7 @@ use super::ordering::*; #[derive(Clone)] pub struct GhostdagManager { genesis_hash: Hash, - pub(super) k: KType, + pub(super) k: ForkedParam, pub(super) ghostdag_store: Arc, pub(super) relations_store: S, pub(super) headers_store: Arc, @@ -43,7 +44,7 @@ pub struct GhostdagManager GhostdagManager { pub fn new( genesis_hash: Hash, - k: KType, + k: ForkedParam, ghostdag_store: Arc, relations_store: S, headers_store: Arc, @@ -65,7 +66,7 @@ impl Self { Self { genesis_hash, - k, + k: ForkedParam::new_const(k), ghostdag_store, relations_store, reachability_service, @@ -128,13 +129,20 @@ impl, candidate_blue_anticone_size: &mut KType, + k: KType, ) -> ColoringState { // If blue_candidate is in the future of chain_block, it means // that all remaining blues are in the past of chain_block and thus @@ -189,21 +192,28 @@ impl self.k { + if *candidate_blue_anticone_size > k { // k-cluster violation: The candidate's blue anticone exceeded k return ColoringState::Red; } - if *candidate_blues_anticone_sizes.get(&block).unwrap() == self.k { + if peer_blue_anticone_size == 
k { // k-cluster violation: A block in candidate's blue anticone already // has k blue blocks in its own anticone return ColoringState::Red; @@ -211,7 +221,9 @@ impl= k } ColoringState::Pending @@ -236,14 +248,14 @@ impl ColoringOutput { + fn check_blue_candidate(&self, new_block_data: &GhostdagData, blue_candidate: Hash, k: KType) -> ColoringOutput { // The maximum length of new_block_data.mergeset_blues can be K+1 because // it contains the selected parent. - if new_block_data.mergeset_blues.len() as KType == self.k + 1 { + if new_block_data.mergeset_blues.len() as KType == k + 1 { return ColoringOutput::Red; } - let mut candidate_blues_anticone_sizes: BlockHashMap = BlockHashMap::with_capacity(self.k as usize); + let mut candidate_blues_anticone_sizes: BlockHashMap = BlockHashMap::with_capacity(k as usize); // Iterate over all blocks in the blue past of the new block that are not in the past // of blue_candidate, and check for each one of them if blue_candidate potentially // enlarges their blue anticone to be over K, or that they enlarge the blue anticone @@ -258,6 +270,7 @@ impl SampledPastMedianTimeManager { Self { headers_store, genesis_timestamp } } - pub fn calc_past_median_time(&self, window: &BlockWindowHeap) -> Result { + pub fn calc_past_median_time(&self, window: &BlockWindowHeap, selected_parent: Hash) -> Result { // The past median time is actually calculated taking the average of the 11 values closest to the center // of the sorted timestamps const AVERAGE_FRAME_SIZE: usize = 11; + /* + + [Crescendo]: In the first moments post activation the median time window will be empty or smaller than expected. + Which means that past median time will be closer to current time and less flexible. This is ok since + BBT makes sure to respect this lower bound. The following alternatives were considered and ruled out: + + 1. 
fill the window with non activated blocks as well, this means the sampled window will go 10x + time back (~45 minutes), so the timestamp for the first blocks post activation can go ~22 + minutes back (if abused). The result for DAA can be further temporary acceleration beyond + the new desired BPS (window duration will be much longer than expected hence difficulty will + go down further). + + 2. sampling the window before and after the activation with different corresponding sample rates. This approach + is ruled out due to complexity, and because the proposed (simpler) solution has no significant drawbacks. + + With the proposed solution, the worst case scenario can be forcing the last blocks pre-activation to a timestamp + which is timestamp_deviation_tolerance seconds in the future (~2 minutes), which will force the first blocks post + activation to this timestamp as well. However, this will only slightly smooth out the block rate transition. + */ + if window.is_empty() { - return Ok(self.genesis_timestamp); + // [Crescendo]: this indicates we are in the few seconds post activation where the window is + // still empty, simply take the selected parent timestamp + return Ok(self.headers_store.get_timestamp(selected_parent).unwrap()); } let mut window_timestamps: Vec = diff --git a/consensus/src/processes/pruning.rs b/consensus/src/processes/pruning.rs index 5916df74d7..6e4af17893 100644 --- a/consensus/src/processes/pruning.rs +++ b/consensus/src/processes/pruning.rs @@ -1,6 +1,6 @@ use std::{collections::VecDeque, sync::Arc}; -use super::reachability::ReachabilityResultExtensions; +use super::{reachability::ReachabilityResultExtensions, utils::CoinFlip}; use crate::model::{ services::reachability::{MTReachabilityService, ReachabilityService}, stores::{ @@ -9,12 +9,29 @@ use crate::model::{ headers_selected_tip::HeadersSelectedTipStoreReader, past_pruning_points::PastPruningPointsStoreReader, pruning::PruningPointInfo, + pruning_samples::PruningSamplesStore, 
reachability::ReachabilityStoreReader, }, }; +use kaspa_consensus_core::{ + blockhash::BlockHashExtensions, + config::params::ForkedParam, + errors::pruning::{PruningImportError, PruningImportResult}, +}; +use kaspa_core::{info, log::CRESCENDO_KEYWORD}; +use kaspa_database::prelude::StoreResultEmptyTuple; use kaspa_hashes::Hash; use parking_lot::RwLock; +pub struct PruningPointReply { + /// The most recent pruning sample from POV of the queried block (with distance up to ~F) + pub pruning_sample: Hash, + + /// The pruning point of the queried block. I.e., the most recent pruning sample with + /// depth P (except for shortly after the fork where the new P' is gradually reached) + pub pruning_point: Hash, +} + #[derive(Clone)] pub struct PruningPointManager< S: GhostdagStoreReader, @@ -22,9 +39,16 @@ pub struct PruningPointManager< U: HeaderStoreReader, V: PastPruningPointsStoreReader, W: HeadersSelectedTipStoreReader, + Y: PruningSamplesStore, > { - pruning_depth: u64, - finality_depth: u64, + /// Forked pruning depth param. Throughout this file we use P, P' to indicate the pre, post activation depths respectively + pruning_depth: ForkedParam, + + /// Forked finality depth param. Throughout this file we use F, F' to indicate the pre, post activation depths respectively. 
+ /// Note that this quantity represents here the interval between pruning point samples and is not tightly coupled with the + /// actual concept of finality as used by virtual processor to reject deep reorgs + finality_depth: ForkedParam, + genesis_hash: Hash, reachability_service: MTReachabilityService, @@ -32,6 +56,10 @@ pub struct PruningPointManager< headers_store: Arc, past_pruning_points_store: Arc, header_selected_tip_store: Arc>, + pruning_samples_store: Arc, + + /// The number of hops to go through pruning samples in order to get the pruning point of a sample + pruning_samples_steps: u64, } impl< @@ -40,18 +68,29 @@ impl< U: HeaderStoreReader, V: PastPruningPointsStoreReader, W: HeadersSelectedTipStoreReader, - > PruningPointManager + Y: PruningSamplesStore, + > PruningPointManager { pub fn new( - pruning_depth: u64, - finality_depth: u64, + pruning_depth: ForkedParam, + finality_depth: ForkedParam, genesis_hash: Hash, reachability_service: MTReachabilityService, ghostdag_store: Arc, headers_store: Arc, past_pruning_points_store: Arc, header_selected_tip_store: Arc>, + pruning_samples_store: Arc, ) -> Self { + // [Crescendo]: These conditions ensure that blue score points with the same finality score before + // the fork will remain with the same finality score post the fork. See below for the usage. 
+ assert!(finality_depth.before() <= finality_depth.after()); + assert!(finality_depth.after() % finality_depth.before() == 0); + assert!(pruning_depth.before() <= pruning_depth.after()); + + let pruning_samples_steps = pruning_depth.before().div_ceil(finality_depth.before()); + assert_eq!(pruning_samples_steps, pruning_depth.after().div_ceil(finality_depth.after())); + Self { pruning_depth, finality_depth, @@ -61,53 +100,247 @@ impl< headers_store, past_pruning_points_store, header_selected_tip_store, + pruning_samples_steps, + pruning_samples_store, } } - pub fn next_pruning_points_and_candidate_by_ghostdag_data( + /// The new method for calculating the expected pruning point from some POV (header/virtual) using the new + /// pruning samples store. Except for edge cases during fork transition, this method is expected to retain + /// the exact semantics of current rules (v1). + /// + /// Let B denote the current block (represented by `ghostdag_data`) + /// Assumptions: + /// 1. Unlike v1 this method assumes that the current global pruning point is on B's chain, which + /// is why it should be called only for chain candidates / sink / virtual + /// 2. 
All chain ancestors of B up to the pruning point are expected to have a + /// `pruning_sample_from_pov` store entry + pub fn expected_header_pruning_point_v2(&self, ghostdag_data: CompactGhostdagData) -> PruningPointReply { + // + // Note that past pruning samples are only assumed to have a header store entry and a pruning sample + // store entry, se we only use these stores here (and specifically do not use the ghostdag store) + // + + let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + let pruning_depth = self.pruning_depth.get(selected_parent_daa_score); + let finality_depth = self.finality_depth.get(selected_parent_daa_score); + + let selected_parent_blue_score = self.headers_store.get_blue_score(ghostdag_data.selected_parent).unwrap(); + + let pruning_sample = if ghostdag_data.selected_parent == self.genesis_hash { + self.genesis_hash + } else { + let selected_parent_pruning_sample = + self.pruning_samples_store.pruning_sample_from_pov(ghostdag_data.selected_parent).unwrap(); + let selected_parent_pruning_sample_blue_score = self.headers_store.get_blue_score(selected_parent_pruning_sample).unwrap(); + + if self.is_pruning_sample(selected_parent_blue_score, selected_parent_pruning_sample_blue_score, finality_depth) { + // The selected parent is the most recent sample + ghostdag_data.selected_parent + } else { + // ...otherwise take the sample from its pov + selected_parent_pruning_sample + } + }; + + let is_self_pruning_sample = self.is_pruning_sample(ghostdag_data.blue_score, selected_parent_blue_score, finality_depth); + let selected_parent_pruning_point = self.headers_store.get_header(ghostdag_data.selected_parent).unwrap().pruning_point; + let mut steps = 1; + let mut current = pruning_sample; + let pruning_point = loop { + if current == self.genesis_hash { + break current; + } + let current_blue_score = self.headers_store.get_blue_score(current).unwrap(); + // Find the most recent sample with pruning 
depth + if current_blue_score + pruning_depth <= ghostdag_data.blue_score { + break current; + } + // For samples: special clamp for the period right after the fork (where we reach ceiling(P/F) steps before reaching P' depth) + if is_self_pruning_sample && steps == self.pruning_samples_steps { + break current; + } + // For non samples: clamp to selected parent pruning point to maintain monotonicity (needed because of the previous condition) + if current == selected_parent_pruning_point { + break current; + } + current = self.pruning_samples_store.pruning_sample_from_pov(current).unwrap(); + steps += 1; + }; + + PruningPointReply { pruning_sample, pruning_point } + } + + fn log_pruning_depth_post_activation( &self, ghostdag_data: CompactGhostdagData, - suggested_low_hash: Option, + selected_parent_daa_score: u64, + pruning_point_blue_score: u64, + ) { + if self.pruning_depth.activation().is_active(selected_parent_daa_score) + && ghostdag_data.blue_score.saturating_sub(pruning_point_blue_score) < self.pruning_depth.after() + && CoinFlip::new(1.0 / 1000.0).flip() + { + info!(target: CRESCENDO_KEYWORD, + "[Crescendo] Pruning depth increasing post activation: {} (target: {})", + ghostdag_data.blue_score.saturating_sub(pruning_point_blue_score), + self.pruning_depth.after() + ); + } + } + + /// A block is a pruning sample *iff* its own finality score is larger than its pruning sample + /// finality score or its selected parent finality score (or any block in between them). + /// + /// To see why we can compare to any such block, observe that by definition all blocks in the range + /// `[pruning sample, selected parent]` must have the same finality score. 
+ fn is_pruning_sample(&self, self_blue_score: u64, epoch_chain_ancestor_blue_score: u64, finality_depth: u64) -> bool { + self.finality_score(epoch_chain_ancestor_blue_score, finality_depth) < self.finality_score(self_blue_score, finality_depth) + } + + pub fn next_pruning_points( + &self, + sink_ghostdag: CompactGhostdagData, current_candidate: Hash, current_pruning_point: Hash, ) -> (Vec, Hash) { - let low_hash = match suggested_low_hash { - Some(suggested) => { - if !self.reachability_service.is_chain_ancestor_of(suggested, current_candidate) { - assert!(self.reachability_service.is_chain_ancestor_of(current_candidate, suggested)); - suggested - } else { - current_candidate - } - } - None => current_candidate, - }; + if sink_ghostdag.selected_parent.is_origin() { + // This only happens when sink is genesis + return (vec![], current_candidate); + } + let selected_parent_daa_score = self.headers_store.get_daa_score(sink_ghostdag.selected_parent).unwrap(); + if self.pruning_depth.activation().is_active(selected_parent_daa_score) { + let v2 = self.next_pruning_points_v2(sink_ghostdag, selected_parent_daa_score, current_pruning_point); + // Keep the candidate valid also post activation just in case it's still used by v1 calls + let candidate = v2.last().copied().unwrap_or(current_candidate); + (v2, candidate) + } else { + let (v1, candidate) = self.next_pruning_points_v1(sink_ghostdag, current_candidate, current_pruning_point); + // [Crescendo]: sanity check that v2 logic pre activation is equivalent to v1 + let v2 = self.next_pruning_points_v2(sink_ghostdag, selected_parent_daa_score, current_pruning_point); + assert_eq!(v1, v2, "v1 = v2 pre activation"); + (v1, candidate) + } + } + + fn next_pruning_points_v2( + &self, + sink_ghostdag: CompactGhostdagData, + selected_parent_daa_score: u64, + current_pruning_point: Hash, + ) -> Vec { + let current_pruning_point_blue_score = self.headers_store.get_blue_score(current_pruning_point).unwrap(); + + // Sanity check #1: 
global pruning point depth from sink >= min(P, P') + if current_pruning_point_blue_score + self.pruning_depth.lower_bound() > sink_ghostdag.blue_score { + // During initial IBD the sink can be close to the global pruning point. + // We use min(P, P') here and rely on sanity check #2 for post activation edge cases + return vec![]; + } + + let sink_pruning_point = self.expected_header_pruning_point_v2(sink_ghostdag).pruning_point; + let sink_pruning_point_blue_score = self.headers_store.get_blue_score(sink_pruning_point).unwrap(); + + // Log the current pruning depth if it has not reached P' yet + self.log_pruning_depth_post_activation(sink_ghostdag, selected_parent_daa_score, sink_pruning_point_blue_score); + + // Sanity check #2: if the sink pruning point is lower or equal to current, there is no need to search + if sink_pruning_point_blue_score <= current_pruning_point_blue_score { + return vec![]; + } + + let mut current = sink_pruning_point; + let mut deque = VecDeque::with_capacity(self.pruning_samples_steps as usize); + // At this point we have verified that sink_pruning_point is a chain block above current_pruning_point + // (by comparing blue score) so we know the loop must eventually exit correctly + while current != current_pruning_point { + deque.push_front(current); + current = self.pruning_samples_store.pruning_sample_from_pov(current).unwrap(); + } + deque.into() + } + + fn next_pruning_points_v1( + &self, + ghostdag_data: CompactGhostdagData, + current_candidate: Hash, + current_pruning_point: Hash, + ) -> (Vec, Hash) { + let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + let pruning_depth = self.pruning_depth.get(selected_parent_daa_score); + let finality_depth = self.finality_depth.get(selected_parent_daa_score); + self.next_pruning_points_v1_inner(ghostdag_data, current_candidate, current_pruning_point, pruning_depth, finality_depth) + } + + /// Returns the next pruning points and an updated 
pruning point candidate given the current + /// pruning point (P), a current candidate (C) and a target block B (represented by GD data). + /// + /// The pruning point candidate C is a moving block which usually has pruning depth from sink but + /// its finality score is still equal to P. It serves as an optimal starting point for searching + /// up rather then restarting the search from P each time. + /// + /// Assumptions: P ∈ chain(C), C ∈ chain(B), P and C have the same finality score + /// + /// Returns: new pruning points ordered from bottom up and an updated candidate + fn next_pruning_points_v1_inner( + &self, + ghostdag_data: CompactGhostdagData, + current_candidate: Hash, + current_pruning_point: Hash, + pruning_depth: u64, + finality_depth: u64, + ) -> (Vec, Hash) { // If the pruning point is more out of date than that, an IBD with headers proof is needed anyway. - let mut new_pruning_points = Vec::with_capacity((self.pruning_depth / self.finality_depth) as usize); + let mut new_pruning_points = Vec::with_capacity((pruning_depth / finality_depth) as usize); let mut latest_pruning_point_bs = self.ghostdag_store.get_blue_score(current_pruning_point).unwrap(); - if latest_pruning_point_bs + self.pruning_depth > ghostdag_data.blue_score { + if latest_pruning_point_bs + pruning_depth > ghostdag_data.blue_score { // The pruning point is not in depth of self.pruning_depth, so there's // no point in checking if it is required to update it. This can happen - // because the virtual is not updated after IBD, so the pruning point + // because virtual is not immediately updated during IBD, so the pruning point // might be in depth less than self.pruning_depth. 
return (vec![], current_candidate); } let mut new_candidate = current_candidate; - for selected_child in self.reachability_service.forward_chain_iterator(low_hash, ghostdag_data.selected_parent, true) { + /* + [Crescendo] + + Notation: + P = pruning point + C = candidate + F0 = the finality depth before the fork + F1 = the finality depth after the fork + + Property 1: F0 <= F1 AND F1 % F0 == 0 (validated in Self::new) + + Remark 1: if P,C had the same finality score with regard to F0, they have the same finality score also with regard to F1 + + Proof by picture (based on Property 1): + F0: [ 0 ] [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] ... [ 9 ] ... + F1: [ 0 ] [ 1 ] ... + + (each row divides the blue score space into finality score buckets with F0 or F1 numbers in each bucket correspondingly) + + This means we can safely begin the search from C even in the few moments post the fork (i.e., there's no fear of needing to "pull" C back) + + Note that overall this search is guaranteed to provide the desired monotonicity described in KIP-14: + https://github.com/kaspanet/kips/blob/master/kip-0014.md#pruning-point-adjustment + */ + for selected_child in self.reachability_service.forward_chain_iterator(current_candidate, ghostdag_data.selected_parent, true) + { let selected_child_bs = self.ghostdag_store.get_blue_score(selected_child).unwrap(); - if ghostdag_data.blue_score - selected_child_bs < self.pruning_depth { + if ghostdag_data.blue_score - selected_child_bs < pruning_depth { break; } new_candidate = selected_child; let new_candidate_bs = selected_child_bs; - if self.finality_score(new_candidate_bs) > self.finality_score(latest_pruning_point_bs) { + if self.finality_score(new_candidate_bs, finality_depth) > self.finality_score(latest_pruning_point_bs, finality_depth) { new_pruning_points.push(new_candidate); latest_pruning_point_bs = new_candidate_bs; } @@ -116,21 +349,40 @@ impl< (new_pruning_points, new_candidate) } - // finality_score is the number of finality intervals 
passed since - // the given block. - fn finality_score(&self, blue_score: u64) -> u64 { - blue_score / self.finality_depth + /// Returns the floored integer division of blue score by finality depth. + /// The returned number represent the sampling epoch this blue score point belongs to. + fn finality_score(&self, blue_score: u64, finality_depth: u64) -> u64 { + blue_score / finality_depth } - pub fn expected_header_pruning_point(&self, ghostdag_data: CompactGhostdagData, pruning_info: PruningPointInfo) -> Hash { + fn expected_header_pruning_point_v1_inner( + &self, + ghostdag_data: CompactGhostdagData, + current_candidate: Hash, + current_pruning_point: Hash, + pruning_depth: u64, + finality_depth: u64, + ) -> Hash { + self.next_pruning_points_v1_inner(ghostdag_data, current_candidate, current_pruning_point, pruning_depth, finality_depth) + .0 + .last() + .copied() + .unwrap_or(current_pruning_point) + } + + pub fn expected_header_pruning_point_v1(&self, ghostdag_data: CompactGhostdagData, pruning_info: PruningPointInfo) -> Hash { if ghostdag_data.selected_parent == self.genesis_hash { return self.genesis_hash; } + let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + let pruning_depth = self.pruning_depth.get(selected_parent_daa_score); + let finality_depth = self.finality_depth.get(selected_parent_daa_score); + let (current_pruning_point, current_candidate, current_pruning_point_index) = pruning_info.decompose(); - let sp_header_pp = self.headers_store.get_header(ghostdag_data.selected_parent).unwrap().pruning_point; - let sp_header_pp_blue_score = self.headers_store.get_blue_score(sp_header_pp).unwrap(); + let sp_pp = self.headers_store.get_header(ghostdag_data.selected_parent).unwrap().pruning_point; + let sp_pp_blue_score = self.headers_store.get_blue_score(sp_pp).unwrap(); // If the block doesn't have the pruning in its selected chain we know for sure that it can't trigger a pruning point // change (we 
check the selected parent to take care of the case where the block is the virtual which doesn't have reachability data). @@ -140,35 +392,70 @@ impl< // Note: the pruning point from the POV of the current block is the first block in its chain that is in depth of self.pruning_depth and // its finality score is greater than the previous pruning point. This is why if the diff between finality_score(selected_parent.blue_score + 1) * finality_interval // and the current block blue score is less than self.pruning_depth we can know for sure that this block didn't trigger a pruning point change. - let min_required_blue_score_for_next_pruning_point = (self.finality_score(sp_header_pp_blue_score) + 1) * self.finality_depth; + let min_required_blue_score_for_next_pruning_point = + (self.finality_score(sp_pp_blue_score, finality_depth) + 1) * finality_depth; let next_or_current_pp = if has_pruning_point_in_its_selected_chain - && min_required_blue_score_for_next_pruning_point + self.pruning_depth <= ghostdag_data.blue_score + && min_required_blue_score_for_next_pruning_point + pruning_depth <= ghostdag_data.blue_score { // If the selected parent pruning point is in the future of current global pruning point, then provide it as a suggestion - let suggested_low_hash = self - .reachability_service - .is_dag_ancestor_of_result(current_pruning_point, sp_header_pp) - .unwrap_option() - .and_then(|b| if b { Some(sp_header_pp) } else { None }); - let (new_pruning_points, _) = self.next_pruning_points_and_candidate_by_ghostdag_data( - ghostdag_data, - suggested_low_hash, - current_candidate, - current_pruning_point, - ); + let sp_pp_in_global_pp_future = + self.reachability_service.is_dag_ancestor_of_result(current_pruning_point, sp_pp).unwrap_option().is_some_and(|b| b); + + /* + Notation: + P = global pruning point + C = global candidate + B = current block (can be virtual) + S = B's selected parent + R = S's pruning point + F = the finality depth + */ + + let (pp, cc) = if 
sp_pp_in_global_pp_future { + if self.reachability_service.is_chain_ancestor_of(sp_pp, current_candidate) { + // R ∈ future(P), R ∈ chain(C): use R as pruning point and C as candidate + // There are two cases: (i) C is not deep enough from B, R will be returned + // (ii) C is deep enough and the search will start from it, possibly finding a new pruning point for B + (sp_pp, current_candidate) + } else { + // R ∈ future(P), R ∉ chain(C): Use R as candidate as well. + // This might require a long walk up from R (bounded by F), however it is highly unlikely since it + // requires a ~pruning depth deep parallel chain + (sp_pp, sp_pp) + } + } else if self.reachability_service.is_chain_ancestor_of(current_candidate, ghostdag_data.selected_parent) { + // R ∉ future(P), P,C ∈ chain(B) + (current_pruning_point, current_candidate) + } else { + // R ∉ future(P), P ∈ chain(B), C ∉ chain(B) + (current_pruning_point, current_pruning_point) + }; - new_pruning_points.last().copied().unwrap_or(current_pruning_point) + self.expected_header_pruning_point_v1_inner(ghostdag_data, cc, pp, pruning_depth, finality_depth) } else { - sp_header_pp + sp_pp }; - if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, next_or_current_pp) { + // [Crescendo]: shortly after fork activation, R is not guaranteed to comply with the new + // increased pruning depth, so we must manually verify not to go below it + if sp_pp_blue_score >= self.headers_store.get_blue_score(next_or_current_pp).unwrap() { + return sp_pp; + } + + if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, next_or_current_pp, pruning_depth) { return next_or_current_pp; } for i in (0..=current_pruning_point_index).rev() { let past_pp = self.past_pruning_points_store.get(i).unwrap(); - if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, past_pp) { + + // [Crescendo]: shortly after fork activation, R is not guaranteed to comply with the new + // increased pruning depth, so we must manually 
verify not to go below it + if sp_pp_blue_score >= self.headers_store.get_blue_score(past_pp).unwrap() { + return sp_pp; + } + + if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, past_pp, pruning_depth) { return past_pp; } } @@ -176,24 +463,29 @@ impl< self.genesis_hash } - fn is_pruning_point_in_pruning_depth(&self, pov_blue_score: u64, pruning_point: Hash) -> bool { + fn is_pruning_point_in_pruning_depth(&self, pov_blue_score: u64, pruning_point: Hash, pruning_depth: u64) -> bool { let pp_bs = self.headers_store.get_blue_score(pruning_point).unwrap(); - pov_blue_score >= pp_bs + self.pruning_depth + pov_blue_score >= pp_bs + pruning_depth } - pub fn is_valid_pruning_point(&self, pp_candidate: Hash, hst: Hash) -> bool { + pub fn is_valid_pruning_point(&self, pp_candidate: Hash, tip: Hash) -> bool { if pp_candidate == self.genesis_hash { return true; } - if !self.reachability_service.is_chain_ancestor_of(pp_candidate, hst) { + if !self.reachability_service.is_chain_ancestor_of(pp_candidate, tip) { return false; } - let hst_bs = self.ghostdag_store.get_blue_score(hst).unwrap(); - self.is_pruning_point_in_pruning_depth(hst_bs, pp_candidate) + let tip_bs = self.ghostdag_store.get_blue_score(tip).unwrap(); + // [Crescendo]: for new nodes syncing right after the fork, it might be difficult to determine whether the + // new pruning depth is expected, so we use the DAA score of the pruning point itself as an indicator. 
+ // This means that in the first few days following the fork we err on the side of a shorter period which is + // a weaker requirement + let pruning_depth = self.pruning_depth.get(self.headers_store.get_daa_score(pp_candidate).unwrap()); + self.is_pruning_point_in_pruning_depth(tip_bs, pp_candidate, pruning_depth) } - pub fn are_pruning_points_in_valid_chain(&self, pruning_info: PruningPointInfo, hst: Hash) -> bool { + pub fn are_pruning_points_in_valid_chain(&self, pruning_info: PruningPointInfo, syncer_sink: Hash) -> PruningImportResult<()> { // We want to validate that the past pruning points form a chain to genesis. Since // each pruning point's header doesn't point to the previous pruning point, but to // the pruning point from its POV, we can't just traverse from one pruning point to @@ -210,10 +502,29 @@ impl< // any other pruning point in the list, so we are compelled to check if it's referenced by // the selected chain. let mut expected_pps_queue = VecDeque::new(); - for current in self.reachability_service.backward_chain_iterator(hst, pruning_info.pruning_point, false) { + for current in self.reachability_service.forward_chain_iterator(pruning_info.pruning_point, syncer_sink, true).skip(1) { let current_header = self.headers_store.get_header(current).unwrap(); - if expected_pps_queue.back().is_none_or(|&h| h != current_header.pruning_point) { - expected_pps_queue.push_back(current_header.pruning_point); + // Post-crescendo: expected header pruning point is no longer part of header validity, but we want to make sure + // the syncer's virtual chain indeed coincides with the pruning point and past pruning points before downloading + // the UTXO set and resolving virtual. Hence we perform the check over this chain here. 
+ let reply = self.expected_header_pruning_point_v2(self.ghostdag_store.get_compact_data(current).unwrap()); + if reply.pruning_point != current_header.pruning_point { + return Err(PruningImportError::WrongHeaderPruningPoint(current_header.pruning_point, current)); + } + // Save so that following blocks can recursively use this value + self.pruning_samples_store.insert(current, reply.pruning_sample).unwrap_or_exists(); + /* + Going up the chain from the pruning point to the sink. The goal is to exit this loop with a queue [P(0), P(-1), P(-2), ..., P(-n)] + where P(0) is the current pruning point, P(-1) is the point before it and P(-n) is the pruning point of P(0). That is, + ceiling(P/F) = n (where n is usually 3). + + Let C be the current block's pruning point. Push to the front of the queue if: + 1. the queue is empty; OR + 2. the front of the queue is different than C; AND + 3. the front of the queue is different than P(0) (if it is P(0), we already filled the queue with what we need) + */ + if expected_pps_queue.front().is_none_or(|&h| h != current_header.pruning_point && h != pruning_info.pruning_point) { + expected_pps_queue.push_front(current_header.pruning_point); } } @@ -222,18 +533,18 @@ impl< let pp_header = self.headers_store.get_header(pp).unwrap(); let Some(expected_pp) = expected_pps_queue.pop_front() else { // If we have less than expected pruning points. - return false; + return Err(PruningImportError::MissingPointedPruningPoint); }; if expected_pp != pp { - return false; + return Err(PruningImportError::WrongPointedPruningPoint); } if idx == 0 { // The 0th pruning point should always be genesis, and no // more pruning points should be expected below it. if !expected_pps_queue.is_empty() || pp != self.genesis_hash { - return false; + return Err(PruningImportError::UnpointedPruningPoint); } break; } @@ -249,16 +560,34 @@ impl< None => { // expected_pps_queue should always have one block in the queue // until we reach genesis. 
- return false; + return Err(PruningImportError::MissingPointedPruningPoint); } } } - true + Ok(()) } } #[cfg(test)] mod tests { - // TODO: add unit-tests for next_pruning_point_and_candidate_by_block_hash and expected_header_pruning_point + use kaspa_consensus_core::{config::params::Params, network::NetworkType}; + + #[test] + fn assert_pruning_depth_consistency() { + for net in NetworkType::iter() { + let params: Params = net.into(); + + let pruning_depth = params.pruning_depth(); + let finality_depth = params.finality_depth(); + let ghostdag_k = params.ghostdag_k(); + + // Assert P is not a multiple of F +- noise(K) + let mod_before = pruning_depth.before() % finality_depth.before(); + assert!((ghostdag_k.before() as u64) < mod_before && mod_before < finality_depth.before() - ghostdag_k.before() as u64); + + let mod_after = pruning_depth.after() % finality_depth.after(); + assert!((ghostdag_k.after() as u64) < mod_after && mod_after < finality_depth.after() - ghostdag_k.after() as u64); + } + } } diff --git a/consensus/src/processes/pruning_proof/build.rs b/consensus/src/processes/pruning_proof/build.rs index 664eb5981b..cdb2997b78 100644 --- a/consensus/src/processes/pruning_proof/build.rs +++ b/consensus/src/processes/pruning_proof/build.rs @@ -5,7 +5,7 @@ use kaspa_consensus_core::{ blockhash::{BlockHashExtensions, BlockHashes}, header::Header, pruning::PruningPointProof, - BlockHashSet, BlockLevel, HashMapCustomHasher, + BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; use kaspa_core::debug; use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions, DB}; @@ -71,6 +71,11 @@ impl PruningProofManager { let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db); + // The pruning proof can contain many duplicate headers (across levels), so we use 
a local cache in order + // to make sure we hold a single Arc per header + let mut cache: BlockHashMap> = BlockHashMap::with_capacity(4 * self.pruning_proof_m as usize); + let mut get_header = |hash| cache.entry(hash).or_insert_with_key(|&hash| self.headers_store.get_header(hash).unwrap()).clone(); + (0..=self.max_block_level) .map(|level| { let level = level as usize; @@ -114,7 +119,7 @@ impl PruningProofManager { let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); + queue.push(Reverse(SortableBlock::new(root, get_header(root).blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -130,9 +135,9 @@ impl PruningProofManager { continue; } - headers.push(self.headers_store.get_header(current).unwrap()); + headers.push(get_header(current)); for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { - queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work))); + queue.push(Reverse(SortableBlock::new(child, get_header(child).blue_work))); } } @@ -216,7 +221,7 @@ impl PruningProofManager { &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, - current_dag_level: BlockLevel, + _current_dag_level: BlockLevel, required_block: Option, temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { @@ -232,11 +237,17 @@ impl PruningProofManager { // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from. 
// We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level - let mut required_base_level_depth = self.estimated_blue_depth_at_level_0( - level, - required_level_depth + 100, // We take a safety margin - current_dag_level, - ); + // TODO: uncomment when the full fix to minimize proof sizes comes. + // let mut required_base_level_depth = self.estimated_blue_depth_at_level_0( + // level, + // required_level_depth + 100, // We take a safety margin + // current_dag_level, + // ); + // NOTE: Starting from required_level_depth (a much lower starting point than normal) will typically require O(N) iterations + // for level L + N where L is the current dag level. This is fine since the steps per iteration are still exponential + // and so we will complete each level in not much more than N iterations per level. + // We start here anyway so we can try to minimize the proof size when the current dag level goes down significantly. + let mut required_base_level_depth = required_level_depth + 100; let mut is_last_level_header; let mut tries = 0; @@ -285,6 +296,7 @@ impl PruningProofManager { &ghostdag_store, Some(block_at_depth_m_at_next_level), level, + self.ghostdag_k.get(pp_header.header.daa_score), ); // Step 4 - Check if we actually have enough depth. 
@@ -325,6 +337,7 @@ impl PruningProofManager { ghostdag_store: &Arc, required_block: Option, level: BlockLevel, + ghostdag_k: KType, ) -> bool { let relations_service = RelationsStoreInFutureOfRoot { relations_store: self.level_relations_services[level as usize].clone(), @@ -333,7 +346,7 @@ impl PruningProofManager { }; let gd_manager = GhostdagManager::with_level( root, - self.ghostdag_k, + ghostdag_k, ghostdag_store.clone(), relations_service.clone(), self.headers_store.clone(), diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index a9412bbf60..9a83bc29c0 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -16,7 +16,11 @@ use rocksdb::WriteBatch; use kaspa_consensus_core::{ blockhash::{self, BlockHashExtensions}, - errors::consensus::{ConsensusError, ConsensusResult}, + config::params::ForkedParam, + errors::{ + consensus::{ConsensusError, ConsensusResult}, + pruning::{PruningImportError, PruningImportResult}, + }, header::Header, pruning::{PruningPointProof, PruningPointTrustedData}, trusted::{TrustedGhostdagData, TrustedHeader}, @@ -42,6 +46,7 @@ use crate::{ headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, pruning::{DbPruningStore, PruningStoreReader}, + pruning_samples::{DbPruningSamplesStore, PruningSamplesStore}, reachability::DbReachabilityStore, relations::{DbRelationsStore, RelationsStoreReader}, selected_chain::DbSelectedChainStore, @@ -109,6 +114,7 @@ pub struct PruningProofManager { headers_selected_tip_store: Arc>, depth_store: Arc, selected_chain_store: Arc>, + pruning_samples_store: Arc, ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, @@ -121,8 +127,8 @@ pub struct PruningProofManager { max_block_level: BlockLevel, genesis_hash: Hash, pruning_proof_m: u64, - anticone_finalization_depth: u64, - ghostdag_k: KType, + 
anticone_finalization_depth: ForkedParam, + ghostdag_k: ForkedParam, is_consensus_exiting: Arc, } @@ -140,8 +146,8 @@ impl PruningProofManager { max_block_level: BlockLevel, genesis_hash: Hash, pruning_proof_m: u64, - anticone_finalization_depth: u64, - ghostdag_k: KType, + anticone_finalization_depth: ForkedParam, + ghostdag_k: ForkedParam, is_consensus_exiting: Arc, ) -> Self { Self { @@ -159,6 +165,7 @@ impl PruningProofManager { headers_selected_tip_store: storage.headers_selected_tip_store.clone(), selected_chain_store: storage.selected_chain_store.clone(), depth_store: storage.depth_store.clone(), + pruning_samples_store: storage.pruning_samples_store.clone(), traversal_manager, window_manager, @@ -182,10 +189,27 @@ impl PruningProofManager { } } - pub fn import_pruning_points(&self, pruning_points: &[Arc
]) { + pub fn import_pruning_points(&self, pruning_points: &[Arc
]) -> PruningImportResult<()> { + let unique_count = pruning_points.iter().map(|h| h.hash).unique().count(); + if unique_count < pruning_points.len() { + return Err(PruningImportError::DuplicatedPastPruningPoints(pruning_points.len() - unique_count)); + } for (i, header) in pruning_points.iter().enumerate() { self.past_pruning_points_store.set(i as u64, header.hash).unwrap(); + if i > 0 { + let prev_blue_score = pruning_points[i - 1].blue_score; + // This is a sufficient condition for running expected pruning point algo (v2) + // over blocks B s.t. pruning point ∈ chain(B) w/o a risk of not converging + if prev_blue_score >= header.blue_score { + return Err(PruningImportError::InconsistentPastPruningPoints(i - 1, i, prev_blue_score, header.blue_score)); + } + // Store the i-1 pruning point as the last pruning sample from POV of the i'th pruning point. + // If this data is inconsistent, then blocks above the pruning point will fail the expected + // pruning point validation performed at are_pruning_points_in_valid_chain + self.pruning_samples_store.insert(header.hash, pruning_points[i - 1].hash).unwrap(); + } + if self.headers_store.has(header.hash).unwrap() { continue; } @@ -200,9 +224,12 @@ impl PruningProofManager { let mut pruning_point_write = self.pruning_point_store.write(); let mut batch = WriteBatch::default(); pruning_point_write.set_batch(&mut batch, new_pruning_point, new_pruning_point, (pruning_points.len() - 1) as u64).unwrap(); - pruning_point_write.set_history_root(&mut batch, new_pruning_point).unwrap(); + pruning_point_write.set_retention_checkpoint(&mut batch, new_pruning_point).unwrap(); + pruning_point_write.set_retention_period_root(&mut batch, new_pruning_point).unwrap(); self.db.write(batch).unwrap(); drop(pruning_point_write); + + Ok(()) } // Used in apply and validate @@ -244,10 +271,10 @@ impl PruningProofManager { /// the search is halted and a partial chain is returned. 
/// /// The returned hashes are guaranteed to have GHOSTDAG data - pub(crate) fn get_ghostdag_chain_k_depth(&self, hash: Hash) -> Vec { - let mut hashes = Vec::with_capacity(self.ghostdag_k as usize + 1); + pub(crate) fn get_ghostdag_chain_k_depth(&self, hash: Hash, ghostdag_k: KType) -> Vec { + let mut hashes = Vec::with_capacity(ghostdag_k as usize + 1); let mut current = hash; - for _ in 0..=self.ghostdag_k { + for _ in 0..=ghostdag_k { hashes.push(current); let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else { break; @@ -275,6 +302,10 @@ impl PruningProofManager { let mut daa_window_blocks = BlockHashMap::new(); let mut ghostdag_blocks = BlockHashMap::new(); + // [Crescendo]: get ghostdag k based on the pruning point's DAA score. The off-by-one of not going by selected parent + // DAA score is not important here as we simply increase K one block earlier which is more conservative (saving/sending more data) + let ghostdag_k = self.ghostdag_k.get(self.headers_store.get_daa_score(pruning_point).unwrap()); + // PRUNE SAFETY: called either via consensus under the prune guard or by the pruning processor (hence no pruning in parallel) for anticone_block in anticone.iter().copied() { @@ -291,7 +322,7 @@ impl PruningProofManager { } } - let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); + let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block, ghostdag_k); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { let ghostdag = self.ghostdag_store.get_data(hash).unwrap(); @@ -353,6 +384,11 @@ impl PruningProofManager { } } let proof = Arc::new(self.build_pruning_point_proof(pp)); + info!( + "Built headers proof with overall {} headers ({} unique)", + proof.iter().map(|l| l.len()).sum::(), + proof.iter().flatten().unique_by(|h| h.hash).count() + ); cache_lock.replace(CachedPruningPointData { pruning_point: pp, data: proof.clone() }); proof } @@ -369,8 +405,12 @@ impl 
PruningProofManager { let virtual_state = self.virtual_stores.read().state.get().unwrap(); let pp_bs = self.headers_store.get_blue_score(pp).unwrap(); + // [Crescendo]: use pruning point DAA score for activation. This means that only after sufficient time + // post activation we will require the increased finalization depth + let pruning_point_daa_score = self.headers_store.get_daa_score(pp).unwrap(); + // The anticone is considered final only if the pruning point is at sufficient depth from virtual - if virtual_state.ghostdag_data.blue_score >= pp_bs + self.anticone_finalization_depth { + if virtual_state.ghostdag_data.blue_score >= pp_bs + self.anticone_finalization_depth.get(pruning_point_daa_score) { let anticone = Arc::new(self.calculate_pruning_point_anticone_and_trusted_data(pp, virtual_state.parents.iter().copied())); cache_lock.replace(CachedPruningPointData { pruning_point: pp, data: anticone.clone() }); Ok(anticone) diff --git a/consensus/src/processes/pruning_proof/validate.rs b/consensus/src/processes/pruning_proof/validate.rs index 3262b65901..f612188104 100644 --- a/consensus/src/processes/pruning_proof/validate.rs +++ b/consensus/src/processes/pruning_proof/validate.rs @@ -110,12 +110,15 @@ impl PruningProofManager { ) { let proof_level_blue_work_diff = proof_selected_tip_gd.blue_work.saturating_sub(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { - let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = parent_blue_work.saturating_sub(common_ancestor_gd.blue_work); - if parent_blue_work_diff.saturating_add(pruning_period_work) - >= proof_level_blue_work_diff.saturating_add(prover_claimed_pruning_period_work) + // Not all parents by level are guaranteed to be GD populated, but at least one of them will (the proof level selected tip) + if let Some(parent_blue_work) = 
current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap_option() { - return Err(PruningImportError::PruningProofInsufficientBlueWork); + let parent_blue_work_diff = parent_blue_work.saturating_sub(common_ancestor_gd.blue_work); + if parent_blue_work_diff.saturating_add(pruning_period_work) + >= proof_level_blue_work_diff.saturating_add(prover_claimed_pruning_period_work) + { + return Err(PruningImportError::PruningProofInsufficientBlueWork); + } } } @@ -173,6 +176,10 @@ impl PruningProofManager { return Err(PruningImportError::PruningProofNotEnoughHeaders); } + // [Crescendo]: decide on ghostdag K based on proof pruning point DAA score + let proof_pp_daa_score = proof[0].last().expect("checked if empty").daa_score; + let ghostdag_k = self.ghostdag_k.get(proof_pp_daa_score); + let headers_estimate = self.estimate_proof_unique_size(proof); let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); @@ -199,7 +206,7 @@ impl PruningProofManager { .map(|(level, ghostdag_store)| { GhostdagManager::with_level( self.genesis_hash, - self.ghostdag_k, + ghostdag_k, ghostdag_store, relations_stores[level].clone(), headers_store.clone(), diff --git a/consensus/src/processes/sync/mod.rs b/consensus/src/processes/sync/mod.rs index 839e48a9ef..4b39216c01 100644 --- a/consensus/src/processes/sync/mod.rs +++ b/consensus/src/processes/sync/mod.rs @@ -1,7 +1,10 @@ use std::{cmp::min, ops::Deref, sync::Arc}; use itertools::Itertools; -use kaspa_consensus_core::errors::sync::{SyncManagerError, SyncManagerResult}; +use kaspa_consensus_core::{ + config::params::ForkedParam, + errors::sync::{SyncManagerError, SyncManagerResult}, +}; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_math::uint::malachite_base::num::arithmetic::traits::CeilingLogBase2; @@ -28,7 +31,7 @@ pub struct SyncManager< X: PruningStoreReader, Y: StatusesStoreReader, > { - mergeset_size_limit: usize, + mergeset_size_limit: 
ForkedParam, reachability_service: MTReachabilityService, traversal_manager: DagTraversalManager, ghostdag_store: Arc, @@ -49,7 +52,7 @@ impl< > SyncManager { pub fn new( - mergeset_size_limit: usize, + mergeset_size_limit: ForkedParam, reachability_service: MTReachabilityService, traversal_manager: DagTraversalManager, ghostdag_store: Arc, @@ -75,7 +78,7 @@ impl< /// because it returns blocks with MergeSet granularity, so if MergeSet > max_blocks, the function will return nothing which is undesired behavior. pub fn antipast_hashes_between(&self, low: Hash, high: Hash, max_blocks: Option) -> (Vec, Hash) { let max_blocks = max_blocks.unwrap_or(usize::MAX); - assert!(max_blocks >= self.mergeset_size_limit); + assert!(max_blocks >= self.mergeset_size_limit.upper_bound() as usize); // If low is not in the chain of high - forward_chain_iterator will fail. // Therefore, we traverse down low's chain until we reach a block that is in diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index 519a196f82..c58dfb45df 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -4,67 +4,54 @@ pub mod tx_validation_in_isolation; pub mod tx_validation_in_utxo_context; use std::sync::Arc; -use crate::model::stores::ghostdag; - use kaspa_txscript::{ caches::{Cache, TxScriptCacheCounters}, SigCacheKey, }; -use kaspa_consensus_core::{config::params::ForkActivation, mass::MassCalculator}; +use kaspa_consensus_core::{ + config::params::{ForkActivation, ForkedParam}, + mass::MassCalculator, +}; #[derive(Clone)] pub struct TransactionValidator { - max_tx_inputs: usize, - max_tx_outputs: usize, - max_signature_script_len: usize, - max_script_public_key_len: usize, - ghostdag_k: ghostdag::KType, + max_tx_inputs: ForkedParam, + max_tx_outputs: ForkedParam, + max_signature_script_len: ForkedParam, + max_script_public_key_len: ForkedParam, 
coinbase_payload_script_public_key_max_len: u8, - coinbase_maturity: u64, + coinbase_maturity: ForkedParam, sig_cache: Cache, pub(crate) mass_calculator: MassCalculator, - /// Storage mass hardfork DAA score - storage_mass_activation: ForkActivation, - /// KIP-10 hardfork DAA score - kip10_activation: ForkActivation, - payload_activation: ForkActivation, - runtime_sig_op_counting: ForkActivation, + /// Crescendo hardfork activation score. Activates KIPs 9, 10, 14 + crescendo_activation: ForkActivation, } impl TransactionValidator { - #[allow(clippy::too_many_arguments)] pub fn new( - max_tx_inputs: usize, - max_tx_outputs: usize, - max_signature_script_len: usize, - max_script_public_key_len: usize, - ghostdag_k: ghostdag::KType, + max_tx_inputs: ForkedParam, + max_tx_outputs: ForkedParam, + max_signature_script_len: ForkedParam, + max_script_public_key_len: ForkedParam, coinbase_payload_script_public_key_max_len: u8, - coinbase_maturity: u64, + coinbase_maturity: ForkedParam, counters: Arc, mass_calculator: MassCalculator, - storage_mass_activation: ForkActivation, - kip10_activation: ForkActivation, - payload_activation: ForkActivation, - runtime_sig_op_counting: ForkActivation, + crescendo_activation: ForkActivation, ) -> Self { Self { max_tx_inputs, max_tx_outputs, max_signature_script_len, max_script_public_key_len, - ghostdag_k, coinbase_payload_script_public_key_max_len, coinbase_maturity, sig_cache: Cache::with_counters(10_000, counters), mass_calculator, - storage_mass_activation, - kip10_activation, - payload_activation, - runtime_sig_op_counting, + crescendo_activation, } } @@ -73,25 +60,20 @@ impl TransactionValidator { max_tx_outputs: usize, max_signature_script_len: usize, max_script_public_key_len: usize, - ghostdag_k: ghostdag::KType, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, counters: Arc, ) -> Self { Self { - max_tx_inputs, - max_tx_outputs, - max_signature_script_len, - max_script_public_key_len, - ghostdag_k, + 
max_tx_inputs: ForkedParam::new_const(max_tx_inputs), + max_tx_outputs: ForkedParam::new_const(max_tx_outputs), + max_signature_script_len: ForkedParam::new_const(max_signature_script_len), + max_script_public_key_len: ForkedParam::new_const(max_script_public_key_len), coinbase_payload_script_public_key_max_len, - coinbase_maturity, + coinbase_maturity: ForkedParam::new_const(coinbase_maturity), sig_cache: Cache::with_counters(10_000, counters), mass_calculator: MassCalculator::new(0, 0, 0, 0), - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo_activation: ForkActivation::never(), } } } diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs index 129627c59d..e080028f28 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs @@ -46,6 +46,10 @@ impl TransactionValidator { lock_time_arg: LockTimeArg, ) -> TxResult<()> { self.check_transaction_payload(tx, ctx_daa_score)?; + self.check_transaction_inputs_count_ctx(tx, ctx_daa_score)?; + self.check_transaction_outputs_count_ctx(tx, ctx_daa_score)?; + self.check_transaction_signature_scripts_ctx(tx, ctx_daa_score)?; + self.check_transaction_script_public_keys_ctx(tx, ctx_daa_score)?; self.check_tx_is_finalized(tx, lock_time_arg) } @@ -90,7 +94,7 @@ impl TransactionValidator { fn check_transaction_payload(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { // TODO (post HF): move back to in isolation validation - if self.payload_activation.is_active(ctx_daa_score) { + if self.crescendo_activation.is_active(ctx_daa_score) { Ok(()) } else { if !tx.is_coinbase() && !tx.payload.is_empty() { @@ -99,4 +103,54 @@ impl 
TransactionValidator { Ok(()) } } + + fn check_transaction_outputs_count_ctx(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { + // TODO (post HF): move back to in isolation validation + if tx.is_coinbase() { + // We already check coinbase outputs count vs. Ghostdag K + 2 + return Ok(()); + } + if tx.outputs.len() > self.max_tx_outputs.get(ctx_daa_score) { + return Err(TxRuleError::TooManyOutputs(tx.outputs.len(), self.max_tx_inputs.get(ctx_daa_score))); + } + + Ok(()) + } + + fn check_transaction_inputs_count_ctx(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { + // TODO (post HF): move back to in isolation validation + if !tx.is_coinbase() && tx.inputs.is_empty() { + return Err(TxRuleError::NoTxInputs); + } + + if tx.inputs.len() > self.max_tx_inputs.get(ctx_daa_score) { + return Err(TxRuleError::TooManyInputs(tx.inputs.len(), self.max_tx_inputs.get(ctx_daa_score))); + } + + Ok(()) + } + + // The main purpose of this check is to avoid overflows when calculating transaction mass later. + fn check_transaction_signature_scripts_ctx(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { + // TODO (post HF): move back to in isolation validation + if let Some(i) = + tx.inputs.iter().position(|input| input.signature_script.len() > self.max_signature_script_len.get(ctx_daa_score)) + { + return Err(TxRuleError::TooBigSignatureScript(i, self.max_signature_script_len.get(ctx_daa_score))); + } + + Ok(()) + } + + // The main purpose of this check is to avoid overflows when calculating transaction mass later. 
+ fn check_transaction_script_public_keys_ctx(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { + // TODO (post HF): move back to in isolation validation + if let Some(i) = + tx.outputs.iter().position(|out| out.script_public_key.script().len() > self.max_script_public_key_len.get(ctx_daa_score)) + { + return Err(TxRuleError::TooBigScriptPublicKey(i, self.max_script_public_key_len.get(ctx_daa_score))); + } + + Ok(()) + } } diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index b509a71c72..7780dc5c5b 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -35,17 +35,23 @@ impl TransactionValidator { self.check_transaction_script_public_keys(tx) } - fn check_coinbase_in_isolation(&self, tx: &kaspa_consensus_core::tx::Transaction) -> TxResult<()> { + fn check_coinbase_in_isolation(&self, tx: &Transaction) -> TxResult<()> { if !tx.is_coinbase() { return Ok(()); } if !tx.inputs.is_empty() { return Err(TxRuleError::CoinbaseHasInputs(tx.inputs.len())); } + + /* + [Crescendo]: moved this specific check to body_validation_in_context since it depends on fork activation + TODO (post HF): move back here + let outputs_limit = self.ghostdag_k as u64 + 2; if tx.outputs.len() as u64 > outputs_limit { return Err(TxRuleError::CoinbaseTooManyOutputs(tx.outputs.len(), outputs_limit)); } + */ for (i, output) in tx.outputs.iter().enumerate() { if output.script_public_key.script().len() > self.coinbase_payload_script_public_key_max_len as usize { return Err(TxRuleError::CoinbaseScriptPublicKeyTooLong(i)); @@ -55,8 +61,13 @@ impl TransactionValidator { } fn check_transaction_outputs_count(&self, tx: &Transaction) -> TxResult<()> { - if tx.outputs.len() > self.max_tx_outputs { - return Err(TxRuleError::TooManyOutputs(tx.inputs.len(), 
self.max_tx_inputs)); + if tx.is_coinbase() { + // We already check coinbase outputs count vs. Ghostdag K + 2 + return Ok(()); + } + // [Crescendo]: keep the check here over the upper limit. Add a tight check to in_header_context validation + if tx.outputs.len() > self.max_tx_outputs.upper_bound() { + return Err(TxRuleError::TooManyOutputs(tx.outputs.len(), self.max_tx_outputs.upper_bound())); } Ok(()) @@ -67,8 +78,9 @@ impl TransactionValidator { return Err(TxRuleError::NoTxInputs); } - if tx.inputs.len() > self.max_tx_inputs { - return Err(TxRuleError::TooManyInputs(tx.inputs.len(), self.max_tx_inputs)); + // [Crescendo]: keep the check here over the upper limit. Add a tight check to in_header_context validation + if tx.inputs.len() > self.max_tx_inputs.upper_bound() { + return Err(TxRuleError::TooManyInputs(tx.inputs.len(), self.max_tx_inputs.upper_bound())); } Ok(()) @@ -76,8 +88,10 @@ impl TransactionValidator { // The main purpose of this check is to avoid overflows when calculating transaction mass later. fn check_transaction_signature_scripts(&self, tx: &Transaction) -> TxResult<()> { - if let Some(i) = tx.inputs.iter().position(|input| input.signature_script.len() > self.max_signature_script_len) { - return Err(TxRuleError::TooBigSignatureScript(i, self.max_signature_script_len)); + // [Crescendo]: keep the check here over the upper limit. Add a tight check to in_header_context validation + if let Some(i) = tx.inputs.iter().position(|input| input.signature_script.len() > self.max_signature_script_len.upper_bound()) + { + return Err(TxRuleError::TooBigSignatureScript(i, self.max_signature_script_len.upper_bound())); } Ok(()) @@ -85,8 +99,11 @@ impl TransactionValidator { // The main purpose of this check is to avoid overflows when calculating transaction mass later. 
fn check_transaction_script_public_keys(&self, tx: &Transaction) -> TxResult<()> { - if let Some(i) = tx.outputs.iter().position(|input| input.script_public_key.script().len() > self.max_script_public_key_len) { - return Err(TxRuleError::TooBigScriptPublicKey(i, self.max_script_public_key_len)); + // [Crescendo]: keep the check here over the upper limit. Add a tight check to in_header_context validation + if let Some(i) = + tx.outputs.iter().position(|out| out.script_public_key.script().len() > self.max_script_public_key_len.upper_bound()) + { + return Err(TxRuleError::TooBigScriptPublicKey(i, self.max_script_public_key_len.upper_bound())); } Ok(()) @@ -168,16 +185,15 @@ mod tests { #[test] fn validate_tx_in_isolation_test() { let mut params = MAINNET_PARAMS.clone(); - params.max_tx_inputs = 10; - params.max_tx_outputs = 15; + params.prior_max_tx_inputs = 10; + params.prior_max_tx_outputs = 15; let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); @@ -275,19 +291,19 @@ mod tests { assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::NoTxInputs)); let mut tx = valid_tx.clone(); - tx.inputs = (0..params.max_tx_inputs + 1).map(|_| valid_tx.inputs[0].clone()).collect(); + tx.inputs = (0..params.prior_max_tx_inputs + 1).map(|_| valid_tx.inputs[0].clone()).collect(); assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::TooManyInputs(_, _))); let mut tx = valid_tx.clone(); - tx.inputs[0].signature_script = vec![0; params.max_signature_script_len + 1]; + tx.inputs[0].signature_script = vec![0; params.prior_max_signature_script_len + 1]; 
assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::TooBigSignatureScript(_, _))); let mut tx = valid_tx.clone(); - tx.outputs = (0..params.max_tx_outputs + 1).map(|_| valid_tx.outputs[0].clone()).collect(); + tx.outputs = (0..params.prior_max_tx_outputs + 1).map(|_| valid_tx.outputs[0].clone()).collect(); assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::TooManyOutputs(_, _))); let mut tx = valid_tx.clone(); - tx.outputs[0].script_public_key = ScriptPublicKey::new(0, scriptvec![0u8; params.max_script_public_key_len + 1]); + tx.outputs[0].script_public_key = ScriptPublicKey::new(0, scriptvec![0u8; params.prior_max_script_public_key_len + 1]); assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::TooBigScriptPublicKey(_, _))); let mut tx = valid_tx.clone(); diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs index 5c74bf07b5..4eccdbd897 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs @@ -3,7 +3,6 @@ use kaspa_consensus_core::{ hashing::sighash::{SigHashReusedValuesSync, SigHashReusedValuesUnsync}, tx::{TransactionInput, VerifiableTransaction}, }; -use kaspa_core::warn; use kaspa_txscript::{caches::Cache, get_sig_op_count_upper_bound, SigCacheKey, TxScriptEngine}; use kaspa_txscript_errors::TxScriptError; use rayon::iter::{IntoParallelIterator, ParallelIterator}; @@ -36,20 +35,17 @@ impl TransactionValidator { &self, tx: &(impl VerifiableTransaction + Sync), pov_daa_score: u64, + block_daa_score: u64, flags: TxValidationFlags, mass_and_feerate_threshold: Option<(u64, f64)>, ) -> TxResult { - self.check_transaction_coinbase_maturity(tx, pov_daa_score)?; + self.check_transaction_coinbase_maturity(tx, pov_daa_score, block_daa_score)?; let total_in = 
self.check_transaction_input_amounts(tx)?; let total_out = Self::check_transaction_output_values(tx, total_in)?; let fee = total_in - total_out; - if flags != TxValidationFlags::SkipMassCheck && self.storage_mass_activation.is_active(pov_daa_score) { + if flags != TxValidationFlags::SkipMassCheck && self.crescendo_activation.is_active(block_daa_score) { // Storage mass hardfork was activated self.check_mass_commitment(tx)?; - - if self.storage_mass_activation.is_within_range_from_activation(pov_daa_score, 10) { - warn!("--------- Storage mass hardfork was activated successfully!!! --------- (DAA score: {})", pov_daa_score); - } } Self::check_sequence_lock(tx, pov_daa_score)?; @@ -59,10 +55,10 @@ impl TransactionValidator { match flags { TxValidationFlags::Full | TxValidationFlags::SkipMassCheck => { - if !self.runtime_sig_op_counting.is_active(pov_daa_score) { + if !self.crescendo_activation.is_active(block_daa_score) { Self::check_sig_op_counts(tx)?; } - self.check_scripts(tx, pov_daa_score)?; + self.check_scripts(tx, block_daa_score)?; } TxValidationFlags::SkipScriptChecks => {} } @@ -81,18 +77,21 @@ impl TransactionValidator { Ok(()) } - fn check_transaction_coinbase_maturity(&self, tx: &impl VerifiableTransaction, pov_daa_score: u64) -> TxResult<()> { - if let Some((index, (input, entry))) = tx - .populated_inputs() - .enumerate() - .find(|(_, (_, entry))| entry.is_coinbase && entry.block_daa_score + self.coinbase_maturity > pov_daa_score) - { + fn check_transaction_coinbase_maturity( + &self, + tx: &impl VerifiableTransaction, + pov_daa_score: u64, + block_daa_score: u64, + ) -> TxResult<()> { + if let Some((index, (input, entry))) = tx.populated_inputs().enumerate().find(|(_, (_, entry))| { + entry.is_coinbase && entry.block_daa_score + self.coinbase_maturity.get(block_daa_score) > pov_daa_score + }) { return Err(TxRuleError::ImmatureCoinbaseSpend( index, input.previous_outpoint, entry.block_daa_score, pov_daa_score, - self.coinbase_maturity, + 
self.coinbase_maturity.get(block_daa_score), )); } @@ -127,7 +126,8 @@ impl TransactionValidator { } fn check_mass_commitment(&self, tx: &impl VerifiableTransaction) -> TxResult<()> { - let calculated_contextual_mass = self.mass_calculator.calc_tx_overall_mass(tx, None).ok_or(TxRuleError::MassIncomputable)?; + let calculated_contextual_mass = + self.mass_calculator.calc_contextual_masses(tx).ok_or(TxRuleError::MassIncomputable)?.storage_mass; let committed_contextual_mass = tx.tx().mass(); if committed_contextual_mass != calculated_contextual_mass { return Err(TxRuleError::WrongMass(calculated_contextual_mass, committed_contextual_mass)); @@ -173,12 +173,12 @@ impl TransactionValidator { Ok(()) } - pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + Sync), pov_daa_score: u64) -> TxResult<()> { + pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + Sync), block_daa_score: u64) -> TxResult<()> { check_scripts( &self.sig_cache, tx, - self.kip10_activation.is_active(pov_daa_score), - self.runtime_sig_op_counting.is_active(pov_daa_score), + self.crescendo_activation.is_active(block_daa_score), + self.crescendo_activation.is_active(block_daa_score), ) } } @@ -273,16 +273,15 @@ mod tests { #[test] fn check_signature_test() { let mut params = MAINNET_PARAMS.clone(); - params.max_tx_inputs = 10; - params.max_tx_outputs = 15; + params.prior_max_tx_inputs = 10; + params.prior_max_tx_outputs = 15; let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); @@ -342,16 +341,15 @@ mod tests { #[test] fn check_incorrect_signature_test() { let mut params = 
MAINNET_PARAMS.clone(); - params.max_tx_inputs = 10; - params.max_tx_outputs = 15; + params.prior_max_tx_inputs = 10; + params.prior_max_tx_outputs = 15; let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); @@ -415,16 +413,15 @@ mod tests { #[test] fn check_multi_signature_test() { let mut params = MAINNET_PARAMS.clone(); - params.max_tx_inputs = 10; - params.max_tx_outputs = 15; + params.prior_max_tx_inputs = 10; + params.prior_max_tx_outputs = 15; let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); @@ -485,16 +482,15 @@ mod tests { #[test] fn check_last_sig_incorrect_multi_signature_test() { let mut params = MAINNET_PARAMS.clone(); - params.max_tx_inputs = 10; - params.max_tx_outputs = 15; + params.prior_max_tx_inputs = 10; + params.prior_max_tx_outputs = 15; let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - 
params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); @@ -555,16 +551,15 @@ mod tests { #[test] fn check_first_sig_incorrect_multi_signature_test() { let mut params = MAINNET_PARAMS.clone(); - params.max_tx_inputs = 10; - params.max_tx_outputs = 15; + params.prior_max_tx_inputs = 10; + params.prior_max_tx_outputs = 15; let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); @@ -625,16 +620,15 @@ mod tests { #[test] fn check_empty_incorrect_multi_signature_test() { let mut params = MAINNET_PARAMS.clone(); - params.max_tx_inputs = 10; - params.max_tx_outputs = 15; + params.prior_max_tx_inputs = 10; + params.prior_max_tx_outputs = 15; let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); @@ -697,13 +691,12 @@ mod tests { // We test a situation where the script itself is valid, but the script signature is not push only let params = MAINNET_PARAMS.clone(); let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + 
params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); @@ -760,13 +753,12 @@ mod tests { fn test_sign() { let params = MAINNET_PARAMS.clone(); let tv = TransactionValidator::new_for_tests( - params.max_tx_inputs, - params.max_tx_outputs, - params.max_signature_script_len, - params.max_script_public_key_len, - params.ghostdag_k, + params.prior_max_tx_inputs, + params.prior_max_tx_outputs, + params.prior_max_signature_script_len, + params.prior_max_script_public_key_len, params.coinbase_payload_script_public_key_max_len, - params.coinbase_maturity, + params.prior_coinbase_maturity, Default::default(), ); diff --git a/consensus/src/processes/utils.rs b/consensus/src/processes/utils.rs new file mode 100644 index 0000000000..cc249a9082 --- /dev/null +++ b/consensus/src/processes/utils.rs @@ -0,0 +1,21 @@ +use rand::Rng; + +pub(crate) struct CoinFlip { + p: f64, +} + +impl Default for CoinFlip { + fn default() -> Self { + Self { p: 1.0 / 200.0 } + } +} + +impl CoinFlip { + pub(crate) fn new(p: f64) -> Self { + Self { p } + } + + pub fn flip(self) -> bool { + rand::thread_rng().gen_bool(self.p) + } +} diff --git a/consensus/src/processes/window.rs b/consensus/src/processes/window.rs index 1caff9c007..4a388a84a0 100644 --- a/consensus/src/processes/window.rs +++ b/consensus/src/processes/window.rs @@ -8,11 +8,12 @@ use crate::{ processes::ghostdag::ordering::SortableBlock, }; use kaspa_consensus_core::{ - blockhash::BlockHashExtensions, + blockhash::{BlockHashExtensions, ORIGIN}, config::{genesis::GenesisBlock, params::ForkActivation}, errors::{block::RuleError, difficulty::DifficultyResult}, - BlockHashSet, BlueWorkType, + BlockHashSet, BlueWorkType, HashMapCustomHasher, }; +use kaspa_core::{info, log::CRESCENDO_KEYWORD}; use kaspa_hashes::Hash; use kaspa_math::Uint256; use kaspa_utils::refs::Refs; @@ -27,6 +28,7 
@@ use std::{ use super::{ difficulty::{FullDifficultyManager, SampledDifficultyManager}, past_median_time::{FullPastMedianTimeManager, SampledPastMedianTimeManager}, + utils::CoinFlip, }; #[derive(Clone, Copy)] @@ -112,7 +114,7 @@ impl Self { let difficulty_manager = FullDifficultyManager::new( @@ -120,7 +122,7 @@ impl, block_window_cache_for_past_median_time: Arc, target_time_per_block: u64, - sampling_activation: ForkActivation, + crescendo_activation: ForkActivation, difficulty_window_size: usize, difficulty_sample_rate: u64, past_median_time_window_size: usize, past_median_time_sample_rate: u64, - difficulty_manager: SampledDifficultyManager, + difficulty_manager: SampledDifficultyManager, past_median_time_manager: SampledPastMedianTimeManager, } @@ -326,22 +328,27 @@ impl, block_window_cache_for_past_median_time: Arc, max_difficulty_target: Uint256, + prior_target_time_per_block: u64, target_time_per_block: u64, - sampling_activation: ForkActivation, + crescendo_activation: ForkActivation, difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, difficulty_sample_rate: u64, past_median_time_window_size: usize, past_median_time_sample_rate: u64, ) -> Self { let difficulty_manager = SampledDifficultyManager::new( headers_store.clone(), + ghostdag_store.clone(), + genesis.hash, genesis.bits, max_difficulty_target, difficulty_window_size, - min_difficulty_window_len, + min_difficulty_window_size, difficulty_sample_rate, + prior_target_time_per_block, target_time_per_block, + crescendo_activation, ); let past_median_time_manager = SampledPastMedianTimeManager::new(headers_store.clone(), genesis.timestamp); Self { @@ -352,7 +359,7 @@ impl bool { + if selected_parent.is_origin() { + // Trusted block syncer<>syncee contract: if the selected parent header wasn't provided, we assume activation. 
+ // See crescendo-related comment in consecutive_cover_for_window for the syncer side of the contract + return true; + } + let sp_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + self.crescendo_activation.is_active(sp_daa_score) + } + fn build_block_window( &self, ghostdag_data: &GhostdagData, @@ -384,6 +401,28 @@ impl Some(&self.block_window_cache_for_difficulty), WindowType::MedianTimeWindow => Some(&self.block_window_cache_for_past_median_time), @@ -404,7 +443,10 @@ impl); - + self.push_mergeset( + &mut &mut window_heap, + sample_rate, + ¤t_ghostdag, + parent_ghostdag.blue_work, + None::, + filter_non_activated, + ); + + // [Crescendo]: the chain ancestor window will be in the cache only if it was + // activated (due to tracking of window origin), so we can safely inherit it + // // see if we can inherit and merge with the selected parent cache if self.try_merge_with_selected_parent_cache(&mut window_heap, &cache, ¤t_ghostdag.selected_parent) { // if successful, we may break out of the loop, with the window already filled. @@ -469,10 +529,11 @@ impl, + filter_non_activated: bool, ) { if let Some(mut mergeset_non_daa_inserter) = mergeset_non_daa_inserter { - // If we have a non-daa inserter, we most iterate over the whole mergeset and op the sampled and non-daa blocks. - for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { + // If we have a non-daa inserter, we must iterate over the whole mergeset and operate on the sampled and non-daa blocks. 
+ for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work, filter_non_activated) { match block { SampledBlock::Sampled(block) => { heap.try_push(block.hash, block.blue_work); @@ -482,7 +543,7 @@ impl, + filter_non_activated: bool, ) -> Option> { cache.get(&ghostdag_data.selected_parent).map(|selected_parent_window| { let mut heap = Lazy::new(|| BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_window).clone())); // We pass a Lazy heap as an optimization to avoid cloning the selected parent heap in cases where the mergeset contains no samples - self.push_mergeset(&mut heap, sample_rate, ghostdag_data, selected_parent_blue_work, mergeset_non_daa_inserter); + self.push_mergeset( + &mut heap, + sample_rate, + ghostdag_data, + selected_parent_blue_work, + mergeset_non_daa_inserter, + filter_non_activated, + ); if let Ok(heap) = Lazy::into_value(heap) { Arc::new(heap.binary_heap) } else { @@ -532,6 +601,7 @@ impl impl Iterator + 'a { let selected_parent_block = SortableBlock::new(ghostdag_data.selected_parent, selected_parent_blue_work); let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); @@ -541,10 +611,14 @@ impl u32 { - self.difficulty_manager.calculate_difficulty_bits(&daa_window.window) + fn calculate_difficulty_bits(&self, ghostdag_data: &GhostdagData, daa_window: &DaaWindow) -> u32 { + self.difficulty_manager.calculate_difficulty_bits(&daa_window.window, ghostdag_data) } fn calc_past_median_time(&self, ghostdag_data: &GhostdagData) -> Result<(u64, Arc), RuleError> { let window = self.block_window(ghostdag_data, WindowType::MedianTimeWindow)?; - let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; + if window.len() < self.past_median_time_window_size && CoinFlip::default().flip() { + info!(target: CRESCENDO_KEYWORD, + "[Crescendo] MDT window increasing post activation: {} (target: {})", + window.len(), + 
self.past_median_time_window_size + ); + } + let past_median_time = self.past_median_time_manager.calc_past_median_time(&window, ghostdag_data.selected_parent)?; Ok((past_median_time, window)) } fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result { if let Some(window) = self.block_window_cache_for_past_median_time.get(&hash, WindowOrigin::Sampled) { - let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; + let past_median_time = self + .past_median_time_manager + .calc_past_median_time(&window, self.ghostdag_store.get_selected_parent(hash).unwrap())?; Ok(past_median_time) } else { let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); @@ -629,7 +712,7 @@ impl { ghostdag_store: Arc, headers_store: Arc, - sampling_activation: ForkActivation, + crescendo_activation: ForkActivation, full_window_manager: FullWindowManager, sampled_window_manager: SampledWindowManager, } @@ -681,11 +770,12 @@ impl, block_window_cache_for_past_median_time: Arc, max_difficulty_target: Uint256, - target_time_per_block: u64, - sampling_activation: ForkActivation, + prior_target_time_per_block: u64, + crescendo_target_time_per_block: u64, + crescendo_activation: ForkActivation, full_difficulty_window_size: usize, sampled_difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, difficulty_sample_rate: u64, full_past_median_time_window_size: usize, sampled_past_median_time_window_size: usize, @@ -698,9 +788,9 @@ impl bool { let sp_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); - self.sampling_activation.is_active(sp_daa_score) + self.crescendo_activation.is_active(sp_daa_score) } } diff --git a/core/Cargo.toml b/core/Cargo.toml index e76be862f7..ffda54c3d3 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -24,3 +24,4 @@ intertrait.workspace = true log4rs = { workspace = true, features = ["all_components", "gzip", "background_rotation"] } num_cpus.workspace = 
true tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread", "time"] } +anyhow = "1.0.97" diff --git a/core/src/log/appender.rs b/core/src/log/appender.rs index f1695fb26c..e5a2dfbb2f 100644 --- a/core/src/log/appender.rs +++ b/core/src/log/appender.rs @@ -12,7 +12,7 @@ use log4rs::{ Append, }, config::Appender, - encode::pattern::PatternEncoder, + encode::{pattern::PatternEncoder, Color, Encode, Style, Write}, filter::{threshold::ThresholdFilter, Filter}, }; use std::path::PathBuf; @@ -28,7 +28,7 @@ impl AppenderSpec { Self::new( name, level, - Box::new(ConsoleAppender::builder().encoder(Box::new(PatternEncoder::new(LOG_LINE_PATTERN_COLORED))).build()), + Box::new(ConsoleAppender::builder().encoder(Box::new(CrescendoEncoder::new(LOG_LINE_PATTERN_COLORED))).build()), ) } @@ -66,3 +66,40 @@ impl AppenderSpec { .build(self.name, self.append.take().unwrap()) } } + +pub const CRESCENDO_KEYWORD: &str = "crescendo"; +const CRESCENDO_LOG_LINE_PATTERN_COLORED: &str = "{d(%Y-%m-%d %H:%M:%S%.3f%:z)} [{h({(CRND):5.5})}] {m}{n}"; + +// TODO (post HF): remove or hide the custom encoder +#[derive(Debug)] +struct CrescendoEncoder { + general_encoder: PatternEncoder, + crescendo_encoder: PatternEncoder, + keyword: &'static str, +} + +impl CrescendoEncoder { + fn new(pattern: &str) -> Self { + CrescendoEncoder { + general_encoder: PatternEncoder::new(pattern), + crescendo_encoder: PatternEncoder::new(CRESCENDO_LOG_LINE_PATTERN_COLORED), + keyword: CRESCENDO_KEYWORD, + } + } +} + +impl Encode for CrescendoEncoder { + fn encode(&self, w: &mut dyn Write, record: &log::Record) -> anyhow::Result<()> { + if record.target() == self.keyword { + // Hack: override log level to debug so that inner encoder does not reset the style + // (note that we use the custom pattern with CRND so this change isn't visible) + let record = record.to_builder().level(log::Level::Debug).build(); + w.set_style(Style::new().text(Color::Cyan))?; + self.crescendo_encoder.encode(w, &record)?; + 
w.set_style(&Style::new())?; + Ok(()) + } else { + self.general_encoder.encode(w, record) + } + } +} diff --git a/core/src/log/mod.rs b/core/src/log/mod.rs index 4207cb74f7..17456ebb3f 100644 --- a/core/src/log/mod.rs +++ b/core/src/log/mod.rs @@ -14,6 +14,8 @@ cfg_if::cfg_if! { mod appender; mod consts; mod logger; + + pub use appender::CRESCENDO_KEYWORD; } } diff --git a/crypto/merkle/src/lib.rs b/crypto/merkle/src/lib.rs index 56579ccdf7..aca64c1a3c 100644 --- a/crypto/merkle/src/lib.rs +++ b/crypto/merkle/src/lib.rs @@ -22,7 +22,7 @@ pub fn calc_merkle_root(hashes: impl ExactSizeIterator) -> Hash { merkles.last().unwrap().unwrap() } -fn merkle_hash(left: Hash, right: Hash) -> Hash { +pub fn merkle_hash(left: Hash, right: Hash) -> Hash { let mut hasher = MerkleBranchHash::new(); hasher.update(left).update(right); hasher.finalize() diff --git a/database/src/registry.rs b/database/src/registry.rs index 36a728ebe6..49c85f44d8 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -21,7 +21,7 @@ pub enum DatabaseStorePrefixes { PruningUtxoset = 11, PruningUtxosetPosition = 12, PruningPoint = 13, - HistoryRoot = 14, + RetentionCheckpoint = 14, Reachability = 15, ReachabilityReindexRoot = 16, ReachabilityRelations = 17, @@ -36,6 +36,7 @@ pub enum DatabaseStorePrefixes { UtxoMultisets = 26, VirtualUtxoset = 27, VirtualState = 28, + PruningSamples = 29, // ---- Decomposed reachability stores ---- ReachabilityTreeChildren = 30, @@ -45,6 +46,9 @@ pub enum DatabaseStorePrefixes { TempGhostdag = 40, TempGhostdagCompact = 41, + // ---- Retention Period Root ---- + RetentionPeriodRoot = 50, + // ---- Metadata ---- MultiConsensusMetadata = 124, ConsensusEntries = 125, diff --git a/docs/testnet10-transition.md b/docs/testnet10-transition.md new file mode 100644 index 0000000000..731635570f --- /dev/null +++ b/docs/testnet10-transition.md @@ -0,0 +1,101 @@ +# Kaspa Testnet 10 (TN10) – Crescendo Hardfork Node Setup Guide + +Kaspa is about to take a significant 
leap with the **Crescendo Hardfork**, as detailed in [KIP14](https://github.com/kaspanet/kips/blob/master/kip-0014.md), transitioning from 1 to 10 blocks per second. To ensure a stable rollout, **Testnet 10 (TN10)** will first undergo this shift on approximately **March 6, 2025, 18:30 UTC**. By running TN10 and providing feedback, you help prepare for a smooth mainnet upgrade, tentatively planned for the end of April or early May. + + +**Important Note:** +- Version [0.17.1](https://github.com/kaspanet/rusty-kaspa/releases/tag/v0.17.1) does **not** support TN11. Some participants should keep TN11 nodes running on the latest stable release or `stable` branch until TN10’s performance is proven stable. + +--- + +## Recommended Hardware Specifications + +- **Minimum**: + - 8 CPU cores + - 16 GB RAM + - 256 GB SSD + - 5 MB/s (or ~40 Mbit/s) network bandwidth + +- **Preferred for Higher Performance**: + - 12–16 CPU cores + - 32 GB RAM + - 512 GB SSD + - Higher network bandwidth for robust peer support + +While the minimum specs suffice to sync and maintain a TN10 node with the accelerated 10 bps, increasing CPU cores, RAM, storage, and bandwidth allows your node to serve as a stronger focal point on the network. This leads to faster initial block download (IBD) for peers syncing from your node and provides more leeway for future storage growth and optimization. + + +--- + +## 1. Install & Run Your TN10 Node + +1. **Obtain Kaspa 0.17.1 binaries** + Download and extract the official [0.17.1 release](https://github.com/kaspanet/rusty-kaspa/releases/tag/v0.17.1), or build from the `master` branch by following the instructions in the project README. + +2. **Launch the Node** + While TN10 is the default netsuffix, specifying it explicitly is recommended: + + ``` + kaspad --testnet --netsuffix=10 --utxoindex + ``` + + *(If running from source code:)* + ``` + cargo run --bin kaspad --release -- --testnet --netsuffix=10 --utxoindex + ``` + +Leave this process running. 
Closing it will stop your node. + +- **Advanced Command-Line Options**: + - `--rpclisten=0.0.0.0` to listen for RPC connections on all network interfaces (public RPC). + - `--rpclisten-borsh` for local borsh RPC access from the `kaspa-cli` binary. + - `--unsaferpc` for allowing P2P peer query and management via RPC (recommended to use only if **not** exposing RPC publicly). + - `--perf-metrics --loglevel=info,kaspad_lib::daemon=debug,kaspa_mining::monitor=debug` for detailed performance logs. + - `--loglevel=kaspa_grpc_server=warn` for suppressing most RPC connect/disconnect log reports. + - `--ram-scale=3.0` for increasing cache size threefold (relevant for utilizing large RAM; can be set between 0.1 and 10). + +--- + +## 2. Generate Transactions with Rothschild + +1. **Create a Wallet** + ``` + rothschild + ``` + + This outputs a private key and a public address. Fund your wallet by mining to it or obtaining test coins from other TN10 participants. + +2. **Broadcast Transactions** + ``` + rothschild --private-key -t=10 + ``` + + Replace with the key from step 1. The `-t=10` flag sets your transaction rate to 10 TPS (feel free to try different rates, but keep it below 50 TPS). + +--- + +## 3. Mining on TN10 + +1. **Download the Miner** + Use the latest Kaspa CPU miner [release](https://github.com/elichai/kaspa-miner/releases) which supports TN10. + +2. **Start Mining** + ``` + kaspa-miner --testnet --mining-address -p 16210 -t 1 + ``` + + Replace with your TN10 address (e.g., from Rothschild) if you want to mine and generate transactions simultaneously. + +--- + +## Summary & Next Steps + +- **Node Sync:** + `kaspad --testnet --netsuffix=10 --utxoindex` +- **Transaction Generation:** + `rothschild --private-key -t=10` +- **Mining:** + `kaspa-miner --testnet --mining-address -p 16210 -t 1` + +By participating in TN10, you help stress-test the Crescendo Hardfork environment and prepare for a robust mainnet upgrade in end of April / early May. 
Share any challenges or successes in the #testnet Discord channel, and thank you for supporting Kaspa’s continued evolution. + diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 3507339f29..d79ee36a63 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -31,6 +31,8 @@ kaspa-index-processor.workspace = true kaspa-mining.workspace = true kaspa-notify.workspace = true kaspa-p2p-flows.workspace = true +kaspa-p2p-lib.workspace = true +kaspa-p2p-mining.workspace = true kaspa-perf-monitor.workspace = true kaspa-rpc-core.workspace = true kaspa-rpc-service.workspace = true diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index 56dd7c1de7..9a58bc6dbf 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -90,6 +90,7 @@ pub struct Args { #[serde(rename = "nogrpc")] pub disable_grpc: bool, pub ram_scale: f64, + pub retention_period_days: Option, } impl Default for Args { @@ -140,6 +141,7 @@ impl Default for Args { disable_dns_seeding: false, disable_grpc: false, ram_scale: 1.0, + retention_period_days: None, } } } @@ -159,6 +161,7 @@ impl Args { config.p2p_listen_address = self.listen.unwrap_or(ContextualNetAddress::unspecified()); config.externalip = self.externalip.map(|v| v.normalize(config.default_p2p_port())); config.ram_scale = self.ram_scale; + config.retention_period_days = self.retention_period_days; #[cfg(feature = "devnet-prealloc")] if let Some(num_prealloc_utxos) = self.num_prealloc_utxos { @@ -369,6 +372,13 @@ Setting to 0 prevents the preallocation and sets the maximum to {}, leading to 0 .help("Apply a scale factor to memory allocation bounds. Nodes with limited RAM (~4-8GB) should set this to ~0.3-0.5 respectively. 
Nodes with a large RAM (~64GB) can set this value to ~3.0-4.0 and gain superior performance especially for syncing peers faster"), ) + .arg( + Arg::new("retention-period-days") + .long("retention-period-days") + .require_equals(true) + .value_parser(clap::value_parser!(f64)) + .help("The number of total days of data to keep.") + ) ; #[cfg(feature = "devnet-prealloc")] @@ -448,6 +458,7 @@ impl Args { disable_dns_seeding: arg_match_unwrap_or::(&m, "nodnsseed", defaults.disable_dns_seeding), disable_grpc: arg_match_unwrap_or::(&m, "nogrpc", defaults.disable_grpc), ram_scale: arg_match_unwrap_or::(&m, "ram-scale", defaults.ram_scale), + retention_period_days: m.get_one::("retention-period-days").cloned().or(defaults.retention_period_days), #[cfg(feature = "devnet-prealloc")] num_prealloc_utxos: m.get_one::("num-prealloc-utxos").cloned(), diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index db9f32c165..943a2b98ac 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -3,7 +3,9 @@ use std::{fs, path::PathBuf, process::exit, sync::Arc, time::Duration}; use async_channel::unbounded; use kaspa_consensus_core::{ config::ConfigBuilder, + constants::TRANSIENT_BYTE_TO_MASS_FACTOR, errors::config::{ConfigError, ConfigResult}, + mining_rules::MiningRules, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; use kaspa_core::{core::Core, debug, info, trace}; @@ -14,6 +16,8 @@ use kaspa_database::{ }; use kaspa_grpc_server::service::GrpcService; use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; +use kaspa_p2p_lib::Hub; +use kaspa_p2p_mining::rule_engine::MiningRuleEngine; use kaspa_rpc_service::service::RpcCoreService; use kaspa_txscript::caches::TxScriptCacheCounters; use kaspa_utils::git; @@ -50,6 +54,11 @@ pub const DESIRED_DAEMON_SOFT_FD_LIMIT: u64 = 8 * 1024; /// this value may impact the database performance). 
pub const MINIMUM_DAEMON_SOFT_FD_LIMIT: u64 = 4 * 1024; +/// If set, the retention period days must be at least this value +/// (otherwise it is meaningless since pruning periods are typically at least 2 days long) +const MINIMUM_RETENTION_PERIOD_DAYS: f64 = 2.0; +const ONE_GIGABYTE: f64 = 1_000_000_000.0; + use crate::args::Args; const DEFAULT_DATA_DIR: &str = "datadir"; @@ -233,8 +242,6 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: .build(), ); - // TODO: Validate `config` forms a valid set of properties - let app_dir = get_app_dir_from_args(args); let db_dir = app_dir.join(network.to_prefixed()).join(DEFAULT_DATA_DIR); @@ -275,6 +282,29 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm fs::create_dir_all(utxoindex_db_dir.as_path()).unwrap(); } + if !args.archival && args.retention_period_days.is_some() { + let retention_period_days = args.retention_period_days.unwrap(); + // Look only at post-fork values (which are the worst-case) + let finality_depth = config.finality_depth().after(); + let target_time_per_block = config.target_time_per_block().after(); // in ms + + let retention_period_milliseconds = (retention_period_days * 24.0 * 60.0 * 60.0 * 1000.0).ceil() as u64; + if MINIMUM_RETENTION_PERIOD_DAYS <= retention_period_days { + let total_blocks = retention_period_milliseconds / target_time_per_block; + // This worst case usage only considers block space. It does not account for usage of + // other stores (reachability, block status, mempool, etc.) + let worst_case_usage = + ((total_blocks + finality_depth) * (config.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR)) as f64 / ONE_GIGABYTE; + + info!( + "Retention period is set to {} days. 
Disk usage may be up to {:.2} GB for block space required for this period.", + retention_period_days, worst_case_usage + ); + } else { + panic!("Retention period ({}) must be at least {} days", retention_period_days, MINIMUM_RETENTION_PERIOD_DAYS); + } + } + // DB used for addresses store and for multi-consensus management let mut meta_db = kaspa_database::prelude::ConnBuilder::default() .with_db_path(meta_db_dir.clone()) @@ -300,16 +330,16 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let headers_store = DbHeadersStore::new(consensus_db, CachePolicy::Empty, CachePolicy::Empty); if headers_store.has(config.genesis.hash).unwrap() { - info!("Genesis is found in active consensus DB. No action needed."); + debug!("Genesis is found in active consensus DB. No action needed."); } else { - let msg = "Genesis not found in active consensus DB. This happens when Testnet 11 is restarted and your database needs to be fully deleted. Do you confirm the delete? (y/n)"; + let msg = "Genesis not found in active consensus DB. This happens when Testnets are restarted and your database needs to be fully deleted. Do you confirm the delete? (y/n)"; get_user_approval_or_exit(msg, args.yes); is_db_reset_needed = true; } } None => { - info!("Consensus not initialized yet. Skipping genesis check."); + debug!("Consensus not initialized yet. Skipping genesis check."); } } } @@ -454,8 +484,9 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm let connect_peers = args.connect_peers.iter().map(|x| x.normalize(config.default_p2p_port())).collect::>(); let add_peers = args.add_peers.iter().map(|x| x.normalize(config.default_p2p_port())).collect(); let p2p_server_addr = args.listen.unwrap_or(ContextualNetAddress::unspecified()).normalize(config.default_p2p_port()); - // connect_peers means no DNS seeding and no outbound peers + // connect_peers means no DNS seeding and no outbound/inbound peers let outbound_target = if connect_peers.is_empty() { args.outbound_target } else { 0 }; + let inbound_limit = if connect_peers.is_empty() { args.inbound_limit } else { 0 }; let dns_seeders = if connect_peers.is_empty() && !args.disable_dns_seeding { config.dns_seeders } else { &[] }; let grpc_server_addr = args.rpclisten.unwrap_or(ContextualNetAddress::loopback()).normalize(config.default_rpc_port()); @@ -478,6 +509,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let grpc_tower_counters = Arc::new(TowerConnectionCounters::default()); // Use `num_cpus` background threads for the consensus database as recommended by rocksdb + let mining_rules = Arc::new(MiningRules::default()); let consensus_db_parallelism = num_cpus::get(); let consensus_factory = Arc::new(ConsensusFactory::new( meta_db.clone(), @@ -488,6 +520,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm processing_counters.clone(), tx_script_cache_counters.clone(), fd_remaining, + mining_rules.clone(), )); let consensus_manager = Arc::new(ConsensusManager::new(consensus_factory)); let consensus_monitor = Arc::new(ConsensusMonitor::new(processing_counters.clone(), tick_service.clone())); @@ -527,16 +560,30 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm let (address_manager, port_mapping_extender_svc) = AddressManager::new(config.clone(), meta_db, tick_service.clone()); let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new_with_extended_config( - config.target_time_per_block, + config.target_time_per_block(), false, config.max_block_mass, config.ram_scale, config.block_template_cache_lifetime, mining_counters.clone(), ))); - let mining_monitor = - Arc::new(MiningMonitor::new(mining_manager.clone(), mining_counters, tx_script_cache_counters.clone(), tick_service.clone())); + let mining_monitor = Arc::new(MiningMonitor::new( + mining_manager.clone(), + consensus_manager.clone(), + mining_counters, + tx_script_cache_counters.clone(), + tick_service.clone(), + )); + let hub = Hub::new(); + let mining_rule_engine = Arc::new(MiningRuleEngine::new( + consensus_manager.clone(), + config.clone(), + processing_counters.clone(), + tick_service.clone(), + hub.clone(), + mining_rules, + )); let flow_context = Arc::new(FlowContext::new( consensus_manager.clone(), address_manager, @@ -544,6 +591,8 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm mining_manager.clone(), tick_service.clone(), notification_root, + hub.clone(), + mining_rule_engine.clone(), )); let p2p_service = Arc::new(P2pService::new( flow_context.clone(), @@ -551,7 +600,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm add_peers, p2p_server_addr, outbound_target, - args.inbound_limit, + inbound_limit, dns_seeders, config.default_p2p_port(), p2p_tower_counters.clone(), @@ -574,6 +623,7 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm p2p_tower_counters.clone(), grpc_tower_counters.clone(), system_info, + mining_rule_engine.clone(), )); let grpc_service_broadcasters: usize = 3; // TODO: add a command line argument or derive from other arg/config/host-related fields let grpc_service = if !args.disable_grpc { @@ -607,6 +657,8 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm async_runtime.register(consensus_monitor); async_runtime.register(mining_monitor); async_runtime.register(perf_monitor); + async_runtime.register(mining_rule_engine); + let wrpc_service_tasks: usize = 2; // num_cpus::get() / 2; // Register wRPC servers based on command line arguments [ diff --git a/math/benches/bench.rs b/math/benches/bench.rs index 3d208f525e..34bcd3ece5 100644 --- a/math/benches/bench.rs +++ b/math/benches/bench.rs @@ -33,8 +33,8 @@ where fn bench_uint128(c: &mut Criterion) { let mut rng = ChaCha8Rng::from_seed([42u8; 32]); - let u128_one: Vec<_> = (0..ITERS_128).map(|_| (rng.next_u64() as u128) << 64 | rng.next_u64() as u128).collect(); - let u128_two: Vec<_> = (0..ITERS_128).map(|_| (rng.next_u64() as u128) << 64 | rng.next_u64() as u128).collect(); + let u128_one: Vec<_> = (0..ITERS_128).map(|_| ((rng.next_u64() as u128) << 64) | rng.next_u64() as u128).collect(); + let u128_two: Vec<_> = (0..ITERS_128).map(|_| ((rng.next_u64() as u128) << 64) | rng.next_u64() as u128).collect(); let shifts: Vec<_> = (0..ITERS_128).map(|_| rng.next_u32() % 128 * 8).collect(); let u64s: Vec<_> = (0..ITERS_128).map(|_| rng.next_u64()).collect(); diff --git a/mining/errors/src/mempool.rs b/mining/errors/src/mempool.rs index 12416be678..42dd566d70 100644 --- a/mining/errors/src/mempool.rs +++ b/mining/errors/src/mempool.rs @@ -106,11 +106,14 @@ pub enum NonStandardError { #[error("transaction version {1} is not in the valid range of {2}-{3}")] RejectVersion(TransactionId, u16, u16, u16), - #[error("transaction mass of {1} is 
larger than max allowed size of {2}")] - RejectMass(TransactionId, u64, u64), + #[error("transaction compute mass of {1} is larger than max allowed size of {2}")] + RejectComputeMass(TransactionId, u64, u64), - #[error("transaction mass in context (including storage mass) of {1} is larger than max allowed size of {2}")] - RejectContextualMass(TransactionId, u64, u64), + #[error("transaction transient (storage) mass of {1} is larger than max allowed size of {2}")] + RejectTransientMass(TransactionId, u64, u64), + + #[error("transaction storage mass of {1} is larger than max allowed size of {2}")] + RejectStorageMass(TransactionId, u64, u64), #[error("transaction input #{1}: signature script size of {2} bytes is larger than the maximum allowed size of {3} bytes")] RejectSignatureScriptSize(TransactionId, usize, u64, u64), @@ -138,8 +141,9 @@ impl NonStandardError { pub fn transaction_id(&self) -> &TransactionId { match self { NonStandardError::RejectVersion(id, _, _, _) => id, - NonStandardError::RejectMass(id, _, _) => id, - NonStandardError::RejectContextualMass(id, _, _) => id, + NonStandardError::RejectComputeMass(id, _, _) => id, + NonStandardError::RejectTransientMass(id, _, _) => id, + NonStandardError::RejectStorageMass(id, _, _) => id, NonStandardError::RejectSignatureScriptSize(id, _, _, _) => id, NonStandardError::RejectScriptPublicKeyVersion(id, _) => id, NonStandardError::RejectOutputScriptClass(id, _) => id, diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 32893312a1..c3d9df4dae 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -28,6 +28,7 @@ use kaspa_consensus_core::{ }, block::{BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, coinbase::MinerData, + config::params::ForkedParam, errors::{block::RuleError as BlockRuleError, tx::TxRuleError}, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, }; @@ -46,6 +47,7 @@ pub struct MiningManager { } impl MiningManager { + // [Crescendo]: used 
for tests only so we can pass a single value target_time_per_block pub fn new( target_time_per_block: u64, relay_non_std_transactions: bool, @@ -53,12 +55,12 @@ impl MiningManager { cache_lifetime: Option, counters: Arc, ) -> Self { - let config = Config::build_default(target_time_per_block, relay_non_std_transactions, max_block_mass); + let config = Config::build_default(ForkedParam::new_const(target_time_per_block), relay_non_std_transactions, max_block_mass); Self::with_config(config, cache_lifetime, counters) } pub fn new_with_extended_config( - target_time_per_block: u64, + target_time_per_block: ForkedParam, relay_non_std_transactions: bool, max_block_mass: u64, ram_scale: f64, @@ -203,8 +205,11 @@ impl MiningManager { } /// Returns realtime feerate estimations based on internal mempool state - pub(crate) fn get_realtime_feerate_estimations(&self) -> FeerateEstimations { - let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.maximum_mass_per_block); + pub(crate) fn get_realtime_feerate_estimations(&self, virtual_daa_score: u64) -> FeerateEstimations { + let args = FeerateEstimatorArgs::new( + self.config.network_blocks_per_second.get(virtual_daa_score), + self.config.maximum_mass_per_block, + ); let estimator = self.mempool.read().build_feerate_estimator(args); estimator.calc_estimations(self.config.minimum_feerate()) } @@ -215,7 +220,10 @@ impl MiningManager { consensus: &dyn ConsensusApi, prefix: kaspa_addresses::Prefix, ) -> MiningManagerResult { - let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.maximum_mass_per_block); + let args = FeerateEstimatorArgs::new( + self.config.network_blocks_per_second.get(consensus.get_virtual_daa_score()), + self.config.maximum_mass_per_block, + ); let network_mass_per_second = args.network_mass_per_second(); let mempool_read = self.mempool.read(); let estimator = mempool_read.build_feerate_estimator(args); @@ -516,7 +524,7 @@ impl MiningManager { 
transactions[lower_bound..] .iter() .position(|tx| { - mass += tx.calculated_compute_mass.unwrap(); + mass += tx.calculated_non_contextual_masses.unwrap().max(); mass >= self.config.maximum_mass_per_block }) // Make sure the upper bound is greater than the lower bound, allowing to handle a very unlikely, @@ -854,8 +862,8 @@ impl MiningManagerProxy { } /// Returns realtime feerate estimations based on internal mempool state - pub async fn get_realtime_feerate_estimations(self) -> FeerateEstimations { - spawn_blocking(move || self.inner.get_realtime_feerate_estimations()).await.unwrap() + pub async fn get_realtime_feerate_estimations(self, virtual_daa_score: u64) -> FeerateEstimations { + spawn_blocking(move || self.inner.get_realtime_feerate_estimations(virtual_daa_score)).await.unwrap() } /// Returns realtime feerate estimations based on internal mempool state with additional verbose data diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 6ddc86e45b..25ae76ec8c 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -20,9 +20,10 @@ mod tests { api::ConsensusApi, block::TemplateBuildMode, coinbase::MinerData, + config::params::ForkedParam, constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, errors::tx::TxRuleError, - mass::transaction_estimated_serialized_size, + mass::{transaction_estimated_serialized_size, NonContextualMasses}, subnets::SUBNETWORK_ID_NATIVE, tx::{ scriptvec, MutableTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionInput, TransactionOutpoint, @@ -109,12 +110,12 @@ mod tests { tx.calculated_fee.unwrap() ); assert_eq!( - tx_to_insert.calculated_compute_mass.unwrap(), - tx.calculated_compute_mass.unwrap(), + tx_to_insert.calculated_non_contextual_masses.unwrap(), + tx.calculated_non_contextual_masses.unwrap(), "({priority:?}, {orphan:?}, {rbf_policy:?}) wrong mass in transaction {}: expected: {}, got: {}", tx.id(), - tx_to_insert.calculated_compute_mass.unwrap(), - 
tx.calculated_compute_mass.unwrap() + tx_to_insert.calculated_non_contextual_masses.unwrap(), + tx.calculated_non_contextual_masses.unwrap() ); } assert!( @@ -901,7 +902,7 @@ mod tests { ]; let consensus = Arc::new(ConsensusMock::new()); - let mut config = Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS); + let mut config = Config::build_default(ForkedParam::new_const(TARGET_TIME_PER_BLOCK), false, MAX_BLOCK_MASS); // Limit the orphan pool to 2 transactions config.maximum_orphan_transaction_count = 2; let counters = Arc::new(MiningCounters::default()); @@ -1129,7 +1130,7 @@ mod tests { let consensus = Arc::new(ConsensusMock::new()); let counters = Arc::new(MiningCounters::default()); - let mut config = Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS); + let mut config = Config::build_default(ForkedParam::new_const(TARGET_TIME_PER_BLOCK), false, MAX_BLOCK_MASS); let tx_size = txs[0].mempool_estimated_bytes(); let size_limit = TX_COUNT * tx_size; config.mempool_size_limit = size_limit; @@ -1348,7 +1349,9 @@ mod tests { let mut mutable_tx = MutableTransaction::from_tx(transaction); mutable_tx.calculated_fee = Some(DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE); // Please note: this is the ConsensusMock version of the calculated_mass which differs from Consensus - mutable_tx.calculated_compute_mass = Some(transaction_estimated_serialized_size(&mutable_tx.tx)); + let transaction_serialized_size = transaction_estimated_serialized_size(&mutable_tx.tx); + mutable_tx.calculated_non_contextual_masses = + Some(NonContextualMasses::new(transaction_serialized_size, transaction_serialized_size)); mutable_tx.entries[0] = Some(entry); mutable_tx diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index bf4e0cfb7f..2f9ff8a25d 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -2,12 +2,12 @@ use crate::mempool::{ 
errors::{NonStandardError, NonStandardResult}, Mempool, }; -use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::{ constants::{MAX_SCRIPT_PUBLIC_KEY_VERSION, MAX_SOMPI}, mass, tx::{MutableTransaction, PopulatedTransaction, TransactionOutput}, }; +use kaspa_consensus_core::{hashing::sighash::SigHashReusedValuesUnsync, mass::NonContextualMasses}; use kaspa_txscript::{get_sig_op_count_upper_bound, is_unspendable, script_class::ScriptClass}; /// MAX_STANDARD_P2SH_SIG_OPS is the maximum number of signature operations @@ -61,12 +61,12 @@ impl Mempool { // almost as much to process as the sender fees, limit the maximum // size of a transaction. This also helps mitigate CPU exhaustion // attacks. - if transaction.calculated_compute_mass.unwrap() > MAXIMUM_STANDARD_TRANSACTION_MASS { - return Err(NonStandardError::RejectMass( - transaction_id, - transaction.calculated_compute_mass.unwrap(), - MAXIMUM_STANDARD_TRANSACTION_MASS, - )); + let NonContextualMasses { compute_mass, transient_mass } = transaction.calculated_non_contextual_masses.unwrap(); + if compute_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { + return Err(NonStandardError::RejectComputeMass(transaction_id, compute_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); + } + if transient_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { + return Err(NonStandardError::RejectTransientMass(transaction_id, transient_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); } for (i, input) in transaction.tx.inputs.iter().enumerate() { @@ -172,9 +172,8 @@ impl Mempool { pub(crate) fn check_transaction_standard_in_context(&self, transaction: &MutableTransaction) -> NonStandardResult<()> { let transaction_id = transaction.id(); let contextual_mass = transaction.tx.mass(); - assert!(contextual_mass > 0, "expected to be set by consensus"); if contextual_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { - return Err(NonStandardError::RejectContextualMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); 
+ return Err(NonStandardError::RejectStorageMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); } for (i, input) in transaction.tx.inputs.iter().enumerate() { // It is safe to elide existence and index checks here since @@ -199,8 +198,10 @@ impl Mempool { } } - // TODO: For now, until wallets adapt, we don't require fee as function of full contextual_mass (but the fee/mass ratio will affect tx selection to block template) - let minimum_fee = self.minimum_required_transaction_relay_fee(transaction.calculated_compute_mass.unwrap()); + // TODO: For now, until wallets adapt, we only require minimum fee as function of compute mass (but the fee/mass ratio will + // use the max over all masses and will affect tx selection to block template) + let minimum_fee = + self.minimum_required_transaction_relay_fee(transaction.calculated_non_contextual_masses.unwrap().compute_mass); if transaction.calculated_fee.unwrap() < minimum_fee { return Err(NonStandardError::RejectInsufficientFee(transaction_id, transaction.calculated_fee.unwrap(), minimum_fee)); } @@ -241,6 +242,7 @@ mod tests { use kaspa_consensus_core::{ config::params::Params, constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, + mass::NonContextualMasses, network::NetworkType, subnets::SUBNETWORK_ID_NATIVE, tx::{ScriptPublicKey, ScriptVec, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput}, @@ -292,7 +294,7 @@ mod tests { for test in tests.iter() { for net in NetworkType::iter() { let params: Params = net.into(); - let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); + let mut config = Config::build_default(params.target_time_per_block(), false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; let counters = Arc::new(MiningCounters::default()); let mempool = Mempool::new(Arc::new(config), counters); @@ -377,7 +379,7 @@ mod tests { for test in tests { for net in 
NetworkType::iter() { let params: Params = net.into(); - let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); + let mut config = Config::build_default(params.target_time_per_block(), false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; let counters = Arc::new(MiningCounters::default()); let mempool = Mempool::new(Arc::new(config), counters); @@ -412,7 +414,7 @@ mod tests { fn new_mtx(tx: Transaction, mass: u64) -> MutableTransaction { let mut mtx = MutableTransaction::from_tx(tx); - mtx.calculated_compute_mass = Some(mass); + mtx.calculated_non_contextual_masses = Some(NonContextualMasses::new(mass, mass)); mtx } @@ -557,7 +559,7 @@ mod tests { for test in tests { for net in NetworkType::iter() { let params: Params = net.into(); - let config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); + let config = Config::build_default(params.target_time_per_block(), false, params.max_block_mass); let counters = Arc::new(MiningCounters::default()); let mempool = Mempool::new(Arc::new(config), counters); diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index 04407b411e..40527f9a5a 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -1,4 +1,4 @@ -use kaspa_consensus_core::constants::TX_VERSION; +use kaspa_consensus_core::{config::params::ForkedParam, constants::TX_VERSION}; pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: usize = 1_000_000; pub(crate) const DEFAULT_MEMPOOL_SIZE_LIMIT: usize = 1_000_000_000; @@ -30,14 +30,14 @@ pub struct Config { pub maximum_transaction_count: usize, pub mempool_size_limit: usize, pub maximum_build_block_template_attempts: u64, - pub transaction_expire_interval_daa_score: u64, - pub transaction_expire_scan_interval_daa_score: u64, + pub transaction_expire_interval_daa_score: ForkedParam, + pub transaction_expire_scan_interval_daa_score: ForkedParam, pub 
transaction_expire_scan_interval_milliseconds: u64, - pub accepted_transaction_expire_interval_daa_score: u64, - pub accepted_transaction_expire_scan_interval_daa_score: u64, + pub accepted_transaction_expire_interval_daa_score: ForkedParam, + pub accepted_transaction_expire_scan_interval_daa_score: ForkedParam, pub accepted_transaction_expire_scan_interval_milliseconds: u64, - pub orphan_expire_interval_daa_score: u64, - pub orphan_expire_scan_interval_daa_score: u64, + pub orphan_expire_interval_daa_score: ForkedParam, + pub orphan_expire_scan_interval_daa_score: ForkedParam, pub maximum_orphan_transaction_mass: u64, pub maximum_orphan_transaction_count: u64, pub accept_non_standard: bool, @@ -45,7 +45,7 @@ pub struct Config { pub minimum_relay_transaction_fee: u64, pub minimum_standard_transaction_version: u16, pub maximum_standard_transaction_version: u16, - pub network_blocks_per_second: u64, + pub network_blocks_per_second: ForkedParam, } impl Config { @@ -54,14 +54,14 @@ impl Config { maximum_transaction_count: usize, mempool_size_limit: usize, maximum_build_block_template_attempts: u64, - transaction_expire_interval_daa_score: u64, - transaction_expire_scan_interval_daa_score: u64, + transaction_expire_interval_daa_score: ForkedParam, + transaction_expire_scan_interval_daa_score: ForkedParam, transaction_expire_scan_interval_milliseconds: u64, - accepted_transaction_expire_interval_daa_score: u64, - accepted_transaction_expire_scan_interval_daa_score: u64, + accepted_transaction_expire_interval_daa_score: ForkedParam, + accepted_transaction_expire_scan_interval_daa_score: ForkedParam, accepted_transaction_expire_scan_interval_milliseconds: u64, - orphan_expire_interval_daa_score: u64, - orphan_expire_scan_interval_daa_score: u64, + orphan_expire_interval_daa_score: ForkedParam, + orphan_expire_scan_interval_daa_score: ForkedParam, maximum_orphan_transaction_mass: u64, maximum_orphan_transaction_count: u64, accept_non_standard: bool, @@ -69,7 +69,7 @@ impl 
Config { minimum_relay_transaction_fee: u64, minimum_standard_transaction_version: u16, maximum_standard_transaction_version: u16, - network_blocks_per_second: u64, + network_blocks_per_second: ForkedParam, ) -> Self { Self { maximum_transaction_count, @@ -96,22 +96,28 @@ impl Config { /// Build a default config. /// The arguments should be obtained from the current consensus [`kaspa_consensus_core::config::params::Params`] instance. - pub const fn build_default(target_milliseconds_per_block: u64, relay_non_std_transactions: bool, max_block_mass: u64) -> Self { + pub fn build_default( + target_milliseconds_per_block: ForkedParam, + relay_non_std_transactions: bool, + max_block_mass: u64, + ) -> Self { Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, mempool_size_limit: DEFAULT_MEMPOOL_SIZE_LIMIT, maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, - transaction_expire_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, - transaction_expire_scan_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 - / target_milliseconds_per_block, + transaction_expire_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / v), + transaction_expire_scan_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / v), transaction_expire_scan_interval_milliseconds: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000, - accepted_transaction_expire_interval_daa_score: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 - / target_milliseconds_per_block, - accepted_transaction_expire_scan_interval_daa_score: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 - / target_milliseconds_per_block, + accepted_transaction_expire_interval_daa_score: target_milliseconds_per_block + .map(|v| 
DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / v), + accepted_transaction_expire_scan_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / v), accepted_transaction_expire_scan_interval_milliseconds: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000, - orphan_expire_interval_daa_score: DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, - orphan_expire_scan_interval_daa_score: DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, + orphan_expire_interval_daa_score: target_milliseconds_per_block.map(|v| DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS * 1000 / v), + orphan_expire_scan_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / v), maximum_orphan_transaction_mass: DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_MASS, maximum_orphan_transaction_count: DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_COUNT, accept_non_standard: relay_non_std_transactions, @@ -119,7 +125,7 @@ impl Config { minimum_relay_transaction_fee: DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, minimum_standard_transaction_version: DEFAULT_MINIMUM_STANDARD_TRANSACTION_VERSION, maximum_standard_transaction_version: DEFAULT_MAXIMUM_STANDARD_TRANSACTION_VERSION, - network_blocks_per_second: 1000 / target_milliseconds_per_block, + network_blocks_per_second: target_milliseconds_per_block.map(|v| 1000 / v), } } diff --git a/mining/src/mempool/model/accepted_transactions.rs b/mining/src/mempool/model/accepted_transactions.rs index 94ad0d0761..b22eddafb7 100644 --- a/mining/src/mempool/model/accepted_transactions.rs +++ b/mining/src/mempool/model/accepted_transactions.rs @@ -43,7 +43,8 @@ impl AcceptedTransactions { pub(crate) fn expire(&mut self, virtual_daa_score: u64) { let now = unix_now(); - if virtual_daa_score < self.last_expire_scan_daa_score + self.config.accepted_transaction_expire_scan_interval_daa_score + if 
virtual_daa_score + < self.last_expire_scan_daa_score + self.config.accepted_transaction_expire_scan_interval_daa_score.get(virtual_daa_score) || now < self.last_expire_scan_time + self.config.accepted_transaction_expire_scan_interval_milliseconds { return; @@ -53,7 +54,7 @@ impl AcceptedTransactions { .transactions .iter() .filter_map(|(transaction_id, daa_score)| { - if virtual_daa_score > daa_score + self.config.accepted_transaction_expire_interval_daa_score { + if virtual_daa_score > daa_score + self.config.accepted_transaction_expire_interval_daa_score.get(virtual_daa_score) { Some(*transaction_id) } else { None diff --git a/mining/src/mempool/model/frontier/feerate_key.rs b/mining/src/mempool/model/frontier/feerate_key.rs index 843ef0ff13..e1fa0e8a56 100644 --- a/mining/src/mempool/model/frontier/feerate_key.rs +++ b/mining/src/mempool/model/frontier/feerate_key.rs @@ -1,5 +1,5 @@ use crate::{block_template::selector::ALPHA, mempool::model::tx::MempoolTransaction}; -use kaspa_consensus_core::tx::Transaction; +use kaspa_consensus_core::{mass::ContextualMasses, tx::Transaction}; use std::sync::Arc; #[derive(Clone, Debug)] @@ -77,9 +77,13 @@ impl Ord for FeerateTransactionKey { impl From<&MempoolTransaction> for FeerateTransactionKey { fn from(tx: &MempoolTransaction) -> Self { - let mass = tx.mtx.tx.mass(); + // NOTE: The code below is a mempool simplification reducing the various block mass units to a + // single one-dimension value (making it easier to select transactions for block templates). + // Future mempool improvements are expected to refine this behavior and use the multi-dimension values + // in order to optimize and increase block space usage. 
+ let mass = ContextualMasses::new(tx.mtx.tx.mass()) + .max(tx.mtx.calculated_non_contextual_masses.expect("masses are expected to be calculated")); let fee = tx.mtx.calculated_fee.expect("fee is expected to be populated"); - assert_ne!(mass, 0, "mass field is expected to be set when inserting to the mempool"); Self::new(fee, mass, tx.mtx.tx.clone()) } } diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs index f813e1a56b..851a0ed1f5 100644 --- a/mining/src/mempool/model/orphan_pool.rs +++ b/mining/src/mempool/model/orphan_pool.rs @@ -99,9 +99,9 @@ impl OrphanPool { } fn check_orphan_mass(&self, transaction: &MutableTransaction) -> RuleResult<()> { - if transaction.calculated_compute_mass.unwrap() > self.config.maximum_orphan_transaction_mass { + if transaction.calculated_non_contextual_masses.unwrap().max() > self.config.maximum_orphan_transaction_mass { return Err(RuleError::RejectBadOrphanMass( - transaction.calculated_compute_mass.unwrap(), + transaction.calculated_non_contextual_masses.unwrap().max(), self.config.maximum_orphan_transaction_mass, )); } @@ -265,7 +265,7 @@ impl OrphanPool { } pub(crate) fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { - if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score { + if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score.get(virtual_daa_score) { return Ok(()); } @@ -276,7 +276,7 @@ impl OrphanPool { .values() .filter_map(|x| { if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.orphan_expire_interval_daa_score + && virtual_daa_score > x.added_at_daa_score + self.config.orphan_expire_interval_daa_score.get(virtual_daa_score) { Some(x.id()) } else { diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 5741831d3f..c7a4f5a2b5 100644 --- 
a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -248,7 +248,7 @@ impl TransactionsPool { } // We are iterating ready txs by ascending feerate so the pending tx has lower feerate than all remaining txs - if tx.fee_rate() > feerate_threshold { + if tx.feerate() > feerate_threshold { let err = RuleError::RejectMempoolIsFull; debug!("Transaction {} with feerate {} has been rejected: {}", transaction.id(), feerate_threshold, err); return Err(err); @@ -314,7 +314,8 @@ impl TransactionsPool { pub(crate) fn collect_expired_low_priority_transactions(&mut self, virtual_daa_score: u64) -> Vec { let now = unix_now(); - if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score + if virtual_daa_score + < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score.get(virtual_daa_score) || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds { return vec![]; @@ -329,7 +330,8 @@ impl TransactionsPool { .values() .filter_map(|x| { if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score + && virtual_daa_score + > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score.get(virtual_daa_score) { Some(x.id()) } else { diff --git a/mining/src/mempool/model/tx.rs b/mining/src/mempool/model/tx.rs index 27bb87d09d..280b5ef0d0 100644 --- a/mining/src/mempool/model/tx.rs +++ b/mining/src/mempool/model/tx.rs @@ -22,10 +22,8 @@ impl MempoolTransaction { self.mtx.tx.id() } - pub(crate) fn fee_rate(&self) -> f64 { - let contextual_mass = self.mtx.tx.mass(); - assert!(contextual_mass > 0, "expected to be called for validated txs only"); - self.mtx.calculated_fee.unwrap() as f64 / contextual_mass as f64 + pub(crate) fn feerate(&self) -> f64 { + self.mtx.calculated_feerate().unwrap() } } diff --git 
a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 69e08019b6..db95f21108 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -25,7 +25,7 @@ impl Mempool { ) -> RuleResult { self.validate_transaction_unacceptance(&transaction)?; // Populate mass and estimated_size in the beginning, it will be used in multiple places throughout the validation and insertion. - transaction.calculated_compute_mass = Some(consensus.calculate_transaction_compute_mass(&transaction.tx)); + transaction.calculated_non_contextual_masses = Some(consensus.calculate_transaction_non_contextual_masses(&transaction.tx)); self.validate_transaction_in_isolation(&transaction)?; let feerate_threshold = self.get_replace_by_fee_constraint(&transaction, rbf_policy)?; self.populate_mempool_entries(&mut transaction); diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index 74449424c1..7358853883 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -1,5 +1,6 @@ use super::MiningCounters; use crate::manager::MiningManagerProxy; +use kaspa_consensusmanager::ConsensusManager; use kaspa_core::{ debug, info, task::{ @@ -16,6 +17,8 @@ const MONITOR: &str = "mempool-monitor"; pub struct MiningMonitor { mining_manager: MiningManagerProxy, + consensus_manager: Arc, + // Counters counters: Arc, @@ -28,11 +31,12 @@ pub struct MiningMonitor { impl MiningMonitor { pub fn new( mining_manager: MiningManagerProxy, + consensus_manager: Arc, counters: Arc, tx_script_cache_counters: Arc, tick_service: Arc, ) -> MiningMonitor { - MiningMonitor { mining_manager, counters, tx_script_cache_counters, tick_service } + MiningMonitor { mining_manager, consensus_manager, counters, tx_script_cache_counters, tick_service } } pub async fn worker(self: &Arc) { @@ -66,7 +70,11 @@ impl MiningMonitor { delta.low_priority_tx_counts, delta.tx_accepted_counts, ); - let 
feerate_estimations = self.mining_manager.clone().get_realtime_feerate_estimations().await; + let feerate_estimations = self + .mining_manager + .clone() + .get_realtime_feerate_estimations(self.consensus_manager.consensus().unguarded_session().get_virtual_daa_score()) + .await; debug!("Realtime feerate estimations: {}", feerate_estimations); } if delta.tx_evicted_counts > 0 { diff --git a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index 28d3f58974..c0328504e4 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -13,7 +13,7 @@ use kaspa_consensus_core::{ tx::{TxResult, TxRuleError}, }, header::Header, - mass::transaction_estimated_serialized_size, + mass::{transaction_estimated_serialized_size, ContextualMasses, NonContextualMasses}, merkle::calc_hash_merkle_root, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, UtxoEntry}, utxo::utxo_collection::UtxoCollection, @@ -133,9 +133,7 @@ impl ConsensusApi for ConsensusMock { // At this point we know all UTXO entries are populated, so we can safely calculate the fee let total_in: u64 = mutable_tx.entries.iter().map(|x| x.as_ref().unwrap().amount).sum(); let total_out: u64 = mutable_tx.tx.outputs.iter().map(|x| x.value).sum(); - mutable_tx - .tx - .set_mass(self.calculate_transaction_storage_mass(mutable_tx).unwrap() + mutable_tx.calculated_compute_mass.unwrap()); + mutable_tx.tx.set_mass(self.calculate_transaction_contextual_masses(mutable_tx).unwrap().storage_mass); if mutable_tx.calculated_fee.is_none() { let calculated_fee = total_in - total_out; @@ -156,16 +154,13 @@ impl ConsensusApi for ConsensusMock { transactions.iter_mut().map(|x| self.validate_mempool_transaction(x, &Default::default())).collect() } - fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { - if transaction.is_coinbase() { - 0 - } else { - transaction_estimated_serialized_size(transaction) - } + fn 
calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> NonContextualMasses { + let mass = if transaction.is_coinbase() { 0 } else { transaction_estimated_serialized_size(transaction) }; + NonContextualMasses::new(mass, mass) } - fn calculate_transaction_storage_mass(&self, _transaction: &MutableTransaction) -> Option { - Some(0) + fn calculate_transaction_contextual_masses(&self, _transaction: &MutableTransaction) -> Option { + Some(ContextualMasses::new(0)) } fn get_virtual_daa_score(&self) -> u64 { diff --git a/protocol/flows/Cargo.toml b/protocol/flows/Cargo.toml index ff282d4e79..12bab051fe 100644 --- a/protocol/flows/Cargo.toml +++ b/protocol/flows/Cargo.toml @@ -14,6 +14,7 @@ kaspa-core.workspace = true kaspa-consensus-core.workspace = true kaspa-consensus-notify.workspace = true kaspa-p2p-lib.workspace = true +kaspa-p2p-mining.workspace = true kaspa-utils.workspace = true kaspa-utils-tower.workspace = true kaspa-hashes.workspace = true diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 14d4168aca..8ce2307da2 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -17,7 +17,7 @@ use kaspa_consensus_notify::{ notification::{Notification, PruningPointUtxoSetOverrideNotification}, root::ConsensusNotificationRoot, }; -use kaspa_consensusmanager::{BlockProcessingBatch, ConsensusInstance, ConsensusManager, ConsensusProxy}; +use kaspa_consensusmanager::{BlockProcessingBatch, ConsensusInstance, ConsensusManager, ConsensusProxy, ConsensusSessionOwned}; use kaspa_core::{ debug, info, kaspad_env::{name, version}, @@ -35,6 +35,7 @@ use kaspa_p2p_lib::{ pb::{kaspad_message::Payload, InvRelayBlockMessage}, ConnectionInitializer, Hub, KaspadHandshake, PeerKey, PeerProperties, Router, }; +use kaspa_p2p_mining::rule_engine::MiningRuleEngine; use kaspa_utils::iter::IterExtensions; use kaspa_utils::networking::PeerId; use parking_lot::{Mutex, RwLock}; @@ -228,9 +229,15 @@ pub 
struct FlowContextInner { // Special sampling logger used only for high-bps networks where logs must be throttled block_event_logger: Option, + // Bps upper bound + bps_upper_bound: usize, + // Orphan parameters orphan_resolution_range: u32, max_orphans: usize, + + // Mining rule engine + mining_rule_engine: Arc, } #[derive(Clone)] @@ -303,14 +310,16 @@ impl FlowContext { mining_manager: MiningManagerProxy, tick_service: Arc, notification_root: Arc, + hub: Hub, + mining_rule_engine: Arc, ) -> Self { - let hub = Hub::new(); - - let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (config.bps() as f64).log2().ceil() as u32; + let bps_upper_bound = config.bps().upper_bound() as usize; + let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (bps_upper_bound as f64).log2().ceil() as u32; // The maximum amount of orphans allowed in the orphans pool. This number is an approximation // of how many orphans there can possibly be on average bounded by an upper bound. - let max_orphans = (2u64.pow(orphan_resolution_range) as usize * config.ghostdag_k as usize).min(MAX_ORPHANS_UPPER_BOUND); + let max_orphans = + (2u64.pow(orphan_resolution_range) as usize * config.ghostdag_k().upper_bound() as usize).min(MAX_ORPHANS_UPPER_BOUND); Self { inner: Arc::new(FlowContextInner { node_id: Uuid::new_v4().into(), @@ -327,16 +336,18 @@ impl FlowContext { mining_manager, tick_service, notification_root, - block_event_logger: if config.bps() > 1 { Some(BlockEventLogger::new(config.bps() as usize)) } else { None }, + block_event_logger: if bps_upper_bound > 1 { Some(BlockEventLogger::new(bps_upper_bound)) } else { None }, + bps_upper_bound, orphan_resolution_range, max_orphans, config, + mining_rule_engine, }), } } pub fn block_invs_channel_size(&self) -> usize { - self.config.bps() as usize * Router::incoming_flow_baseline_channel_size() + self.bps_upper_bound * Router::incoming_flow_baseline_channel_size() } pub fn orphan_resolution_range(&self) -> u32 { @@ -495,12 
+506,36 @@ impl FlowContext { // Broadcast as soon as the block has been validated and inserted into the DAG self.hub.broadcast(make_message!(Payload::InvRelayBlock, InvRelayBlockMessage { hash: Some(hash.into()) })).await; + let daa_score = block.header.daa_score; self.on_new_block(consensus, Default::default(), block, virtual_state_task).await; - self.log_block_event(BlockLogEvent::Submit(hash)); + self.log_new_block_event(BlockLogEvent::Submit(hash), daa_score); Ok(()) } + /// [Crescendo] temp crescendo countdown logging + pub(super) fn log_new_block_event(&self, event: BlockLogEvent, daa_score: u64) { + if self.config.bps().before() == 1 && !self.config.crescendo_activation.is_active(daa_score) { + if let Some(dist) = self.config.crescendo_activation.is_within_range_before_activation(daa_score, 3600) { + match event { + BlockLogEvent::Relay(hash) => info!("Accepted block {} via relay \t [Crescendo countdown: -{}]", hash, dist), + BlockLogEvent::Submit(hash) => { + info!("Accepted block {} via submit block \t [Crescendo countdown: -{}]", hash, dist) + } + _ => {} + } + } else { + match event { + BlockLogEvent::Relay(hash) => info!("Accepted block {} via relay", hash), + BlockLogEvent::Submit(hash) => info!("Accepted block {} via submit block", hash), + _ => {} + } + } + } else { + self.log_block_event(event); + } + } + pub fn log_block_event(&self, event: BlockLogEvent) { if let Some(logger) = self.block_event_logger.as_ref() { logger.log(event) @@ -556,7 +591,7 @@ impl FlowContext { } // Transaction relay is disabled if the node is out of sync and thus not mining - if !consensus.async_is_nearly_synced().await { + if !self.is_nearly_synced(consensus).await { return; } @@ -595,6 +630,11 @@ impl FlowContext { } } + pub async fn is_nearly_synced(&self, session: &ConsensusSessionOwned) -> bool { + let sink_daa_score_and_timestamp = session.async_get_sink_daa_score_timestamp().await; + self.mining_rule_engine.is_nearly_synced(sink_daa_score_and_timestamp) + } + /// 
Notifies that the UTXO set was reset due to pruning point change via IBD. pub fn on_pruning_point_utxoset_override(&self) { // Notifications from the flow context might be ignored if the inner channel is already closing diff --git a/protocol/flows/src/service.rs b/protocol/flows/src/service.rs index 33750697d0..1633e26db0 100644 --- a/protocol/flows/src/service.rs +++ b/protocol/flows/src/service.rs @@ -65,9 +65,12 @@ impl AsyncService for P2pService { // Prepare a shutdown signal receiver let shutdown_signal = self.shutdown.listener.clone(); - let p2p_adaptor = + let p2p_adaptor = if self.inbound_limit == 0 { + Adaptor::client_only(self.flow_context.hub().clone(), self.flow_context.clone(), self.counters.clone()) + } else { Adaptor::bidirectional(self.listen, self.flow_context.hub().clone(), self.flow_context.clone(), self.counters.clone()) - .unwrap(); + .unwrap() + }; let connection_manager = ConnectionManager::new( p2p_adaptor.clone(), self.outbound_target, diff --git a/protocol/flows/src/v5/blockrelay/flow.rs b/protocol/flows/src/v5/blockrelay/flow.rs index 49353c2865..37a34a6b1d 100644 --- a/protocol/flows/src/v5/blockrelay/flow.rs +++ b/protocol/flows/src/v5/blockrelay/flow.rs @@ -116,7 +116,7 @@ impl HandleRelayInvsFlow { } } - if self.ctx.is_ibd_running() && !session.async_is_nearly_synced().await { + if self.ctx.is_ibd_running() && !self.ctx.is_nearly_synced(&session).await { // Note: If the node is considered nearly synced we continue processing relay blocks even though an IBD is in progress. // For instance this means that downloading a side-chain from a delayed node does not interop the normal flow of live blocks. 
debug!("Got relay block {} while in IBD and the node is out of sync, continuing...", inv.hash); @@ -209,8 +209,9 @@ impl HandleRelayInvsFlow { // can continue processing the following relay blocks let ctx = self.ctx.clone(); tokio::spawn(async move { + let daa_score = block.header.daa_score; ctx.on_new_block(&session, ancestor_batch, block, virtual_state_task).await; - ctx.log_block_event(BlockLogEvent::Relay(inv.hash)); + ctx.log_new_block_event(BlockLogEvent::Relay(inv.hash), daa_score); }); } } diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/v5/ibd/flow.rs index 0dd7fe64f1..c2509f9e4e 100644 --- a/protocol/flows/src/v5/ibd/flow.rs +++ b/protocol/flows/src/v5/ibd/flow.rs @@ -6,6 +6,7 @@ use crate::{ }, }; use futures::future::{join_all, select, try_join_all, Either}; +use itertools::Itertools; use kaspa_consensus_core::{ api::BlockValidationFuture, block::Block, @@ -181,22 +182,35 @@ impl IbdFlow { // means it's in its antichain (because if `highest_known_syncer_chain_hash` was in // the pruning point's past the pruning point itself would be // `highest_known_syncer_chain_hash`). So it means there's a finality conflict. - // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) + // + // TODO (relaxed): consider performing additional actions on finality conflicts in addition + // to disconnecting from the peer (e.g., banning, rpc notification) return Ok(IbdType::None); } let hst_header = consensus.async_get_header(consensus.async_get_headers_selected_tip().await).await.unwrap(); - if relay_header.blue_score >= hst_header.blue_score + self.ctx.config.pruning_depth - && relay_header.blue_work > hst_header.blue_work - { - if unix_now() > consensus.async_creation_timestamp().await + self.ctx.config.finality_duration() { + // [Crescendo]: use the post crescendo pruning depth depending on hst's DAA score. 
+ // Having a shorter depth for this condition for the fork transition period (if hst is shortly before activation) + // is negligible since there are other conditions required for activating an headers proof IBD. The important + // thing is that we eventually adjust to the longer period. + let pruning_depth = self.ctx.config.pruning_depth().get(hst_header.daa_score); + if relay_header.blue_score >= hst_header.blue_score + pruning_depth && relay_header.blue_work > hst_header.blue_work { + // [Crescendo]: switch to the new *shorter* finality duration only after sufficient time has passed + // since activation (measured via the new *larger* finality depth). + // Note: these are not critical execution paths so such estimation heuristics are completely ok in this context. + let finality_duration_in_milliseconds = self + .ctx + .config + .finality_duration_in_milliseconds() + .get(hst_header.daa_score.saturating_sub(self.ctx.config.finality_depth().upper_bound())); + if unix_now() > consensus.async_creation_timestamp().await + finality_duration_in_milliseconds { let fp = consensus.async_finality_point().await; let fp_ts = consensus.async_get_header(fp).await?.timestamp; - if unix_now() < fp_ts + self.ctx.config.finality_duration() * 3 / 2 { + if unix_now() < fp_ts + finality_duration_in_milliseconds * 3 / 2 { // We reject the headers proof if the node has a relatively up-to-date finality point and current // consensus has matured for long enough (and not recently synced). 
This is mostly a spam-protector // since subsequent checks identify these violations as well - // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) + // TODO (relaxed): consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) return Ok(IbdType::None); } } @@ -220,7 +234,7 @@ impl IbdFlow { let pruning_point = self.sync_and_validate_pruning_proof(&staging_session, relay_block).await?; self.sync_headers(&staging_session, syncer_virtual_selected_parent, pruning_point, relay_block).await?; - staging_session.async_validate_pruning_points().await?; + staging_session.async_validate_pruning_points(syncer_virtual_selected_parent).await?; self.validate_staging_timestamps(&self.ctx.consensus().session().await, &staging_session).await?; self.sync_pruning_point_utxoset(&staging_session, pruning_point).await?; Ok(()) @@ -232,7 +246,11 @@ impl IbdFlow { // Pruning proof generation and communication might take several minutes, so we allow a long 10 minute timeout let msg = dequeue_with_timeout!(self.incoming_route, Payload::PruningPointProof, Duration::from_secs(600))?; let proof: PruningPointProof = msg.try_into()?; - debug!("received proof with overall {} headers", proof.iter().map(|l| l.len()).sum::()); + info!( + "Received headers proof with overall {} headers ({} unique)", + proof.iter().map(|l| l.len()).sum::(), + proof.iter().flatten().unique_by(|h| h.hash).count() + ); let proof_metadata = PruningProofMetadata::new(relay_block.header.blue_work); @@ -272,7 +290,7 @@ impl IbdFlow { // Check if past pruning points violate finality of current consensus if self.ctx.consensus().session().await.async_are_pruning_points_violating_finality(pruning_points.clone()).await { - // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc 
notification) + // TODO (relaxed): consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) return Err(ProtocolError::Other("pruning points are violating finality")); } @@ -303,7 +321,7 @@ impl IbdFlow { .spawn_blocking(move |c| { let ref_proof = proof.clone(); c.apply_pruning_proof(proof, &trusted_set)?; - c.import_pruning_points(pruning_points); + c.import_pruning_points(pruning_points)?; info!("Building the proof which was just applied (sanity test)"); let built_proof = c.get_pruning_point_proof(); @@ -334,13 +352,13 @@ impl IbdFlow { .clone() .spawn_blocking(move |c| { c.apply_pruning_proof(proof, &trusted_set)?; - c.import_pruning_points(pruning_points); + c.import_pruning_points(pruning_points)?; Result::<_, ProtocolError>::Ok(trusted_set) }) .await?; } - // TODO: add logs to staging commit process + // TODO (relaxed): add logs to staging commit process info!("Starting to process {} trusted blocks", trusted_set.len()); let mut last_time = Instant::now(); @@ -353,7 +371,7 @@ impl IbdFlow { last_time = now; last_index = i; } - // TODO: queue and join in batches + // TODO (relaxed): queue and join in batches staging.validate_and_insert_trusted_block(tb).virtual_state_task.await?; } info!("Done processing trusted blocks"); @@ -413,6 +431,14 @@ impl IbdFlow { progress_reporter.report_completion(prev_chunk_len); } + if consensus.async_get_block_status(syncer_virtual_selected_parent).await.is_none() { + // If the syncer's claimed sink header has still not been received, the peer is misbehaving + return Err(ProtocolError::OtherOwned(format!( + "did not receive syncer's virtual selected parent {} from peer {} during header download", + syncer_virtual_selected_parent, self.router + ))); + } + self.sync_missing_relay_past_headers(consensus, syncer_virtual_selected_parent, relay_block.hash()).await?; Ok(()) @@ -453,7 +479,7 @@ impl IbdFlow { if 
consensus.async_get_block_status(relay_block_hash).await.is_none() { // If the relay block has still not been received, the peer is misbehaving Err(ProtocolError::OtherOwned(format!( - "did not receive relay block {} from peer {} during block download", + "did not receive relay block {} from peer {} during header download", relay_block_hash, self.router ))) } else { @@ -503,7 +529,7 @@ staging selected tip ({}) is too small or negative. Aborting IBD...", } async fn sync_missing_block_bodies(&mut self, consensus: &ConsensusProxy, high: Hash) -> Result<(), ProtocolError> { - // TODO: query consensus in batches + // TODO (relaxed): query consensus in batches let sleep_task = sleep(Duration::from_secs(2)); let hashes_task = consensus.async_get_missing_block_body_hashes(high); tokio::pin!(sleep_task); diff --git a/protocol/flows/src/v5/ibd/negotiate.rs b/protocol/flows/src/v5/ibd/negotiate.rs index b001d3a061..20963c14e7 100644 --- a/protocol/flows/src/v5/ibd/negotiate.rs +++ b/protocol/flows/src/v5/ibd/negotiate.rs @@ -131,7 +131,7 @@ impl IbdFlow { self.router, negotiation_restart_counter ))); } - if negotiation_restart_counter > self.ctx.config.bps() { + if negotiation_restart_counter > self.ctx.config.bps().upper_bound() { // bps is just an intuitive threshold here warn!("IBD chain negotiation with syncer {} restarted {} times", self.router, negotiation_restart_counter); } else { diff --git a/protocol/flows/src/v5/request_antipast.rs b/protocol/flows/src/v5/request_antipast.rs index f5881efc1a..521418a288 100644 --- a/protocol/flows/src/v5/request_antipast.rs +++ b/protocol/flows/src/v5/request_antipast.rs @@ -46,7 +46,9 @@ impl HandleAntipastRequests { // intersected by past of the relayed block. We do not expect the relay block to be too much after // the sink (in fact usually it should be in its past or anticone), hence we bound the expected traversal to be // in the order of `mergeset_size_limit`. 
- let hashes = session.async_get_antipast_from_pov(block, context, Some(self.ctx.config.mergeset_size_limit * 2)).await?; + let hashes = session + .async_get_antipast_from_pov(block, context, Some(self.ctx.config.mergeset_size_limit().upper_bound() * 4)) + .await?; let mut headers = session .spawn_blocking(|c| hashes.into_iter().map(|h| c.get_header(h)).collect::, ConsensusError>>()) .await?; diff --git a/protocol/flows/src/v5/request_headers.rs b/protocol/flows/src/v5/request_headers.rs index d8f12b6f06..38f2fcac81 100644 --- a/protocol/flows/src/v5/request_headers.rs +++ b/protocol/flows/src/v5/request_headers.rs @@ -37,7 +37,7 @@ impl RequestHeadersFlow { async fn start_impl(&mut self) -> Result<(), ProtocolError> { const MAX_BLOCKS: usize = 1 << 10; // Internal consensus logic requires that `max_blocks > mergeset_size_limit` - let max_blocks = max(MAX_BLOCKS, self.ctx.config.mergeset_size_limit as usize + 1); + let max_blocks = max(MAX_BLOCKS, self.ctx.config.mergeset_size_limit().upper_bound() as usize + 1); loop { let (msg, request_id) = dequeue_with_request_id!(self.incoming_route, Payload::RequestHeaders)?; diff --git a/protocol/flows/src/v5/txrelay/flow.rs b/protocol/flows/src/v5/txrelay/flow.rs index af7e2b6c7d..25e6dd6172 100644 --- a/protocol/flows/src/v5/txrelay/flow.rs +++ b/protocol/flows/src/v5/txrelay/flow.rs @@ -116,7 +116,7 @@ impl RelayTransactionsFlow { let session = self.ctx.consensus().unguarded_session(); // Transaction relay is disabled if the node is out of sync and thus not mining - if !session.async_is_nearly_synced().await { + if !self.ctx.is_nearly_synced(&session).await { continue; } diff --git a/protocol/flows/src/v6/mod.rs b/protocol/flows/src/v6/mod.rs index 8736f4e0b9..b73fae2d3f 100644 --- a/protocol/flows/src/v6/mod.rs +++ b/protocol/flows/src/v6/mod.rs @@ -128,7 +128,7 @@ pub fn register(ctx: FlowContext, router: Arc) -> Vec> { let invs_route = router.subscribe_with_capacity(vec![KaspadMessagePayloadType::InvRelayBlock], 
ctx.block_invs_channel_size()); let shared_invs_route = SharedIncomingRoute::new(invs_route); - let num_relay_flows = (ctx.config.bps() as usize / 2).max(1); + let num_relay_flows = (ctx.config.bps().upper_bound() as usize / 2).max(1); flows.extend((0..num_relay_flows).map(|_| { Box::new(HandleRelayInvsFlow::new( ctx.clone(), diff --git a/protocol/mining/Cargo.toml b/protocol/mining/Cargo.toml new file mode 100644 index 0000000000..03f1266324 --- /dev/null +++ b/protocol/mining/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "kaspa-p2p-mining" +description = "Kaspa p2p mining" +rust-version.workspace = true +version.workspace = true +edition.workspace = true +authors.workspace = true +include.workspace = true +license.workspace = true +repository.workspace = true + +[dependencies] +kaspa-core.workspace = true +kaspa-consensus-core.workspace = true +kaspa-consensusmanager.workspace = true +kaspa-mining-errors.workspace = true +kaspa-hashes.workspace = true +kaspa-math.workspace = true +kaspa-p2p-lib.workspace = true +kaspa-utils.workspace = true +kaspa-utils-tower.workspace = true +log.workspace = true +tokio.workspace = true diff --git a/protocol/mining/src/lib.rs b/protocol/mining/src/lib.rs new file mode 100644 index 0000000000..0dc982f48e --- /dev/null +++ b/protocol/mining/src/lib.rs @@ -0,0 +1,2 @@ +pub mod rule_engine; +pub mod rules; diff --git a/protocol/mining/src/rule_engine.rs b/protocol/mining/src/rule_engine.rs new file mode 100644 index 0000000000..ac00c08942 --- /dev/null +++ b/protocol/mining/src/rule_engine.rs @@ -0,0 +1,183 @@ +use std::{ + sync::{atomic::AtomicBool, Arc}, + time::{Duration, Instant}, +}; + +use kaspa_consensus_core::{ + api::counters::ProcessingCounters, + config::{params::NEW_DIFFICULTY_WINDOW_DURATION, Config}, + daa_score_timestamp::DaaScoreTimestamp, + mining_rules::MiningRules, + network::NetworkType::{Mainnet, Testnet}, +}; +use kaspa_consensusmanager::ConsensusManager; +use kaspa_core::{ + task::{ + service::{AsyncService, 
AsyncServiceFuture}, + tick::{TickReason, TickService}, + }, + time::unix_now, + trace, +}; +use kaspa_p2p_lib::Hub; + +use crate::rules::{ + blue_parents_only_rule::BlueParentsOnlyRule, mining_rule::MiningRule, no_transactions_rule::NoTransactionsRule, + sync_rate_rule::SyncRateRule, ExtraData, +}; + +const RULE_ENGINE: &str = "mining-rule-engine"; +pub const SNAPSHOT_INTERVAL: u64 = 10; + +#[derive(Clone)] +pub struct MiningRuleEngine { + config: Arc, + processing_counters: Arc, + tick_service: Arc, + // Sync Rate Rule: Allow mining if sync rate is below threshold AND finality point is "recent" (defined below) + use_sync_rate_rule: Arc, + consensus_manager: Arc, + hub: Hub, + mining_rules: Arc, + rules: Vec>, +} + +impl MiningRuleEngine { + pub async fn worker(self: &Arc) { + let mut last_snapshot = self.processing_counters.snapshot(); + let mut last_log_time = Instant::now(); + loop { + // START: Sync monitor + if let TickReason::Shutdown = self.tick_service.tick(Duration::from_secs(SNAPSHOT_INTERVAL)).await { + // Let the system print final logs before exiting + tokio::time::sleep(Duration::from_millis(500)).await; + break; + } + + let now = Instant::now(); + let elapsed_time = now - last_log_time; + if elapsed_time.as_secs() == 0 { + continue; + } + + let snapshot = self.processing_counters.snapshot(); + + // Subtract the snapshots + let delta = &snapshot - &last_snapshot; + + if elapsed_time.as_secs() > 0 { + let session = self.consensus_manager.consensus().unguarded_session(); + let sink_daa_timestamp = session.async_get_sink_daa_score_timestamp().await; + + let finality_point = session.async_finality_point().await; + let finality_point_timestamp = session.async_get_header(finality_point).await.unwrap().timestamp; + + let extra_data = ExtraData { + finality_point_timestamp, + target_time_per_block: self.config.target_time_per_block().get(sink_daa_timestamp.daa_score), + has_sufficient_peer_connectivity: self.has_sufficient_peer_connectivity(), + 
finality_duration: self.config.finality_duration_in_milliseconds().get(sink_daa_timestamp.daa_score), + elapsed_time, + sink_daa_score_timestamp: session.async_get_sink_daa_score_timestamp().await, + merge_depth: self.config.merge_depth().get(sink_daa_timestamp.daa_score), + }; + + trace!("Current Mining Rule: {:?}", self.mining_rules); + + // Check for all the rules + for rule in &self.rules { + rule.check_rule(&delta, &extra_data); + } + } + + last_snapshot = snapshot; + last_log_time = now; + } + } + + pub fn new( + consensus_manager: Arc, + config: Arc, + processing_counters: Arc, + tick_service: Arc, + hub: Hub, + mining_rules: Arc, + ) -> Self { + let use_sync_rate_rule = Arc::new(AtomicBool::new(false)); + let rules: Vec> = vec![ + Arc::new(SyncRateRule::new(use_sync_rate_rule.clone())), + Arc::new(BlueParentsOnlyRule::new(mining_rules.blue_parents_only.clone())), + Arc::new(NoTransactionsRule::new(mining_rules.no_transactions.clone())), + ]; + + Self { consensus_manager, config, processing_counters, tick_service, hub, use_sync_rate_rule, mining_rules, rules } + } + + pub fn should_mine(&self, sink_daa_score_timestamp: DaaScoreTimestamp) -> bool { + if !self.has_sufficient_peer_connectivity() { + return false; + } + + let is_nearly_synced = self.is_nearly_synced(sink_daa_score_timestamp); + + is_nearly_synced || self.use_sync_rate_rule.load(std::sync::atomic::Ordering::Relaxed) + } + + /// Returns whether the sink timestamp is recent enough and the node is considered synced or nearly synced. + /// + /// This info is used to determine if it's ok to use a block template from this node for mining purposes. + pub fn is_nearly_synced(&self, sink_daa_score_timestamp: DaaScoreTimestamp) -> bool { + let sink_timestamp = sink_daa_score_timestamp.timestamp; + + if self.config.net.is_mainnet() { + // We consider the node close to being synced if the sink (virtual selected parent) block + // timestamp is within DAA window duration far in the past. 
Blocks mined over such DAG state would + // enter the DAA window of fully-synced nodes and thus contribute to overall network difficulty + // + // [Crescendo]: both durations are nearly equal so this decision is negligible + unix_now() + < sink_timestamp + + self.config.expected_difficulty_window_duration_in_milliseconds().get(sink_daa_score_timestamp.daa_score) + } else { + // For testnets we consider the node to be synced if the sink timestamp is within a time range which + // is overwhelmingly unlikely to pass without mined blocks even if net hashrate decreased dramatically. + // + // This period is smaller than the above mainnet calculation in order to ensure that an IBDing miner + // with significant testnet hashrate does not overwhelm the network with deep side-DAGs. + // + // We use DAA duration as baseline and scale it down with BPS (and divide by 3 for mining only when very close to current time on 10BPS testnets) + let max_expected_duration_without_blocks_in_milliseconds = + self.config.prior_target_time_per_block * NEW_DIFFICULTY_WINDOW_DURATION / 3; // = DAA duration in milliseconds / bps / 3 + unix_now() < sink_timestamp + max_expected_duration_without_blocks_in_milliseconds + } + } + + fn has_sufficient_peer_connectivity(&self) -> bool { + // Other network types can be used in an isolated environment without peers + !matches!(self.config.net.network_type, Mainnet | Testnet) || self.hub.has_peers() + } +} + +impl AsyncService for MiningRuleEngine { + fn ident(self: Arc) -> &'static str { + RULE_ENGINE + } + + fn start(self: Arc) -> AsyncServiceFuture { + Box::pin(async move { + self.worker().await; + Ok(()) + }) + } + + fn signal_exit(self: Arc) { + trace!("sending an exit signal to {}", RULE_ENGINE); + } + + fn stop(self: Arc) -> AsyncServiceFuture { + Box::pin(async move { + trace!("{} stopped", RULE_ENGINE); + Ok(()) + }) + } +} diff --git a/protocol/mining/src/rules/blue_parents_only_rule.rs b/protocol/mining/src/rules/blue_parents_only_rule.rs new 
file mode 100644 index 0000000000..defcc551a6 --- /dev/null +++ b/protocol/mining/src/rules/blue_parents_only_rule.rs @@ -0,0 +1,83 @@ +use std::sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, +}; + +use kaspa_consensus_core::api::counters::ProcessingCountersSnapshot; +use kaspa_core::{trace, warn}; + +use super::{mining_rule::MiningRule, ExtraData}; + +/// BlueParentsOnlyRule +/// Attempt to recover from high build block template times (possibly caused by merging red blocks) +/// by disallowing reds in the mergeset. +/// +/// Trigger: build block template call durations above threshold were observed and there were no calls +/// that were below threshold +/// Recovery: build block template call durations within threshold observed and +/// a merge depth bound period has passed +pub struct BlueParentsOnlyRule { + pub is_enabled: Arc, + pub trigger_daa_score: AtomicU64, + pub within_threshold_calls_after_trigger: AtomicU64, + pub above_threshold_calls_after_trigger: AtomicU64, +} + +impl BlueParentsOnlyRule { + pub fn new(is_enabled: Arc) -> Self { + Self { + is_enabled, + trigger_daa_score: AtomicU64::new(0), + within_threshold_calls_after_trigger: AtomicU64::new(0), + above_threshold_calls_after_trigger: AtomicU64::new(0), + } + } +} + +impl MiningRule for BlueParentsOnlyRule { + fn check_rule(&self, delta: &ProcessingCountersSnapshot, extra_data: &ExtraData) { + let sink_daa_score = extra_data.sink_daa_score_timestamp.daa_score; + // DAA score may not be monotonic, so use saturating_sub + let score_since_trigger = sink_daa_score.saturating_sub(self.trigger_daa_score.load(Ordering::Relaxed)); + + if self.is_enabled.load(Ordering::SeqCst) { + // Rule is triggered. 
Check for recovery + let within_threshold_calls = + self.within_threshold_calls_after_trigger.fetch_add(delta.build_block_template_within_threshold, Ordering::SeqCst) + + delta.build_block_template_within_threshold; + let above_threshold_calls = + self.above_threshold_calls_after_trigger.fetch_add(delta.build_block_template_above_threshold, Ordering::SeqCst) + + delta.build_block_template_above_threshold; + + if score_since_trigger >= extra_data.merge_depth && within_threshold_calls > 0 { + // Recovery condition met: A merge depth bound has passed and calls within threshold were observed + self.is_enabled.store(false, Ordering::SeqCst); + self.within_threshold_calls_after_trigger.store(0, Ordering::SeqCst); + self.above_threshold_calls_after_trigger.store(0, Ordering::SeqCst); + warn!("BlueParentsOnlyRule: recovered | No. of Block Template Build Times within/above threshold since trigger: {}/{} | Score since trigger: {}", + within_threshold_calls, above_threshold_calls, score_since_trigger); + } else { + warn!( + "BlueParentsOnlyRule: active | No. of Block Template Build Times within/above threshold since trigger: {}/{} | Score since trigger: {}", + within_threshold_calls, above_threshold_calls, score_since_trigger + ); + } + } else { + // Rule is not triggered. Check for trigger + if delta.build_block_template_within_threshold == 0 && delta.build_block_template_above_threshold > 0 { + self.is_enabled.store(true, Ordering::SeqCst); + self.trigger_daa_score.store(sink_daa_score, Ordering::SeqCst); + warn!( + "BlueParentsOnlyRule: triggered | No. of Block Template Build Times within/above threshold: {}/{}", + delta.build_block_template_within_threshold, delta.build_block_template_above_threshold + ); + } else { + trace!( + "BlueParentsOnlyRule: normal | No. 
of Block Template Build Times within/above threshold: {}/{}", + delta.build_block_template_within_threshold, + delta.build_block_template_above_threshold + ); + } + } + } +} diff --git a/protocol/mining/src/rules/mining_rule.rs b/protocol/mining/src/rules/mining_rule.rs new file mode 100644 index 0000000000..be60420bba --- /dev/null +++ b/protocol/mining/src/rules/mining_rule.rs @@ -0,0 +1,7 @@ +use kaspa_consensus_core::api::counters::ProcessingCountersSnapshot; + +use super::ExtraData; + +pub trait MiningRule: Send + Sync + 'static { + fn check_rule(&self, delta: &ProcessingCountersSnapshot, extra_data: &ExtraData); +} diff --git a/protocol/mining/src/rules/mod.rs b/protocol/mining/src/rules/mod.rs new file mode 100644 index 0000000000..03dff89ed2 --- /dev/null +++ b/protocol/mining/src/rules/mod.rs @@ -0,0 +1,19 @@ +use std::time::Duration; + +use kaspa_consensus_core::daa_score_timestamp::DaaScoreTimestamp; + +pub mod blue_parents_only_rule; +pub mod no_transactions_rule; +pub mod sync_rate_rule; + +pub mod mining_rule; + +pub struct ExtraData { + pub finality_point_timestamp: u64, + pub target_time_per_block: u64, + pub has_sufficient_peer_connectivity: bool, + pub finality_duration: u64, + pub elapsed_time: Duration, + pub sink_daa_score_timestamp: DaaScoreTimestamp, + pub merge_depth: u64, +} diff --git a/protocol/mining/src/rules/no_transactions_rule.rs b/protocol/mining/src/rules/no_transactions_rule.rs new file mode 100644 index 0000000000..35726aca73 --- /dev/null +++ b/protocol/mining/src/rules/no_transactions_rule.rs @@ -0,0 +1,60 @@ +use std::sync::{ + atomic::{AtomicBool, AtomicU8, Ordering}, + Arc, +}; + +use kaspa_consensus_core::api::counters::ProcessingCountersSnapshot; +use kaspa_core::{trace, warn}; + +use super::{mining_rule::MiningRule, ExtraData}; + +/// NoTransactionsRule +/// Attempt to recover from consistent BadMerkleRoot errors by mining blocks without +/// any transactions. 
+/// +/// Trigger: BadMerkleRoot error count is higher than the number of successfully validated blocks +/// Recovery: Two cooldown periods have passed +pub struct NoTransactionsRule { + pub is_enabled: Arc, + pub cooldown: AtomicU8, +} + +impl NoTransactionsRule { + pub fn new(is_enabled: Arc) -> Self { + Self { is_enabled, cooldown: AtomicU8::new(0) } + } +} + +impl MiningRule for NoTransactionsRule { + fn check_rule(&self, delta: &ProcessingCountersSnapshot, _extra_data: &ExtraData) { + let cooldown_count = self.cooldown.load(Ordering::SeqCst); + + if cooldown_count > 0 { + // Recovering + if delta.submit_block_success_count > 0 || self.cooldown.fetch_sub(1, Ordering::SeqCst) == 1 { + // Recovery condition #1: Any submit block RPC call succeeded in this interval + // Recovery condition #2: Cooldown period has passed (important for low hashrate miners whose successful blocks are few and far between) + self.cooldown.store(0, Ordering::SeqCst); + self.is_enabled.store(false, Ordering::SeqCst); + warn!("NoTransactionsRule: recovered | Bad Merkle Root Count: {}", delta.submit_block_bad_merkle_root_count); + } + } else if delta.submit_block_bad_merkle_root_count > 0 && delta.submit_block_success_count == 0 { + // Triggered state + // When submit block BadMerkleRoot errors occurred and there were no successfully submitted blocks + if let Ok(false) = self.is_enabled.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) { + warn!( + "NoTransactionsRule: triggered | Bad Merkle Root Count: {} | Successfully submitted blocks: {}", + delta.submit_block_bad_merkle_root_count, delta.submit_block_success_count + ); + self.cooldown.store(2, Ordering::Relaxed); + } + } else { + // Normal state + trace!( + "NoTransactionsRule: normal | Bad Merkle Root Count: {} | Successfully submitted blocks: {}", + delta.submit_block_bad_merkle_root_count, + delta.submit_block_success_count, + ); + } + } +} diff --git a/protocol/mining/src/rules/sync_rate_rule.rs 
b/protocol/mining/src/rules/sync_rate_rule.rs new file mode 100644 index 0000000000..02c32eba50 --- /dev/null +++ b/protocol/mining/src/rules/sync_rate_rule.rs @@ -0,0 +1,114 @@ +use std::{ + collections::VecDeque, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, RwLock, + }, +}; + +use kaspa_consensus_core::api::counters::ProcessingCountersSnapshot; +use kaspa_core::{time::unix_now, trace, warn}; + +use crate::rule_engine::SNAPSHOT_INTERVAL; + +use super::{mining_rule::MiningRule, ExtraData}; + +// within a 5 minute period, we expect sync rate +const SYNC_RATE_THRESHOLD: f64 = 0.90; +// number of samples you expect in a 5 minute interval, sampled every 10s +const SYNC_RATE_WINDOW_MAX_SIZE: usize = 5 * 60 / (SNAPSHOT_INTERVAL as usize); +// number of samples required before considering this rule. This allows using the sync rate rule +// even before the full window size is reached. Represents the number of samples in 1 minute +const SYNC_RATE_WINDOW_MIN_THRESHOLD: usize = 60 / (SNAPSHOT_INTERVAL as usize); + +pub struct SyncRateRule { + pub use_sync_rate_rule: Arc, + sync_rate_samples: RwLock>, + total_expected_blocks: AtomicU64, + total_received_blocks: AtomicU64, +} + +impl SyncRateRule { + pub fn new(use_sync_rate_rule: Arc) -> Self { + Self { + use_sync_rate_rule, + sync_rate_samples: RwLock::new(VecDeque::new()), + total_expected_blocks: AtomicU64::new(0), + total_received_blocks: AtomicU64::new(0), + } + } + + /// Adds current observation of received and expected blocks to the sample window, and removes + /// old samples. Returns true if there are enough samples in the window to start triggering the + /// sync rate rule. 
+ fn update_sync_rate_window(&self, received_blocks: u64, expected_blocks: u64) -> bool { + self.total_received_blocks.fetch_add(received_blocks, Ordering::SeqCst); + self.total_expected_blocks.fetch_add(expected_blocks, Ordering::SeqCst); + + let mut samples = self.sync_rate_samples.write().unwrap(); + + samples.push_back((received_blocks, expected_blocks)); + + // Remove old samples. Usually is a single op after the window is full per 10s: + while samples.len() > SYNC_RATE_WINDOW_MAX_SIZE { + let (old_received_blocks, old_expected_blocks) = samples.pop_front().unwrap(); + self.total_received_blocks.fetch_sub(old_received_blocks, Ordering::SeqCst); + self.total_expected_blocks.fetch_sub(old_expected_blocks, Ordering::SeqCst); + } + + samples.len() >= SYNC_RATE_WINDOW_MIN_THRESHOLD + } +} + +/// SyncRateRule +/// Allow mining even if the node is "not nearly synced" if the sync rate is below threshold +/// and the finality point is recent. This is to prevent the network from under-mining and to allow +/// the network to automatically recover from any short-term mining halt.
+/// +/// Trigger: Sync rate is below threshold and finality point is recent +/// Recovery: Sync rate is back above threshold +impl MiningRule for SyncRateRule { + fn check_rule(&self, delta: &ProcessingCountersSnapshot, extra_data: &ExtraData) { + let expected_blocks = (extra_data.elapsed_time.as_millis() as u64) / extra_data.target_time_per_block; + let received_blocks = delta.body_counts.max(delta.header_counts); + + if !self.update_sync_rate_window(received_blocks, expected_blocks) { + // Don't process the sync rule if the window doesn't have enough samples to filter out noise + return; + } + + let rate: f64 = + (self.total_received_blocks.load(Ordering::SeqCst) as f64) / (self.total_expected_blocks.load(Ordering::SeqCst) as f64); + + // Finality point is considered "recent" if it is within 3 finality durations from the current time + let is_finality_recent = extra_data.finality_point_timestamp >= unix_now().saturating_sub(extra_data.finality_duration * 3); + + trace!( + "Sync rate: {:.2} | Finality point recent: {} | Elapsed time: {}s | Connected: {} | Found/Expected blocks: {}/{}", + rate, + is_finality_recent, + extra_data.elapsed_time.as_secs(), + extra_data.has_sufficient_peer_connectivity, + delta.body_counts, + expected_blocks, + ); + + if is_finality_recent && rate < SYNC_RATE_THRESHOLD { + // if sync rate rule conditions are met: + if let Ok(false) = self.use_sync_rate_rule.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed) { + warn!("Sync rate {:.2} is below threshold: {}", rate, SYNC_RATE_THRESHOLD); + } + } else { + // else when sync rate conditions are not met: + if let Ok(true) = self.use_sync_rate_rule.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed) { + if !is_finality_recent { + warn!("Sync rate {:.2} recovered: {} by entering IBD", rate, SYNC_RATE_THRESHOLD); + } else { + warn!("Sync rate {:.2} recovered: {}", rate, SYNC_RATE_THRESHOLD); + } + } else if !is_finality_recent { + trace!("Finality period is old. 
Timestamp: {}. Sync rate: {:.2}", extra_data.finality_point_timestamp, rate); + } + } + } +} diff --git a/protocol/p2p/src/convert/messages.rs b/protocol/p2p/src/convert/messages.rs index fd695ec0cd..cfe90fd6d0 100644 --- a/protocol/p2p/src/convert/messages.rs +++ b/protocol/p2p/src/convert/messages.rs @@ -15,7 +15,7 @@ use kaspa_consensus_core::{ use kaspa_hashes::Hash; use kaspa_utils::networking::{IpAddress, PeerId}; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; // ---------------------------------------------------------------------------- // consensus_core to protowire @@ -85,7 +85,23 @@ impl TryFrom for (Option, O impl TryFrom for PruningPointProof { type Error = ConversionError; fn try_from(msg: protowire::PruningPointProofMessage) -> Result { - msg.headers.into_iter().map(|v| v.try_into()).collect() + // The pruning proof can contain many duplicate headers (across levels), so we use a local cache in order + // to make sure we hold a single Arc per header + let mut cache: HashMap> = HashMap::with_capacity(4000); + msg.headers + .into_iter() + .map(|level| { + level + .headers + .into_iter() + .map(|x| { + let header: Header = x.try_into()?; + // Clone the existing Arc if found + Ok(cache.entry(header.hash).or_insert_with(|| Arc::new(header)).clone()) + }) + .collect() + }) + .collect() } } diff --git a/protocol/p2p/src/core/hub.rs b/protocol/p2p/src/core/hub.rs index a93f88b677..752efcf5de 100644 --- a/protocol/p2p/src/core/hub.rs +++ b/protocol/p2p/src/core/hub.rs @@ -39,12 +39,12 @@ impl Hub { HubEvent::NewPeer(new_router) => { // If peer is outbound then connection initialization was already performed as part of the connect logic if new_router.is_outbound() { - info!("P2P Connected to outgoing peer {}", new_router); + info!("P2P Connected to outgoing peer {} (outbound: {})", new_router, self.peers_query(true) + 1); self.insert_new_router(new_router).await; } else { match initializer.initialize_connection(new_router.clone()).await { 
Ok(()) => { - info!("P2P Connected to incoming peer {}", new_router); + info!("P2P Connected to incoming peer {} (inbound: {})", new_router, self.peers_query(false) + 1); self.insert_new_router(new_router).await; } Err(err) => { @@ -182,6 +182,11 @@ impl Hub { self.peers.read().len() } + /// Returns the number of outbound/inbound active peers (depending on the `outbound` argument) + pub fn peers_query(&self, outbound: bool) -> usize { + self.peers.read().values().filter(|r| r.is_outbound() == outbound).count() + } + /// Returns whether there are currently active peers pub fn has_peers(&self) -> bool { !self.peers.read().is_empty() diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index 495e681e9a..e9d30d4413 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -4,7 +4,7 @@ use clap::{Arg, ArgAction, Command}; use itertools::Itertools; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ - config::params::{TESTNET11_PARAMS, TESTNET_PARAMS}, + config::params::TESTNET_PARAMS, constants::{SOMPI_PER_KASPA, TX_VERSION}, sign::sign, subnets::SUBNETWORK_ID_NATIVE, @@ -213,8 +213,8 @@ async fn main() { let info = rpc_client.get_block_dag_info().await.expect("Failed to get block dag info."); let coinbase_maturity = match info.network.suffix { - Some(11) => TESTNET11_PARAMS.coinbase_maturity, - None | Some(_) => TESTNET_PARAMS.coinbase_maturity, + Some(11) => panic!("TN11 is not supported on this version"), + None | Some(_) => TESTNET_PARAMS.coinbase_maturity().upper_bound(), }; info!( "Node block-DAG info: \n\tNetwork: {}, \n\tBlock count: {}, \n\tHeader count: {}, \n\tDifficulty: {}, diff --git a/rpc/grpc/server/src/service.rs b/rpc/grpc/server/src/service.rs index 7d810bf974..0dd4bb8972 100644 --- a/rpc/grpc/server/src/service.rs +++ b/rpc/grpc/server/src/service.rs @@ -64,7 +64,7 @@ impl AsyncService for GrpcService { let manager = Manager::new(self.rpc_max_clients); let grpc_adaptor = Adaptor::server( self.net_address, - 
self.config.bps(), + self.config.bps().upper_bound(), manager, self.core_service.clone(), self.core_service.notifier(), diff --git a/rpc/service/Cargo.toml b/rpc/service/Cargo.toml index 54e9764088..9da3b10702 100644 --- a/rpc/service/Cargo.toml +++ b/rpc/service/Cargo.toml @@ -22,6 +22,7 @@ kaspa-mining.workspace = true kaspa-notify.workspace = true kaspa-p2p-flows.workspace = true kaspa-p2p-lib.workspace = true +kaspa-p2p-mining.workspace = true kaspa-perf-monitor.workspace = true kaspa-rpc-core.workspace = true kaspa-txscript.workspace = true diff --git a/rpc/service/src/converter/consensus.rs b/rpc/service/src/converter/consensus.rs index c744300e52..5080280476 100644 --- a/rpc/service/src/converter/consensus.rs +++ b/rpc/service/src/converter/consensus.rs @@ -125,7 +125,7 @@ impl ConsensusConverter { let verbose_data = Some(RpcTransactionVerboseData { transaction_id: transaction.id(), hash: hash(transaction, false), - compute_mass: consensus.calculate_transaction_compute_mass(transaction), + compute_mass: consensus.calculate_transaction_non_contextual_masses(transaction).compute_mass, // TODO: make block_hash an option block_hash: header.map_or_else(RpcHash::default, |x| x.hash), block_time: header.map_or(0, |x| x.timestamp), diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs index c8c40c7707..9c2e7ddb10 100644 --- a/rpc/service/src/service.rs +++ b/rpc/service/src/service.rs @@ -3,9 +3,9 @@ use super::collector::{CollectorFromConsensus, CollectorFromIndex}; use crate::converter::feerate_estimate::{FeeEstimateConverter, FeeEstimateVerboseConverter}; use crate::converter::{consensus::ConsensusConverter, index::IndexConverter, protocol::ProtocolConverter}; -use crate::service::NetworkType::{Mainnet, Testnet}; use async_trait::async_trait; use kaspa_consensus_core::api::counters::ProcessingCounters; +use kaspa_consensus_core::daa_score_timestamp::DaaScoreTimestamp; use kaspa_consensus_core::errors::block::RuleError; use 
kaspa_consensus_core::utxo::utxo_inquirer::UtxoInquirerError; use kaspa_consensus_core::{ @@ -53,6 +53,7 @@ use kaspa_notify::{ }; use kaspa_p2p_flows::flow_context::FlowContext; use kaspa_p2p_lib::common::ProtocolError; +use kaspa_p2p_mining::rule_engine::MiningRuleEngine; use kaspa_perf_monitor::{counters::CountersSnapshot, Monitor as PerfMonitor}; use kaspa_rpc_core::{ api::{ @@ -119,6 +120,7 @@ pub struct RpcCoreService { system_info: SystemInfo, fee_estimate_cache: ExpiringCache, fee_estimate_verbose_cache: ExpiringCache>, + mining_rule_engine: Arc, } const RPC_CORE: &str = "rpc-core"; @@ -144,6 +146,7 @@ impl RpcCoreService { p2p_tower_counters: Arc, grpc_tower_counters: Arc, system_info: SystemInfo, + mining_rule_engine: Arc, ) -> Self { // This notifier UTXOs subscription granularity to index-processor or consensus notifier let policies = match index_notifier { @@ -222,6 +225,7 @@ impl RpcCoreService { system_info, fee_estimate_cache: ExpiringCache::new(Duration::from_millis(500), Duration::from_millis(1000)), fee_estimate_verbose_cache: ExpiringCache::new(Duration::from_millis(500), Duration::from_millis(1000)), + mining_rule_engine, } } @@ -270,11 +274,6 @@ impl RpcCoreService { .unwrap_or_default() } - fn has_sufficient_peer_connectivity(&self) -> bool { - // Other network types can be used in an isolated environment without peers - !matches!(self.flow_context.config.net.network_type, Mainnet | Testnet) || self.flow_context.hub().has_peers() - } - fn extract_tx_query(&self, filter_transaction_pool: bool, include_orphan_pool: bool) -> RpcResult { match (filter_transaction_pool, include_orphan_pool) { (true, true) => Ok(TransactionQuery::OrphansOnly), @@ -295,9 +294,10 @@ impl RpcApi for RpcCoreService { request: SubmitBlockRequest, ) -> RpcResult { let session = self.consensus_manager.consensus().unguarded_session(); + let sink_daa_score_timestamp = session.async_get_sink_daa_score_timestamp().await; // TODO: consider adding an error field to 
SubmitBlockReport to document both the report and error fields - let is_synced: bool = self.has_sufficient_peer_connectivity() && session.async_is_nearly_synced().await; + let is_synced: bool = self.mining_rule_engine.should_mine(sink_daa_score_timestamp); if !self.config.enable_unsynced_mining && !is_synced { // error = "Block not submitted - node is not synced" @@ -318,8 +318,14 @@ impl RpcApi for RpcCoreService { // A simple heuristic check which signals that the mined block is out of date // and should not be accepted unless user explicitly requests - let daa_window_block_duration = self.config.daa_window_duration_in_blocks(virtual_daa_score); - if virtual_daa_score > daa_window_block_duration && block.header.daa_score < virtual_daa_score - daa_window_block_duration + // + // [Crescendo]: switch to the larger duration only after a full window with the new duration is reached post activation + let difficulty_window_duration = self + .config + .difficulty_window_duration_in_block_units() + .get(virtual_daa_score.saturating_sub(self.config.difficulty_window_duration_in_block_units().after())); + if virtual_daa_score > difficulty_window_duration + && block.header.daa_score < virtual_daa_score - difficulty_window_duration { // error = format!("Block rejected. 
Reason: block DAA score {0} is too far behind virtual's DAA score {1}", block.header.daa_score, virtual_daa_score) return Ok(SubmitBlockResponse { report: SubmitBlockReport::Reject(SubmitBlockRejectReason::BlockInvalid) }); @@ -328,8 +334,13 @@ impl RpcApi for RpcCoreService { trace!("incoming SubmitBlockRequest for block {}", hash); match self.flow_context.submit_rpc_block(&session, block.clone()).await { - Ok(_) => Ok(SubmitBlockResponse { report: SubmitBlockReport::Success }), + Ok(_) => { + self.processing_counters.submit_block_success_count.fetch_add(1, Ordering::Relaxed); + Ok(SubmitBlockResponse { report: SubmitBlockReport::Success }) + } Err(ProtocolError::RuleError(RuleError::BadMerkleRoot(h1, h2))) => { + // Count the number of bad merkle root errors as this may trigger NoTransactions rule + self.processing_counters.submit_block_bad_merkle_root_count.fetch_add(1, Ordering::Relaxed); warn!( "The RPC submitted block triggered a {} error: {}. NOTE: This error usually indicates an RPC conversion error between the node and the miner. 
If you are on TN11 this is likely to reflect using a NON-SUPPORTED miner.", @@ -379,11 +390,12 @@ NOTE: This error usually indicates an RPC conversion error between the node and return Err(RpcError::CoinbasePayloadLengthAboveMax(self.config.max_coinbase_payload_len)); } - let is_nearly_synced = - self.config.is_nearly_synced(block_template.selected_parent_timestamp, block_template.selected_parent_daa_score); Ok(GetBlockTemplateResponse { block: block_template.block.into(), - is_synced: self.has_sufficient_peer_connectivity() && is_nearly_synced, + is_synced: self.mining_rule_engine.should_mine(DaaScoreTimestamp { + timestamp: block_template.selected_parent_timestamp, + daa_score: block_template.selected_parent_daa_score, + }), }) } @@ -439,7 +451,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and // We use +1 because low_hash is also returned // max_blocks MUST be >= mergeset_size_limit + 1 - let max_blocks = self.config.mergeset_size_limit as usize + 1; + let max_blocks = self.config.mergeset_size_limit().upper_bound() as usize + 1; let (block_hashes, high_hash) = session.async_get_hashes_between(low_hash, sink_hash, max_blocks).await?; // If the high hash is equal to sink it means get_hashes_between didn't skip any hashes, and @@ -466,13 +478,14 @@ NOTE: This error usually indicates an RPC conversion error between the node and } async fn get_info_call(&self, _connection: Option<&DynRpcConnection>, _request: GetInfoRequest) -> RpcResult { - let is_nearly_synced = self.consensus_manager.consensus().unguarded_session().async_is_nearly_synced().await; + let sink_daa_score_timestamp = + self.consensus_manager.consensus().unguarded_session().async_get_sink_daa_score_timestamp().await; Ok(GetInfoResponse { p2p_id: self.flow_context.node_id.to_string(), mempool_size: self.mining_manager.transaction_count_sample(TransactionQuery::TransactionsOnly), server_version: version().to_string(), is_utxo_indexed: self.config.utxoindex, - is_synced: 
self.has_sufficient_peer_connectivity() && is_nearly_synced, + is_synced: self.mining_rule_engine.should_mine(sink_daa_score_timestamp), has_notify_command: true, has_message_id: true, }) @@ -616,7 +629,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and // this bounds by number of merged blocks, if include_accepted_transactions = true // else it returns the batch_size amount on pure chain blocks. // Note: batch_size does not bound removed chain blocks, only added chain blocks. - let batch_size = (self.config.mergeset_size_limit * 10) as usize; + let batch_size = (self.config.mergeset_size_limit().upper_bound() * 10) as usize; let mut virtual_chain_batch = session.async_get_virtual_chain_from_block(request.start_hash, Some(batch_size)).await?; let accepted_transaction_ids = if request.include_accepted_transaction_ids { let accepted_transaction_ids = self @@ -718,6 +731,12 @@ NOTE: This error usually indicates an RPC conversion error between the node and let mut header_idx = 0; let mut req_idx = 0; + // TODO (relaxed; post-HF): the below interpolation should remain valid also after the hardfork as long + // as the two pruning points used are both either from before activation or after. The only exception are + // the two pruning points before and after activation. However this inaccuracy can be considered negligible. + // Alternatively, we can remedy this post the HF by manually adding a (DAA score, timestamp) point from the + // moment of activation. 
+ // Loop runs at O(n + m) where n = # pp headers, m = # requested daa_scores // Loop will always end because in the worst case the last header with daa_score = 0 (the genesis) // will cause every remaining requested daa_score to be "found in range" @@ -732,7 +751,9 @@ NOTE: This error usually indicates an RPC conversion error between the node and // For daa_score later than the last header, we estimate in milliseconds based on the difference let time_adjustment = if header_idx == 0 { // estimate milliseconds = (daa_score * target_time_per_block) - (curr_daa_score - header.daa_score).checked_mul(self.config.target_time_per_block).unwrap_or(u64::MAX) + (curr_daa_score - header.daa_score) + .checked_mul(self.config.target_time_per_block().get(header.daa_score)) + .unwrap_or(u64::MAX) } else { // "next" header is the one that we processed last iteration let next_header = &headers[header_idx - 1]; @@ -766,8 +787,16 @@ NOTE: This error usually indicates an RPC conversion error between the node and _request: GetFeeEstimateRequest, ) -> RpcResult { let mining_manager = self.mining_manager.clone(); - let estimate = - self.fee_estimate_cache.get(async move { mining_manager.get_realtime_feerate_estimations().await.into_rpc() }).await; + let consensus_manager = self.consensus_manager.clone(); + let estimate = self + .fee_estimate_cache + .get(async move { + mining_manager + .get_realtime_feerate_estimations(consensus_manager.consensus().unguarded_session().get_virtual_daa_score()) + .await + .into_rpc() + }) + .await; Ok(GetFeeEstimateResponse { estimate }) } @@ -864,8 +893,8 @@ NOTE: This error usually indicates an RPC conversion error between the node and if !self.config.unsafe_rpc && request.window_size > MAX_SAFE_WINDOW_SIZE { return Err(RpcError::WindowSizeExceedingMaximum(request.window_size, MAX_SAFE_WINDOW_SIZE)); } - if request.window_size as u64 > self.config.pruning_depth { - return Err(RpcError::WindowSizeExceedingPruningDepth(request.window_size, 
self.config.pruning_depth)); + if request.window_size as u64 > self.config.pruning_depth().lower_bound() { + return Err(RpcError::WindowSizeExceedingPruningDepth(request.window_size, self.config.prior_pruning_depth)); } // In the previous golang implementation the convention for virtual was the following const. @@ -1120,7 +1149,8 @@ NOTE: This error usually indicates an RPC conversion error between the node and _request: GetServerInfoRequest, ) -> RpcResult { let session = self.consensus_manager.consensus().unguarded_session(); - let is_synced: bool = self.has_sufficient_peer_connectivity() && session.async_is_nearly_synced().await; + let sink_daa_score_timestamp = session.async_get_sink_daa_score_timestamp().await; + let is_synced: bool = self.mining_rule_engine.should_mine(sink_daa_score_timestamp); let virtual_daa_score = session.get_virtual_daa_score(); Ok(GetServerInfoResponse { @@ -1139,8 +1169,9 @@ NOTE: This error usually indicates an RPC conversion error between the node and _connection: Option<&DynRpcConnection>, _request: GetSyncStatusRequest, ) -> RpcResult { - let session = self.consensus_manager.consensus().unguarded_session(); - let is_synced: bool = self.has_sufficient_peer_connectivity() && session.async_is_nearly_synced().await; + let sink_daa_score_timestamp = + self.consensus_manager.consensus().unguarded_session().async_get_sink_daa_score_timestamp().await; + let is_synced: bool = self.mining_rule_engine.should_mine(sink_daa_score_timestamp); Ok(GetSyncStatusResponse { is_synced }) } diff --git a/simpa/src/main.rs b/simpa/src/main.rs index c35c0c640e..843f1dffd9 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -13,11 +13,11 @@ use kaspa_consensus::{ headers::HeaderStoreReader, relations::RelationsStoreReader, }, - params::{ForkActivation, Params, Testnet11Bps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, TESTNET11_PARAMS}, + params::{ForkActivation, Params, TenBps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, SIMNET_PARAMS}, }; use 
kaspa_consensus_core::{ api::ConsensusApi, block::Block, blockstatus::BlockStatus, config::bps::calculate_ghostdag_k, errors::block::BlockProcessResult, - BlockHashSet, BlockLevel, HashMapCustomHasher, + mining_rules::MiningRules, BlockHashSet, BlockLevel, HashMapCustomHasher, }; use kaspa_consensus_notify::root::ConsensusNotificationRoot; use kaspa_core::{ @@ -189,11 +189,11 @@ fn main_impl(mut args: Args) { args.miners ); } - args.bps = if args.testnet11 { Testnet11Bps::bps() as f64 } else { args.bps }; - let mut params = if args.testnet11 { TESTNET11_PARAMS } else { DEVNET_PARAMS }; - params.storage_mass_activation = ForkActivation::new(400); + args.bps = if args.testnet11 { TenBps::bps() as f64 } else { args.bps }; + let mut params = if args.testnet11 { SIMNET_PARAMS } else { DEVNET_PARAMS }; + params.crescendo_activation = ForkActivation::always(); + params.crescendo.coinbase_maturity = 200; params.storage_mass_parameter = 10_000; - params.payload_activation = ForkActivation::always(); let mut builder = ConfigBuilder::new(params) .apply_args(|config| apply_args_to_consensus_params(&args, &mut config.params)) .apply_args(|config| apply_args_to_perf_params(&args, &mut config.perf)) @@ -235,6 +235,7 @@ fn main_impl(mut args: Args) { Default::default(), Default::default(), unix_now(), + Arc::new(MiningRules::default()), )); (consensus, lifetime) } else { @@ -256,6 +257,11 @@ fn main_impl(mut args: Args) { }; if args.test_pruning { + let hashes = topologically_ordered_hashes(&consensus, consensus.pruning_point()); + let num_blocks = hashes.len(); + let num_txs = print_stats(&consensus, &hashes, args.delay, args.bps, config.ghostdag_k().before()); + info!("There are {num_blocks} blocks with {num_txs} transactions overall above the current pruning point"); + consensus.validate_pruning_points(consensus.get_sink()).unwrap(); drop(consensus); return; } @@ -272,6 +278,7 @@ fn main_impl(mut args: Args) { Default::default(), Default::default(), unix_now(), + 
Arc::new(MiningRules::default()), )); let handles2 = consensus2.run_processors(); if args.headers_first { @@ -293,47 +300,58 @@ fn apply_args_to_consensus_params(args: &Args, params: &mut Params) { if args.testnet11 { info!( "Using kaspa-testnet-11 configuration (GHOSTDAG K={}, DAA window size={}, Median time window size={})", - params.ghostdag_k, - params.difficulty_window_size(0), - params.past_median_time_window_size(0), + params.ghostdag_k().before(), + params.difficulty_window_size().before(), + params.past_median_time_window_size().before(), ); } else { let max_delay = args.delay.max(NETWORK_DELAY_BOUND as f64); - let k = u64::max(calculate_ghostdag_k(2.0 * max_delay * args.bps, 0.05), params.ghostdag_k as u64); + let k = u64::max(calculate_ghostdag_k(2.0 * max_delay * args.bps, 0.05), params.ghostdag_k().before() as u64); let k = u64::min(k, KType::MAX as u64) as KType; // Clamp to KType::MAX - params.ghostdag_k = k; - params.mergeset_size_limit = k as u64 * 10; - params.max_block_parents = u8::max((0.66 * k as f64) as u8, 10); - params.target_time_per_block = (1000.0 / args.bps) as u64; - params.merge_depth = (params.merge_depth as f64 * args.bps) as u64; - params.coinbase_maturity = (params.coinbase_maturity as f64 * f64::max(1.0, args.bps * args.delay * 0.25)) as u64; + params.prior_ghostdag_k = k; + params.prior_mergeset_size_limit = k as u64 * 10; + params.prior_max_block_parents = u8::max((0.66 * k as f64) as u8, 10); + params.prior_target_time_per_block = (1000.0 / args.bps) as u64; + params.prior_merge_depth = (params.prior_merge_depth as f64 * args.bps) as u64; + params.prior_coinbase_maturity = (params.prior_coinbase_maturity as f64 * f64::max(1.0, args.bps * args.delay * 0.25)) as u64; if args.daa_legacy { // Scale DAA and median-time windows linearly with BPS - params.sampling_activation = ForkActivation::never(); - params.legacy_timestamp_deviation_tolerance = (params.legacy_timestamp_deviation_tolerance as f64 * args.bps) as u64; - 
params.legacy_difficulty_window_size = (params.legacy_difficulty_window_size as f64 * args.bps) as usize; + params.crescendo_activation = ForkActivation::never(); + params.timestamp_deviation_tolerance = (params.timestamp_deviation_tolerance as f64 * args.bps) as u64; + params.prior_difficulty_window_size = (params.prior_difficulty_window_size as f64 * args.bps) as usize; } else { // Use the new sampling algorithms - params.sampling_activation = ForkActivation::always(); - params.past_median_time_sample_rate = (10.0 * args.bps) as u64; - params.new_timestamp_deviation_tolerance = (600.0 * args.bps) as u64; - params.difficulty_sample_rate = (2.0 * args.bps) as u64; + params.crescendo_activation = ForkActivation::always(); + params.timestamp_deviation_tolerance = (600.0 * args.bps) as u64; + params.crescendo.past_median_time_sample_rate = (10.0 * args.bps) as u64; + params.crescendo.difficulty_sample_rate = (2.0 * args.bps) as u64; } - info!("2Dλ={}, GHOSTDAG K={}, DAA window size={}", 2.0 * args.delay * args.bps, k, params.difficulty_window_size(0)); + info!("2Dλ={}, GHOSTDAG K={}, DAA window size={}", 2.0 * args.delay * args.bps, k, params.difficulty_window_size().before()); } if args.test_pruning { + params.crescendo_activation = ForkActivation::new(1250.min(args.target_blocks.map(|x| x / 2).unwrap_or(900))); + params.pruning_proof_m = 16; - params.legacy_difficulty_window_size = 64; - params.legacy_timestamp_deviation_tolerance = 16; - params.new_timestamp_deviation_tolerance = 16; - params.sampled_difficulty_window_size = params.sampled_difficulty_window_size.min(32); - params.finality_depth = 128; - params.merge_depth = 128; - params.mergeset_size_limit = 32; - params.pruning_depth = params.anticone_finalization_depth(); - info!("Setting pruning depth to {}", params.pruning_depth); + params.min_difficulty_window_size = 16; + params.prior_difficulty_window_size = 64; + params.timestamp_deviation_tolerance = 16; + params.crescendo.sampled_difficulty_window_size = 
params.crescendo.sampled_difficulty_window_size.min(32); + + params.prior_ghostdag_k = 10; + params.prior_finality_depth = 100; + params.prior_merge_depth = 64; + params.prior_mergeset_size_limit = 32; + params.prior_pruning_depth = 100 * 2 + 50; + + params.crescendo.ghostdag_k = 20; + params.crescendo.finality_depth = 100 * 2; + params.crescendo.merge_depth = 64 * 2; + params.crescendo.mergeset_size_limit = 32 * 2; + params.crescendo.pruning_depth = 100 * 2 * 2 + 50; + + info!("Setting pruning depth to {:?}", params.pruning_depth()); } } @@ -349,7 +367,7 @@ fn apply_args_to_perf_params(args: &Args, perf_params: &mut PerfParams) { async fn validate(src_consensus: &Consensus, dst_consensus: &Consensus, params: &Params, delay: f64, bps: f64, header_only: bool) { let hashes = topologically_ordered_hashes(src_consensus, params.genesis.hash); let num_blocks = hashes.len(); - let num_txs = print_stats(src_consensus, &hashes, delay, bps, params.ghostdag_k); + let num_txs = print_stats(src_consensus, &hashes, delay, bps, params.ghostdag_k().before()); if header_only { info!("Validating {num_blocks} headers..."); } else { diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index 958b4799e3..6848f72e27 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -157,7 +157,7 @@ impl Miner { .into_par_iter() .map(|mutable_tx| { let signed_tx = sign(mutable_tx, schnorr_key); - let mass = self.mass_calculator.calc_tx_overall_mass(&signed_tx.as_verifiable(), None).unwrap(); + let mass = self.mass_calculator.calc_contextual_masses(&signed_tx.as_verifiable()).unwrap().storage_mass; signed_tx.tx.set_mass(mass); let mut signed_tx = signed_tx.tx; signed_tx.finalize(); @@ -179,7 +179,8 @@ impl Miner { ) -> Option { let entry = utxo_view.get(&outpoint)?; if entry.amount < 2 - || (entry.is_coinbase && (virtual_daa_score as i64 - entry.block_daa_score as i64) <= self.params.coinbase_maturity as i64) + || (entry.is_coinbase + && (virtual_daa_score 
as i64 - entry.block_daa_score as i64) <= self.params.coinbase_maturity().upper_bound() as i64) { return None; } diff --git a/simpa/src/simulator/network.rs b/simpa/src/simulator/network.rs index 79ac6fad75..75e8ba121d 100644 --- a/simpa/src/simulator/network.rs +++ b/simpa/src/simulator/network.rs @@ -1,4 +1,5 @@ use async_channel::unbounded; +use kaspa_consensus_core::mining_rules::MiningRules; use kaspa_consensus_notify::root::ConsensusNotificationRoot; use kaspa_core::time::unix_now; use std::sync::Arc; @@ -86,6 +87,7 @@ impl KaspaNetworkSimulator { Default::default(), Default::default(), unix_now(), + Arc::new(MiningRules::default()), )); let handles = consensus.run_processors(); let (sk, pk) = secp.generate_keypair(&mut rng); diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index 719158c8b0..bb89f8b60c 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -17,20 +17,22 @@ use kaspa_consensus::model::stores::reachability::DbReachabilityStore; use kaspa_consensus::model::stores::relations::DbRelationsStore; use kaspa_consensus::model::stores::selected_chain::SelectedChainStoreReader; use kaspa_consensus::params::{ - ForkActivation, Params, DEVNET_PARAMS, MAINNET_PARAMS, MAX_DIFFICULTY_TARGET, MAX_DIFFICULTY_TARGET_AS_F64, + ForkActivation, Params, CRESCENDO, DEVNET_PARAMS, MAINNET_PARAMS, MAX_DIFFICULTY_TARGET, MAX_DIFFICULTY_TARGET_AS_F64, }; use kaspa_consensus::pipeline::monitor::ConsensusMonitor; use kaspa_consensus::pipeline::ProcessingCounters; use kaspa_consensus::processes::reachability::tests::{DagBlock, DagBuilder, StoreValidationExtensions}; use kaspa_consensus::processes::window::{WindowManager, WindowType}; +use kaspa_consensus_core::api::args::TransactionValidationArgs; use kaspa_consensus_core::api::{BlockValidationFutures, ConsensusApi}; use kaspa_consensus_core::block::Block; use 
kaspa_consensus_core::blockhash::new_unique; use kaspa_consensus_core::blockstatus::BlockStatus; use kaspa_consensus_core::coinbase::MinerData; -use kaspa_consensus_core::constants::{BLOCK_VERSION, SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER}; +use kaspa_consensus_core::constants::{BLOCK_VERSION, SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER, TRANSIENT_BYTE_TO_MASS_FACTOR}; use kaspa_consensus_core::errors::block::{BlockProcessResult, RuleError}; use kaspa_consensus_core::header::Header; +use kaspa_consensus_core::mining_rules::MiningRules; use kaspa_consensus_core::network::{NetworkId, NetworkType::Mainnet}; use kaspa_consensus_core::subnets::SubnetworkId; use kaspa_consensus_core::trusted::{ExternalGhostdagData, TrustedBlock}; @@ -45,6 +47,7 @@ use kaspa_core::task::tick::TickService; use kaspa_core::time::unix_now; use kaspa_database::utils::get_kaspa_tempdir; use kaspa_hashes::Hash; +use kaspa_utils::arc::ArcExtensions; use crate::common; use flate2::read::GzDecoder; @@ -262,8 +265,8 @@ async fn ghostdag_test() { .skip_proof_of_work() .edit_consensus_params(|p| { p.genesis.hash = string_to_hash(&test.genesis_id); - p.ghostdag_k = test.k; - p.min_difficulty_window_len = p.legacy_difficulty_window_size; + p.prior_ghostdag_k = test.k; + p.min_difficulty_window_size = p.prior_difficulty_window_size; }) .build(); let consensus = TestConsensus::new(&config); @@ -337,7 +340,7 @@ async fn block_window_test() { .skip_proof_of_work() .edit_consensus_params(|p| { p.genesis.hash = string_to_hash("A"); - p.ghostdag_k = 1; + p.prior_ghostdag_k = 1; }) .build(); let consensus = TestConsensus::new(&config); @@ -427,7 +430,7 @@ async fn header_in_isolation_validation_test() { block.header.hash = 2.into(); let now = unix_now(); - let block_ts = now + config.legacy_timestamp_deviation_tolerance * config.target_time_per_block + 2000; + let block_ts = now + config.timestamp_deviation_tolerance * config.prior_target_time_per_block + 2000; block.header.timestamp = block_ts; match 
consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::TimeTooFarIntoTheFuture(ts, _)) => { @@ -454,11 +457,11 @@ async fn header_in_isolation_validation_test() { { let mut block = block.clone(); block.header.hash = 4.into(); - block.header.parents_by_level[0] = (5..(config.max_block_parents + 6)).map(|x| (x as u64).into()).collect(); + block.header.parents_by_level[0] = (5..(config.prior_max_block_parents + 6)).map(|x| (x as u64).into()).collect(); match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::TooManyParents(num_parents, limit)) => { - assert_eq!((config.max_block_parents + 1) as usize, num_parents); - assert_eq!(limit, config.max_block_parents as usize); + assert_eq!((config.prior_max_block_parents + 1) as usize, num_parents); + assert_eq!(limit, config.prior_max_block_parents as usize); } res => { panic!("Unexpected result: {res:?}") @@ -563,7 +566,7 @@ async fn median_time_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.sampling_activation = ForkActivation::never(); + p.crescendo_activation = ForkActivation::never(); }) .build(), }, @@ -572,10 +575,10 @@ async fn median_time_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.sampling_activation = ForkActivation::always(); - p.new_timestamp_deviation_tolerance = 120; - p.past_median_time_sample_rate = 3; - p.past_median_time_sampled_window_size = (2 * 120 - 1) / 3; + p.crescendo_activation = ForkActivation::always(); + p.timestamp_deviation_tolerance = 120; + p.crescendo.past_median_time_sample_rate = 3; + p.crescendo.past_median_time_sampled_window_size = (2 * 120 - 1) / 3; }) .build(), }, @@ -585,8 +588,9 @@ async fn median_time_test() { let consensus = TestConsensus::new(&test.config); let wait_handles = consensus.init(); - let num_blocks = test.config.past_median_time_window_size(0) as u64 * 
test.config.past_median_time_sample_rate(0); - let timestamp_deviation_tolerance = test.config.timestamp_deviation_tolerance(0); + let num_blocks = + test.config.past_median_time_window_size().before() as u64 * test.config.past_median_time_sample_rate().before(); + let timestamp_deviation_tolerance = test.config.timestamp_deviation_tolerance; for i in 1..(num_blocks + 1) { let parent = if i == 1 { test.config.genesis.hash } else { (i - 1).into() }; let mut block = consensus.build_block_with_parents(i.into(), vec![parent]); @@ -630,7 +634,7 @@ async fn mergeset_size_limit_test() { let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); - let num_blocks_per_chain = config.mergeset_size_limit + 1; + let num_blocks_per_chain = config.prior_mergeset_size_limit + 1; let mut tip1_hash = config.genesis.hash; for i in 1..(num_blocks_per_chain + 1) { @@ -649,8 +653,8 @@ async fn mergeset_size_limit_test() { let block = consensus.build_block_with_parents((3 * num_blocks_per_chain + 1).into(), vec![tip1_hash, tip2_hash]); match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::MergeSetTooBig(a, b)) => { - assert_eq!(a, config.mergeset_size_limit + 1); - assert_eq!(b, config.mergeset_size_limit); + assert_eq!(a, config.prior_mergeset_size_limit + 1); + assert_eq!(b, config.prior_mergeset_size_limit); } res => { panic!("Unexpected result: {res:?}") @@ -811,45 +815,37 @@ impl KaspadGoParams { dns_seeders: &[], net: NetworkId { network_type: Mainnet, suffix: None }, genesis: GENESIS, - ghostdag_k: self.K, - legacy_timestamp_deviation_tolerance: self.TimestampDeviationTolerance, - new_timestamp_deviation_tolerance: self.TimestampDeviationTolerance, - past_median_time_sample_rate: 1, - past_median_time_sampled_window_size: 2 * self.TimestampDeviationTolerance - 1, - target_time_per_block: self.TargetTimePerBlock / 1_000_000, - sampling_activation: ForkActivation::never(), - max_block_parents: 
self.MaxBlockParents, + prior_ghostdag_k: self.K, + timestamp_deviation_tolerance: self.TimestampDeviationTolerance, + prior_target_time_per_block: self.TargetTimePerBlock / 1_000_000, + prior_max_block_parents: self.MaxBlockParents, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - difficulty_sample_rate: 1, - sampled_difficulty_window_size: self.DifficultyAdjustmentWindowSize, - legacy_difficulty_window_size: self.DifficultyAdjustmentWindowSize, - min_difficulty_window_len: self.DifficultyAdjustmentWindowSize, - mergeset_size_limit: self.MergeSetSizeLimit, - merge_depth: self.MergeDepth, - finality_depth, - pruning_depth: 2 * finality_depth + 4 * self.MergeSetSizeLimit * self.K as u64 + 2 * self.K as u64 + 2, + prior_difficulty_window_size: self.DifficultyAdjustmentWindowSize, + min_difficulty_window_size: self.DifficultyAdjustmentWindowSize, + prior_mergeset_size_limit: self.MergeSetSizeLimit, + prior_merge_depth: self.MergeDepth, + prior_finality_depth: finality_depth, + prior_pruning_depth: 2 * finality_depth + 4 * self.MergeSetSizeLimit * self.K as u64 + 2 * self.K as u64 + 2, coinbase_payload_script_public_key_max_len: self.CoinbasePayloadScriptPublicKeyMaxLength, max_coinbase_payload_len: self.MaxCoinbasePayloadLength, - max_tx_inputs: MAINNET_PARAMS.max_tx_inputs, - max_tx_outputs: MAINNET_PARAMS.max_tx_outputs, - max_signature_script_len: MAINNET_PARAMS.max_signature_script_len, - max_script_public_key_len: MAINNET_PARAMS.max_script_public_key_len, + prior_max_tx_inputs: MAINNET_PARAMS.prior_max_tx_inputs, + prior_max_tx_outputs: MAINNET_PARAMS.prior_max_tx_outputs, + prior_max_signature_script_len: MAINNET_PARAMS.prior_max_signature_script_len, + prior_max_script_public_key_len: MAINNET_PARAMS.prior_max_script_public_key_len, mass_per_tx_byte: self.MassPerTxByte, mass_per_script_pub_key_byte: self.MassPerScriptPubKeyByte, mass_per_sig_op: self.MassPerSigOp, max_block_mass: self.MaxBlockMass, 
storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), deflationary_phase_daa_score: self.DeflationaryPhaseDaaScore, pre_deflationary_phase_base_subsidy: self.PreDeflationaryPhaseBaseSubsidy, - coinbase_maturity: MAINNET_PARAMS.coinbase_maturity, + prior_coinbase_maturity: MAINNET_PARAMS.prior_coinbase_maturity, skip_proof_of_work: self.SkipProofOfWork, max_block_level: self.MaxBlockLevel, pruning_proof_m: self.PruningProofM, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::never(), } } } @@ -935,13 +931,13 @@ async fn json_test(file_path: &str, concurrency: bool) { let genesis_block = json_line_to_block(second_line); params.genesis = (genesis_block.header.as_ref(), DEVNET_PARAMS.genesis.coinbase_payload).into(); } - params.min_difficulty_window_len = params.legacy_difficulty_window_size; + params.min_difficulty_window_size = params.prior_difficulty_window_size; params } else { let genesis_block = json_line_to_block(first_line); let mut params = DEVNET_PARAMS; params.genesis = (genesis_block.header.as_ref(), params.genesis.coinbase_payload).into(); - params.min_difficulty_window_len = params.legacy_difficulty_window_size; + params.min_difficulty_window_size = params.prior_difficulty_window_size; params }; @@ -996,7 +992,7 @@ async fn json_test(file_path: &str, concurrency: bool) { gzip_file_lines(&main_path.join("past-pps.json.gz")).map(|line| json_line_to_block(line).header).collect_vec(); let pruning_point = past_pruning_points.last().unwrap().hash; - tc.import_pruning_points(past_pruning_points); + tc.import_pruning_points(past_pruning_points).unwrap(); info!("Processing {} trusted blocks...", trusted_blocks.len()); for tb in trusted_blocks.into_iter() { @@ -1267,18 +1263,21 @@ async fn bounded_merge_depth_test() { let config = ConfigBuilder::new(DEVNET_PARAMS) 
.skip_proof_of_work() .edit_consensus_params(|p| { - p.ghostdag_k = 5; - p.merge_depth = 7; + p.prior_ghostdag_k = 5; + p.prior_merge_depth = 7; }) .build(); - assert!((config.ghostdag_k as u64) < config.merge_depth, "K must be smaller than merge depth for this test to run"); + assert!( + (config.ghostdag_k().before() as u64) < config.prior_merge_depth, + "K must be smaller than merge depth for this test to run" + ); let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); let mut selected_chain = vec![config.genesis.hash]; - for i in 1..(config.merge_depth + 3) { + for i in 1..(config.prior_merge_depth + 3) { let hash: Hash = (i + 1).into(); consensus.add_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); selected_chain.push(hash); @@ -1286,8 +1285,8 @@ async fn bounded_merge_depth_test() { // The length of block_chain_2 is shorter by one than selected_chain, so selected_chain will remain the selected chain. let mut block_chain_2 = vec![config.genesis.hash]; - for i in 1..(config.merge_depth + 2) { - let hash: Hash = (i + config.merge_depth + 3).into(); + for i in 1..(config.prior_merge_depth + 2) { + let hash: Hash = (i + config.prior_merge_depth + 3).into(); consensus.add_block_with_parents(hash, vec![*block_chain_2.last().unwrap()]).await.unwrap(); block_chain_2.push(hash); } @@ -1323,7 +1322,7 @@ async fn bounded_merge_depth_test() { .unwrap(); // We extend the selected chain until kosherizing_hash will be red from the virtual POV. 
- for i in 0..config.ghostdag_k { + for i in 0..config.ghostdag_k().before() { let hash = Hash::from_u64_word((i + 1) as u64 * 1000); consensus.add_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); selected_chain.push(hash); @@ -1347,7 +1346,7 @@ async fn difficulty_test() { async fn add_block(consensus: &TestConsensus, block_time: Option, parents: Vec) -> Header { let selected_parent = consensus.ghostdag_manager().find_selected_parent(parents.iter().copied()); let block_time = block_time.unwrap_or_else(|| { - consensus.headers_store().get_timestamp(selected_parent).unwrap() + consensus.params().target_time_per_block(0) + consensus.headers_store().get_timestamp(selected_parent).unwrap() + consensus.params().prior_target_time_per_block }); let mut header = consensus.build_header_with_parents(new_unique(), parents); header.timestamp = block_time; @@ -1370,7 +1369,8 @@ async fn difficulty_test() { } fn full_window_bits(consensus: &TestConsensus, hash: Hash) -> u32 { - let window_size = consensus.params().difficulty_window_size(0) * consensus.params().difficulty_sample_rate(0) as usize; + let window_size = + consensus.params().difficulty_window_size().before() * consensus.params().difficulty_sample_rate().before() as usize; let ghostdag_data = &consensus.ghostdag_store().get_data(hash).unwrap(); let window = consensus.window_manager().block_window(ghostdag_data, WindowType::VaryingWindow(window_size)).unwrap(); assert_eq!(window.blocks.len(), window_size); @@ -1385,12 +1385,12 @@ async fn difficulty_test() { } const FULL_WINDOW_SIZE: usize = 90; - const SAMPLED_WINDOW_SIZE: usize = 11; + const SAMPLED_WINDOW_SIZE: u64 = 11; const SAMPLE_RATE: u64 = 6; const PMT_DEVIATION_TOLERANCE: u64 = 20; const PMT_SAMPLE_RATE: u64 = 3; const PMT_SAMPLED_WINDOW_SIZE: u64 = 13; - const HIGH_BPS_SAMPLED_WINDOW_SIZE: usize = 12; + const HIGH_BPS_SAMPLED_WINDOW_SIZE: u64 = 12; const HIGH_BPS: u64 = 4; let tests = vec![ Test { @@ -1399,12 +1399,12 @@ async 
fn difficulty_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.ghostdag_k = 1; - p.legacy_difficulty_window_size = FULL_WINDOW_SIZE; - p.sampling_activation = ForkActivation::never(); + p.prior_ghostdag_k = 1; + p.prior_difficulty_window_size = FULL_WINDOW_SIZE; + p.crescendo_activation = ForkActivation::never(); // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window - p.legacy_timestamp_deviation_tolerance = 60; + p.timestamp_deviation_tolerance = 60; }) .build(), }, @@ -1414,15 +1414,17 @@ async fn difficulty_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.ghostdag_k = 1; - p.sampled_difficulty_window_size = SAMPLED_WINDOW_SIZE; - p.difficulty_sample_rate = SAMPLE_RATE; - p.sampling_activation = ForkActivation::always(); + p.prior_ghostdag_k = 1; + p.crescendo.ghostdag_k = 1; + p.crescendo.sampled_difficulty_window_size = SAMPLED_WINDOW_SIZE; + p.crescendo.difficulty_sample_rate = SAMPLE_RATE; + p.crescendo_activation = ForkActivation::always(); + p.prior_target_time_per_block = p.crescendo.target_time_per_block; // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window - p.past_median_time_sample_rate = PMT_SAMPLE_RATE; - p.past_median_time_sampled_window_size = PMT_SAMPLED_WINDOW_SIZE; - p.new_timestamp_deviation_tolerance = PMT_DEVIATION_TOLERANCE; + p.crescendo.past_median_time_sample_rate = PMT_SAMPLE_RATE; + p.crescendo.past_median_time_sampled_window_size = PMT_SAMPLED_WINDOW_SIZE; + p.timestamp_deviation_tolerance = PMT_DEVIATION_TOLERANCE; }) .build(), }, @@ -1432,16 +1434,18 @@ async fn difficulty_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.ghostdag_k = 1; - 
p.target_time_per_block /= HIGH_BPS; - p.sampled_difficulty_window_size = HIGH_BPS_SAMPLED_WINDOW_SIZE; - p.difficulty_sample_rate = SAMPLE_RATE * HIGH_BPS; - p.sampling_activation = ForkActivation::always(); + p.prior_ghostdag_k = 1; + p.crescendo.ghostdag_k = 1; + p.prior_target_time_per_block /= HIGH_BPS; + p.crescendo.sampled_difficulty_window_size = HIGH_BPS_SAMPLED_WINDOW_SIZE; + p.crescendo.difficulty_sample_rate = SAMPLE_RATE * HIGH_BPS; + p.crescendo_activation = ForkActivation::always(); + p.prior_target_time_per_block = p.crescendo.target_time_per_block; // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window - p.past_median_time_sample_rate = PMT_SAMPLE_RATE * HIGH_BPS; - p.past_median_time_sampled_window_size = PMT_SAMPLED_WINDOW_SIZE; - p.new_timestamp_deviation_tolerance = PMT_DEVIATION_TOLERANCE; + p.crescendo.past_median_time_sample_rate = PMT_SAMPLE_RATE * HIGH_BPS; + p.crescendo.past_median_time_sampled_window_size = PMT_SAMPLED_WINDOW_SIZE; + p.timestamp_deviation_tolerance = PMT_DEVIATION_TOLERANCE; }) .build(), }, @@ -1452,8 +1456,8 @@ async fn difficulty_test() { let consensus = TestConsensus::new(&test.config); let wait_handles = consensus.init(); - let sample_rate = test.config.difficulty_sample_rate(0); - let expanded_window_size = test.config.difficulty_window_size(0) * sample_rate as usize; + let sample_rate = test.config.difficulty_sample_rate().before(); + let expanded_window_size = test.config.difficulty_window_size().before() * sample_rate as usize; let fake_genesis = Header { hash: test.config.genesis.hash, @@ -1569,7 +1573,7 @@ async fn difficulty_test() { for _ in 0..sample_rate { if (tip.daa_score + 1) % sample_rate == 0 { // This block should be part of the sampled window - let slow_block_time = tip.timestamp + test.config.target_time_per_block * 3; + let slow_block_time = tip.timestamp + 
test.config.prior_target_time_per_block * 3; let slow_block = add_block(&consensus, Some(slow_block_time), vec![tip.hash]).await; tip = slow_block; break; @@ -1668,7 +1672,7 @@ async fn selected_chain_test() { let config = ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.min_difficulty_window_len = p.legacy_difficulty_window_size; + p.min_difficulty_window_size = p.prior_difficulty_window_size; }) .build(); let consensus = TestConsensus::new(&config); @@ -1755,6 +1759,7 @@ async fn staging_consensus_test() { counters, tx_script_cache_counters, 200, + Arc::new(MiningRules::default()), )); let consensus_manager = Arc::new(ConsensusManager::new(consensus_factory)); @@ -1812,7 +1817,7 @@ async fn run_kip10_activation_test() { cfg.params.genesis.hash = genesis_header.hash; }) .edit_consensus_params(|p| { - p.kip10_activation = ForkActivation::new(KIP10_ACTIVATION_DAA_SCORE); + p.crescendo_activation = ForkActivation::new(KIP10_ACTIVATION_DAA_SCORE); }) .build(); @@ -1832,7 +1837,7 @@ async fn run_kip10_activation_test() { assert_eq!(consensus.get_virtual_daa_score(), index); // Create transaction that attempts to use the KIP-10 opcode - let mut spending_tx = Transaction::new( + let mut tx = Transaction::new( 0, vec![TransactionInput::new( initial_utxo_collection[0].0, @@ -1846,8 +1851,14 @@ async fn run_kip10_activation_test() { 0, vec![], ); - spending_tx.finalize(); - let tx_id = spending_tx.id(); + tx.finalize(); + let tx_id = tx.id(); + + let mut tx = MutableTransaction::from_tx(tx); + // This triggers storage mass population + let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()); + let tx = tx.tx.unwrap_or_clone(); + // Test 1: Build empty block, then manually insert invalid tx and verify consensus rejects it { let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); @@ -1857,8 +1868,9 @@ async fn run_kip10_activation_test() { 
consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); // Insert our test transaction and recalculate block hashes - block.transactions.push(spending_tx.clone()); - block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false); + block.transactions.push(tx.clone()); + block.header.hash_merkle_root = + calc_hash_merkle_root(block.transactions.iter(), config.crescendo_activation.is_active(block.header.daa_score)); let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; assert!(matches!(block_status, Ok(BlockStatus::StatusDisqualifiedFromChain))); assert_eq!(consensus.lkg_virtual_state.load().daa_score, 2); @@ -1869,7 +1881,7 @@ async fn run_kip10_activation_test() { index += 1; // Test 2: Verify the same transaction is accepted after activation - let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![spending_tx.clone()]).await; + let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx.clone()]).await; assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); } @@ -1879,8 +1891,9 @@ async fn payload_test() { let config = ConfigBuilder::new(DEVNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.coinbase_maturity = 0; - p.payload_activation = ForkActivation::always() + p.prior_coinbase_maturity = 0; + p.crescendo.coinbase_maturity = 0; + p.crescendo_activation = ForkActivation::always() }) .build(); let consensus = TestConsensus::new(&config); @@ -1890,22 +1903,38 @@ async fn payload_test() { let b = consensus.build_utxo_valid_block_with_parents(1.into(), vec![config.genesis.hash], miner_data.clone(), vec![]); consensus.validate_and_insert_block(b.to_immutable()).virtual_state_task.await.unwrap(); let funding_block = 
consensus.build_utxo_valid_block_with_parents(2.into(), vec![1.into()], miner_data, vec![]); - let cb_id = { + let (cb_id, cb_amount) = { let mut cb = funding_block.transactions[0].clone(); cb.finalize(); - cb.id() + (cb.id(), cb.outputs[0].value) }; + consensus.validate_and_insert_block(funding_block.to_immutable()).virtual_state_task.await.unwrap(); - let tx = Transaction::new( + let mut txx = Transaction::new( 0, vec![TransactionInput::new(TransactionOutpoint { transaction_id: cb_id, index: 0 }, vec![], 0, 0)], - vec![TransactionOutput::new(1, ScriptPublicKey::default())], + vec![TransactionOutput::new(cb_amount / 2, ScriptPublicKey::default())], 0, SubnetworkId::default(), 0, - vec![0; (config.params.max_block_mass / 2) as usize], + vec![0; (config.params.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize], ); - consensus.add_utxo_valid_block_with_parents(3.into(), vec![2.into()], vec![tx]).await.unwrap(); + + // Create a tx with transient mass over the block limit + txx.payload = vec![0; (2 * config.params.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR) as usize]; + let mut tx = MutableTransaction::from_tx(txx.clone()); + // This triggers storage mass population + consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()).unwrap(); + let consensus_res = consensus.add_utxo_valid_block_with_parents(4.into(), vec![2.into()], vec![tx.tx.unwrap_or_clone()]).await; + assert_match!(consensus_res, Err(RuleError::ExceedsTransientMassLimit(_, _))); + + // Fix the payload to be below the limit + txx.payload = vec![0; (config.params.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize]; + let mut tx = MutableTransaction::from_tx(txx.clone()); + // This triggers storage mass population + consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()).unwrap(); + let status = consensus.add_utxo_valid_block_with_parents(3.into(), vec![2.into()], vec![tx.tx.unwrap_or_clone()]).await; + 
assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); consensus.shutdown(wait_handles); } @@ -1943,7 +1972,7 @@ async fn payload_activation_test() { cfg.params.genesis.hash = genesis_header.hash; }) .edit_consensus_params(|p| { - p.payload_activation = ForkActivation::new(PAYLOAD_ACTIVATION_DAA_SCORE); + p.crescendo_activation = ForkActivation::new(PAYLOAD_ACTIVATION_DAA_SCORE); }) .build(); @@ -1963,7 +1992,7 @@ async fn payload_activation_test() { assert_eq!(consensus.get_virtual_daa_score(), index); // Create transaction with large payload - let large_payload = vec![0u8; (config.params.max_block_mass / 2) as usize]; + let large_payload = vec![0u8; (config.params.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize]; let mut tx_with_payload = Transaction::new( 0, vec![TransactionInput::new( @@ -1989,10 +2018,15 @@ async fn payload_activation_test() { let mut block = consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); + let mut tx = MutableTransaction::from_tx(tx_with_payload.clone()); + // This triggers storage mass population + let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()); + // Insert our test transaction and recalculate block hashes - block.transactions.push(tx_with_payload.clone()); + block.transactions.push(tx.tx.unwrap_or_clone()); - block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false); + block.header.hash_merkle_root = + calc_hash_merkle_root(block.transactions.iter(), config.crescendo_activation.is_active(block.header.daa_score)); let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; assert!(matches!(block_status, Err(RuleError::TxInContextFailed(tx, TxRuleError::NonCoinbaseTxHasPayload)) if tx == tx_id)); assert_eq!(consensus.lkg_virtual_state.load().daa_score, PAYLOAD_ACTIVATION_DAA_SCORE - 1); @@ -2003,9 +2037,13 @@ async fn 
payload_activation_test() { consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap(); index += 1; + let mut tx = MutableTransaction::from_tx(tx_with_payload.clone()); + // This triggers storage mass population + let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()); + // Test 2: Verify the same transaction is accepted after activation let status = - consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx_with_payload.clone()]).await; + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx.tx.unwrap_or_clone()]).await; assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); @@ -2067,7 +2105,7 @@ async fn runtime_sig_op_counting_test() { cfg.params.genesis.hash = genesis_header.hash; }) .edit_consensus_params(|p| { - p.runtime_sig_op_counting = ForkActivation::new(RUNTIME_SIGOP_ACTIVATION_DAA_SCORE); + p.crescendo_activation = ForkActivation::new(RUNTIME_SIGOP_ACTIVATION_DAA_SCORE); }) .build(); @@ -2120,13 +2158,19 @@ async fn runtime_sig_op_counting_test() { tx.finalize(); + let mut tx = MutableTransaction::from_tx(tx); + // This triggers storage mass population + let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()); + let tx = tx.tx.unwrap_or_clone(); + // Test 1: Before activation, tx should be rejected due to static sig op counting (sees 3 ops) { let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); let mut block = consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); block.transactions.push(tx.clone()); - block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false); + block.header.hash_merkle_root = + calc_hash_merkle_root(block.transactions.iter(), 
config.crescendo_activation.is_active(block.header.daa_score)); let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; assert!(matches!(block_status, Ok(BlockStatus::StatusDisqualifiedFromChain))); index += 1; diff --git a/testing/integration/src/consensus_pipeline_tests.rs b/testing/integration/src/consensus_pipeline_tests.rs index 0b252c3813..a6dc387133 100644 --- a/testing/integration/src/consensus_pipeline_tests.rs +++ b/testing/integration/src/consensus_pipeline_tests.rs @@ -94,7 +94,7 @@ async fn test_concurrent_pipeline_random() { let mut tips = vec![genesis]; let mut total = 1000i64; while total > 0 { - let v = min(config.max_block_parents as i64, poi.sample(&mut thread_rng) as i64); + let v = min(config.prior_max_block_parents as i64, poi.sample(&mut thread_rng) as i64); if v == 0 { continue; } diff --git a/testing/integration/src/daemon_integration_tests.rs b/testing/integration/src/daemon_integration_tests.rs index 25471cab13..ec32f012cd 100644 --- a/testing/integration/src/daemon_integration_tests.rs +++ b/testing/integration/src/daemon_integration_tests.rs @@ -141,7 +141,7 @@ async fn daemon_utxos_propagation_test() { }; let total_fd_limit = 10; - let coinbase_maturity = SIMNET_PARAMS.coinbase_maturity; + let coinbase_maturity = SIMNET_PARAMS.coinbase_maturity().before(); let mut kaspad1 = Daemon::new_random_with_args(args.clone(), total_fd_limit); let mut kaspad2 = Daemon::new_random_with_args(args, total_fd_limit); let rpc_client1 = kaspad1.start().await; @@ -217,7 +217,7 @@ async fn daemon_utxos_propagation_test() { async fn daa_score_reached(client: GrpcClient) -> bool { let virtual_daa_score = client.get_server_info().await.unwrap().virtual_daa_score; trace!("Virtual DAA score: {}", virtual_daa_score); - virtual_daa_score == SIMNET_PARAMS.coinbase_maturity + virtual_daa_score == SIMNET_PARAMS.coinbase_maturity().before() } Box::pin(daa_score_reached(check_client.clone())) }, diff --git 
a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs index 00d9b78032..a8bed754fb 100644 --- a/testing/integration/src/mempool_benchmarks.rs +++ b/testing/integration/src/mempool_benchmarks.rs @@ -105,7 +105,7 @@ async fn bench_bbt_latency() { let bbt_client = daemon.new_client().await; // The time interval between Poisson(lambda) events distributes ~Exp(lambda) - let dist: Exp = Exp::new(params.bps() as f64).unwrap(); + let dist: Exp = Exp::new(params.bps().upper_bound() as f64).unwrap(); let comm_delay = 1000; // Mining key and address @@ -347,8 +347,15 @@ async fn bench_bbt_latency_2() { .launch() .await .task( - MinerGroupTask::build(network, client_manager.clone(), SUBMIT_BLOCK_CLIENTS, params.bps(), BLOCK_COUNT, Stopper::Signal) - .await, + MinerGroupTask::build( + network, + client_manager.clone(), + SUBMIT_BLOCK_CLIENTS, + params.bps().upper_bound(), + BLOCK_COUNT, + Stopper::Signal, + ) + .await, ) .task( TxSenderGroupTask::build( diff --git a/testing/integration/src/subscribe_benchmarks.rs b/testing/integration/src/subscribe_benchmarks.rs index 8efefd8427..367495eaa9 100644 --- a/testing/integration/src/subscribe_benchmarks.rs +++ b/testing/integration/src/subscribe_benchmarks.rs @@ -231,8 +231,15 @@ async fn utxos_changed_subscriptions_client(address_cycle_seconds: u64, address_ .task(TickTask::build(tick_service.clone())) .task(MemoryMonitorTask::build(tick_service.clone(), "client", Duration::from_secs(5), MAX_MEMORY)) .task( - MinerGroupTask::build(network, client_manager.clone(), SUBMIT_BLOCK_CLIENTS, params.bps(), BLOCK_COUNT, Stopper::Signal) - .await, + MinerGroupTask::build( + network, + client_manager.clone(), + SUBMIT_BLOCK_CLIENTS, + params.bps().upper_bound(), + BLOCK_COUNT, + Stopper::Signal, + ) + .await, ) .task( TxSenderGroupTask::build( @@ -250,7 +257,7 @@ async fn utxos_changed_subscriptions_client(address_cycle_seconds: u64, address_ SubscriberGroupTask::build( client_manager, 
SUBSCRIBE_WORKERS, - params.bps(), + params.bps().upper_bound(), vec![VirtualDaaScoreChangedScope {}.into()], 3, subscribing_addresses, diff --git a/wallet/core/src/tx/generator/test.rs b/wallet/core/src/tx/generator/test.rs index e1db97c446..bf9c71f9d4 100644 --- a/wallet/core/src/tx/generator/test.rs +++ b/wallet/core/src/tx/generator/test.rs @@ -140,7 +140,7 @@ impl GeneratorExtension for Generator { fn test_network_id() -> NetworkId { // TODO make this configurable - NetworkId::with_suffix(NetworkType::Testnet, 11) + NetworkId::with_suffix(NetworkType::Testnet, 10) } #[derive(Default)] diff --git a/wallet/core/src/tx/mass.rs b/wallet/core/src/tx/mass.rs index 01a194f638..036da03cde 100644 --- a/wallet/core/src/tx/mass.rs +++ b/wallet/core/src/tx/mass.rs @@ -329,8 +329,8 @@ impl MassCalculator { ) -> Option { consensus_calc_storage_mass( false, - inputs.iter().map(|entry| entry.amount()), - outputs.iter().map(|out| out.value), + inputs.iter().map(|entry| entry.into()), + outputs.iter().map(|out| out.into()), self.storage_mass_parameter, ) } diff --git a/wallet/core/src/utxo/test.rs b/wallet/core/src/utxo/test.rs index a1b41f9987..516383aab8 100644 --- a/wallet/core/src/utxo/test.rs +++ b/wallet/core/src/utxo/test.rs @@ -22,7 +22,7 @@ async fn test_utxo_subsystem_bootstrap() -> Result<()> { #[test] fn test_utxo_generator_empty_utxo_noop() -> Result<()> { - let network_id = NetworkId::with_suffix(NetworkType::Testnet, 11); + let network_id = NetworkId::with_suffix(NetworkType::Testnet, 10); let output_address = output_address(network_id.into()); let payment_output = PaymentOutput::new(output_address, kaspa_to_sompi(2.0)); diff --git a/wallet/core/src/wallet/mod.rs b/wallet/core/src/wallet/mod.rs index e9316a13b1..8fd4a8ce52 100644 --- a/wallet/core/src/wallet/mod.rs +++ b/wallet/core/src/wallet/mod.rs @@ -1368,7 +1368,6 @@ impl Wallet { .into_iter() .map(|mnemonic| { decrypt_mnemonic(file.num_threads, mnemonic, import_secret.as_ref()) - .map_err(Error::from) 
.and_then(|decrypted| Mnemonic::new(decrypted.trim(), Language::English).map_err(Error::from)) }) .map(|r| r.map(|m| (m, >::None))) @@ -1410,7 +1409,6 @@ impl Wallet { .into_iter() .map(|mnemonic| { decrypt_mnemonic(MultisigWalletFileV1::::NUM_THREADS, mnemonic, import_secret.as_ref()) - .map_err(Error::from) .and_then(|decrypted| Mnemonic::new(decrypted.trim(), Language::English).map_err(Error::from)) }) .map(|r| r.map(|m| (m, >::None)))