From c127af56bdc90d2887e8952dfa300a5aa5e68a0b Mon Sep 17 00:00:00 2001 From: elldeeone <73735118+elldeeone@users.noreply.github.com> Date: Wed, 12 Nov 2025 20:48:00 +1100 Subject: [PATCH 1/6] Add Tor integration and proxy controls --- .gitignore | 1 + Cargo.lock | 1218 ++++++++++++++--- Cargo.toml | 4 +- cli/src/error.rs | 5 +- cli/src/modules/rpc.rs | 5 +- components/addressmanager/src/lib.rs | 99 +- .../src/stores/address_store.rs | 80 +- components/addressmanager/src/stores/mod.rs | 35 +- components/connectionmanager/src/lib.rs | 86 +- kaspad/Cargo.toml | 35 +- kaspad/src/args.rs | 350 ++++- kaspad/src/daemon.rs | 439 +++++- kaspad/src/lib.rs | 1 + kaspad/src/tor_manager.rs | 254 ++++ protocol/flows/Cargo.toml | 1 + protocol/flows/src/flow_context.rs | 105 +- protocol/flows/src/service.rs | 49 +- protocol/flows/src/v5/address.rs | 87 +- protocol/p2p/Cargo.toml | 3 + protocol/p2p/proto/p2p.proto | 5 +- protocol/p2p/src/bin/client.rs | 2 +- protocol/p2p/src/bin/server.rs | 3 +- protocol/p2p/src/convert/error.rs | 4 +- protocol/p2p/src/convert/messages.rs | 4 +- protocol/p2p/src/convert/net_address.rs | 100 +- protocol/p2p/src/core/adaptor.rs | 17 +- protocol/p2p/src/core/connection_handler.rs | 86 +- protocol/p2p/src/core/peer.rs | 23 +- protocol/p2p/src/core/router.rs | 15 +- protocol/p2p/src/echo.rs | 10 +- protocol/p2p/src/flags.rs | 12 + protocol/p2p/src/lib.rs | 3 + rpc/core/src/error.rs | 8 +- rpc/grpc/core/src/convert/message.rs | 10 +- rpc/grpc/core/src/convert/peer.rs | 9 +- rpc/grpc/server/src/connection_handler.rs | 3 +- rpc/service/src/converter/protocol.rs | 2 +- rpc/service/src/service.rs | 2 +- rpc/wrpc/server/src/address.rs | 10 +- utils/Cargo.toml | 1 + utils/src/networking.rs | 401 +++++- 41 files changed, 3150 insertions(+), 437 deletions(-) create mode 100644 kaspad/src/tor_manager.rs create mode 100644 protocol/p2p/src/flags.rs diff --git a/.gitignore b/.gitignore index 0199232fe6..e1a68e2cfd 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ testing/integration/perflogs* Servers.toml release package-sizes.js +CLAUDE.md diff --git a/Cargo.lock b/Cargo.lock index efdd871231..078bc075bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11,7 +11,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -48,6 +48,7 @@ dependencies = [ "cfg-if 1.0.0", "cipher", "cpufeatures", + "zeroize", ] [[package]] @@ -57,7 +58,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", - "getrandom", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -187,6 +188,44 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "asn1-rs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 2.0.17", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -278,6 +317,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-socks5" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da2537846e16b96d2972ee52a3b355663872a1a687ce6d57a3b6f6b6a181c89" +dependencies = [ + "thiserror 1.0.64", + "tokio", +] + [[package]] name = "async-std" version = "1.13.0" @@ -324,7 +373,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -341,7 +390,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -499,7 +548,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -576,7 +625,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", "syn_derive", ] @@ -648,7 +697,7 @@ dependencies = [ "semver", "serde", "serde_json", - "thiserror", + "thiserror 1.0.64", ] [[package]] @@ -741,7 +790,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01c631c2cf4b95746cf065f732219ec0f2eb1497cd4c7fe07cb336ddf0d7c503" dependencies = [ "js-sys", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -856,7 +905,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -893,6 +942,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + [[package]] name = "constant_time_eq" version = "0.3.1" @@ -920,6 +975,24 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "convert_case" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "cookie-factory" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9885fa71e26b8ab7855e2ec7cae6e9b380edff76cd052e07c683a0319d51b3a2" +dependencies = [ + "futures", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1060,7 +1133,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "typenum", ] @@ -1095,6 +1168,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] + [[package]] name = "ctrlc" version = "3.4.5" @@ -1114,6 +1196,7 @@ dependencies = [ "cfg-if 1.0.0", "cpufeatures", "curve25519-dalek-derive", + "digest", "fiat-crypto", "rustc_version", "subtle", @@ -1128,7 +1211,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -1152,7 +1235,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -1163,7 +1246,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -1186,6 +1269,26 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +[[package]] +name = "data-encoding-macro" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +dependencies = [ + "data-encoding", + "syn 1.0.109", +] + [[package]] name = "delegate-display" version = "2.1.1" @@ -1195,7 +1298,32 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "der-parser" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "cookie-factory", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", ] [[package]] @@ -1237,7 +1365,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -1247,7 +1375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" dependencies = [ "derive_builder_core", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -1260,7 +1388,29 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.79", + "syn 2.0.106", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "convert_case 0.7.1", + "proc-macro2", + "quote", + "syn 2.0.106", + "unicode-xid", ] [[package]] @@ -1298,6 +1448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", + "const-oid", "crypto-common", "subtle", ] @@ -1323,12 +1474,36 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "doc-comment" version = "0.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "domain" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd50aea158e9a57c9c9075ca7a3dfa4c08d9a468b405832383876f9df85379b" +dependencies = [ + "bytes", + "octseq", + "pin-project-lite", + "rand 0.8.5", + "time", +] + [[package]] name = "downcast" version = "0.11.0" @@ -1359,6 +1534,44 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2334658684d7c213e18602aa72ce37e94d1c9b535882ef6e30bc444b7514a1ee" +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature 2.2.0", +] + +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "merlin", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", +] + +[[package]] +name = "educe" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "either" version = "1.13.0" @@ -1380,6 +1593,19 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "enum-ordinalize" +version = "3.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "enum-primitive-derive" version = "0.3.0" @@ -1388,7 +1614,7 @@ checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c" dependencies = [ "num-traits", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -1465,7 +1691,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -1526,6 +1752,12 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fluid-let" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "749cff877dc1af878a0b31a41dd221a753634401ea0ef2f87b62d3171522485a" + [[package]] name = "fnv" version = "1.0.7" @@ -1610,7 +1842,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -1667,6 +1899,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "r-efi", + "wasip2", +] + [[package]] name = "gimli" version = "0.31.1" @@ -1995,6 +2239,21 @@ dependencies = [ "webpki-roots", ] +[[package]] +name = "hyper-socks2" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51c227614c208f7e7c2e040526912604a1a957fe467c9c2f5b06c5d032337dab" +dependencies = [ + "async-socks5", + "http 1.1.0", + "hyper 1.4.1", + "hyper-util", + "thiserror 1.0.64", + "tokio", + 
"tower-service", +] + [[package]] name = "hyper-timeout" version = "0.5.1" @@ -2050,6 +2309,92 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -2066,6 +2411,27 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + [[package]] name = "igd-next" version = "0.14.3" @@ -2079,7 +2445,7 @@ dependencies = [ "http 0.2.12", "hyper 0.14.30", "log", - "rand", + "rand 0.8.5", "tokio", "url", "xmltree", @@ -2252,7 +2618,7 @@ dependencies = [ "js-sys", "serde", "smallvec", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-test", "web-sys", @@ -2274,11 +2640,11 @@ dependencies = [ "local-ip-address", "log", "parking_lot", - "rand", + "rand 0.8.5", "rocksdb", "rv", "serde", - "thiserror", + "thiserror 1.0.64", "tokio", ] @@ -2296,21 +2662,21 @@ dependencies = [ "borsh", "bs58", "faster-hex", - "getrandom", + "getrandom 0.2.15", "hmac", "js-sys", "kaspa-consensus-core", "kaspa-utils", 
"once_cell", "pbkdf2", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "ripemd", "secp256k1", "serde", "sha2", "subtle", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "workflow-wasm", "zeroize", @@ -2349,7 +2715,7 @@ dependencies = [ "serde", "serde_json", "textwrap 0.16.1", - "thiserror", + "thiserror 1.0.64", "tokio", "wasm-bindgen", "web-sys", @@ -2376,7 +2742,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "tokio", ] @@ -2412,7 +2778,7 @@ dependencies = [ "log", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "rand_distr", "rayon", "rocksdb", @@ -2420,7 +2786,7 @@ dependencies = [ "serde", "serde_json", "smallvec", - "thiserror", + "thiserror 1.0.64", "tokio", ] @@ -2441,12 +2807,12 @@ dependencies = [ "kaspa-txscript", "kaspa-utils", "kaspa-wasm-core", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde-wasm-bindgen", "serde_json", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "workflow-log", "workflow-wasm", @@ -2465,7 +2831,7 @@ dependencies = [ "criterion", "faster-hex", "futures-util", - "getrandom", + "getrandom 0.2.15", "itertools 0.13.0", "js-sys", "kaspa-addresses", @@ -2476,13 +2842,13 @@ dependencies = [ "kaspa-muhash", "kaspa-txscript-errors", "kaspa-utils", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde-wasm-bindgen", "serde_json", "smallvec", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-test", "web-sys", @@ -2498,7 +2864,7 @@ version = "1.0.1" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", - "derive_more", + "derive_more 0.99.18", "futures", "kaspa-consensus-core", "kaspa-core", @@ -2507,7 +2873,7 @@ dependencies = [ "kaspa-utils", "log", "paste", - "thiserror", + "thiserror 1.0.64", "triggered", ] @@ -2524,12 +2890,12 @@ dependencies = [ "kaspa-hashes", "kaspa-txscript", "kaspa-utils", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde-wasm-bindgen", "serde_json", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "workflow-log", "workflow-wasm", @@ -2550,7 +2916,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "tokio", ] @@ -2566,7 +2932,7 @@ dependencies = [ "log", "log4rs", "num_cpus", - "thiserror", + "thiserror 1.0.64", "tokio", "triggered", "wasm-bindgen", @@ -2587,7 +2953,7 @@ dependencies = [ "kaspa-wallet-core", "nw-sys", "serde", - "thiserror", + "thiserror 1.0.64", "workflow-core", "workflow-log", "workflow-node", @@ -2609,12 +2975,12 @@ dependencies = [ "num-traits", "num_cpus", "parking_lot", - "rand", + "rand 0.8.5", "rocksdb", "serde", "smallvec", "tempfile", - "thiserror", + "thiserror 1.0.64", ] [[package]] @@ -2639,10 +3005,10 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand", + "rand 0.8.5", "regex", "rustls", - "thiserror", + "thiserror 1.0.64", "tokio", "tokio-stream", "tonic", @@ -2669,9 +3035,9 @@ dependencies = [ "log", "paste", "prost", - "rand", + "rand 0.8.5", "regex", - "thiserror", + "thiserror 1.0.64", "tokio", "tokio-stream", "tonic", @@ -2706,9 +3072,9 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand", + "rand 0.8.5", "rustls", - "thiserror", + "thiserror 1.0.64", "tokio", "tokio-stream", "tonic", @@ -2729,7 +3095,7 @@ dependencies = [ "kaspa-utils", "keccak", "once_cell", - "rand", + "rand 0.8.5", "serde", "sha2", "sha3", @@ -2743,7 +3109,7 @@ version = "1.0.1" dependencies = [ "async-channel 2.3.1", "async-trait", - "derive_more", + "derive_more 0.99.18", "futures", "kaspa-consensus-core", "kaspa-hashes", @@ -2752,7 +3118,7 @@ dependencies = [ "log", "paste", 
"serde", - "thiserror", + "thiserror 1.0.64", "triggered", ] @@ -2762,7 +3128,7 @@ version = "1.0.1" dependencies = [ "async-channel 2.3.1", "async-trait", - "derive_more", + "derive_more 0.99.18", "futures", "kaspa-consensus", "kaspa-consensus-core", @@ -2778,8 +3144,8 @@ dependencies = [ "log", "parking_lot", "paste", - "rand", - "thiserror", + "rand 0.8.5", + "thiserror 1.0.64", "tokio", "triggered", ] @@ -2795,10 +3161,10 @@ dependencies = [ "kaspa-utils", "malachite-base", "malachite-nz", - "rand_chacha", + "rand_chacha 0.3.1", "serde", "serde-wasm-bindgen", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "workflow-core", "workflow-log", @@ -2823,7 +3189,7 @@ dependencies = [ "kaspa-rpc-core", "separator", "serde", - "thiserror", + "thiserror 1.0.64", "workflow-core", "workflow-log", ] @@ -2846,12 +3212,12 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "secp256k1", "serde", "smallvec", "sweep-bptree", - "thiserror", + "thiserror 1.0.64", "tokio", ] @@ -2860,7 +3226,7 @@ name = "kaspa-mining-errors" version = "1.0.1" dependencies = [ "kaspa-consensus-core", - "thiserror", + "thiserror 1.0.64", ] [[package]] @@ -2870,8 +3236,8 @@ dependencies = [ "criterion", "kaspa-hashes", "kaspa-math", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rayon", "serde", ] @@ -2884,7 +3250,7 @@ dependencies = [ "async-trait", "borsh", "criterion", - "derive_more", + "derive_more 0.99.18", "futures", "futures-util", "indexmap 2.6.0", @@ -2901,9 +3267,9 @@ dependencies = [ "log", "parking_lot", "paste", - "rand", + "rand 0.8.5", "serde", - "thiserror", + "thiserror 1.0.64", "tokio", "triggered", "workflow-core", @@ -2937,10 +3303,11 @@ dependencies = [ "kaspa-utils-tower", "log", "parking_lot", - "rand", - "thiserror", + "rand 0.8.5", + "thiserror 1.0.64", "tokio", "tokio-stream", + "tor-interface", "uuid 1.10.0", ] @@ -2953,6 +3320,7 @@ dependencies = [ "futures", "h2 0.4.6", "hex", + "hyper-util", "itertools 0.13.0", "kaspa-consensus-core", "kaspa-core", @@ -2964,14 +3332,16 @@ dependencies = [ "log", "parking_lot", "prost", - "rand", + "rand 0.8.5", "seqlock", "serde", - "thiserror", + "thiserror 1.0.64", "tokio", + "tokio-socks", "tokio-stream", "tonic", "tonic-build", + "tower 0.5.1", "uuid 1.10.0", ] @@ -2999,7 +3369,7 @@ dependencies = [ "kaspa-core", "log", "portable-atomic", - "thiserror", + "thiserror 1.0.64", "tokio", "workflow-perf-monitor", ] @@ -3028,7 +3398,7 @@ dependencies = [ "async-trait", "borsh", "cfg-if 1.0.0", - "derive_more", + "derive_more 0.99.18", "downcast", "faster-hex", "hex", @@ -3049,12 +3419,12 @@ dependencies = [ "kaspa-utils", "log", "paste", - "rand", + "rand 0.8.5", "serde", "serde-wasm-bindgen", "serde_json", "smallvec", - "thiserror", + "thiserror 1.0.64", "uuid 1.10.0", "wasm-bindgen", "workflow-core", @@ -3150,7 +3520,7 @@ dependencies = [ "kaspad", "log", "parking_lot", - "rand", + "rand 0.8.5", "rand_distr", "rayon", "rocksdb", @@ -3159,7 +3529,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror", + "thiserror 1.0.64", "tokio", "workflow-perf-monitor", ] @@ -3184,14 +3554,14 @@ dependencies = [ "kaspa-wasm-core", "log", "parking_lot", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde-wasm-bindgen", "serde_json", "sha2", "smallvec", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "workflow-wasm", ] @@ -3201,7 +3571,7 @@ name = "kaspa-txscript-errors" version = "1.0.1" dependencies = [ "secp256k1", - "thiserror", + "thiserror 1.0.64", ] [[package]] @@ -3215,6 +3585,7 @@ 
dependencies = [ "borsh", "cfg-if 1.0.0", "criterion", + "data-encoding", "duct", "event-listener 2.5.3", "faster-hex", @@ -3226,14 +3597,14 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "rlimit", "serde", "serde_json", "sha2", "smallvec", "sysinfo", - "thiserror", + "thiserror 1.0.64", "tokio", "triggered", "uuid 1.10.0", @@ -3271,10 +3642,10 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "rocksdb", "serde", - "thiserror", + "thiserror 1.0.64", ] [[package]] @@ -3356,7 +3727,7 @@ dependencies = [ "md-5", "pad", "pbkdf2", - "rand", + "rand 0.8.5", "regex", "ripemd", "secp256k1", @@ -3369,7 +3740,7 @@ dependencies = [ "sha2", "slugify-rs", "sorted-insert", - "thiserror", + "thiserror 1.0.64", "tokio", "wasm-bindgen", "wasm-bindgen-futures", @@ -3401,14 +3772,14 @@ dependencies = [ "kaspa-txscript-errors", "kaspa-utils", "kaspa-wasm-core", - "rand", + "rand 0.8.5", "ripemd", "secp256k1", "serde", "serde-wasm-bindgen", "serde_json", "sha2", - "thiserror", + "thiserror 1.0.64", "tokio", "wasm-bindgen", "wasm-bindgen-futures", @@ -3453,7 +3824,7 @@ dependencies = [ "serde-wasm-bindgen", "serde_json", "serde_repr", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-test", "web-sys", @@ -3516,13 +3887,13 @@ dependencies = [ "kaspa-rpc-core", "kaspa-rpc-macros", "paste", - "rand", + "rand 0.8.5", "regex", "rustls", "serde", "serde-wasm-bindgen", "serde_json", - "thiserror", + "thiserror 1.0.64", "toml", "wasm-bindgen", "wasm-bindgen-futures", @@ -3562,7 +3933,7 @@ dependencies = [ "kaspa-rpc-macros", "kaspa-wrpc-server", "num_cpus", - "thiserror", + "thiserror 1.0.64", "tokio", "workflow-core", "workflow-log", @@ -3589,7 +3960,7 @@ dependencies = [ "paste", "rustls", "serde", - "thiserror", + "thiserror 1.0.64", "tokio", "workflow-core", "workflow-log", @@ -3647,10 +4018,12 @@ dependencies = [ "dhat", "dirs", "futures-util", + "hex", "itertools 0.13.0", "kaspa-addresses", "kaspa-addressmanager", "kaspa-alloc", + "kaspa-connectionmanager", "kaspa-consensus", "kaspa-consensus-core", "kaspa-consensus-notify", @@ -3675,16 +4048,17 @@ dependencies = [ "kaspa-wrpc-server", "log", "num_cpus", - "rand", + "rand 0.8.5", "rayon", "rocksdb", "serde", "serde_json", "serde_with", "tempfile", - "thiserror", + "thiserror 1.0.64", "tokio", "toml", + "tor-interface", "workflow-log", ] @@ -3711,6 +4085,9 @@ name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] [[package]] name = "lazycell" @@ -3813,6 +4190,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + [[package]] name = "local-ip-address" version = "0.6.3" @@ -3821,7 +4204,7 @@ checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" dependencies = [ "libc", "neli", - "thiserror", + "thiserror 1.0.64", "windows-sys 0.59.0", ] @@ -3869,12 +4252,12 @@ dependencies = [ "log-mdc", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "serde", "serde-value", "serde_json", "serde_yaml", - "thiserror", + "thiserror 1.0.64", "thread-id", "typemap-ors", "winapi", @@ -3938,7 +4321,7 @@ 
dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -3949,7 +4332,7 @@ checksum = "13198c120864097a565ccb3ff947672d969932b7975ebd4085732c9f09435e55" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -3962,7 +4345,7 @@ dependencies = [ "macroific_core", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -4041,6 +4424,18 @@ dependencies = [ "autocfg", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "mimalloc" version = "0.1.48" @@ -4123,7 +4518,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] @@ -4232,6 +4627,23 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + [[package]] name = "num-complex" version = "0.4.6" @@ -4321,7 +4733,7 @@ checksum = "8ebcbbf8ce75f465eea419ed8396efaf9ad9da87ad83fe9fce9c8789de00ca79" dependencies = [ "cfg-if 1.0.0", "js-sys", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "web-sys", ] @@ -4335,6 +4747,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "octseq" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126c3ca37c9c44cec575247f43a3e4374d8927684f129d2beeb0d2cef262fe12" +dependencies = [ + "bytes", +] + [[package]] name = "once_cell" version = "1.20.2" @@ -4439,7 +4860,7 @@ checksum = "70df726c43c645ef1dde24c7ae14692036ebe5457c92c5f0ec4cfceb99634ff6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -4449,7 +4870,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -4475,6 +4896,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -4491,7 +4921,7 @@ dependencies = [ "order-stat", "peroxide-ad", "puruspe", - "rand", + "rand 0.8.5", "rand_distr", ] @@ -4532,7 +4962,7 @@ checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -4558,6 +4988,27 @@ dependencies = [ "futures-io", ] +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", 
+] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" version = "0.3.31" @@ -4596,6 +5047,15 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +[[package]] +name = "potential_utf" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -4618,7 +5078,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -4655,9 +5115,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] @@ -4689,7 +5149,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.79", + "syn 2.0.106", "tempfile", ] @@ -4703,7 +5163,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -4734,7 +5194,7 @@ dependencies = [ "rustc-hash 2.1.1", "rustls", "socket2", - "thiserror", + "thiserror 1.0.64", "tokio", "tracing", ] @@ -4746,12 +5206,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", - "rand", + "rand 0.8.5", "ring", "rustc-hash 2.1.1", "rustls", "slab", - "thiserror", + "thiserror 1.0.64", "tinyvec", "tracing", ] @@ -4778,6 +5238,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "rand" version = "0.8.5" @@ -4785,8 +5251,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", ] [[package]] @@ -4796,7 +5272,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -4805,7 +5291,16 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.4", ] [[package]] @@ -4815,7 +5310,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", +] + +[[package]] +name = "rand_jitter" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b16df48f071248e67b8fc5e866d9448d45c08ad8b672baaaf796e2f15e606ff0" +dependencies = [ + "libc", + "rand_core 0.9.3", + "winapi", ] [[package]] @@ -4844,6 +5350,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rdrand" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d92195228612ac8eed47adbc2ed0f04e513a4ccb98175b6f2bd04d963b533655" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "redox_syscall" version = "0.5.7" @@ -4859,9 +5374,9 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", - "thiserror", + "thiserror 1.0.64", ] [[package]] @@ -4946,7 +5461,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if 1.0.0", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -5000,12 +5515,32 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand", + "rand 0.8.5", "rayon", "secp256k1", "tokio", ] +[[package]] +name = "rsa" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" +dependencies = [ + "const-oid", + "digest", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core 0.6.4", + "signature 2.2.0", + "spki", + "subtle", + "zeroize", +] + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -5033,6 +5568,15 @@ dependencies = [ "semver", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustix" version = "0.38.37" @@ -5104,7 +5648,7 @@ dependencies = [ "num", "num-traits", "peroxide", - "rand", + "rand 0.8.5", "rand_distr", "special", ] @@ -5115,6 +5659,19 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +[[package]] +name = "safelog" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a4e1c994fbc7521a5003e5c1c54304654ea0458881e777f6e2638520c2de8c5" +dependencies = [ + "derive_more 2.0.1", + "educe", + "either", + "fluid-let", + "thiserror 2.0.17", +] + [[package]] name = "salsa20" version = "0.10.2" @@ -5145,7 +5702,7 @@ version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ - "rand", + "rand 0.8.5", 
"secp256k1-sys", "serde", ] @@ -5221,7 +5778,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -5244,7 +5801,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -5295,7 +5852,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -5389,6 +5946,22 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core 0.6.4", +] + [[package]] name = "simpa" version = "1.0.1" @@ -5412,7 +5985,7 @@ dependencies = [ "kaspa-utils", "log", "num_cpus", - "rand", + "rand 0.8.5", "rand_distr", "rayon", "secp256k1", @@ -5464,6 +6037,17 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "socks" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3dbbd9ae980613c6dd8e28a9407b50509d3803b57624d5dfe8315218cd58b" +dependencies = [ + "byteorder", + "libc", + "winapi", +] + [[package]] name = "sorted-insert" version = "0.2.3" @@ -5485,12 +6069,28 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "strsim" version = "0.8.0" @@ -5528,9 +6128,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", @@ -5546,7 +6146,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -5564,6 +6164,17 @@ dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "sysinfo" version = "0.31.4" @@ -5647,7 +6258,16 @@ version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ - "thiserror-impl", + 
"thiserror-impl 1.0.64", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", ] [[package]] @@ -5658,7 +6278,18 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", ] [[package]] @@ -5719,6 +6350,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -5769,7 +6410,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -5783,6 +6424,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-socks" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d4770b8024672c1101b3f6733eab95b18007dbe0847a8afe341fcf79e06043f" +dependencies = [ + "either", + "futures-util", + "thiserror 1.0.64", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.16" @@ -5902,7 +6555,82 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.79", + "syn 2.0.106", +] + +[[package]] +name = "tor-interface" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c96df00d4374dea330e7dc27ba6365cc68c34cce75f660e66c4bd2377906f151" +dependencies = [ + "curve25519-dalek", + "data-encoding", + "data-encoding-macro", + "domain", + "hmac", + "idna 1.1.0", + "rand 0.9.2", + "rand_core 0.9.3", + "regex", + "sha1", + "sha2", + "sha3", + "signature 1.6.4", + "socks", + "static_assertions", + "thiserror 1.0.64", + "tor-llcrypto", + "zeroize", +] + +[[package]] +name = "tor-llcrypto" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b92fa9a99a066f06cd266287f6f89270c010693cce3c4c2fa38c27abfcda5fb" +dependencies = [ + "aes", + "base64ct", + "ctr", + "curve25519-dalek", + "der-parser", + "derive_more 2.0.1", + "digest", + "ed25519-dalek", + "educe", + "getrandom 0.3.4", + "hex", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_core 0.6.4", + "rand_core 0.9.3", + "rand_jitter", + "rdrand", + "rsa", + "safelog", + "serde", + "sha1", + "sha2", + "sha3", + "signature 2.2.0", + "subtle", + "thiserror 2.0.17", + "visibility", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "tor-prototype" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap 4.5.19", + "hyper 1.4.1", + "hyper-socks2", + "tokio", + "tor-interface", ] [[package]] @@ -5916,7 +6644,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -5986,7 +6714,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -6022,11 +6750,11 @@ dependencies = [ "http 1.1.0", 
"httparse", "log", - "rand", + "rand 0.8.5", "rustls", "rustls-pki-types", "sha1", - "thiserror", + "thiserror 1.0.64", "utf-8", ] @@ -6084,6 +6812,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + [[package]] name = "universal-hash" version = "0.5.1" @@ -6122,7 +6856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna", + "idna 0.5.0", "percent-encoding", ] @@ -6132,6 +6866,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -6144,7 +6884,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -6153,8 +6893,8 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "getrandom", - "rand", + "getrandom 0.2.15", + "rand 0.8.5", "serde", "wasm-bindgen", ] @@ -6198,6 +6938,17 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "visibility" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -6223,6 +6974,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -6247,7 +7007,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", "wasm-bindgen-shared", ] @@ -6282,7 +7042,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6317,7 +7077,7 @@ checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -6421,7 +7181,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -6432,7 +7192,7 @@ 
checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", ] [[package]] @@ -6631,6 +7391,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + [[package]] name = "workflow-chrome" version = "0.18.0" @@ -6640,7 +7406,7 @@ dependencies = [ "cfg-if 1.0.0", "chrome-sys", "js-sys", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "workflow-core", "workflow-log", @@ -6661,14 +7427,14 @@ dependencies = [ "dirs", "faster-hex", "futures", - "getrandom", + "getrandom 0.2.15", "instant", "js-sys", - "rand", + "rand 0.8.5", "rlimit", "serde", "serde-wasm-bindgen", - "thiserror", + "thiserror 1.0.64", "tokio", "triggered", "vergen", @@ -6705,7 +7471,7 @@ dependencies = [ "futures", "js-sys", "regex", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -6724,7 +7490,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.64", "tokio", "wasm-bindgen", "workflow-core", @@ -6771,7 +7537,7 @@ dependencies = [ "lazy_static", "node-sys", "serde", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-futures", "workflow-core", @@ -6792,10 +7558,10 @@ dependencies = [ "futures", "js-sys", "nw-sys", - "rand", + "rand 0.8.5", "serde", "serde-wasm-bindgen", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "web-sys", "workflow-core", @@ -6825,7 +7591,7 @@ dependencies = [ "cc", "libc", "mach", - "thiserror", + "thiserror 1.0.64", "windows-sys 0.48.0", ] @@ -6842,12 +7608,12 @@ dependencies = [ "downcast-rs", "futures", "futures-util", - "getrandom", + "getrandom 0.2.15", "manual_future", - "rand", + "rand 0.8.5", "serde", "serde_json", - "thiserror", + "thiserror 1.0.64", "tokio", "tungstenite", "wasm-bindgen", @@ -6900,7 +7666,7 @@ dependencies = [ "lazy_static", "serde", "serde_json", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -6918,7 +7684,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d1a90743bb4d3f68606cb4e9a78551a53399ebc35ddba981cbb56bf2b31940a" dependencies = [ "futures", - "thiserror", + "thiserror 1.0.64", "workflow-core", "workflow-task-macros", ] @@ -6957,7 +7723,7 @@ dependencies = [ "pad", "regex", "textwrap 0.16.1", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -6996,7 +7762,7 @@ dependencies = [ "js-sys", "serde", "serde-wasm-bindgen", - "thiserror", + "thiserror 1.0.64", "wasm-bindgen", "wasm-bindgen-futures", "workflow-core", @@ -7034,7 +7800,7 @@ dependencies = [ "futures", "futures-util", "js-sys", - "thiserror", + "thiserror 1.0.64", "tokio", "tokio-tungstenite", "triggered", @@ -7048,6 +7814,24 @@ dependencies = [ "workflow-wasm", ] +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core 0.6.4", + "serde", + "zeroize", +] + [[package]] name = "xml-rs" version = "0.8.22" @@ -7069,6 +7853,30 @@ version = "0.8.12" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a5cbf750400958819fb6178eaa83bee5cd9c29a26a40cc241df8c70fdd46984" +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -7087,7 +7895,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.106", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "synstructure", ] [[package]] @@ -7095,6 +7924,53 @@ name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] [[package]] name = "zstd-sys" diff --git a/Cargo.toml b/Cargo.toml index 5b960b0975..b3dc35acba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ members = [ "metrics/core", "metrics/perf_monitor", "utils/alloc", + "research/tor-integration/tor-prototype", ] [workspace.package] @@ -177,6 +178,7 @@ enum-primitive-derive = "0.3.0" event-listener = "2.5.3" # TODO "3.0.1" evpkdf = "0.2.0" faster-hex = "0.9.0" +data-encoding = "2.5.0" fixedstr = { version = "0.5.4", features = ["serde"] } flate2 = "1.0.28" futures = { version = "0.3.29" } @@ -339,4 +341,4 @@ strip = false [workspace.lints.clippy] empty_docs = "allow" -uninlined_format_args = "allow" \ No newline at end of file +uninlined_format_args = "allow" diff --git a/cli/src/error.rs b/cli/src/error.rs 
index 23bb261243..be56dcbade 100644 --- a/cli/src/error.rs +++ b/cli/src/error.rs @@ -1,6 +1,5 @@ -use std::net::AddrParseError; - use downcast::DowncastError; +use kaspa_utils::networking::NetAddressError; use kaspa_wallet_core::error::Error as WalletError; use workflow_core::channel::ChannelError; use workflow_terminal::error::Error as TerminalError; @@ -46,7 +45,7 @@ pub enum Error { ParseHexError(#[from] faster_hex::Error), #[error(transparent)] - AddrParseError(#[from] AddrParseError), + NetAddressError(#[from] NetAddressError), #[error("account '{0}' not found")] AccountNotFound(String), diff --git a/cli/src/modules/rpc.rs b/cli/src/modules/rpc.rs index 75bc50f421..79f5ff78b8 100644 --- a/cli/src/modules/rpc.rs +++ b/cli/src/modules/rpc.rs @@ -1,6 +1,7 @@ use crate::imports::*; use convert_case::{Case, Casing}; use kaspa_rpc_core::api::ops::RpcApiOps; +use kaspa_utils::networking::NetAddressError; #[derive(Default, Handler)] #[help("Execute RPC commands against the connected Kaspa node")] @@ -200,7 +201,7 @@ impl Rpc { if argv.is_empty() { return Err(Error::custom("Please specify peer IP address")); } - let ip: RpcIpAddress = argv.remove(0).parse()?; + let ip: RpcIpAddress = argv.remove(0).parse().map_err(NetAddressError::from)?; let result = rpc.ban_call(None, BanRequest { ip }).await?; self.println(&ctx, result); } @@ -208,7 +209,7 @@ impl Rpc { if argv.is_empty() { return Err(Error::custom("Please specify peer IP address")); } - let ip: RpcIpAddress = argv.remove(0).parse()?; + let ip: RpcIpAddress = argv.remove(0).parse().map_err(NetAddressError::from)?; let result = rpc.unban_call(None, UnbanRequest { ip }).await?; self.println(&ctx, result); } diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs index b220a8ab1b..1be4fb18cc 100644 --- a/components/addressmanager/src/lib.rs +++ b/components/addressmanager/src/lib.rs @@ -56,19 +56,42 @@ pub struct AddressManager { address_store: address_store_with_cache::Store, config: Arc, local_net_addresses: Vec, + allow_ipv4: bool, + allow_ipv6: bool, + allow_onion: bool, } impl AddressManager { - pub fn new(config: Arc, db: Arc, tick_service: Arc) -> (Arc>, Option) { + pub fn new( + config: Arc, + db: Arc, + tick_service: Arc, + allow_ipv4: bool, + allow_ipv6: bool, + allow_onion: bool, + ) -> (Arc>, Option) { let mut instance = Self { banned_address_store: DbBannedAddressesStore::new(db.clone(), CachePolicy::Count(MAX_ADDRESSES)), address_store: address_store_with_cache::new(db), local_net_addresses: Vec::new(), config, + allow_ipv4, + allow_ipv6, + allow_onion, }; let extender = instance.init_local_addresses(tick_service); + if !instance.allow_onion { + instance.prune_onion_addresses(); + } + if !instance.allow_ipv4 { + instance.prune_ipv4_addresses(); + } + if !instance.allow_ipv6 { + instance.prune_ipv6_addresses(); + } + (Arc::new(Mutex::new(instance)), extender) } @@ -115,7 +138,7 @@ impl AddressManager { fn local_addresses(&self) -> impl Iterator + '_ { match self.config.externalip { // An external IP was passed, we will try to bind that if it's valid - Some(local_net_address) if local_net_address.ip.is_publicly_routable() => { + Some(local_net_address) if local_net_address.as_ip().map_or(false, |ip| ip.is_publicly_routable()) => { info!("External address is publicly routable {}", local_net_address); return Left(iter::once(local_net_address)); } @@ -132,10 +155,10 @@ impl AddressManager { // check whatever was passed as listen address (if routable) // otherwise(listen_address === 0.0.0.0) check all 
interfaces let listen_address = self.config.p2p_listen_address.normalize(self.config.default_p2p_port()); - if listen_address.ip.is_publicly_routable() { - info!("Publicly routable local address found: {}", listen_address.ip); + if listen_address.as_ip().map_or(false, |ip| ip.is_publicly_routable()) { + info!("Publicly routable local address found: {}", listen_address); Left(Left(iter::once(listen_address))) - } else if listen_address.ip.is_unspecified() { + } else if listen_address.as_ip().map_or(false, |ip| ip.is_unspecified()) { let network_interfaces = list_afinet_netifas(); let Ok(network_interfaces) = network_interfaces else { warn!("Error getting network interfaces: {:?}", network_interfaces); @@ -164,10 +187,10 @@ impl AddressManager { info!("[UPnP] Got external ip from gateway using upnp: {ip}"); let normalized_p2p_listen_address = self.config.p2p_listen_address.normalize(self.config.default_p2p_port()); - let local_addr = if normalized_p2p_listen_address.ip.is_unspecified() { + let local_addr = if normalized_p2p_listen_address.as_ip().map_or(false, |ip| ip.is_unspecified()) { SocketAddr::new(local_ip_address::local_ip().unwrap(), normalized_p2p_listen_address.port) } else { - normalized_p2p_listen_address.into() + normalized_p2p_listen_address.to_socket_addr().expect("expected listen address to be IP-based") }; // If an operator runs a node and specifies a non-standard local port, it implies that they also wish to use a non-standard public address. The variable 'desired_external_port' is set to the port number from the normalized peer-to-peer listening address. @@ -210,7 +233,7 @@ impl AddressManager { let port = gateway.add_any_port(igd::PortMappingProtocol::TCP, local_addr, UPNP_DEADLINE_SEC as u32, UPNP_REGISTRATION_NAME)?; info!("[UPnP] Added port mapping to random external port: {ip}:{port}"); - return Ok(Some((NetAddress { ip, port }, ExtendHelper { gateway, local_addr, external_port: port }))); + return Ok(Some((NetAddress::new(ip, port), ExtendHelper { gateway, local_addr, external_port: port }))); } match gateway.add_port( @@ -223,7 +246,7 @@ impl AddressManager { Ok(_) => { info!("[UPnP] Added port mapping to default external port: {ip}:{desired_external_port}"); Ok(Some(( - NetAddress { ip, port: desired_external_port }, + NetAddress::new(ip, desired_external_port), ExtendHelper { gateway, local_addr, external_port: desired_external_port }, ))) } @@ -235,7 +258,7 @@ impl AddressManager { UPNP_REGISTRATION_NAME, )?; info!("[UPnP] Added port mapping to random external port: {ip}:{port}"); - Ok(Some((NetAddress { ip, port }, ExtendHelper { gateway, local_addr, external_port: port }))) + Ok(Some((NetAddress::new(ip, port), ExtendHelper { gateway, local_addr, external_port: port }))) } Err(err) => Err(err.into()), } @@ -252,11 +275,24 @@ impl AddressManager { } pub fn add_address(&mut self, address: NetAddress) { - if address.ip.is_loopback() || address.ip.is_unspecified() { - debug!("[Address manager] skipping local address {}", address.ip); + if !self.allow_onion && address.as_onion().is_some() { + debug!("[Address manager] skipping onion address {} (onion disabled)", address); return; } - + if !self.allow_ipv4 && address.as_ip().map_or(false, |ip| ip.is_ipv4()) { + debug!("[Address manager] skipping IPv4 address {} (ipv4 disabled)", address); + return; + } + if !self.allow_ipv6 && address.as_ip().map_or(false, |ip| ip.is_ipv6()) { + debug!("[Address manager] skipping IPv6 address {} (ipv6 disabled)", address); + return; + } + if let Some(ip) = address.as_ip() { + if 
ip.is_loopback() || ip.is_unspecified() { + debug!("[Address manager] skipping local address {}", address); + return; + } + } if self.address_store.has(address) { return; } @@ -286,6 +322,29 @@ impl AddressManager { self.address_store.set(address, 0); } + fn prune_onion_addresses(&mut self) { + let to_remove: Vec<_> = self.address_store.iterate_addresses().filter(|addr| addr.as_onion().is_some()).collect(); + for addr in to_remove { + self.address_store.remove(addr); + } + } + + fn prune_ipv4_addresses(&mut self) { + let to_remove: Vec<_> = + self.address_store.iterate_addresses().filter(|addr| addr.as_ip().map_or(false, |ip| ip.is_ipv4())).collect(); + for addr in to_remove { + self.address_store.remove(addr); + } + } + + fn prune_ipv6_addresses(&mut self) { + let to_remove: Vec<_> = + self.address_store.iterate_addresses().filter(|addr| addr.as_ip().map_or(false, |ip| ip.is_ipv6())).collect(); + for addr in to_remove { + self.address_store.remove(addr); + } + } + pub fn iterate_addresses(&self) -> impl Iterator + '_ { self.address_store.iterate_addresses() } @@ -337,6 +396,7 @@ mod address_store_with_cache { }; use itertools::Itertools; + use kaspa_core::warn; use kaspa_database::prelude::{CachePolicy, DB}; use kaspa_utils::networking::PrefixBucket; use rand::{ @@ -362,8 +422,15 @@ mod address_store_with_cache { // We manage the cache ourselves on this level, so we disable the inner builtin cache let db_store = DbAddressesStore::new(db, CachePolicy::Empty); let mut addresses = HashMap::new(); - for (key, entry) in db_store.iterator().map(|res| res.unwrap()) { - addresses.insert(key, entry); + for result in db_store.iterator() { + match result { + Ok((key, entry)) => { + addresses.insert(key, entry); + } + Err(err) => { + warn!("Failed to load address entry from store: {err}"); + } + } } Self { db_store, addresses } @@ -548,7 +615,7 @@ mod address_store_with_cache { let db = create_temp_db!(ConnBuilder::default().with_files_limit(10)); let config = Config::new(SIMNET_PARAMS); - let (am, _) = AddressManager::new(Arc::new(config), db.1, Arc::new(TickService::default())); + let (am, _) = AddressManager::new(Arc::new(config), db.1, Arc::new(TickService::default()), false, false); let mut am_guard = am.lock(); diff --git a/components/addressmanager/src/stores/address_store.rs b/components/addressmanager/src/stores/address_store.rs index fe4ddb244b..ea2f0d8513 100644 --- a/components/addressmanager/src/stores/address_store.rs +++ b/components/addressmanager/src/stores/address_store.rs @@ -4,10 +4,13 @@ use kaspa_database::{ prelude::{CachedDbAccess, DirectDbWriter}, registry::DatabaseStorePrefixes, }; -use kaspa_utils::mem_size::MemSizeEstimator; +use kaspa_utils::{ + mem_size::MemSizeEstimator, + networking::{AddressKind, IpAddress, OnionAddress}, +}; use serde::{Deserialize, Serialize}; -use std::net::Ipv6Addr; -use std::{error::Error, fmt::Display, sync::Arc}; +use std::net::{IpAddr, Ipv6Addr}; +use std::{convert::TryInto, error::Error, fmt::Display, sync::Arc}; use super::AddressKey; use crate::NetAddress; @@ -32,9 +35,13 @@ pub trait AddressesStore: AddressesStoreReader { fn remove(&mut self, key: AddressKey) -> StoreResult<()>; } +const ADDRESS_KIND_TAG_LEN: usize = 1; const IPV6_LEN: usize = 16; +const ONION_LEN: usize = 35; +const ADDRESS_DATA_LEN: usize = ONION_LEN; const PORT_LEN: usize = 2; -pub const ADDRESS_KEY_SIZE: usize = IPV6_LEN + PORT_LEN; +const LEGACY_ADDRESS_KEY_SIZE: usize = IPV6_LEN + PORT_LEN; +pub const ADDRESS_KEY_SIZE: usize = ADDRESS_KIND_TAG_LEN + ADDRESS_DATA_LEN 
+ PORT_LEN; // TODO: This pattern is used a lot. Think of some macro or any other way to generalize it. #[derive(Eq, Hash, PartialEq, Debug, Copy, Clone)] @@ -49,26 +56,59 @@ impl AsRef<[u8]> for DbAddressKey { impl Display for DbAddressKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let ip_port: AddressKey = (*self).into(); - write!(f, "{}:{}", ip_port.0, ip_port.1) + match ip_port.kind() { + AddressKind::Ip(ip) => write!(f, "{}:{}", IpAddr::from(ip), ip_port.port()), + AddressKind::Onion(onion) => write!(f, "{}:{}", onion, ip_port.port()), + } } } impl From for DbAddressKey { fn from(key: AddressKey) -> Self { let mut bytes = [0; ADDRESS_KEY_SIZE]; - bytes[..IPV6_LEN].copy_from_slice(&key.0.octets()); - bytes[IPV6_LEN..].copy_from_slice(&key.1.to_le_bytes()); + match key.kind() { + AddressKind::Ip(ip) => { + bytes[0] = 0; + let ip_addr = match IpAddr::from(ip) { + IpAddr::V4(ipv4) => ipv4.to_ipv6_mapped(), + IpAddr::V6(ipv6) => ipv6, + }; + bytes[1..1 + IPV6_LEN].copy_from_slice(&ip_addr.octets()); + } + AddressKind::Onion(onion) => { + bytes[0] = 1; + let raw = onion.raw(); + bytes[1..1 + raw.len()].copy_from_slice(raw); + } + } + let port_bytes = key.port().to_le_bytes(); + bytes[ADDRESS_KEY_SIZE - PORT_LEN..].copy_from_slice(&port_bytes); Self(bytes) } } impl From for AddressKey { fn from(k: DbAddressKey) -> Self { - let ip_byte_array: [u8; 16] = k.0[..IPV6_LEN].try_into().unwrap(); - let ip: Ipv6Addr = ip_byte_array.into(); - let port_byte_array: [u8; 2] = k.0[IPV6_LEN..].try_into().unwrap(); - let port = u16::from_le_bytes(port_byte_array); - AddressKey::new(ip, port) + let tag = k.0[0]; + let port_offset = ADDRESS_KEY_SIZE - PORT_LEN; + let port_bytes: [u8; PORT_LEN] = k.0[port_offset..].try_into().unwrap(); + let port = u16::from_le_bytes(port_bytes); + match tag { + 0 => { + let mut ipv6_bytes = [0u8; IPV6_LEN]; + ipv6_bytes.copy_from_slice(&k.0[1..1 + IPV6_LEN]); + let ipv6 = Ipv6Addr::from(ipv6_bytes); + let ip = ipv6.to_ipv4().map_or(IpAddr::V6(ipv6), IpAddr::V4); + AddressKey::new(AddressKind::Ip(IpAddress::from(ip)), port) + } + 1 => { + let mut raw = [0u8; ONION_LEN]; + raw.copy_from_slice(&k.0[1..1 + ONION_LEN]); + let onion = OnionAddress::from_raw(raw); + AddressKey::new(AddressKind::Onion(onion), port) + } + other => panic!("invalid address key variant {}", other), + } } } @@ -85,13 +125,23 @@ impl DbAddressesStore { pub fn iterator(&self) -> impl Iterator>> + '_ { self.access.iterator().map(|iter_result| match iter_result { - Ok((key_bytes, connection_failed_count)) => match <[u8; ADDRESS_KEY_SIZE]>::try_from(&key_bytes[..]) { - Ok(address_key_slice) => { + Ok((key_bytes, connection_failed_count)) => match key_bytes.len() { + ADDRESS_KEY_SIZE => { + let address_key_slice: [u8; ADDRESS_KEY_SIZE] = key_bytes[..].try_into().expect("slice size checked"); let addr_key = DbAddressKey(address_key_slice); let address: AddressKey = addr_key.into(); Ok((address, connection_failed_count)) } - Err(e) => Err(e.into()), + LEGACY_ADDRESS_KEY_SIZE => { + let port_offset = LEGACY_ADDRESS_KEY_SIZE - PORT_LEN; + let ipv6_bytes: [u8; IPV6_LEN] = key_bytes[..IPV6_LEN].try_into().expect("slice size checked"); + let port_bytes: [u8; PORT_LEN] = key_bytes[port_offset..].try_into().expect("slice size checked"); + let ipv6 = Ipv6Addr::from(ipv6_bytes); + let ip = ipv6.to_ipv4().map_or(IpAddr::V6(ipv6), IpAddr::V4); + let address = AddressKey::new(AddressKind::Ip(IpAddress::from(ip)), u16::from_le_bytes(port_bytes)); + Ok((address, connection_failed_count)) + } + len 
=> Err(format!("invalid address key length {}", len).into()), }, Err(e) => Err(e), }) diff --git a/components/addressmanager/src/stores/mod.rs b/components/addressmanager/src/stores/mod.rs index 11721d2ce7..02b631e40b 100644 --- a/components/addressmanager/src/stores/mod.rs +++ b/components/addressmanager/src/stores/mod.rs @@ -1,34 +1,39 @@ -use std::net::{IpAddr, Ipv6Addr}; +use std::net::IpAddr; -pub use kaspa_utils::networking::NetAddress; +pub use kaspa_utils::networking::{AddressKind, NetAddress}; pub(super) mod address_store; pub(super) mod banned_address_store; #[derive(PartialEq, Eq, Hash, Clone, Copy)] -pub struct AddressKey(Ipv6Addr, u16); +pub struct AddressKey { + kind: AddressKind, + port: u16, +} impl AddressKey { - pub fn new(ip: Ipv6Addr, port: u16) -> Self { - Self(ip, port) + pub fn new(kind: AddressKind, port: u16) -> Self { + Self { kind, port } } pub fn is_ip(&self, ip: IpAddr) -> bool { - match ip { - IpAddr::V4(ip) => ip.to_ipv6_mapped() == self.0, - IpAddr::V6(ip) => ip == self.0, + match self.kind { + AddressKind::Ip(stored) => IpAddr::from(stored) == ip, + AddressKind::Onion(_) => false, } } + + pub fn kind(&self) -> AddressKind { + self.kind + } + + pub fn port(&self) -> u16 { + self.port + } } impl From for AddressKey { fn from(value: NetAddress) -> Self { - AddressKey::new( - match value.ip.0 { - IpAddr::V4(ip) => ip.to_ipv6_mapped(), - IpAddr::V6(ip) => ip, - }, - value.port, - ) + AddressKey::new(value.ip, value.port) } } diff --git a/components/connectionmanager/src/lib.rs b/components/connectionmanager/src/lib.rs index 2146ec62d1..1d989bb612 100644 --- a/components/connectionmanager/src/lib.rs +++ b/components/connectionmanager/src/lib.rs @@ -24,6 +24,39 @@ use tokio::{ time::{interval, MissedTickBehavior}, }; +#[derive(Clone, Copy, Debug)] +pub struct AllowedNetworks { + allow_ipv4: bool, + allow_ipv6: bool, + allow_onion: bool, +} + +impl AllowedNetworks { + pub fn new(allow_ipv4: bool, allow_ipv6: bool, allow_onion: bool) -> Self { + Self { allow_ipv4, allow_ipv6, allow_onion } + } + + pub fn allow_all() -> Self { + Self { allow_ipv4: true, allow_ipv6: true, allow_onion: true } + } + + pub fn is_allowed(&self, address: &NetAddress) -> bool { + if let Some(onion) = address.as_onion() { + if !self.allow_onion { + debug!("[Connection manager] skipping {} (onion not allowed)", onion); + } + self.allow_onion + } else if let Some(ip) = address.as_ip() { + match IpAddr::from(ip) { + IpAddr::V4(_) => self.allow_ipv4, + IpAddr::V6(_) => self.allow_ipv6, + } + } else { + true + } + } +} + pub struct ConnectionManager { p2p_adaptor: Arc, outbound_target: usize, @@ -31,9 +64,10 @@ pub struct ConnectionManager { dns_seeders: &'static [&'static str], default_port: u16, address_manager: Arc>, - connection_requests: TokioMutex>, + connection_requests: TokioMutex>, force_next_iteration: UnboundedSender<()>, shutdown_signal: SingleTrigger, + allowed_networks: AllowedNetworks, } #[derive(Clone, Debug)] @@ -57,6 +91,7 @@ impl ConnectionManager { dns_seeders: &'static [&'static str], default_port: u16, address_manager: Arc>, + allowed_networks: AllowedNetworks, ) -> Arc { let (tx, rx) = unbounded_channel::<()>(); let manager = Arc::new(Self { @@ -69,6 +104,7 @@ impl ConnectionManager { shutdown_signal: SingleTrigger::new(), dns_seeders, default_port, + allowed_networks, }); manager.clone().start_event_loop(rx); manager.force_next_iteration.send(()).unwrap(); @@ -96,14 +132,18 @@ impl ConnectionManager { async fn handle_event(self: Arc) { debug!("Starting connection loop 
iteration"); let peers = self.p2p_adaptor.active_peers(); - let peer_by_address: HashMap = peers.into_iter().map(|peer| (peer.net_address(), peer)).collect(); + let peer_by_address: HashMap = peers.into_iter().map(|peer| (peer.net_address(), peer)).collect(); self.handle_connection_requests(&peer_by_address).await; self.handle_outbound_connections(&peer_by_address).await; self.handle_inbound_connections(&peer_by_address).await; } - pub async fn add_connection_request(&self, address: SocketAddr, is_permanent: bool) { + pub async fn add_connection_request(&self, address: NetAddress, is_permanent: bool) { + if !self.allowed_networks.is_allowed(&address) { + debug!("Ignoring connection request {} due to network policy", address); + return; + } // If the request already exists, it resets the attempts count and overrides the `is_permanent` setting. self.connection_requests.lock().await.insert(address, ConnectionRequest::new(is_permanent)); self.force_next_iteration.send(()).unwrap(); // We force the next iteration of the connection loop. @@ -113,7 +153,7 @@ impl ConnectionManager { self.shutdown_signal.trigger.trigger() } - async fn handle_connection_requests(self: &Arc, peer_by_address: &HashMap) { + async fn handle_connection_requests(self: &Arc, peer_by_address: &HashMap) { let mut requests = self.connection_requests.lock().await; let mut new_requests = HashMap::with_capacity(requests.len()); for (address, request) in requests.iter() { @@ -127,6 +167,10 @@ impl ConnectionManager { if !is_connected && request.next_attempt <= SystemTime::now() { debug!("Connecting to peer request {}", address); + if !self.allowed_networks.is_allowed(&address) { + debug!("Skipping peer request {} due to network policy", address); + continue; + } match self.p2p_adaptor.connect_peer(address.to_string()).await { Err(err) => { debug!("Failed connecting to peer request: {}, {}", address, err); @@ -159,9 +203,9 @@ impl ConnectionManager { *requests = new_requests; } - async fn handle_outbound_connections(self: &Arc, peer_by_address: &HashMap) { + async fn handle_outbound_connections(self: &Arc, peer_by_address: &HashMap) { let active_outbound: HashSet = - peer_by_address.values().filter(|peer| peer.is_outbound()).map(|peer| peer.net_address().into()).collect(); + peer_by_address.values().filter(|peer| peer.is_outbound()).map(|peer| peer.net_address()).collect(); if active_outbound.len() >= self.outbound_target { return; } @@ -177,15 +221,18 @@ impl ConnectionManager { } let mut addrs_to_connect = Vec::with_capacity(missing_connections); let mut jobs = Vec::with_capacity(missing_connections); - for _ in 0..missing_connections { + while addrs_to_connect.len() < missing_connections { let Some(net_addr) = addr_iter.next() else { connecting = false; break; }; - let socket_addr = SocketAddr::new(net_addr.ip.into(), net_addr.port).to_string(); - debug!("Connecting to {}", &socket_addr); + if !self.allowed_networks.is_allowed(&net_addr) { + continue; + } + let target = net_addr.to_string(); + debug!("Connecting to {}", &target); addrs_to_connect.push(net_addr); - jobs.push(self.p2p_adaptor.connect_peer(socket_addr.clone())); + jobs.push(self.p2p_adaptor.connect_peer(target)); } if progressing && !jobs.is_empty() { @@ -226,7 +273,7 @@ impl ConnectionManager { } } - if missing_connections > 0 && !self.dns_seeders.is_empty() { + if self.allowed_networks.allow_ipv4 && missing_connections > 0 && !self.dns_seeders.is_empty() { if missing_connections > self.outbound_target / 2 { // If we are missing more than half of our target, 
query all in parallel. // This will always be the case on new node start-up and is the most resilient strategy in such a case. @@ -238,7 +285,7 @@ impl ConnectionManager { } } - async fn handle_inbound_connections(self: &Arc, peer_by_address: &HashMap) { + async fn handle_inbound_connections(self: &Arc, peer_by_address: &HashMap) { let active_inbound = peer_by_address.values().filter(|peer| !peer.is_outbound()).collect_vec(); let active_inbound_len = active_inbound.len(); if self.inbound_limit >= active_inbound_len { @@ -316,8 +363,10 @@ impl ConnectionManager { return; } for peer in self.p2p_adaptor.active_peers() { - if peer.net_address().ip() == ip { - self.p2p_adaptor.terminate(peer.key()).await; + if let Some(peer_ip) = peer.net_address().as_ip() { + if IpAddr::from(peer_ip) == ip { + self.p2p_adaptor.terminate(peer.key()).await; + } } } self.address_manager.lock().ban(ip.into()); @@ -330,11 +379,16 @@ impl ConnectionManager { /// Returns whether the given address is a permanent request. pub async fn is_permanent(&self, address: &SocketAddr) -> bool { - self.connection_requests.lock().await.contains_key(address) + let net_address: NetAddress = (*address).into(); + self.connection_requests.lock().await.contains_key(&net_address) } /// Returns whether the given IP has some permanent request. pub async fn ip_has_permanent_connection(&self, ip: IpAddr) -> bool { - self.connection_requests.lock().await.iter().any(|(address, request)| request.is_permanent && address.ip() == ip) + self.connection_requests + .lock() + .await + .iter() + .any(|(address, request)| request.is_permanent && address.as_ip().map_or(false, |addr_ip| IpAddr::from(addr_ip) == ip)) } } diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 4d1f43066b..2c94fd3eeb 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -17,12 +17,13 @@ crate-type = ["cdylib", "lib"] [dependencies] kaspa-alloc.workspace = true # This changes the global allocator for all of the next dependencies so should be kept first -kaspa-addresses.workspace = true -kaspa-addressmanager.workspace = true -kaspa-consensus-core.workspace = true -kaspa-consensus-notify.workspace = true -kaspa-consensus.workspace = true -kaspa-consensusmanager.workspace = true +kaspa-addresses.workspace = true +kaspa-addressmanager.workspace = true +kaspa-consensus-core.workspace = true +kaspa-consensus-notify.workspace = true +kaspa-consensus.workspace = true +kaspa-consensusmanager.workspace = true +kaspa-connectionmanager.workspace = true kaspa-core.workspace = true kaspa-database.workspace = true kaspa-grpc-server.workspace = true @@ -53,16 +54,18 @@ log.workspace = true num_cpus.workspace = true rand.workspace = true rayon.workspace = true -rocksdb.workspace = true -serde.workspace = true -tempfile.workspace = true -thiserror.workspace = true -tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } -workflow-log.workspace = true -serde_json.workspace = true - -toml = "0.8.10" -serde_with = "3.7.0" +rocksdb.workspace = true +serde.workspace = true +tempfile.workspace = true +thiserror.workspace = true +tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } +workflow-log.workspace = true +serde_json.workspace = true +tor-interface = { version = "0.6.0", features = ["legacy-tor-provider"] } +hex = "0.4" + +toml = "0.8.10" +serde_with = "3.7.0" [features] heap = ["dhat", "kaspa-alloc/heap"] diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index 3680bf4afa..3be88d4178 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs 
@@ -5,11 +5,11 @@ use kaspa_consensus_core::{ }; use kaspa_core::kaspad_env::version; use kaspa_notify::address::tracker::Tracker; -use kaspa_utils::networking::ContextualNetAddress; +use kaspa_utils::networking::{ContextualNetAddress, NetAddress}; use kaspa_wrpc_server::address::WrpcNetAddress; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; -use std::{ffi::OsString, fs}; +use std::{ffi::OsString, fmt, fs, net::SocketAddr, path::PathBuf, str::FromStr}; use toml::from_str; #[cfg(feature = "devnet-prealloc")] @@ -50,6 +50,25 @@ pub struct Args { pub add_peers: Vec, #[serde_as(as = "Option")] pub listen: Option, + #[serde_as(as = "Option")] + pub proxy: Option, + #[serde_as(as = "Vec")] + #[serde(default)] + pub proxy_net: Vec, + #[serde_as(as = "Option")] + pub tor_proxy: Option, + #[serde_as(as = "Option")] + pub tor_control: Option, + pub tor_password: Option, + pub tor_cookie: Option, + pub tor_bootstrap_timeout_sec: u64, + pub listen_onion: bool, + pub tor_onion_port: Option, + pub tor_onion_key: Option, + pub tor_only: bool, + #[serde_as(as = "Vec")] + #[serde(default)] + pub onlynet: Vec, #[serde(rename = "uacomment")] pub user_agent_comments: Vec, pub utxoindex: bool, @@ -125,6 +144,18 @@ impl Default for Args { connect_peers: vec![], add_peers: vec![], listen: None, + proxy: None, + proxy_net: vec![], + tor_proxy: None, + tor_control: None, + tor_password: None, + tor_cookie: None, + tor_bootstrap_timeout_sec: 60, + listen_onion: false, + tor_onion_port: None, + tor_onion_key: None, + tor_only: false, + onlynet: vec![], user_agent_comments: vec![], yes: false, perf_metrics: false, @@ -150,9 +181,49 @@ impl Default for Args { } impl Args { + pub fn proxy_settings(&self) -> ProxySettings { + let mut settings = ProxySettings::default(); + settings.default = self.proxy.clone(); + for rule in &self.proxy_net { + match rule.network { + ProxyNetwork::Ipv4 => settings.ipv4 = Some(rule.address.clone()), + ProxyNetwork::Ipv6 => settings.ipv6 = Some(rule.address.clone()), + ProxyNetwork::Onion => settings.onion = Some(rule.address.clone()), + } + } + if let Some(tor_specific) = self.tor_proxy.clone() { + settings.onion = Some(tor_specific); + } + settings + } + + pub fn allowed_networks(&self) -> AllowedNetworksChoice { + let mut allow_ipv4 = false; + let mut allow_ipv6 = false; + let mut allow_onion = false; + + if self.onlynet.is_empty() { + if self.tor_only { + allow_onion = true; + } else { + return AllowedNetworksChoice::All; + } + } else { + for net in &self.onlynet { + match net { + OnlyNet::Ipv4 => allow_ipv4 = true, + OnlyNet::Ipv6 => allow_ipv6 = true, + OnlyNet::Onion => allow_onion = true, + } + } + } + + AllowedNetworksChoice::Custom { allow_ipv4, allow_ipv6, allow_onion } + } + pub fn apply_to_config(&self, config: &mut Config) { config.utxoindex = self.utxoindex; - config.disable_upnp = self.disable_upnp; + config.disable_upnp = self.disable_upnp || self.tor_only; config.unsafe_rpc = self.unsafe_rpc; config.enable_unsynced_mining = self.enable_unsynced_mining; config.enable_mainnet_mining = self.enable_mainnet_mining; @@ -161,8 +232,19 @@ impl Args { config.enable_sanity_checks = true; config.user_agent_comments.clone_from(&self.user_agent_comments); config.block_template_cache_lifetime = self.block_template_cache_lifetime; - config.p2p_listen_address = self.listen.unwrap_or(ContextualNetAddress::unspecified()); - config.externalip = self.externalip.map(|v| v.normalize(config.default_p2p_port())); + let listen_addr = 
self.listen.unwrap_or_else(|| { + if self.tor_only { + ContextualNetAddress::loopback() + } else { + ContextualNetAddress::unspecified() + } + }); + config.p2p_listen_address = listen_addr; + if self.tor_only { + config.externalip = None; + } else { + config.externalip = self.externalip.map(|v| v.normalize(config.default_p2p_port())); + } config.ram_scale = self.ram_scale; config.retention_period_days = self.retention_period_days; @@ -283,6 +365,98 @@ pub fn cli() -> Command { .value_parser(clap::value_parser!(ContextualNetAddress)) .help("Add an interface:port to listen for connections (default all interfaces port: 16111, testnet: 16211)."), ) + .arg( + Arg::new("proxy") + .long("proxy") + .value_name("IP[:PORT]") + .require_equals(true) + .value_parser(clap::value_parser!(ContextualNetAddress)) + .help("Route outbound clearnet P2P connections through the provided SOCKS5 proxy (default port: 9050)."), + ) + .arg( + Arg::new("proxy-net") + .long("proxy-net") + .value_name("NETWORK=IP[:PORT]") + .require_equals(true) + .action(ArgAction::Append) + .value_parser(clap::value_parser!(ProxyRule)) + .help("Override the SOCKS5 proxy for a specific network (ipv4|ipv6|onion). May be provided multiple times."), + ) + .arg( + Arg::new("tor-proxy") + .long("tor-proxy") + .value_name("IP[:PORT]") + .require_equals(true) + .value_parser(clap::value_parser!(ContextualNetAddress)) + .help("Route outbound P2P connections through the provided SOCKS5 proxy (default port: 9050)."), + ) + .arg( + Arg::new("tor-control") + .long("tor-control") + .value_name("IP[:PORT]") + .require_equals(true) + .value_parser(clap::value_parser!(ContextualNetAddress)) + .help("Tor control host:port used for hidden-service management (default port: 9051)."), + ) + .arg( + Arg::new("tor-password") + .long("tor-password") + .value_name("PASSWORD") + .require_equals(true) + .value_parser(clap::value_parser!(String)) + .help("Authenticate to the Tor control port using the provided password."), + ) + .arg( + Arg::new("tor-cookie") + .long("tor-cookie") + .value_name("PATH") + .require_equals(true) + .value_parser(clap::value_parser!(PathBuf)) + .help("Authenticate to the Tor control port using the cookie file at PATH."), + ) + .arg( + Arg::new("tor-bootstrap-timeout-sec") + .long("tor-bootstrap-timeout-sec") + .require_equals(true) + .value_parser(clap::value_parser!(u64)) + .help("Seconds to wait for Tor bootstrap completion before aborting (default: 60)."), + ) + .arg( + Arg::new("listen-onion") + .long("listen-onion") + .action(ArgAction::SetTrue) + .help("Publish a Tor hidden service for the P2P listener (requires --tor-control)."), + ) + .arg( + Arg::new("tor-only") + .long("tor-only") + .action(ArgAction::SetTrue) + .help("Disable clearnet peers and operate exclusively over Tor."), + ) + .arg( + Arg::new("onlynet") + .long("onlynet") + .value_name("NETWORK") + .require_equals(true) + .action(ArgAction::Append) + .value_parser(clap::value_parser!(OnlyNet)) + .help("Limit connections to the specified network (ipv4|ipv6|onion). 
May be passed multiple times."), + ) + .arg( + Arg::new("tor-onion-port") + .long("tor-onion-port") + .require_equals(true) + .value_parser(clap::value_parser!(u16)) + .help("Virtual port to expose via the Tor hidden service (defaults to the P2P port)."), + ) + .arg( + Arg::new("tor-onion-key") + .long("tor-onion-key") + .value_name("PATH") + .require_equals(true) + .value_parser(clap::value_parser!(PathBuf)) + .help("Path to a persistent v3 onion private key (will be created if missing)."), + ) .arg( Arg::new("outpeers") .long("outpeers") @@ -443,6 +617,24 @@ impl Args { connect_peers: arg_match_many_unwrap_or::(&m, "connect-peers", defaults.connect_peers), add_peers: arg_match_many_unwrap_or::(&m, "add-peers", defaults.add_peers), listen: m.get_one::("listen").cloned().or(defaults.listen), + proxy: m.get_one::("proxy").cloned().or(defaults.proxy.clone()), + proxy_net: m + .get_many::("proxy-net") + .map(|values| values.cloned().collect()) + .unwrap_or_else(|| defaults.proxy_net.clone()), + tor_proxy: m.get_one::("tor-proxy").cloned().or(defaults.tor_proxy), + tor_control: m.get_one::("tor-control").cloned().or(defaults.tor_control), + tor_password: m.get_one::("tor-password").cloned().or(defaults.tor_password), + tor_cookie: m.get_one::("tor-cookie").cloned().or(defaults.tor_cookie), + tor_bootstrap_timeout_sec: arg_match_unwrap_or::(&m, "tor-bootstrap-timeout-sec", defaults.tor_bootstrap_timeout_sec), + listen_onion: arg_match_unwrap_or::(&m, "listen-onion", defaults.listen_onion), + tor_onion_port: m.get_one::("tor-onion-port").cloned().or(defaults.tor_onion_port), + tor_onion_key: m.get_one::("tor-onion-key").cloned().or(defaults.tor_onion_key), + tor_only: arg_match_unwrap_or::(&m, "tor-only", defaults.tor_only), + onlynet: m + .get_many::("onlynet") + .map(|values| values.cloned().collect()) + .unwrap_or_else(|| defaults.onlynet.clone()), outbound_target: arg_match_unwrap_or::(&m, "outpeers", defaults.outbound_target), inbound_limit: arg_match_unwrap_or::(&m, "maxinpeers", defaults.inbound_limit), rpc_max_clients: arg_match_unwrap_or::(&m, "rpcmaxclients", defaults.rpc_max_clients), @@ -583,3 +775,149 @@ fn arg_match_many_unwrap_or(m: &clap::ArgMatch -s, --service= Service command {install, remove, start, stop} --nogrpc Don't initialize the gRPC server */ +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum ProxyNetwork { + Ipv4, + Ipv6, + Onion, +} + +impl fmt::Display for ProxyNetwork { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let name = match self { + ProxyNetwork::Ipv4 => "ipv4", + ProxyNetwork::Ipv6 => "ipv6", + ProxyNetwork::Onion => "onion", + }; + f.write_str(name) + } +} + +impl FromStr for ProxyNetwork { + type Err = ProxyRuleParseError; + + fn from_str(s: &str) -> Result { + match s.to_ascii_lowercase().as_str() { + "ipv4" => Ok(ProxyNetwork::Ipv4), + "ipv6" => Ok(ProxyNetwork::Ipv6), + "onion" => Ok(ProxyNetwork::Onion), + other => Err(ProxyRuleParseError(format!("unknown proxy network '{other}'"))), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ProxyRule { + pub network: ProxyNetwork, + pub address: ContextualNetAddress, +} + +impl fmt::Display for ProxyRule { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}={}", self.network, self.address) + } +} + +impl FromStr for ProxyRule { + type Err = ProxyRuleParseError; + + fn from_str(s: &str) -> Result { + let (net, addr) = s.split_once('=').ok_or_else(|| ProxyRuleParseError("expected 
NETWORK=ADDRESS".into()))?; + let network = ProxyNetwork::from_str(net.trim())?; + let address = + ContextualNetAddress::from_str(addr.trim()).map_err(|err| ProxyRuleParseError(format!("invalid proxy address: {err}")))?; + Ok(Self { network, address }) + } +} + +#[derive(Debug, Clone)] +pub struct ProxyRuleParseError(String); + +impl fmt::Display for ProxyRuleParseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.0) + } +} + +impl std::error::Error for ProxyRuleParseError {} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum OnlyNet { + Ipv4, + Ipv6, + Onion, +} + +impl fmt::Display for OnlyNet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ProxyNetwork::from(*self).fmt(f) + } +} + +impl FromStr for OnlyNet { + type Err = ProxyRuleParseError; + + fn from_str(s: &str) -> Result { + ProxyNetwork::from_str(s).map(Into::into) + } +} + +impl From for OnlyNet { + fn from(value: ProxyNetwork) -> Self { + match value { + ProxyNetwork::Ipv4 => OnlyNet::Ipv4, + ProxyNetwork::Ipv6 => OnlyNet::Ipv6, + ProxyNetwork::Onion => OnlyNet::Onion, + } + } +} + +impl From for ProxyNetwork { + fn from(value: OnlyNet) -> Self { + match value { + OnlyNet::Ipv4 => ProxyNetwork::Ipv4, + OnlyNet::Ipv6 => ProxyNetwork::Ipv6, + OnlyNet::Onion => ProxyNetwork::Onion, + } + } +} + +#[derive(Debug, Clone, Default)] +pub struct ProxySettings { + pub default: Option, + pub ipv4: Option, + pub ipv6: Option, + pub onion: Option, +} + +#[derive(Debug, Clone, Copy, Default)] +pub struct ResolvedProxySettings { + pub default: Option, + pub ipv4: Option, + pub ipv6: Option, + pub onion: Option, +} + +#[derive(Debug, Clone, Copy)] +pub enum AllowedNetworksChoice { + All, + Custom { allow_ipv4: bool, allow_ipv6: bool, allow_onion: bool }, +} + +impl ProxySettings { + pub fn resolve(&self, default_port: u16) -> ResolvedProxySettings { + fn normalize(addr: &ContextualNetAddress, default_port: u16) -> SocketAddr { + let net_addr: NetAddress = addr.clone().normalize(default_port); + net_addr.to_socket_addr().expect("expected IP address") + } + + ResolvedProxySettings { + default: self.default.as_ref().map(|addr| normalize(addr, default_port)), + ipv4: self.ipv4.as_ref().map(|addr| normalize(addr, default_port)), + ipv6: self.ipv6.as_ref().map(|addr| normalize(addr, default_port)), + onion: self.onion.as_ref().map(|addr| normalize(addr, default_port)), + } + } +} diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 1d8e3439c9..cd29170546 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -1,6 +1,14 @@ -use std::{fs, path::PathBuf, process::exit, sync::Arc, time::Duration}; +use std::{ + fs, + net::SocketAddr, + path::{Path, PathBuf}, + process::exit, + sync::{Arc, Mutex}, + time::Duration, +}; use async_channel::unbounded; +use kaspa_consensus_core::network::NetworkId; use kaspa_consensus_core::{ config::ConfigBuilder, constants::TRANSIENT_BYTE_TO_MASS_FACTOR, @@ -8,8 +16,16 @@ use kaspa_consensus_core::{ mining_rules::MiningRules, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; -use kaspa_core::{core::Core, debug, info, trace}; -use kaspa_core::{kaspad_env::version, task::tick::TickService}; +use kaspa_core::{ + core::Core, + debug, info, + kaspad_env::version, + task::{ + service::{AsyncService, AsyncServiceFuture}, + tick::TickService, + }, + trace, warn, +}; use kaspa_database::{ prelude::{CachePolicy, DbWriter, DirectDbWriter}, 
registry::DatabaseStorePrefixes, @@ -21,11 +37,17 @@ use kaspa_p2p_mining::rule_engine::MiningRuleEngine; use kaspa_rpc_service::service::RpcCoreService; use kaspa_txscript::caches::TxScriptCacheCounters; use kaspa_utils::git; -use kaspa_utils::networking::ContextualNetAddress; use kaspa_utils::sysinfo::SystemInfo; +use kaspa_utils::{ + networking::{ContextualNetAddress, NetAddress}, + triggers::SingleTrigger, +}; use kaspa_utils_tower::counters::TowerConnectionCounters; +use tokio::sync::watch; +use crate::args::{AllowedNetworksChoice, Args}; use kaspa_addressmanager::AddressManager; +use kaspa_connectionmanager::AllowedNetworks; use kaspa_consensus::{ consensus::factory::Factory as ConsensusFactory, params::{OverrideParams, Params}, @@ -63,7 +85,11 @@ pub const MINIMUM_DAEMON_SOFT_FD_LIMIT: u64 = 4 * 1024; const MINIMUM_RETENTION_PERIOD_DAYS: f64 = 2.0; const ONE_GIGABYTE: f64 = 1_000_000_000.0; -use crate::args::Args; +use crate::tor_manager::{TorManager, TorManagerError, TorSystemConfig}; +use tor_interface::{ + tor_crypto::{Ed25519PrivateKey, V3OnionServiceId}, + tor_provider::TorEvent, +}; const DEFAULT_DATA_DIR: &str = "datadir"; const CONSENSUS_DB: &str = "consensus"; @@ -141,6 +167,256 @@ fn get_user_approval_or_exit(message: &str, approve: bool) { } } +fn validate_tor_args(args: &Args) { + if args.listen_onion && args.tor_control.is_none() { + println!("--listen-onion requires specifying --tor-control"); + exit(1); + } + + if args.tor_cookie.is_some() && args.tor_password.is_some() { + println!("Specify only one of --tor-cookie or --tor-password"); + exit(1); + } + + let proxy_settings = args.proxy_settings(); + let tor_proxy_present = proxy_settings.onion.is_some() || proxy_settings.default.is_some(); + if args.listen_onion && !tor_proxy_present { + println!("--listen-onion requires a Tor SOCKS proxy (--tor-proxy or --proxy)"); + exit(1); + } + + if args.tor_only && !tor_proxy_present { + println!("--tor-only requires a Tor SOCKS proxy (--tor-proxy or --proxy)"); + exit(1); + } +} + +fn compute_tor_system_config(args: &Args) -> Option { + use tor_interface::legacy_tor_client::TorAuth; + + let control = args.tor_control?; + let control_addr = contextual_to_socket(control, 9051); + + let proxy_settings = args.proxy_settings(); + let socks_source = proxy_settings + .onion + .clone() + .or_else(|| proxy_settings.default.clone()) + .unwrap_or_else(|| ContextualNetAddress::loopback().with_port(9050)); + let socks_addr = contextual_to_socket(socks_source, 9050); + + let auth = if let Some(cookie) = args.tor_cookie.as_ref() { + TorAuth::CookieFile(cookie.clone()) + } else if let Some(password) = args.tor_password.as_ref() { + TorAuth::Password(password.clone()) + } else { + TorAuth::Null + }; + + Some(TorSystemConfig { control_addr, socks_addr, auth, bootstrap_timeout: Duration::from_secs(args.tor_bootstrap_timeout_sec) }) +} + +fn contextual_to_socket(addr: ContextualNetAddress, default_port: u16) -> SocketAddr { + let net_addr: NetAddress = addr.normalize(default_port); + net_addr.to_socket_addr().expect("expected IP address") +} + +fn report_tor_init_error(err: &TorManagerError) { + match err { + TorManagerError::BootstrapTimeout(timeout) => { + println!( + "Tor bootstrap timed out after {:?}. 
Ensure the Tor daemon is running and reachable, or disable Tor integration.", + timeout + ); + } + _ => { + println!("Failed to initialise Tor integration: {err}"); + } + } +} + +fn setup_tor_onion_service( + tor_manager: &TorManager, + args: &Args, + app_dir: &Path, + network: &NetworkId, + p2p_addr: SocketAddr, +) -> Option { + let key_path = args.tor_onion_key.clone().unwrap_or_else(|| { + let mut path = app_dir.join(network.to_prefixed()).join("tor"); + path.push("p2p_onion.key"); + path + }); + + let key = if key_path.exists() { + match TorManager::load_onion_key(&key_path) { + Ok(key) => { + info!("Loaded Tor onion key from {}", key_path.display()); + info!("Back up this file to preserve your persistent onion address."); + key + } + Err(err) => { + warn!("Failed to load Tor onion key from {}: {err}", key_path.display()); + return None; + } + } + } else { + let key = Ed25519PrivateKey::generate(); + if let Err(err) = TorManager::save_onion_key(&key, &key_path) { + warn!("Failed to persist Tor onion key to {}: {err}", key_path.display()); + } else { + info!("Generated Tor onion key at {}", key_path.display()); + info!("Back up this file to preserve your persistent onion address."); + } + key + }; + + let virt_port = args.tor_onion_port.unwrap_or(p2p_addr.port()); + match tor_manager.publish_hidden_service(&key, virt_port, p2p_addr) { + Ok(service_id) => { + info!("Tor hidden service published at {}.onion:{}", service_id, virt_port); + info!("Onion service key stored at {}", key_path.display()); + Some(TorOnionServiceInfo { id: service_id, virt_port }) + } + Err(err) => { + warn!("Failed to publish Tor hidden service: {err}"); + None + } + } +} + +struct TorOnionServiceInfo { + id: V3OnionServiceId, + virt_port: u16, +} + +struct TorRuntimeService { + manager: Arc, + onion_service_id: Option, + shutdown: SingleTrigger, + state: Mutex, + bootstrap_tx: Option>, +} + +#[derive(Default)] +struct TorServiceState { + last_bootstrap_progress: Option, + bootstrap_complete_logged: bool, +} + +impl TorRuntimeService { + const IDENT: &'static str = "tor-service"; + const POLL_INTERVAL: Duration = Duration::from_secs(5); + + fn new(manager: Arc, onion_service_id: Option, bootstrap_tx: Option>) -> Self { + Self { + manager, + onion_service_id, + shutdown: SingleTrigger::default(), + state: Mutex::new(TorServiceState::default()), + bootstrap_tx, + } + } + + fn poll_events(&self) -> Result<(), TorManagerError> { + let events = self.manager.update()?; + if events.is_empty() { + return Ok(()); + } + + for event in events { + match event { + TorEvent::BootstrapStatus { progress, tag, summary } => { + let mut state = self.state.lock().unwrap(); + if state.last_bootstrap_progress != Some(progress) { + info!("Tor bootstrap {progress}% - {tag}: {summary}"); + state.last_bootstrap_progress = Some(progress); + if progress >= 100 { + if let Some(tx) = &self.bootstrap_tx { + let _ = tx.send(true); + } + } + } + } + TorEvent::BootstrapComplete => { + let mut state = self.state.lock().unwrap(); + if !state.bootstrap_complete_logged { + info!("Tor bootstrap complete"); + state.bootstrap_complete_logged = true; + if let Some(tx) = &self.bootstrap_tx { + let _ = tx.send(true); + } + } + } + TorEvent::LogReceived { line } => { + debug!("Tor: {line}"); + } + TorEvent::OnionServicePublished { service_id } => { + info!("Tor hidden service {service_id} is now published"); + } + TorEvent::ConnectComplete { .. } => { + trace!("Tor outbound connect completed"); + } + TorEvent::ConnectFailed { error, .. 
} => { + warn!("Tor outbound connect failed: {error}"); + } + } + } + Ok(()) + } +} + +impl AsyncService for TorRuntimeService { + fn ident(self: Arc) -> &'static str { + Self::IDENT + } + + fn start(self: Arc) -> AsyncServiceFuture { + Box::pin(async move { + trace!("{} starting event loop", Self::IDENT); + if let Some(tx) = &self.bootstrap_tx { + let _ = tx.send(true); + } + let shutdown = self.shutdown.listener.clone(); + tokio::pin!(shutdown); + let mut ticker = tokio::time::interval(Self::POLL_INTERVAL); + loop { + tokio::select! { + _ = &mut shutdown => break, + _ = ticker.tick() => { + if let Err(err) = self.poll_events() { + warn!("Tor event polling failed: {}", err); + } + } + } + } + trace!("{} stopping event loop", Self::IDENT); + Ok(()) + }) + } + + fn signal_exit(self: Arc) { + trace!("sending an exit signal to {}", Self::IDENT); + self.shutdown.trigger.trigger(); + } + + fn stop(self: Arc) -> AsyncServiceFuture { + Box::pin(async move { + if let Some(service_id) = self.onion_service_id.clone() { + let service_id_str = service_id.to_string(); + let manager = self.manager.clone(); + match tokio::task::spawn_blocking(move || manager.remove_hidden_service(&service_id)).await { + Ok(Ok(())) => info!("Tor hidden service {service_id_str} removed"), + Ok(Err(err)) => warn!("Failed to remove Tor hidden service {service_id_str}: {err}"), + Err(join_err) => warn!("Failed to remove Tor hidden service {service_id_str}: {join_err}"), + } + } + trace!("{} stopped", Self::IDENT); + Ok(()) + }) + } +} + /// Runtime configuration struct for the application. #[derive(Default)] pub struct Runtime { @@ -238,6 +514,24 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: println!("{}", err); exit(1); } + validate_tor_args(args); + + let tor_manager = if let Some(config) = compute_tor_system_config(args) { + info!("Tor integration enabled (control {} / SOCKS {})", config.control_addr, config.socks_addr); + match TorManager::connect_system(config) { + Ok(manager) => Some(Arc::new(manager)), + Err(err) => { + report_tor_init_error(&err); + if args.tor_only || args.listen_onion { + println!("Tor is required for --tor-only/--listen-onion. 
Exiting."); + exit(1); + } + None + } + } + } else { + None + }; let params = { let params: Params = network.into(); @@ -262,6 +556,52 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: } }; + let proxy_settings = args.proxy_settings(); + let resolved_proxies = proxy_settings.resolve(9050); + let mut default_proxy_addr = resolved_proxies.default; + let mut proxy_ipv4_addr = resolved_proxies.ipv4; + let mut proxy_ipv6_addr = resolved_proxies.ipv6; + let tor_proxy_override_addr = resolved_proxies.onion; + + let mut proxy_descriptions = Vec::new(); + if let Some(addr) = default_proxy_addr { + proxy_descriptions.push(format!("default={addr}")); + } + if let Some(addr) = proxy_ipv4_addr { + proxy_descriptions.push(format!("ipv4={addr}")); + } + if let Some(addr) = proxy_ipv6_addr { + proxy_descriptions.push(format!("ipv6={addr}")); + } + if let Some(addr) = tor_proxy_override_addr { + proxy_descriptions.push(format!("onion={addr}")); + } + if !proxy_descriptions.is_empty() { + info!("Configured SOCKS proxies: {}", proxy_descriptions.join(", ")); + } + let tor_proxy_from_manager = tor_manager.as_ref().map(|mgr| mgr.socks_addr()); + let effective_tor_proxy = tor_proxy_from_manager.or(tor_proxy_override_addr).or(default_proxy_addr); + if let Some(proxy) = effective_tor_proxy { + info!("Effective Tor proxy: {proxy}"); + if default_proxy_addr.is_none() { + default_proxy_addr = Some(proxy); + } + if proxy_ipv4_addr.is_none() { + proxy_ipv4_addr = default_proxy_addr; + } + if proxy_ipv6_addr.is_none() { + proxy_ipv6_addr = default_proxy_addr; + } + let mut effective_descriptions = Vec::new(); + if let Some(addr) = default_proxy_addr { effective_descriptions.push(format!("default={addr}")); } + if let Some(addr) = proxy_ipv4_addr { effective_descriptions.push(format!("ipv4={addr}")); } + if let Some(addr) = proxy_ipv6_addr { effective_descriptions.push(format!("ipv6={addr}")); } + if let Some(addr) = tor_proxy_override_addr { effective_descriptions.push(format!("onion={addr}")); } + if !effective_descriptions.is_empty() { + info!("Effective SOCKS routing: {}", effective_descriptions.join(", ")); + } + } + let config = Arc::new( ConfigBuilder::new(params).adjust_perf_params_to_consensus_params().apply_args(|config| args.apply_to_config(config)).build(), ); @@ -507,11 +847,12 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm let connect_peers = args.connect_peers.iter().map(|x| x.normalize(config.default_p2p_port())).collect::>(); let add_peers = args.add_peers.iter().map(|x| x.normalize(config.default_p2p_port())).collect(); - let p2p_server_addr = args.listen.unwrap_or(ContextualNetAddress::unspecified()).normalize(config.default_p2p_port()); + let default_listen = if args.tor_only { ContextualNetAddress::loopback() } else { ContextualNetAddress::unspecified() }; + let p2p_server_addr = args.listen.unwrap_or(default_listen).normalize(config.default_p2p_port()); // connect_peers means no DNS seeding and no outbound/inbound peers let outbound_target = if connect_peers.is_empty() { args.outbound_target } else { 0 }; let inbound_limit = if connect_peers.is_empty() { args.inbound_limit } else { 0 }; - let dns_seeders = if connect_peers.is_empty() && !args.disable_dns_seeding { config.dns_seeders } else { &[] }; + let dns_seeders = if connect_peers.is_empty() && !args.disable_dns_seeding && !args.tor_only { config.dns_seeders } else { &[] }; let grpc_server_addr = args.rpclisten.unwrap_or(ContextualNetAddress::loopback()).normalize(config.default_rpc_port()); @@ -581,7 +922,45 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm None }; - let (address_manager, port_mapping_extender_svc) = AddressManager::new(config.clone(), meta_db, tick_service.clone()); + let tor_enabled = effective_tor_proxy.is_some(); + let allowed_networks_choice = args.allowed_networks(); + let mut allow_ipv4 = true; + let mut allow_ipv6 = true; + let mut allow_onion = true; + match allowed_networks_choice { + AllowedNetworksChoice::All => {} + AllowedNetworksChoice::Custom { allow_ipv4: v4, allow_ipv6: v6, allow_onion: onion } => { + allow_ipv4 = v4; + allow_ipv6 = v6; + allow_onion = onion; + } + } + + if !tor_enabled { + allow_onion = false; + } + + let mut allowed_networks_log = Vec::new(); + if allow_ipv4 { + allowed_networks_log.push("ipv4"); + } + if allow_ipv6 { + allowed_networks_log.push("ipv6"); + } + if allow_onion { + allowed_networks_log.push("onion"); + } + + if allowed_networks_log.is_empty() { + warn!("No P2P networks enabled via --onlynet; the node will not establish peer connections"); + } else { + info!("Allowed peer networks: {}", allowed_networks_log.join(", ")); + if args.tor_only && args.onlynet.is_empty() { + info!("Tor-only mode active: restricting P2P connections to onion peers"); + } + } + let (address_manager, port_mapping_extender_svc) = + AddressManager::new(config.clone(), meta_db, tick_service.clone(), allow_ipv4, allow_ipv6, allow_onion); let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new_with_extended_config( config.target_time_per_block(), @@ -608,6 +987,31 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm hub.clone(), mining_rules, )); + let onion_service_info = if args.listen_onion { + match tor_manager.as_ref() { + Some(manager) => setup_tor_onion_service( + manager.as_ref(), + args, + app_dir.as_path(), + &network, + p2p_server_addr.to_socket_addr().expect("P2P server address must be IP when binding local listener"), + ), + None => { + warn!("--listen-onion requested but Tor initialisation failed; skipping hidden service setup"); + None + } + } + } else { + None + }; + + let (tor_bootstrap_tx, tor_bootstrap_rx) = if tor_manager.is_some() { + let (tx, rx) = watch::channel(false); + (Some(tx), Some(rx)) + } else { + (None, None) + }; + let flow_context = Arc::new(FlowContext::new( consensus_manager.clone(), address_manager, @@ -617,7 +1021,22 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm notification_root, hub.clone(), mining_rule_engine.clone(), + default_proxy_addr, + proxy_ipv4_addr, + proxy_ipv6_addr, + effective_tor_proxy, + args.tor_only, + onion_service_info.as_ref().map(|info| (info.id.clone(), info.virt_port)), + tor_bootstrap_rx, )); + let tor_async_service = tor_manager.as_ref().map(|manager| { + Arc::new(TorRuntimeService::new( + manager.clone(), + onion_service_info.as_ref().map(|info| info.id.clone()), + tor_bootstrap_tx.clone(), + )) + }); + let allowed_networks = AllowedNetworks::new(allow_ipv4, allow_ipv6, allow_onion); let p2p_service = Arc::new(P2pService::new( flow_context.clone(), connect_peers, @@ -628,6 +1047,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm dns_seeders, config.default_p2p_port(), p2p_tower_counters.clone(), + allowed_networks, )); let rpc_core_service = Arc::new(RpcCoreService::new( @@ -667,6 +1087,9 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let async_runtime = Arc::new(AsyncRuntime::new(args.async_threads)); async_runtime.register(tick_service); async_runtime.register(notify_service); + if let Some(tor_service) = tor_async_service { + async_runtime.register(tor_service); + } if let Some(index_service) = index_service { async_runtime.register(index_service) }; diff --git a/kaspad/src/lib.rs b/kaspad/src/lib.rs index aa5106724f..84ff564723 100644 --- a/kaspad/src/lib.rs +++ b/kaspad/src/lib.rs @@ -1,2 +1,3 @@ pub mod args; pub mod daemon; +pub mod tor_manager; diff --git a/kaspad/src/tor_manager.rs b/kaspad/src/tor_manager.rs new file mode 100644 index 0000000000..92294a24af --- /dev/null +++ b/kaspad/src/tor_manager.rs @@ -0,0 +1,254 @@ +use std::{ + fs, + io::{BufRead, BufReader, Write}, + net::{IpAddr, SocketAddr, TcpStream}, + path::PathBuf, + sync::Mutex, + thread, + time::{Duration, Instant}, +}; + +use hex::encode as hex_encode; +use kaspa_core::{info, warn}; +use thiserror::Error; +use tor_interface::{ + legacy_tor_client::{LegacyTorClient, LegacyTorClientConfig, TorAuth}, + tor_crypto::{Ed25519PrivateKey, V3OnionServiceId, X25519PublicKey}, + tor_provider::{OnionListener, TorEvent, TorProvider}, +}; + +/// Arguments required to connect to (or launch) a Tor daemon using the legacy c-tor backend. +#[derive(Clone, Debug)] +pub struct TorSystemConfig { + pub control_addr: SocketAddr, + pub socks_addr: SocketAddr, + pub auth: TorAuth, + pub bootstrap_timeout: Duration, +} + +/// Errors emitted by [`TorManager`]. 
+#[derive(Debug, Error)] +pub enum TorManagerError { + #[error("failed communicating with legacy tor daemon: {0}")] + Legacy(#[from] tor_interface::legacy_tor_client::Error), + #[error("tor provider error: {0}")] + Provider(#[from] tor_interface::tor_provider::Error), + #[error("tor crypto error: {0}")] + Crypto(#[from] tor_interface::tor_crypto::Error), + #[error("io error: {0}")] + Io(#[from] std::io::Error), + #[error("tor control protocol error: {0}")] + Control(String), + #[error("tor bootstrap timed out after {0:?}")] + BootstrapTimeout(Duration), +} + +/// Thin wrapper around `tor-interface`'s [`LegacyTorClient`] with some kaspad-specific conveniences. +/// +/// For now the manager only supports connecting to an already running tor daemon (matching Bitcoin Core's +/// system-tor integration). Future work will extend this to manage a bundled tor binary when one is not +/// present on the host. +pub struct TorManager { + client: Mutex, + socks_addr: SocketAddr, + control_addr: SocketAddr, + auth: TorAuth, +} + +impl TorManager { + /// Connect to an existing tor daemon, authenticate, and wait for bootstrap completion. + pub fn connect_system(config: TorSystemConfig) -> Result { + let TorSystemConfig { control_addr, socks_addr, auth, bootstrap_timeout } = config; + + let mut client = LegacyTorClient::new(LegacyTorClientConfig::SystemTor { + tor_socks_addr: socks_addr, + tor_control_addr: control_addr, + tor_control_auth: auth.clone(), + })?; + + let version = client.version(); + info!("Connected to Tor daemon version {}", version); + + client.bootstrap()?; + wait_for_bootstrap(&mut client, bootstrap_timeout)?; + + Ok(Self { client: Mutex::new(client), socks_addr, control_addr, auth }) + } + + /// Return the SOCKS listener address that should be supplied to outbound networking components. + pub fn socks_addr(&self) -> SocketAddr { + self.socks_addr + } + + pub fn control_addr(&self) -> SocketAddr { + self.control_addr + } + + pub fn remove_hidden_service(&self, service_id: &V3OnionServiceId) -> Result<(), TorManagerError> { + let mut stream = TcpStream::connect(self.control_addr)?; + stream.set_read_timeout(Some(Duration::from_secs(10)))?; + stream.set_write_timeout(Some(Duration::from_secs(10)))?; + let mut reader = BufReader::new(stream.try_clone()?); + + self.authenticate_control(&mut stream, &mut reader)?; + + let command = format!("DEL_ONION {}\r\n", service_id); + stream.write_all(command.as_bytes())?; + + loop { + let mut line = String::new(); + let bytes = reader.read_line(&mut line)?; + if bytes == 0 { + return Err(TorManagerError::Control("unexpected EOF while waiting for DEL_ONION response".into())); + } + let trimmed = line.trim(); + if trimmed.starts_with("250 ") { + break; + } else if trimmed.starts_with('5') { + return Err(TorManagerError::Control(trimmed.to_string())); + } + } + + Ok(()) + } + + /// Poll underlying tor events. Consumers should call this periodically to drain bootstrap/log events. + pub fn update(&self) -> Result, TorManagerError> { + Ok(self.client.lock().unwrap().update()?) + } + + /// Create a persistent onion service bound to the provided virtual port and local target. + /// + /// The manager expects the caller to take responsibility for running an application server on the + /// returned [`OnionListener`]. The listener is configured in non-blocking mode by the caller. 
+ pub fn create_onion_listener( + &mut self, + private_key: &Ed25519PrivateKey, + virt_port: u16, + authorized_clients: Option<&[X25519PublicKey]>, + ) -> Result { + let listener = self.client.lock().unwrap().listener(private_key, virt_port, authorized_clients)?; + Ok(listener) + } + + /// Convenience for deriving the v3 onion identifier from a private key. + pub fn onion_id_for(private_key: &Ed25519PrivateKey) -> V3OnionServiceId { + V3OnionServiceId::from_private_key(private_key) + } + + /// Load an Ed25519 onion service key from disk (c-tor key-blob format). + pub fn load_onion_key(path: &PathBuf) -> Result { + let blob = fs::read_to_string(path)?; + Ok(Ed25519PrivateKey::from_key_blob(blob.trim())?) + } + + pub fn save_onion_key(key: &Ed25519PrivateKey, path: &PathBuf) -> Result<(), TorManagerError> { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + fs::write(path, key.to_key_blob())?; + Ok(()) + } + + pub fn publish_hidden_service( + &self, + private_key: &Ed25519PrivateKey, + virt_port: u16, + target: SocketAddr, + ) -> Result { + let mut stream = TcpStream::connect(self.control_addr)?; + stream.set_read_timeout(Some(Duration::from_secs(10)))?; + stream.set_write_timeout(Some(Duration::from_secs(10)))?; + let mut reader = BufReader::new(stream.try_clone()?); + + self.authenticate_control(&mut stream, &mut reader)?; + + let key_blob = private_key.to_key_blob(); + let target_repr = format_socket_addr(target); + let command = format!("ADD_ONION {} Flags=Detach Port={},{}\r\n", key_blob, virt_port, target_repr); + stream.write_all(command.as_bytes())?; + + let mut service_id: Option = None; + loop { + let mut line = String::new(); + let bytes = reader.read_line(&mut line)?; + if bytes == 0 { + return Err(TorManagerError::Control("unexpected EOF while waiting for ADD_ONION response".into())); + } + let trimmed = line.trim(); + if trimmed.starts_with("250-ServiceID=") { + service_id = Some(trimmed["250-ServiceID=".len()..].to_string()); + } else if trimmed.starts_with("250 ") { + break; + } else if trimmed.starts_with('5') { + return Err(TorManagerError::Control(trimmed.to_string())); + } + } + + let service_id = service_id.ok_or_else(|| TorManagerError::Control("missing ServiceID in ADD_ONION reply".into()))?; + Ok(V3OnionServiceId::from_string(&service_id)?) + } + + fn authenticate_control(&self, stream: &mut TcpStream, reader: &mut BufReader) -> Result<(), TorManagerError> { + let command = match &self.auth { + TorAuth::Null => "AUTHENTICATE\r\n".to_string(), + TorAuth::Password(password) => format!("AUTHENTICATE \"{}\"\r\n", escape_control_password(password)), + TorAuth::CookieFile(path) => { + let cookie = fs::read(path)?; + format!("AUTHENTICATE {}\r\n", hex_encode(cookie)) + } + }; + + stream.write_all(command.as_bytes())?; + let mut line = String::new(); + reader.read_line(&mut line)?; + if !line.trim().starts_with("250") { + return Err(TorManagerError::Control(format!("authentication failed: {}", line.trim()))); + } + Ok(()) + } +} + +fn wait_for_bootstrap(client: &mut LegacyTorClient, timeout: Duration) -> Result<(), TorManagerError> { + let deadline = Instant::now() + timeout; + let mut last_progress: Option = None; + + loop { + for event in client.update()? 
{ + match event { + TorEvent::BootstrapStatus { progress, tag, summary } => { + if last_progress != Some(progress) { + info!("Tor bootstrap {progress}% - {tag}: {summary}"); + last_progress = Some(progress); + } + } + TorEvent::BootstrapComplete => { + info!("Tor bootstrap complete"); + return Ok(()); + } + TorEvent::LogReceived { line } => { + // Tor can be quite chatty; downgrade to debug once we have more granular logging controls. + warn!("tor: {}", line); + } + _ => {} + } + } + + if Instant::now() > deadline { + return Err(TorManagerError::BootstrapTimeout(timeout)); + } + + thread::sleep(Duration::from_millis(200)); + } +} + +fn escape_control_password(input: &str) -> String { + input.replace('\\', "\\\\").replace('"', "\\\"") +} + +fn format_socket_addr(addr: SocketAddr) -> String { + match addr.ip() { + IpAddr::V6(v6) => format!("[{}]:{}", v6, addr.port()), + IpAddr::V4(_) => format!("{}:{}", addr.ip(), addr.port()), + } +} diff --git a/protocol/flows/Cargo.toml b/protocol/flows/Cargo.toml index 0a00f1436e..779bb6b57c 100644 --- a/protocol/flows/Cargo.toml +++ b/protocol/flows/Cargo.toml @@ -37,6 +37,7 @@ tokio = { workspace = true, features = ["rt-multi-thread", "macros", "signal"] } tokio-stream = { workspace = true, features = ["net"] } uuid = { workspace = true, features = ["v4", "fast-rng"] } chrono.workspace = true +tor-interface = "0.6.0" [lints] workspace = true diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 9bcc9b3688..6be89a0f42 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -36,13 +36,16 @@ use kaspa_p2p_lib::{ convert::model::version::Version, make_message, pb::{kaspad_message::Payload, InvRelayBlockMessage}, - ConnectionInitializer, Hub, KaspadHandshake, PeerKey, PeerProperties, Router, + service_flags, ConnectionInitializer, Hub, KaspadHandshake, PeerKey, PeerProperties, Router, }; use kaspa_p2p_mining::rule_engine::MiningRuleEngine; -use kaspa_utils::iter::IterExtensions; -use kaspa_utils::networking::PeerId; +use kaspa_utils::{ + iter::IterExtensions, + networking::{NetAddress, OnionAddress, PeerId}, +}; use parking_lot::{Mutex, RwLock}; use std::collections::HashMap; +use std::net::SocketAddr; use std::time::Instant; use std::{collections::hash_map::Entry, fmt::Display}; use std::{ @@ -56,9 +59,10 @@ use std::{ }; use tokio::sync::{ mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, - RwLock as AsyncRwLock, + watch, RwLock as AsyncRwLock, }; use tokio_stream::{wrappers::UnboundedReceiverStream, StreamExt}; +use tor_interface::tor_crypto::V3OnionServiceId; use uuid::Uuid; /// The P2P protocol version. 
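[Editor's note — usage sketch, not part of the patch] A brief example of how a caller might drive the new `TorManager` end to end: connect to a system tor daemon, wait for bootstrap, reuse a persisted v3 onion key, publish the P2P port as a hidden service, and hand the SOCKS listener address on to the proxy configuration. The `TorSystemConfig` and `TorAuth` field shapes are inferred from the destructuring in `connect_system` and the match arms in `authenticate_control` above and may differ from the actual module; all addresses, ports and file paths below are illustrative assumptions only.

    use std::{net::SocketAddr, path::PathBuf, time::Duration};
    // Assumes the items from kaspad/src/tor_manager.rs (TorManager, TorManagerError,
    // TorSystemConfig, TorAuth) are in scope.

    fn bring_up_tor() -> Result<(), TorManagerError> {
        // Connect to an already running tor daemon (cookie auth assumed here) and
        // block until bootstrap completes or the timeout elapses.
        let config = TorSystemConfig {
            control_addr: "127.0.0.1:9051".parse::<SocketAddr>().unwrap(),
            socks_addr: "127.0.0.1:9050".parse::<SocketAddr>().unwrap(),
            auth: TorAuth::CookieFile(PathBuf::from("/run/tor/control.authcookie")),
            bootstrap_timeout: Duration::from_secs(120),
        };
        let manager = TorManager::connect_system(config)?;

        // Reuse a previously saved key (c-tor key-blob format) and expose the local
        // P2P listener as a v3 onion service on the same virtual port.
        let key_path = PathBuf::from("/var/lib/kaspad/onion.key");
        let key = TorManager::load_onion_key(&key_path)?;
        let target: SocketAddr = "127.0.0.1:16111".parse().unwrap();
        let service_id = manager.publish_hidden_service(&key, 16111, target)?;

        // The SOCKS address is what later feeds SocksProxyConfig for outbound dials.
        println!("serving {}.onion:16111 via SOCKS proxy {}", service_id, manager.socks_addr());
        Ok(())
    }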
@@ -241,6 +245,13 @@ pub struct FlowContextInner { // Mining rule engine mining_rule_engine: Arc, + proxy: Option, + proxy_ipv4: Option, + proxy_ipv6: Option, + tor_proxy: Option, + tor_only: bool, + onion_service: Option<(V3OnionServiceId, NetAddress)>, + tor_bootstrap_rx: Mutex>>, } #[derive(Clone)] @@ -315,10 +326,32 @@ impl FlowContext { notification_root: Arc, hub: Hub, mining_rule_engine: Arc, + proxy_default: Option, + proxy_ipv4: Option, + proxy_ipv6: Option, + tor_proxy: Option, + tor_only: bool, + onion_service: Option<(V3OnionServiceId, u16)>, + tor_bootstrap_rx: Option>, ) -> Self { let bps_upper_bound = config.bps().upper_bound() as usize; let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (bps_upper_bound as f64).log2().ceil() as u32; + let onion_service = onion_service.and_then(|(id, port)| { + let onion_host = format!("{}.onion", id); + match OnionAddress::try_from(onion_host.as_str()) { + Ok(onion) => Some((id, NetAddress::new_onion(onion, port))), + Err(err) => { + warn!("Failed to construct onion address {}: {}", onion_host, err); + None + } + } + }); + + if let Some((_, address)) = onion_service.as_ref() { + address_manager.lock().add_address(*address); + } + // The maximum amount of orphans allowed in the orphans pool. This number is an approximation // of how many orphans there can possibly be on average bounded by an upper bound. let max_orphans = @@ -345,6 +378,13 @@ impl FlowContext { max_orphans, config, mining_rule_engine, + proxy: proxy_default, + proxy_ipv4, + proxy_ipv6, + tor_proxy, + tor_only, + onion_service, + tor_bootstrap_rx: Mutex::new(tor_bootstrap_rx), }), } } @@ -357,6 +397,38 @@ impl FlowContext { self.orphan_resolution_range } + pub fn tor_proxy(&self) -> Option { + self.tor_proxy + } + + pub fn proxy(&self) -> Option { + self.proxy + } + + pub fn proxy_ipv4(&self) -> Option { + self.proxy_ipv4 + } + + pub fn proxy_ipv6(&self) -> Option { + self.proxy_ipv6 + } + + pub fn tor_only(&self) -> bool { + self.tor_only + } + + pub fn onion_service_id(&self) -> Option { + self.onion_service.as_ref().map(|(id, _)| id.clone()) + } + + pub fn onion_service_address(&self) -> Option { + self.onion_service.as_ref().map(|(_, addr)| *addr) + } + + pub fn tor_bootstrap_receiver(&self) -> Option> { + self.tor_bootstrap_rx.lock().clone() + } + pub fn max_orphans(&self) -> usize { self.max_orphans } @@ -751,7 +823,10 @@ impl ConnectionInitializer for FlowContext { // Build the local version message // Subnets are not currently supported + // NOTE: We currently avoid advertising onion addresses in the handshake since legacy peers + // do not understand them. When Tor-only, we intentionally fall back to `None`. 
let mut self_version_message = Version::new(local_address, self.node_id, network_name.clone(), None, PROTOCOL_VERSION); + self_version_message.services |= service_flags::ADDR_V2; self_version_message.add_user_agent(name(), version(), &self.config.user_agent_comments); // TODO: get number of live services // TODO: disable_relay_tx from config/cmd @@ -762,6 +837,8 @@ impl ConnectionInitializer for FlowContext { let time_offset = unix_now() as i64 - peer_version_message.timestamp; let peer_version: Version = peer_version_message.try_into()?; + let peer_services = peer_version.services; + let peer_supports_addrv2 = (peer_services & service_flags::ADDR_V2) != 0; router.set_identity(peer_version.id); // Avoid duplicate connections if self.hub.has_peer(router.key()) { @@ -800,11 +877,13 @@ impl ConnectionInitializer for FlowContext { // Build and register the peer properties let peer_properties = Arc::new(PeerProperties { user_agent: peer_version.user_agent.to_owned(), + services: peer_services, advertised_protocol_version: peer_version.protocol_version, protocol_version: applied_protocol_version, disable_relay_tx: peer_version.disable_relay_tx, subnetwork_id: peer_version.subnetwork_id.to_owned(), time_offset, + supports_addrv2: peer_supports_addrv2, }); router.set_properties(peer_properties); @@ -818,15 +897,29 @@ impl ConnectionInitializer for FlowContext { flow.launch(); } + let tor_active = self.tor_proxy.is_some() || self.onion_service.is_some() || self.tor_only; + if router.is_outbound() || peer_version.address.is_some() { let mut address_manager = self.address_manager.lock(); if router.is_outbound() { - address_manager.add_address(router.net_address().into()); + let net_addr = router.net_address(); + if net_addr.as_onion().is_some() && !tor_active { + debug!("Skipping outbound onion address {} (tor inactive)", net_addr); + } else { + address_manager.add_address(net_addr); + } } if let Some(peer_ip_address) = peer_version.address { - address_manager.add_address(peer_ip_address); + if peer_ip_address.as_onion().is_some() && !(tor_active && peer_supports_addrv2) { + debug!( + "Skipping peer-advertised onion address {} from {} (tor_active={}, addr_v2={})", + peer_ip_address, router, tor_active, peer_supports_addrv2 + ); + } else { + address_manager.add_address(peer_ip_address); + } } } diff --git a/protocol/flows/src/service.rs b/protocol/flows/src/service.rs index 1633e26db0..0a16de7ef8 100644 --- a/protocol/flows/src/service.rs +++ b/protocol/flows/src/service.rs @@ -1,12 +1,13 @@ use std::sync::Arc; use kaspa_addressmanager::NetAddress; -use kaspa_connectionmanager::ConnectionManager; +use kaspa_connectionmanager::{AllowedNetworks, ConnectionManager}; use kaspa_core::{ + info, task::service::{AsyncService, AsyncServiceFuture}, - trace, + trace, warn, }; -use kaspa_p2p_lib::Adaptor; +use kaspa_p2p_lib::{Adaptor, SocksProxyConfig}; use kaspa_utils::triggers::SingleTrigger; use kaspa_utils_tower::counters::TowerConnectionCounters; @@ -25,6 +26,7 @@ pub struct P2pService { default_port: u16, shutdown: SingleTrigger, counters: Arc, + allowed_networks: AllowedNetworks, } impl P2pService { @@ -38,6 +40,7 @@ impl P2pService { dns_seeders: &'static [&'static str], default_port: u16, counters: Arc, + allowed_networks: AllowedNetworks, ) -> Self { Self { flow_context, @@ -50,6 +53,7 @@ impl P2pService { dns_seeders, default_port, counters, + allowed_networks, } } } @@ -65,11 +69,27 @@ impl AsyncService for P2pService { // Prepare a shutdown signal receiver let shutdown_signal = 
self.shutdown.listener.clone(); + let default_proxy = self.flow_context.proxy(); + let ipv4_proxy = self.flow_context.proxy_ipv4(); + let ipv6_proxy = self.flow_context.proxy_ipv6(); + let tor_proxy = self.flow_context.tor_proxy(); + let socks_proxy = if default_proxy.is_some() || ipv4_proxy.is_some() || ipv6_proxy.is_some() || tor_proxy.is_some() { + Some(SocksProxyConfig { default: default_proxy, ipv4: ipv4_proxy, ipv6: ipv6_proxy, onion: tor_proxy }) + } else { + None + }; + let p2p_adaptor = if self.inbound_limit == 0 { - Adaptor::client_only(self.flow_context.hub().clone(), self.flow_context.clone(), self.counters.clone()) + Adaptor::client_only(self.flow_context.hub().clone(), self.flow_context.clone(), self.counters.clone(), socks_proxy) } else { - Adaptor::bidirectional(self.listen, self.flow_context.hub().clone(), self.flow_context.clone(), self.counters.clone()) - .unwrap() + Adaptor::bidirectional( + self.listen, + self.flow_context.hub().clone(), + self.flow_context.clone(), + self.counters.clone(), + socks_proxy, + ) + .unwrap() }; let connection_manager = ConnectionManager::new( p2p_adaptor.clone(), @@ -78,6 +98,7 @@ impl AsyncService for P2pService { self.dns_seeders, self.default_port, self.flow_context.address_manager.clone(), + self.allowed_networks, ); self.flow_context.set_connection_manager(connection_manager.clone()); @@ -85,8 +106,22 @@ impl AsyncService for P2pService { // Launch the service and wait for a shutdown signal Box::pin(async move { + if let Some(mut bootstrap_rx) = self.flow_context.tor_bootstrap_receiver() { + if !*bootstrap_rx.borrow() { + info!("P2P service waiting for Tor bootstrap to complete before enabling networking"); + } + while !*bootstrap_rx.borrow() { + if bootstrap_rx.changed().await.is_err() { + warn!("Tor bootstrap signal dropped before completion; continuing with P2P startup"); + break; + } + } + if *bootstrap_rx.borrow() { + trace!("Tor bootstrap complete; starting P2P networking"); + } + } for peer_address in self.connect_peers.iter().cloned().chain(self.add_peers.iter().cloned()) { - connection_manager.add_connection_request(peer_address.into(), true).await; + connection_manager.add_connection_request(peer_address, true).await; } // Keep the P2P server running until a service shutdown signal is received diff --git a/protocol/flows/src/v5/address.rs b/protocol/flows/src/v5/address.rs index dc7d212081..bc6f3775c9 100644 --- a/protocol/flows/src/v5/address.rs +++ b/protocol/flows/src/v5/address.rs @@ -1,5 +1,4 @@ use crate::{flow_context::FlowContext, flow_trait::Flow}; -use itertools::Itertools; use kaspa_addressmanager::NetAddress; use kaspa_p2p_lib::{ common::ProtocolError, @@ -7,7 +6,6 @@ use kaspa_p2p_lib::{ pb::{kaspad_message::Payload, AddressesMessage, RequestAddressesMessage}, IncomingRoute, Router, }; -use kaspa_utils::networking::IpAddress; use rand::seq::SliceRandom; use std::sync::Arc; @@ -18,6 +16,22 @@ const MAX_ADDRESSES_SEND: usize = 1000; /// If a peer exceeds this value we consider it a protocol error. 
const MAX_ADDRESSES_RECEIVE: usize = 2500; +fn allow_onion_addresses( + tor_proxy_configured: bool, + tor_only_mode: bool, + onion_service_active: bool, + peer_supports_addrv2: bool, +) -> bool { + (tor_proxy_configured || onion_service_active || tor_only_mode) && peer_supports_addrv2 +} + +fn collect_gossipable_addresses(addresses: I, allow_onion: bool) -> Vec +where + I: IntoIterator, +{ + addresses.into_iter().filter(|addr| allow_onion || addr.as_onion().is_none()).collect() +} + pub struct ReceiveAddressesFlow { ctx: FlowContext, router: Arc, @@ -49,13 +63,22 @@ impl ReceiveAddressesFlow { .await?; let msg = dequeue_with_timeout!(self.incoming_route, Payload::Addresses)?; - let address_list: Vec<(IpAddress, u16)> = msg.try_into()?; + let address_list: Vec = msg.try_into()?; if address_list.len() > MAX_ADDRESSES_RECEIVE { return Err(ProtocolError::OtherOwned(format!("address count {} exceeded {}", address_list.len(), MAX_ADDRESSES_RECEIVE))); } + let peer_properties = self.router.properties(); + let allow_onion = allow_onion_addresses( + self.ctx.tor_proxy().is_some(), + self.ctx.tor_only(), + self.ctx.onion_service_address().is_some(), + peer_properties.supports_addrv2, + ); + let filtered_addresses = collect_gossipable_addresses(address_list, allow_onion); + let mut amgr_lock = self.ctx.address_manager.lock(); - for (ip, port) in address_list { - amgr_lock.add_address(NetAddress::new(ip, port)) + for addr in filtered_addresses { + amgr_lock.add_address(addr) } Ok(()) @@ -87,12 +110,56 @@ impl SendAddressesFlow { async fn start_impl(&mut self) -> Result<(), ProtocolError> { loop { dequeue!(self.incoming_route, Payload::RequestAddresses)?; - let addresses = self.ctx.address_manager.lock().iterate_addresses().collect_vec(); - let address_list = addresses - .choose_multiple(&mut rand::thread_rng(), MAX_ADDRESSES_SEND) - .map(|addr| (addr.ip, addr.port).into()) - .collect(); + let peer_properties = self.router.properties(); + let allow_onion = allow_onion_addresses( + self.ctx.tor_proxy().is_some(), + self.ctx.tor_only(), + self.ctx.onion_service_address().is_some(), + peer_properties.supports_addrv2, + ); + let addresses = { + let manager = self.ctx.address_manager.lock(); + collect_gossipable_addresses(manager.iterate_addresses(), allow_onion) + }; + let address_list = + addresses.choose_multiple(&mut rand::thread_rng(), MAX_ADDRESSES_SEND).map(|addr| (*addr).into()).collect(); self.router.enqueue(make_message!(Payload::Addresses, AddressesMessage { address_list })).await?; } } } + +#[cfg(test)] +mod tests { + use super::*; + use kaspa_utils::networking::{IpAddress, OnionAddress}; + use std::net::Ipv4Addr; + + #[test] + fn allow_onion_addresses_requires_tor_and_peer_support() { + // No Tor configuration, even if the peer supports addrv2 we keep onions disabled. + assert!(!allow_onion_addresses(false, false, false, true)); + // Tor is configured but the peer does not signal addrv2 support. + assert!(!allow_onion_addresses(true, false, false, false)); + // Either tor proxy, tor-only mode or an onion service combined with addrv2 enables onions. + assert!(allow_onion_addresses(true, false, false, true)); + assert!(allow_onion_addresses(false, true, false, true)); + assert!(allow_onion_addresses(false, false, true, true)); + // Onion service without addrv2 support should still reject onions. 
+ assert!(!allow_onion_addresses(false, false, true, false)); + } + + #[test] + fn collect_gossipable_addresses_filters_when_not_allowed() { + let ipv4 = NetAddress::new(IpAddress::from(Ipv4Addr::LOCALHOST), 16110); + let onion = NetAddress::new_onion( + OnionAddress::try_from("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.onion").unwrap(), + 16110, + ); + + let filtered = collect_gossipable_addresses(vec![ipv4, onion], false); + assert_eq!(filtered, vec![ipv4]); + + let filtered = collect_gossipable_addresses(vec![ipv4, onion], true); + assert_eq!(filtered, vec![ipv4, onion]); + } +} diff --git a/protocol/p2p/Cargo.toml b/protocol/p2p/Cargo.toml index b03a941b07..7b098f64d6 100644 --- a/protocol/p2p/Cargo.toml +++ b/protocol/p2p/Cargo.toml @@ -45,6 +45,9 @@ tokio = { workspace = true, features = [ "rt-multi-thread", "macros", "signal" ] tokio-stream = { workspace = true, features = ["net"] } tonic = { workspace = true, features = ["tls", "gzip"] } uuid.workspace = true +tokio-socks = "0.5.2" +tower = "0.5.1" +hyper-util = { version = "0.1", features = ["tokio"] } [build-dependencies] tonic-build = { workspace = true, features = ["prost"] } diff --git a/protocol/p2p/proto/p2p.proto b/protocol/p2p/proto/p2p.proto index 149ad62792..00f406e725 100644 --- a/protocol/p2p/proto/p2p.proto +++ b/protocol/p2p/proto/p2p.proto @@ -14,7 +14,10 @@ message AddressesMessage{ message NetAddress{ int64 timestamp = 1; - bytes ip = 3; + oneof address { + bytes ip = 3; + string onion = 5; + } uint32 port = 4; } diff --git a/protocol/p2p/src/bin/client.rs b/protocol/p2p/src/bin/client.rs index b5cee2d38c..481c91c4ab 100644 --- a/protocol/p2p/src/bin/client.rs +++ b/protocol/p2p/src/bin/client.rs @@ -8,7 +8,7 @@ async fn main() { kaspa_core::log::init_logger(None, "debug"); // [0] - init p2p-adaptor let initializer = Arc::new(EchoFlowInitializer::new()); - let adaptor = kaspa_p2p_lib::Adaptor::client_only(kaspa_p2p_lib::Hub::new(), initializer, Default::default()); + let adaptor = kaspa_p2p_lib::Adaptor::client_only(kaspa_p2p_lib::Hub::new(), initializer, Default::default(), None); // [1] - connect 128 peers + flows let ip_port = String::from("[::1]:50051"); for i in 0..1 { diff --git a/protocol/p2p/src/bin/server.rs b/protocol/p2p/src/bin/server.rs index 9ce333911c..afd299024d 100644 --- a/protocol/p2p/src/bin/server.rs +++ b/protocol/p2p/src/bin/server.rs @@ -10,7 +10,8 @@ async fn main() { // [0] - init p2p-adaptor - server side let ip_port = NetAddress::from_str("[::1]:50051").unwrap(); let initializer = Arc::new(EchoFlowInitializer::new()); - let adaptor = kaspa_p2p_lib::Adaptor::bidirectional(ip_port, kaspa_p2p_lib::Hub::new(), initializer, Default::default()).unwrap(); + let adaptor = + kaspa_p2p_lib::Adaptor::bidirectional(ip_port, kaspa_p2p_lib::Hub::new(), initializer, Default::default(), None).unwrap(); // [1] - connect to a few peers let ip_port = String::from("[::1]:16111"); for i in 0..1 { diff --git a/protocol/p2p/src/convert/error.rs b/protocol/p2p/src/convert/error.rs index 14b2cec1a8..9f088a660f 100644 --- a/protocol/p2p/src/convert/error.rs +++ b/protocol/p2p/src/convert/error.rs @@ -21,8 +21,8 @@ pub enum ConversionError { #[error("Integer parsing error: {0}")] IntCastingError(#[from] std::num::TryFromIntError), - #[error(transparent)] - AddressParsingError(#[from] std::net::AddrParseError), + #[error(transparent)] + AddressParsingError(#[from] kaspa_utils::networking::NetAddressError), #[error(transparent)] IdentityError(#[from] uuid::Error), diff --git 
a/protocol/p2p/src/convert/messages.rs b/protocol/p2p/src/convert/messages.rs index 654eebd285..d1e0dcf5ee 100644 --- a/protocol/p2p/src/convert/messages.rs +++ b/protocol/p2p/src/convert/messages.rs @@ -13,7 +13,7 @@ use kaspa_consensus_core::{ tx::{TransactionId, TransactionOutpoint, UtxoEntry}, }; use kaspa_hashes::Hash; -use kaspa_utils::networking::{IpAddress, PeerId}; +use kaspa_utils::networking::{NetAddress, PeerId}; use std::{collections::HashMap, sync::Arc}; @@ -198,7 +198,7 @@ impl TryFrom for Vec { } } -impl TryFrom for Vec<(IpAddress, u16)> { +impl TryFrom for Vec { type Error = ConversionError; fn try_from(msg: protowire::AddressesMessage) -> Result { diff --git a/protocol/p2p/src/convert/net_address.rs b/protocol/p2p/src/convert/net_address.rs index c525300ef8..72e9b23157 100644 --- a/protocol/p2p/src/convert/net_address.rs +++ b/protocol/p2p/src/convert/net_address.rs @@ -1,73 +1,60 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use super::error::ConversionError; -use crate::pb as protowire; +use crate::pb::{self as protowire, net_address::Address as ProtoAddress}; -use itertools::Itertools; -use kaspa_utils::networking::{IpAddress, NetAddress}; +use kaspa_utils::networking::{AddressKind, IpAddress, NetAddress, OnionAddress}; // ---------------------------------------------------------------------------- // consensus_core to protowire // ---------------------------------------------------------------------------- -impl From<(IpAddress, u16)> for protowire::NetAddress { - fn from((ip, port): (IpAddress, u16)) -> Self { - Self { - timestamp: 0, // This field is not used anymore - ip: match ip.0 { - // We follow the IP encoding of golang's net.IP type - IpAddr::V4(ip) => ip.octets().to_vec(), - IpAddr::V6(ip) => ip.octets().to_vec(), - }, - port: port as u32, - } - } -} - impl From for protowire::NetAddress { fn from(item: NetAddress) -> Self { - (item.ip, item.port).into() + let address = match item.kind() { + AddressKind::Ip(ip) => Some(ProtoAddress::Ip(match IpAddr::from(ip) { + IpAddr::V4(v4) => v4.octets().to_vec(), + IpAddr::V6(v6) => v6.octets().to_vec(), + })), + AddressKind::Onion(onion) => Some(ProtoAddress::Onion(onion.to_string())), + }; + Self { timestamp: 0, address, port: item.port as u32 } } } // ---------------------------------------------------------------------------- // protowire to consensus_core // ---------------------------------------------------------------------------- - -impl TryFrom for (IpAddress, u16) { - type Error = ConversionError; - - fn try_from(addr: protowire::NetAddress) -> Result { - // We follow the IP encoding of golang's net.IP type - let ip: IpAddress = match addr.ip.len() { - 4 => Ok(Ipv4Addr::new(addr.ip[0], addr.ip[1], addr.ip[2], addr.ip[3]).into()), - 16 => { - let octets = addr - .ip - .chunks(size_of::()) - .map(|chunk| u16::from_be_bytes(chunk.try_into().expect("We already checked the number of bytes"))) - .collect_vec(); - let ipv6 = Ipv6Addr::from(<[u16; 8]>::try_from(octets).unwrap()); - Ok(ipv6.into()) - } - len => Err(ConversionError::IllegalIPLength(len)), - }?; - Ok((ip, addr.port.try_into()?)) - } -} - impl TryFrom for NetAddress { type Error = ConversionError; fn try_from(item: protowire::NetAddress) -> Result { - let (ip, port) = item.try_into()?; - Ok(NetAddress::new(ip, port)) + let port: u16 = item.port.try_into()?; + let address = item.address.ok_or(ConversionError::NoneValue)?; + match address { + ProtoAddress::Ip(bytes) => { + let ip = match bytes.len() { + 4 => IpAddress::from(Ipv4Addr::new(bytes[0], 
bytes[1], bytes[2], bytes[3])), + 16 => { + let mut arr = [0u8; 16]; + arr.copy_from_slice(&bytes); + IpAddress::from(Ipv6Addr::from(arr)) + } + len => return Err(ConversionError::IllegalIPLength(len)), + }; + Ok(NetAddress::new(ip, port)) + } + ProtoAddress::Onion(value) => { + let onion = OnionAddress::try_from(value.as_str())?; + Ok(NetAddress::new_onion(onion, port)) + } + } } } #[cfg(test)] mod tests { - use kaspa_utils::networking::IpAddress; + use kaspa_utils::networking::{NetAddress, OnionAddress}; use crate::pb; use std::{ @@ -77,14 +64,27 @@ mod tests { #[test] fn test_netaddress() { - let net_addr_ipv4 = pb::NetAddress { timestamp: 0, ip: hex::decode("6a0a8af0").unwrap(), port: 123 }; + let net_addr_ipv4 = + pb::NetAddress { timestamp: 0, address: Some(pb::net_address::Address::Ip(hex::decode("6a0a8af0").unwrap())), port: 123 }; let ipv4 = Ipv4Addr::from_str("106.10.138.240").unwrap().into(); - assert_eq!(<(IpAddress, u16)>::try_from(net_addr_ipv4.clone()).unwrap(), (ipv4, 123u16)); - assert_eq!(pb::NetAddress::from((ipv4, 123u16)), net_addr_ipv4); + assert_eq!(NetAddress::try_from(net_addr_ipv4.clone()).unwrap(), NetAddress::new(ipv4, 123u16)); + assert_eq!(pb::NetAddress::from(NetAddress::new(ipv4, 123u16)), net_addr_ipv4); - let net_addr_ipv6 = pb::NetAddress { timestamp: 0, ip: hex::decode("20010db885a3000000008a2e03707334").unwrap(), port: 456 }; + let net_addr_ipv6 = pb::NetAddress { + timestamp: 0, + address: Some(pb::net_address::Address::Ip(hex::decode("20010db885a3000000008a2e03707334").unwrap())), + port: 456, + }; let ipv6 = Ipv6Addr::from_str("2001:0db8:85a3:0000:0000:8a2e:0370:7334").unwrap().into(); - assert_eq!(<(IpAddress, u16)>::try_from(net_addr_ipv6.clone()).unwrap(), (ipv6, 456u16)); - assert_eq!(pb::NetAddress::from((ipv6, 456u16)), net_addr_ipv6); + assert_eq!(NetAddress::try_from(net_addr_ipv6.clone()).unwrap(), NetAddress::new(ipv6, 456u16)); + assert_eq!(pb::NetAddress::from(NetAddress::new(ipv6, 456u16)), net_addr_ipv6); + + let onion = NetAddress::new_onion( + OnionAddress::try_from("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.onion").unwrap(), + 9000, + ); + let proto = pb::NetAddress::from(onion); + assert!(matches!(proto.address, Some(pb::net_address::Address::Onion(_)))); + assert_eq!(NetAddress::try_from(proto).unwrap(), onion); } } diff --git a/protocol/p2p/src/core/adaptor.rs b/protocol/p2p/src/core/adaptor.rs index 4c3872702d..462e37d4d1 100644 --- a/protocol/p2p/src/core/adaptor.rs +++ b/protocol/p2p/src/core/adaptor.rs @@ -1,7 +1,10 @@ use crate::common::ProtocolError; use crate::core::hub::Hub; use crate::ConnectionError; -use crate::{core::connection_handler::ConnectionHandler, Router}; +use crate::{ + core::connection_handler::{ConnectionHandler, SocksProxyConfig}, + Router, +}; use kaspa_utils::networking::NetAddress; use kaspa_utils_tower::counters::TowerConnectionCounters; use std::ops::Deref; @@ -46,9 +49,14 @@ impl Adaptor { } /// Creates a P2P adaptor with only client-side support. 
Typical Kaspa nodes should use `Adaptor::bidirectional` - pub fn client_only(hub: Hub, initializer: Arc, counters: Arc) -> Arc { + pub fn client_only( + hub: Hub, + initializer: Arc, + counters: Arc, + socks_proxy: Option, + ) -> Arc { let (hub_sender, hub_receiver) = mpsc_channel(Self::hub_channel_size()); - let connection_handler = ConnectionHandler::new(hub_sender, initializer.clone(), counters); + let connection_handler = ConnectionHandler::new(hub_sender, initializer.clone(), counters, socks_proxy); let adaptor = Arc::new(Adaptor::new(None, connection_handler, hub)); adaptor.hub.clone().start_event_loop(hub_receiver, initializer); adaptor @@ -60,9 +68,10 @@ impl Adaptor { hub: Hub, initializer: Arc, counters: Arc, + socks_proxy: Option, ) -> Result, ConnectionError> { let (hub_sender, hub_receiver) = mpsc_channel(Self::hub_channel_size()); - let connection_handler = ConnectionHandler::new(hub_sender, initializer.clone(), counters); + let connection_handler = ConnectionHandler::new(hub_sender, initializer.clone(), counters, socks_proxy); let server_termination = connection_handler.serve(serve_address)?; let adaptor = Arc::new(Adaptor::new(Some(server_termination), connection_handler, hub)); adaptor.hub.clone().start_event_loop(hub_receiver, initializer); diff --git a/protocol/p2p/src/core/connection_handler.rs b/protocol/p2p/src/core/connection_handler.rs index 54d387043c..3f718f625a 100644 --- a/protocol/p2p/src/core/connection_handler.rs +++ b/protocol/p2p/src/core/connection_handler.rs @@ -5,29 +5,32 @@ use crate::pb::{ }; use crate::{ConnectionInitializer, Router}; use futures::FutureExt; +use hyper_util::rt::TokioIo; use kaspa_core::{debug, info}; -use kaspa_utils::networking::NetAddress; +use kaspa_utils::networking::{NetAddress, NetAddressError}; use kaspa_utils_tower::{ counters::TowerConnectionCounters, middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder}, }; -use std::net::ToSocketAddrs; +use rand::{distributions::Alphanumeric, Rng}; +use std::io; +use std::net::{IpAddr, SocketAddr}; use std::pin::Pin; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use thiserror::Error; use tokio::sync::mpsc::{channel as mpsc_channel, Sender as MpscSender}; use tokio::sync::oneshot::{channel as oneshot_channel, Sender as OneshotSender}; +use tokio_socks::tcp::socks5::Socks5Stream; use tokio_stream::wrappers::ReceiverStream; use tokio_stream::StreamExt; -use tonic::transport::{Error as TonicError, Server as TonicServer}; +use tonic::transport::{Error as TonicError, Server as TonicServer, Uri}; use tonic::{Request, Response, Status as TonicStatus, Streaming}; +use tower::service_fn; #[derive(Error, Debug)] pub enum ConnectionError { - #[error("missing socket address")] - NoAddress, - #[error("{0}")] IoError(#[from] std::io::Error), @@ -39,6 +42,9 @@ pub enum ConnectionError { #[error("{0}")] ProtocolError(#[from] ProtocolError), + + #[error(transparent)] + AddressParsingError(#[from] NetAddressError), } /// Maximum P2P decoded gRPC message size to send and receive @@ -51,6 +57,15 @@ pub struct ConnectionHandler { hub_sender: MpscSender, initializer: Arc, counters: Arc, + socks_proxy: Option, +} + +#[derive(Clone, Copy, Default)] +pub struct SocksProxyConfig { + pub default: Option, + pub ipv4: Option, + pub ipv6: Option, + pub onion: Option, } impl ConnectionHandler { @@ -58,8 +73,9 @@ impl ConnectionHandler { hub_sender: MpscSender, initializer: Arc, counters: Arc, + socks_proxy: Option, ) -> Self { - Self { hub_sender, 
initializer, counters } + Self { hub_sender, initializer, counters, socks_proxy } } /// Launches a P2P server listener loop @@ -70,6 +86,7 @@ impl ConnectionHandler { let bytes_tx = self.counters.bytes_tx.clone(); let bytes_rx = self.counters.bytes_rx.clone(); + let serve_socket = serve_address.to_socket_addr().expect("server must bind to an IP address"); tokio::spawn(async move { let proto_server = ProtoP2pServer::new(connection_handler) @@ -82,7 +99,7 @@ impl ConnectionHandler { .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()).boxed_unsync())) .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()))) .add_service(proto_server) - .serve_with_shutdown(serve_address.into(), termination_receiver.map(drop)) + .serve_with_shutdown(serve_socket, termination_receiver.map(drop)) .await; match serve_result { @@ -95,17 +112,44 @@ impl ConnectionHandler { /// Connect to a new peer pub(crate) async fn connect(&self, peer_address: String) -> Result, ConnectionError> { - let Some(socket_address) = peer_address.to_socket_addrs()?.next() else { - return Err(ConnectionError::NoAddress); - }; + let peer_net_address = NetAddress::from_str(&peer_address)?; let peer_address = format!("http://{}", peer_address); // Add scheme prefix as required by Tonic - let channel = tonic::transport::Endpoint::new(peer_address)? + let endpoint = tonic::transport::Endpoint::new(peer_address)? .timeout(Duration::from_millis(Self::communication_timeout())) .connect_timeout(Duration::from_millis(Self::connect_timeout())) - .tcp_keepalive(Some(Duration::from_millis(Self::keep_alive()))) - .connect() - .await?; + .tcp_keepalive(Some(Duration::from_millis(Self::keep_alive()))); + + let channel = if let Some(proxy_addr) = self.socks_proxy.and_then(|cfg| { + if peer_net_address.as_onion().is_some() { + cfg.onion.or(cfg.default) + } else if let Some(ip) = peer_net_address.as_ip() { + match IpAddr::from(ip) { + IpAddr::V4(_) => cfg.ipv4.or(cfg.default), + IpAddr::V6(_) => cfg.ipv6.or(cfg.default), + } + } else { + cfg.default + } + }) { + let connector = service_fn(move |uri: Uri| { + let proxy_addr = proxy_addr; + async move { + let host = + uri.host().ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "missing host in URI"))?.to_string(); + let port = uri.port_u16().unwrap_or(80); + let target = format!("{}:{}", host, port); + let (username, password) = generate_socks_credentials(); + let stream = Socks5Stream::connect_with_password(proxy_addr, target, &username, &password) + .await + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + Ok::<_, io::Error>(TokioIo::new(stream.into_inner())) + } + }); + endpoint.connect_with_connector(connector).await? + } else { + endpoint.connect().await? 
+ }; let channel = ServiceBuilder::new() .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, self.counters.bytes_rx.clone()))) @@ -120,7 +164,7 @@ impl ConnectionHandler { let (outgoing_route, outgoing_receiver) = mpsc_channel(Self::outgoing_network_channel_size()); let incoming_stream = client.message_stream(ReceiverStream::new(outgoing_receiver)).await?.into_inner(); - let router = Router::new(socket_address, true, self.hub_sender.clone(), incoming_stream, outgoing_route).await; + let router = Router::new(peer_net_address, true, self.hub_sender.clone(), incoming_stream, outgoing_route).await; // For outbound peers, we perform the initialization as part of the connect logic match self.initializer.initialize_connection(router.clone()).await { @@ -194,6 +238,14 @@ impl ConnectionHandler { } } +fn generate_socks_credentials() -> (String, String) { + const USERNAME_PREFIX: &str = "kaspa"; + let mut rng = rand::thread_rng(); + let suffix: String = (&mut rng).sample_iter(&Alphanumeric).take(16).map(char::from).collect(); + let password: String = (&mut rng).sample_iter(&Alphanumeric).take(32).map(char::from).collect(); + (format!("{USERNAME_PREFIX}-{suffix}"), password) +} + #[tonic::async_trait] impl ProtoP2p for ConnectionHandler { type MessageStreamStream = Pin> + Send + 'static>>; @@ -212,7 +264,7 @@ impl ProtoP2p for ConnectionHandler { let incoming_stream = request.into_inner(); // Build the router object - let router = Router::new(remote_address, false, self.hub_sender.clone(), incoming_stream, outgoing_route).await; + let router = Router::new(remote_address.into(), false, self.hub_sender.clone(), incoming_stream, outgoing_route).await; // Notify the central Hub about the new peer self.hub_sender.send(HubEvent::NewPeer(router)).await.expect("hub receiver should never drop before senders"); diff --git a/protocol/p2p/src/core/peer.rs b/protocol/p2p/src/core/peer.rs index 90b52e9ee3..dcb101d4a3 100644 --- a/protocol/p2p/src/core/peer.rs +++ b/protocol/p2p/src/core/peer.rs @@ -1,22 +1,23 @@ use kaspa_consensus_core::subnets::SubnetworkId; -use kaspa_utils::networking::{IpAddress, PeerId}; -use std::{fmt::Display, net::SocketAddr, sync::Arc, time::Instant}; +use kaspa_utils::networking::{AddressKind, NetAddress, PeerId}; +use std::{fmt::Display, sync::Arc, time::Instant}; #[derive(Debug, Clone, Default)] pub struct PeerProperties { pub user_agent: String, - // TODO: add services + pub services: u64, pub advertised_protocol_version: u32, pub protocol_version: u32, pub disable_relay_tx: bool, pub subnetwork_id: Option, pub time_offset: i64, + pub supports_addrv2: bool, } #[derive(Debug)] pub struct Peer { identity: PeerId, - net_address: SocketAddr, + net_address: NetAddress, is_outbound: bool, connection_started: Instant, properties: Arc, @@ -26,7 +27,7 @@ pub struct Peer { impl Peer { pub fn new( identity: PeerId, - net_address: SocketAddr, + net_address: NetAddress, is_outbound: bool, connection_started: Instant, properties: Arc, @@ -41,7 +42,7 @@ impl Peer { } /// The socket address of this peer - pub fn net_address(&self) -> SocketAddr { + pub fn net_address(&self) -> NetAddress { self.net_address } @@ -70,23 +71,23 @@ impl Peer { #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct PeerKey { identity: PeerId, - ip: IpAddress, + address: AddressKind, } impl PeerKey { - pub fn new(identity: PeerId, ip: IpAddress) -> Self { - Self { identity, ip } + pub fn new(identity: PeerId, address: AddressKind) -> Self { + Self { identity, address } } } impl From<&Peer> for 
PeerKey { fn from(value: &Peer) -> Self { - Self::new(value.identity, value.net_address.ip().into()) + Self::new(value.identity, value.net_address.kind()) } } impl Display for PeerKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}+{}", self.identity, self.ip) + write!(f, "{}+{}", self.identity, self.address) } } diff --git a/protocol/p2p/src/core/router.rs b/protocol/p2p/src/core/router.rs index 55caf26c43..c4bc17c42a 100644 --- a/protocol/p2p/src/core/router.rs +++ b/protocol/p2p/src/core/router.rs @@ -4,11 +4,10 @@ use crate::pb::{kaspad_message::Payload as KaspadMessagePayload, KaspadMessage}; use crate::{common::ProtocolError, KaspadMessagePayloadType}; use crate::{make_message, Peer}; use kaspa_core::{debug, error, info, trace, warn}; -use kaspa_utils::networking::PeerId; +use kaspa_utils::networking::{NetAddress, PeerId}; use parking_lot::{Mutex, RwLock}; use seqlock::SeqLock; use std::fmt::{Debug, Display}; -use std::net::SocketAddr; use std::ops::{Deref, DerefMut}; use std::sync::atomic::{AtomicU32, Ordering}; use std::time::Instant; @@ -117,8 +116,8 @@ pub struct Router { /// Internal identity of this peer identity: SeqLock, - /// The socket address of this peer - net_address: SocketAddr, + /// The advertised network address of this peer + net_address: NetAddress, /// Indicates whether this connection is an outbound connection is_outbound: bool, @@ -149,7 +148,7 @@ impl Display for Router { impl From<&Router> for PeerKey { fn from(value: &Router) -> Self { - Self::new(value.identity.read(), value.net_address.ip().into()) + Self::new(value.identity.read(), value.net_address.kind()) } } @@ -174,7 +173,7 @@ fn message_summary(msg: &KaspadMessage) -> impl Debug { impl Router { pub(crate) async fn new( - net_address: SocketAddr, + net_address: NetAddress, is_outbound: bool, hub_sender: MpscSender, mut incoming_stream: Streaming, @@ -255,8 +254,8 @@ impl Router { *self.identity.lock_write() = identity; } - /// The socket address of this peer - pub fn net_address(&self) -> SocketAddr { + /// The advertised network address of this peer + pub fn net_address(&self) -> NetAddress { self.net_address } diff --git a/protocol/p2p/src/echo.rs b/protocol/p2p/src/echo.rs index 07a26aac6b..0c1efd7c6c 100644 --- a/protocol/p2p/src/echo.rs +++ b/protocol/p2p/src/echo.rs @@ -3,7 +3,7 @@ use crate::{ core::adaptor::ConnectionInitializer, handshake::KaspadHandshake, pb::{self, VersionMessage}, - IncomingRoute, KaspadMessagePayloadType, Router, + service_flags, IncomingRoute, KaspadMessagePayloadType, Router, }; use kaspa_core::{debug, time::unix_now, trace, warn}; use std::sync::Arc; @@ -93,7 +93,7 @@ pub struct EchoFlowInitializer {} fn build_dummy_version_message() -> VersionMessage { pb::VersionMessage { protocol_version: 5, - services: 0, + services: service_flags::ADDR_V2, timestamp: unix_now() as i64, address: None, id: Vec::from(Uuid::new_v4().as_bytes()), @@ -157,10 +157,12 @@ mod tests { kaspa_core::log::try_init_logger("debug"); let address1 = NetAddress::from_str("[::1]:50053").unwrap(); - let adaptor1 = Adaptor::bidirectional(address1, Hub::new(), Arc::new(EchoFlowInitializer::new()), Default::default()).unwrap(); + let adaptor1 = + Adaptor::bidirectional(address1, Hub::new(), Arc::new(EchoFlowInitializer::new()), Default::default(), None).unwrap(); let address2 = NetAddress::from_str("[::1]:50054").unwrap(); - let adaptor2 = Adaptor::bidirectional(address2, Hub::new(), Arc::new(EchoFlowInitializer::new()), Default::default()).unwrap(); + let adaptor2 = 
+ Adaptor::bidirectional(address2, Hub::new(), Arc::new(EchoFlowInitializer::new()), Default::default(), None).unwrap(); // Initiate the connection from `adaptor1` (outbound) to `adaptor2` (inbound) let peer2_id = adaptor1 diff --git a/protocol/p2p/src/flags.rs b/protocol/p2p/src/flags.rs new file mode 100644 index 0000000000..b648bb179a --- /dev/null +++ b/protocol/p2p/src/flags.rs @@ -0,0 +1,12 @@ +/// Service flags advertised in the P2P version handshake. +/// +/// The values mirror the feature bits used in Bitcoin Core where possible in +/// order to ease interoperability and future parity work. +pub mod service { + /// BIP155 (ADDRv2) support flag. + /// + /// Bitcoin Core uses bit 23 (`1 << 23`) to signal support for the ADDRv2 + /// message format and associated address gossip semantics. We keep the same + /// value so downstream tooling can reason about Kaspa nodes in the same way. + pub const ADDR_V2: u64 = 1 << 23; +} diff --git a/protocol/p2p/src/lib.rs b/protocol/p2p/src/lib.rs index 6fde0d48fb..838a987d48 100644 --- a/protocol/p2p/src/lib.rs +++ b/protocol/p2p/src/lib.rs @@ -6,14 +6,17 @@ pub mod pb { pub mod common; pub mod convert; pub mod echo; +pub mod flags; mod core; mod handshake; pub use crate::core::adaptor::{Adaptor, ConnectionInitializer}; pub use crate::core::connection_handler::ConnectionError; +pub use crate::core::connection_handler::SocksProxyConfig; pub use crate::core::hub::Hub; pub use crate::core::payload_type::KaspadMessagePayloadType; pub use crate::core::peer::{Peer, PeerKey, PeerProperties}; pub use crate::core::router::{IncomingRoute, Router, SharedIncomingRoute, BLANK_ROUTE_ID}; +pub use flags::service as service_flags; pub use handshake::KaspadHandshake; diff --git a/rpc/core/src/error.rs b/rpc/core/src/error.rs index 54763c71ba..df9e59ef4a 100644 --- a/rpc/core/src/error.rs +++ b/rpc/core/src/error.rs @@ -3,8 +3,8 @@ //! use kaspa_consensus_core::{subnets::SubnetworkConversionError, tx::TransactionId, utxo::utxo_inquirer::UtxoInquirerError}; -use kaspa_utils::networking::IpAddress; -use std::{net::AddrParseError, num::TryFromIntError}; +use kaspa_utils::networking::{IpAddress, NetAddressError}; +use std::num::TryFromIntError; use thiserror::Error; use workflow_core::channel::ChannelError; @@ -27,8 +27,8 @@ pub enum RpcError { #[error("Integer parsing error: {0}")] ParseIntError(#[from] std::num::ParseIntError), - #[error("Ip address parsing error {0}")] - ParseIpAddressError(#[from] AddrParseError), + #[error("Network address parsing error {0}")] + ParseIpAddressError(#[from] NetAddressError), #[error("Wrong rpc api version format")] RpcApiVersionFormatError, diff --git a/rpc/grpc/core/src/convert/message.rs b/rpc/grpc/core/src/convert/message.rs index 254710b5ff..fec60b4d42 100644 --- a/rpc/grpc/core/src/convert/message.rs +++ b/rpc/grpc/core/src/convert/message.rs @@ -27,7 +27,7 @@ use kaspa_rpc_core::{ RpcContextualPeerAddress, RpcError, RpcExtraData, RpcHash, RpcIpAddress, RpcNetworkType, RpcPeerAddress, RpcResult, SubmitBlockRejectReason, SubmitBlockReport, }; -use kaspa_utils::hex::*; +use kaspa_utils::{hex::*, networking::NetAddressError}; use std::str::FromStr; macro_rules! from { @@ -848,10 +848,14 @@ try_from!(item: &protowire::GetSinkBlueScoreResponseMessage, RpcResult); -try_from!(item: &protowire::UnbanRequestMessage, kaspa_rpc_core::UnbanRequest, { Self { ip: RpcIpAddress::from_str(&item.ip)? 
} }); +try_from!(item: &protowire::UnbanRequestMessage, kaspa_rpc_core::UnbanRequest, { + Self { ip: RpcIpAddress::from_str(&item.ip).map_err(NetAddressError::from)? } +}); try_from!(&protowire::UnbanResponseMessage, RpcResult); try_from!(item: &protowire::EstimateNetworkHashesPerSecondRequestMessage, kaspa_rpc_core::EstimateNetworkHashesPerSecondRequest, { diff --git a/rpc/grpc/core/src/convert/peer.rs b/rpc/grpc/core/src/convert/peer.rs index fcb7cef88c..8662186f86 100644 --- a/rpc/grpc/core/src/convert/peer.rs +++ b/rpc/grpc/core/src/convert/peer.rs @@ -3,6 +3,7 @@ use std::str::FromStr; use crate::protowire; use crate::{from, try_from}; use kaspa_rpc_core::{RpcError, RpcNodeId, RpcPeerAddress}; +use kaspa_utils::networking::NetAddressError; // ---------------------------------------------------------------------------- // rpc_core to protowire @@ -43,5 +44,9 @@ try_from!(item: &protowire::GetConnectedPeerInfoMessage, kaspa_rpc_core::RpcPeer } }); -try_from!(item: &protowire::GetPeerAddressesKnownAddressMessage, kaspa_rpc_core::RpcPeerAddress, { Self::from_str(&item.addr)? }); -try_from!(item: &protowire::GetPeerAddressesKnownAddressMessage, kaspa_rpc_core::RpcIpAddress, { Self::from_str(&item.addr)? }); +try_from!(item: &protowire::GetPeerAddressesKnownAddressMessage, kaspa_rpc_core::RpcPeerAddress, { + Self::from_str(&item.addr).map_err(NetAddressError::from)? +}); +try_from!(item: &protowire::GetPeerAddressesKnownAddressMessage, kaspa_rpc_core::RpcIpAddress, { + Self::from_str(&item.addr).map_err(NetAddressError::from)? +}); diff --git a/rpc/grpc/server/src/connection_handler.rs b/rpc/grpc/server/src/connection_handler.rs index fd13cf9bb0..988a62b730 100644 --- a/rpc/grpc/server/src/connection_handler.rs +++ b/rpc/grpc/server/src/connection_handler.rs @@ -132,6 +132,7 @@ impl ConnectionHandler { let bytes_rx = self.counters.bytes_rx.clone(); // Spawn server task + let serve_socket = serve_address.to_socket_addr().expect("gRPC server must bind to an IP address"); let server_handle = tokio::spawn(async move { let protowire_server = RpcServer::new(connection_handler) .accept_compressed(CompressionEncoding::Gzip) @@ -148,7 +149,7 @@ impl ConnectionHandler { .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()))) .add_service(protowire_server) .serve_with_shutdown( - serve_address.into(), + serve_socket, signal_receiver.map(|_| { debug!("GRPC, Server received the shutdown signal"); }), diff --git a/rpc/service/src/converter/protocol.rs b/rpc/service/src/converter/protocol.rs index 5edd3894a9..de9cfe8397 100644 --- a/rpc/service/src/converter/protocol.rs +++ b/rpc/service/src/converter/protocol.rs @@ -17,7 +17,7 @@ impl ProtocolConverter { let properties = peer.properties(); RpcPeerInfo { id: peer.identity(), - address: peer.net_address().into(), + address: peer.net_address(), is_outbound: peer.is_outbound(), is_ibd_peer: ibd_peer_key.is_some() && peer.key() == *ibd_peer_key.as_ref().unwrap(), last_ping_duration: peer.last_ping_duration(), diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs index 0993167d69..9ba84adfde 100644 --- a/rpc/service/src/service.rs +++ b/rpc/service/src/service.rs @@ -995,7 +995,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and } let peer_address = request.peer_address.normalize(self.config.net.default_p2p_port()); if let Some(connection_manager) = self.flow_context.connection_manager() { - connection_manager.add_connection_request(peer_address.into(), 
request.is_permanent).await; + connection_manager.add_connection_request(peer_address, request.is_permanent).await; } else { return Err(RpcError::NoConnectionManager); } diff --git a/rpc/wrpc/server/src/address.rs b/rpc/wrpc/server/src/address.rs index 1100860e26..8d479fe482 100644 --- a/rpc/wrpc/server/src/address.rs +++ b/rpc/wrpc/server/src/address.rs @@ -1,8 +1,8 @@ use crate::service::WrpcEncoding; use kaspa_consensus_core::network::NetworkType; -use kaspa_utils::networking::ContextualNetAddress; +use kaspa_utils::networking::{ContextualNetAddress, NetAddressError}; use serde::Deserialize; -use std::{net::AddrParseError, str::FromStr}; +use std::str::FromStr; #[derive(Clone, Debug, Deserialize)] #[serde(rename = "lowercase")] @@ -44,7 +44,7 @@ impl WrpcNetAddress { } } impl FromStr for WrpcNetAddress { - type Err = AddrParseError; + type Err = NetAddressError; fn from_str(s: &str) -> Result { match s { "default" => Ok(WrpcNetAddress::Default), @@ -58,7 +58,7 @@ impl FromStr for WrpcNetAddress { } impl TryFrom<&str> for WrpcNetAddress { - type Error = AddrParseError; + type Error = NetAddressError; fn try_from(s: &str) -> Result { WrpcNetAddress::from_str(s) @@ -66,7 +66,7 @@ impl TryFrom<&str> for WrpcNetAddress { } impl TryFrom for WrpcNetAddress { - type Error = AddrParseError; + type Error = NetAddressError; fn try_from(s: String) -> Result { WrpcNetAddress::from_str(&s) diff --git a/utils/Cargo.toml b/utils/Cargo.toml index 7a2ae4935f..5199fd92ea 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -33,6 +33,7 @@ thiserror.workspace = true triggered.workspace = true uuid.workspace = true wasm-bindgen.workspace = true +data-encoding.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] mac_address.workspace = true diff --git a/utils/src/networking.rs b/utils/src/networking.rs index b7a3397780..f9f0eaf950 100644 --- a/utils/src/networking.rs +++ b/utils/src/networking.rs @@ -1,10 +1,14 @@ // #![allow(dead_code)] use borsh::{BorshDeserialize, BorshSerialize}; +use data_encoding::BASE32_NOPAD; use ipnet::IpNet; -use serde::{Deserialize, Serialize}; +use serde::{de::Error as SerdeDeError, Deserialize, Deserializer, Serialize, Serializer}; use std::{ - fmt::Display, + convert::TryInto, + error::Error as StdError, + fmt::{Display, Formatter}, net::{AddrParseError, IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + num::ParseIntError, ops::Deref, str::FromStr, }; @@ -218,21 +222,284 @@ impl BorshDeserialize for IpAddress { } } +const ONION_HOST_SUFFIX: &str = ".onion"; +const ONION_HOST_LENGTH: usize = 56; +const ONION_RAW_LENGTH: usize = 35; + +#[derive(Debug, Clone)] +pub enum NetAddressError { + InvalidSyntax(String), + Addr(AddrParseError), + InvalidPort(ParseIntError), + MissingPort, + InvalidOnion(String), + NonIpAddress, +} + +impl Display for NetAddressError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + NetAddressError::InvalidSyntax(value) => write!(f, "invalid address syntax: {value}"), + NetAddressError::Addr(err) => err.fmt(f), + NetAddressError::InvalidPort(err) => write!(f, "invalid port: {err}"), + NetAddressError::MissingPort => write!(f, "missing port"), + NetAddressError::InvalidOnion(value) => write!(f, "invalid onion address: {value}"), + NetAddressError::NonIpAddress => write!(f, "address does not represent an IP endpoint"), + } + } +} + +impl StdError for NetAddressError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + NetAddressError::Addr(err) => Some(err), + 
NetAddressError::InvalidPort(err) => Some(err), + _ => None, + } + } +} + +impl From for NetAddressError { + fn from(value: AddrParseError) -> Self { + NetAddressError::Addr(value) + } +} + +impl From for NetAddressError { + fn from(value: ParseIntError) -> Self { + NetAddressError::InvalidPort(value) + } +} + +#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)] +pub struct OnionAddress { + raw: [u8; ONION_RAW_LENGTH], +} + +impl OnionAddress { + pub fn raw(&self) -> &[u8; ONION_RAW_LENGTH] { + &self.raw + } + + pub fn from_raw(raw: [u8; ONION_RAW_LENGTH]) -> Self { + Self { raw } + } + + fn host_part(&self) -> String { + let mut host = BASE32_NOPAD.encode(&self.raw); + host.make_ascii_lowercase(); + host + } +} + +impl Display for OnionAddress { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}{ONION_HOST_SUFFIX}", self.host_part()) + } +} + +impl TryFrom<&str> for OnionAddress { + type Error = NetAddressError; + + fn try_from(value: &str) -> Result { + let lower = value.to_ascii_lowercase(); + if !lower.ends_with(ONION_HOST_SUFFIX) { + return Err(NetAddressError::InvalidOnion(value.to_string())); + } + let host = &lower[..lower.len() - ONION_HOST_SUFFIX.len()]; + if host.len() != ONION_HOST_LENGTH { + return Err(NetAddressError::InvalidOnion(value.to_string())); + } + let upper = host.to_ascii_uppercase(); + let decoded = BASE32_NOPAD.decode(upper.as_bytes()).map_err(|_| NetAddressError::InvalidOnion(value.to_string()))?; + let raw: [u8; ONION_RAW_LENGTH] = decoded.try_into().map_err(|_| NetAddressError::InvalidOnion(value.to_string()))?; + Ok(Self { raw }) + } +} + +impl TryFrom for OnionAddress { + type Error = NetAddressError; + + fn try_from(value: String) -> Result { + OnionAddress::try_from(value.as_str()) + } +} + +impl Serialize for OnionAddress { + fn serialize(&self, serializer: S) -> Result { + let value = self.to_string(); + serializer.serialize_str(&value) + } +} + +impl<'de> Deserialize<'de> for OnionAddress { + fn deserialize>(deserializer: D) -> Result { + let value = ::deserialize(deserializer)?; + OnionAddress::try_from(value.as_str()).map_err(SerdeDeError::custom) + } +} + +impl BorshSerialize for OnionAddress { + fn serialize(&self, writer: &mut W) -> ::core::result::Result<(), std::io::Error> { + borsh::BorshSerialize::serialize(&self.raw, writer) + } +} + +impl BorshDeserialize for OnionAddress { + fn deserialize_reader(reader: &mut R) -> ::core::result::Result { + let raw: [u8; ONION_RAW_LENGTH] = BorshDeserialize::deserialize_reader(reader)?; + Ok(Self { raw }) + } +} + +#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)] +pub enum AddressKind { + Ip(IpAddress), + Onion(OnionAddress), +} + +impl AddressKind { + pub fn is_ip(&self) -> bool { + matches!(self, AddressKind::Ip(_)) + } + + pub fn as_ip(&self) -> Option { + match self { + AddressKind::Ip(ip) => Some(*ip), + _ => None, + } + } + + pub fn as_onion(&self) -> Option { + match self { + AddressKind::Onion(addr) => Some(*addr), + _ => None, + } + } +} + +impl From for AddressKind { + fn from(value: IpAddress) -> Self { + AddressKind::Ip(value) + } +} + +impl From for AddressKind { + fn from(value: OnionAddress) -> Self { + AddressKind::Onion(value) + } +} + +impl Display for AddressKind { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + AddressKind::Ip(ip) => Display::fmt(ip, f), + AddressKind::Onion(addr) => Display::fmt(addr, f), + } + } +} + +impl Serialize for AddressKind { + fn serialize(&self, serializer: S) -> Result { + let value = self.to_string(); 
+ serializer.serialize_str(&value) + } +} + +impl<'de> Deserialize<'de> for AddressKind { + fn deserialize>(deserializer: D) -> Result { + let value = ::deserialize(deserializer)?; + if value.to_ascii_lowercase().ends_with(ONION_HOST_SUFFIX) { + OnionAddress::try_from(value.as_str()).map(AddressKind::Onion).map_err(SerdeDeError::custom) + } else { + IpAddress::from_str(&value).map(AddressKind::Ip).map_err(SerdeDeError::custom) + } + } +} + +impl BorshSerialize for AddressKind { + fn serialize(&self, writer: &mut W) -> ::core::result::Result<(), std::io::Error> { + match self { + AddressKind::Ip(ip) => { + writer.write_all(&[0])?; + BorshSerialize::serialize(&ip, writer) + } + AddressKind::Onion(addr) => { + writer.write_all(&[1])?; + BorshSerialize::serialize(&addr, writer) + } + } + } +} + +impl BorshDeserialize for AddressKind { + fn deserialize_reader(reader: &mut R) -> ::core::result::Result { + let variant_idx: u8 = BorshDeserialize::deserialize_reader(reader)?; + match variant_idx { + 0 => Ok(AddressKind::Ip(BorshDeserialize::deserialize_reader(reader)?)), + 1 => Ok(AddressKind::Onion(BorshDeserialize::deserialize_reader(reader)?)), + _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid address kind variant").into()), + } + } +} + +impl From<&AddressKind> for PrefixBucket { + fn from(kind: &AddressKind) -> Self { + match kind { + AddressKind::Ip(ip) => PrefixBucket::from(ip), + AddressKind::Onion(onion) => { + let mut bytes = [0u8; 8]; + bytes.copy_from_slice(&onion.raw()[..8]); + PrefixBucket(u64::from_be_bytes(bytes)) + } + } + } +} + /// A network address, equivalent of a [SocketAddr]. #[derive(PartialEq, Eq, Hash, Copy, Clone, Serialize, Deserialize, Debug, BorshSerialize, BorshDeserialize)] pub struct NetAddress { - pub ip: IpAddress, + pub ip: AddressKind, pub port: u16, } impl NetAddress { pub fn new(ip: IpAddress, port: u16) -> Self { - Self { ip, port } + Self { ip: AddressKind::Ip(ip), port } + } + + pub fn new_onion(address: OnionAddress, port: u16) -> Self { + Self { ip: AddressKind::Onion(address), port } + } + + pub fn from_kind(kind: AddressKind, port: u16) -> Self { + Self { ip: kind, port } + } + + pub fn is_ip(&self) -> bool { + self.ip.is_ip() + } + + pub fn as_ip(&self) -> Option { + self.ip.as_ip() + } + + pub fn as_onion(&self) -> Option { + self.ip.as_onion() + } + + pub fn kind(&self) -> AddressKind { + self.ip } pub fn prefix_bucket(&self) -> PrefixBucket { PrefixBucket::from(self) } + + pub fn to_socket_addr(&self) -> Result { + let ip = self.as_ip().ok_or(NetAddressError::NonIpAddress)?; + Ok(SocketAddr::new(ip.into(), self.port)) + } } impl From for NetAddress { @@ -241,23 +508,38 @@ impl From for NetAddress { } } -impl From for SocketAddr { - fn from(value: NetAddress) -> Self { - Self::new(value.ip.0, value.port) +impl TryFrom for SocketAddr { + type Error = NetAddressError; + + fn try_from(value: NetAddress) -> Result { + value.to_socket_addr() + } +} + +impl TryFrom<&NetAddress> for SocketAddr { + type Error = NetAddressError; + + fn try_from(value: &NetAddress) -> Result { + value.to_socket_addr() } } impl FromStr for NetAddress { - type Err = AddrParseError; + type Err = NetAddressError; fn from_str(s: &str) -> Result { - SocketAddr::from_str(s).map(NetAddress::from) + let contextual = ContextualNetAddress::from_str(s)?; + let port = contextual.port.ok_or(NetAddressError::MissingPort)?; + Ok(Self { ip: contextual.ip, port }) } } impl Display for NetAddress { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - 
SocketAddr::from(self.to_owned()).fmt(f) + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self.ip { + AddressKind::Ip(ip) => SocketAddr::new(ip.into(), self.port).fmt(f), + AddressKind::Onion(addr) => write!(f, "{}:{}", addr, self.port), + } } } @@ -266,29 +548,57 @@ impl Display for NetAddress { /// Use `normalize` to get a fully determined address. #[derive(PartialEq, Eq, Hash, Copy, Clone, Serialize, Deserialize, Debug, BorshSerialize, BorshDeserialize)] pub struct ContextualNetAddress { - ip: IpAddress, + ip: AddressKind, port: Option, } impl ContextualNetAddress { pub fn new(ip: IpAddress, port: Option) -> Self { - Self { ip, port } + Self { ip: AddressKind::Ip(ip), port } + } + + pub fn new_onion(address: OnionAddress, port: Option) -> Self { + Self { ip: AddressKind::Onion(address), port } + } + + pub fn from_kind(kind: AddressKind, port: Option) -> Self { + Self { ip: kind, port } } pub fn has_port(&self) -> bool { self.port.is_some() } + pub fn is_ip(&self) -> bool { + self.ip.is_ip() + } + + pub fn as_ip(&self) -> Option { + self.ip.as_ip() + } + + pub fn as_onion(&self) -> Option { + self.ip.as_onion() + } + + pub fn kind(&self) -> AddressKind { + self.ip + } + + pub fn port(&self) -> Option { + self.port + } + pub fn normalize(&self, default_port: u16) -> NetAddress { - NetAddress::new(self.ip, self.port.unwrap_or(default_port)) + NetAddress::from_kind(self.ip, self.port.unwrap_or(default_port)) } pub fn unspecified() -> Self { - Self { ip: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).into(), port: None } + Self::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).into(), None) } pub fn loopback() -> Self { - Self { ip: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).into(), port: None } + Self::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).into(), None) } pub fn port_not_specified(&self) -> bool { @@ -298,27 +608,52 @@ impl ContextualNetAddress { pub fn with_port(&self, port: u16) -> Self { Self { ip: self.ip, port: Some(port) } } + + pub fn into_parts(self) -> (AddressKind, Option) { + (self.ip, self.port) + } + + pub fn to_socket_addr(&self) -> Result { + let ip = self.as_ip().ok_or(NetAddressError::NonIpAddress)?; + let port = self.port.ok_or(NetAddressError::MissingPort)?; + Ok(SocketAddr::new(ip.into(), port)) + } } impl From for ContextualNetAddress { fn from(value: NetAddress) -> Self { - Self::new(value.ip, Some(value.port)) + Self { ip: value.ip, port: Some(value.port) } } } impl FromStr for ContextualNetAddress { - type Err = AddrParseError; + type Err = NetAddressError; fn from_str(s: &str) -> Result { - match SocketAddr::from_str(s) { - Ok(socket) => Ok(Self::new(socket.ip().into(), Some(socket.port()))), - Err(_) => Ok(Self::new(IpAddress::from_str(s)?, None)), + if let Ok(socket) = SocketAddr::from_str(s) { + return Ok(Self::new(socket.ip().into(), Some(socket.port()))); } + + if let Ok(ip) = IpAddress::from_str(s) { + return Ok(Self::new(ip, None)); + } + + if let Some((host, port_str)) = s.rsplit_once(':') { + if !host.contains(':') { + if let Ok(onion) = OnionAddress::try_from(host) { + let port = port_str.parse::()?; + return Ok(Self::new_onion(onion, Some(port))); + } + } + } + + let onion = OnionAddress::try_from(s)?; + Ok(Self::new_onion(onion, None)) } } impl TryFrom<&str> for ContextualNetAddress { - type Error = AddrParseError; + type Error = NetAddressError; fn try_from(s: &str) -> Result { ContextualNetAddress::from_str(s) @@ -326,7 +661,7 @@ impl TryFrom<&str> for ContextualNetAddress { } impl TryFrom for ContextualNetAddress { - type Error = 
AddrParseError; + type Error = NetAddressError; fn try_from(s: String) -> Result { ContextualNetAddress::from_str(&s) @@ -334,10 +669,12 @@ impl TryFrom for ContextualNetAddress { } impl Display for ContextualNetAddress { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.port { - Some(port) => SocketAddr::new(self.ip.into(), port).fmt(f), - None => self.ip.fmt(f), + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match (self.ip, self.port) { + (AddressKind::Ip(ip), Some(port)) => SocketAddr::new(ip.into(), port).fmt(f), + (AddressKind::Ip(ip), None) => Display::fmt(&ip, f), + (AddressKind::Onion(addr), Some(port)) => write!(f, "{}:{}", addr, port), + (AddressKind::Onion(addr), None) => Display::fmt(&addr, f), } } } @@ -445,6 +782,9 @@ mod tests { assert!(addr_v4.is_ok()); let addr_v6 = NetAddress::from_str("[2a01:4f8:191:1143::2]:5678"); assert!(addr_v6.is_ok()); + let onion_host: String = std::iter::repeat('a').take(ONION_HOST_LENGTH).collect(); + let addr_onion = NetAddress::from_str(&format!("{}{}:9735", onion_host, ONION_HOST_SUFFIX)); + assert!(addr_onion.is_ok()); } #[test] @@ -461,6 +801,13 @@ mod tests { let net_addr = ContextualNetAddress::new(addr, port); let s = serde_json::to_string(&net_addr).unwrap(); assert_eq!(s, r#"{"ip":"127.0.0.1","port":1234}"#); + + let onion_host: String = std::iter::repeat('b').take(ONION_HOST_LENGTH).collect(); + let onion_addr = ContextualNetAddress::from_str(&format!("{}{}", onion_host, ONION_HOST_SUFFIX)).unwrap(); + assert!(onion_addr.as_onion().is_some()); + assert!(onion_addr.port().is_none()); + let serialized = serde_json::to_string(&onion_addr).unwrap(); + assert_eq!(serialized, format!(r#"{{"ip":"{}{}","port":null}}"#, onion_host, ONION_HOST_SUFFIX)); } #[test] From 5cef6381754ae76b4f0ab8b29b4bd645db3cb8d2 Mon Sep 17 00:00:00 2001 From: elldeeone <73735118+elldeeone@users.noreply.github.com> Date: Wed, 12 Nov 2025 21:03:05 +1100 Subject: [PATCH 2/6] Fix tor bootstrap gate and workspace members --- Cargo.toml | 1 - kaspad/src/daemon.rs | 3 --- 2 files changed, 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b3dc35acba..ca87d47363 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,7 +59,6 @@ members = [ "metrics/core", "metrics/perf_monitor", "utils/alloc", - "research/tor-integration/tor-prototype", ] [workspace.package] diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index cd29170546..517a71ae6a 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -374,9 +374,6 @@ impl AsyncService for TorRuntimeService { fn start(self: Arc) -> AsyncServiceFuture { Box::pin(async move { trace!("{} starting event loop", Self::IDENT); - if let Some(tx) = &self.bootstrap_tx { - let _ = tx.send(true); - } let shutdown = self.shutdown.listener.clone(); tokio::pin!(shutdown); let mut ticker = tokio::time::interval(Self::POLL_INTERVAL); From 8453759616f2c8aa5cbf26c1a1bb23116c09c794 Mon Sep 17 00:00:00 2001 From: elldeeone <73735118+elldeeone@users.noreply.github.com> Date: Wed, 12 Nov 2025 21:16:58 +1100 Subject: [PATCH 3/6] Improve proxy validation and Tor logs --- kaspad/src/args.rs | 20 ++++++++++---------- kaspad/src/daemon.rs | 21 ++++++++++++++++----- kaspad/src/tor_manager.rs | 5 ++--- protocol/p2p/src/core/connection_handler.rs | 15 ++------------- 4 files changed, 30 insertions(+), 31 deletions(-) diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index 3be88d4178..d6fa0b493b 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -5,7 +5,7 @@ use kaspa_consensus_core::{ }; 
use kaspa_core::kaspad_env::version; use kaspa_notify::address::tracker::Tracker; -use kaspa_utils::networking::{ContextualNetAddress, NetAddress}; +use kaspa_utils::networking::{ContextualNetAddress, NetAddress, NetAddressError}; use kaspa_wrpc_server::address::WrpcNetAddress; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; @@ -907,17 +907,17 @@ pub enum AllowedNetworksChoice { } impl ProxySettings { - pub fn resolve(&self, default_port: u16) -> ResolvedProxySettings { - fn normalize(addr: &ContextualNetAddress, default_port: u16) -> SocketAddr { + pub fn resolve(&self, default_port: u16) -> Result { + fn normalize(addr: &ContextualNetAddress, default_port: u16) -> Result { let net_addr: NetAddress = addr.clone().normalize(default_port); - net_addr.to_socket_addr().expect("expected IP address") + net_addr.to_socket_addr() } - ResolvedProxySettings { - default: self.default.as_ref().map(|addr| normalize(addr, default_port)), - ipv4: self.ipv4.as_ref().map(|addr| normalize(addr, default_port)), - ipv6: self.ipv6.as_ref().map(|addr| normalize(addr, default_port)), - onion: self.onion.as_ref().map(|addr| normalize(addr, default_port)), - } + Ok(ResolvedProxySettings { + default: self.default.as_ref().map(|addr| normalize(addr, default_port)).transpose()?, + ipv4: self.ipv4.as_ref().map(|addr| normalize(addr, default_port)).transpose()?, + ipv6: self.ipv6.as_ref().map(|addr| normalize(addr, default_port)).transpose()?, + onion: self.onion.as_ref().map(|addr| normalize(addr, default_port)).transpose()?, + }) } } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 517a71ae6a..664e8fc943 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -554,7 +554,10 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: }; let proxy_settings = args.proxy_settings(); - let resolved_proxies = proxy_settings.resolve(9050); + let resolved_proxies = proxy_settings.resolve(9050).unwrap_or_else(|err| { + println!("Invalid proxy configuration (use IP:port): {err}"); + exit(1); + }); let mut default_proxy_addr = resolved_proxies.default; let mut proxy_ipv4_addr = resolved_proxies.ipv4; let mut proxy_ipv6_addr = resolved_proxies.ipv6; @@ -590,10 +593,18 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: proxy_ipv6_addr = default_proxy_addr; } let mut effective_descriptions = Vec::new(); - if let Some(addr) = default_proxy_addr { effective_descriptions.push(format!("default={addr}")); } - if let Some(addr) = proxy_ipv4_addr { effective_descriptions.push(format!("ipv4={addr}")); } - if let Some(addr) = proxy_ipv6_addr { effective_descriptions.push(format!("ipv6={addr}")); } - if let Some(addr) = tor_proxy_override_addr { effective_descriptions.push(format!("onion={addr}")); } + if let Some(addr) = default_proxy_addr { + effective_descriptions.push(format!("default={addr}")); + } + if let Some(addr) = proxy_ipv4_addr { + effective_descriptions.push(format!("ipv4={addr}")); + } + if let Some(addr) = proxy_ipv6_addr { + effective_descriptions.push(format!("ipv6={addr}")); + } + if let Some(addr) = tor_proxy_override_addr { + effective_descriptions.push(format!("onion={addr}")); + } if !effective_descriptions.is_empty() { info!("Effective SOCKS routing: {}", effective_descriptions.join(", ")); } diff --git a/kaspad/src/tor_manager.rs b/kaspad/src/tor_manager.rs index 92294a24af..d878e48e63 100644 --- a/kaspad/src/tor_manager.rs +++ b/kaspad/src/tor_manager.rs @@ -9,7 +9,7 @@ use std::{ }; use hex::encode as 
hex_encode; -use kaspa_core::{info, warn}; +use kaspa_core::{debug, info, warn}; use thiserror::Error; use tor_interface::{ legacy_tor_client::{LegacyTorClient, LegacyTorClientConfig, TorAuth}, @@ -227,8 +227,7 @@ fn wait_for_bootstrap(client: &mut LegacyTorClient, timeout: Duration) -> Result return Ok(()); } TorEvent::LogReceived { line } => { - // Tor can be quite chatty; downgrade to debug once we have more granular logging controls. - warn!("tor: {}", line); + debug!("tor: {}", line); } _ => {} } diff --git a/protocol/p2p/src/core/connection_handler.rs b/protocol/p2p/src/core/connection_handler.rs index 3f718f625a..919bc486aa 100644 --- a/protocol/p2p/src/core/connection_handler.rs +++ b/protocol/p2p/src/core/connection_handler.rs @@ -12,7 +12,6 @@ use kaspa_utils_tower::{ counters::TowerConnectionCounters, middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder}, }; -use rand::{distributions::Alphanumeric, Rng}; use std::io; use std::net::{IpAddr, SocketAddr}; use std::pin::Pin; @@ -139,10 +138,8 @@ impl ConnectionHandler { uri.host().ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "missing host in URI"))?.to_string(); let port = uri.port_u16().unwrap_or(80); let target = format!("{}:{}", host, port); - let (username, password) = generate_socks_credentials(); - let stream = Socks5Stream::connect_with_password(proxy_addr, target, &username, &password) - .await - .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + let stream = + Socks5Stream::connect(proxy_addr, target).await.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; Ok::<_, io::Error>(TokioIo::new(stream.into_inner())) } }); @@ -238,14 +235,6 @@ impl ConnectionHandler { } } -fn generate_socks_credentials() -> (String, String) { - const USERNAME_PREFIX: &str = "kaspa"; - let mut rng = rand::thread_rng(); - let suffix: String = (&mut rng).sample_iter(&Alphanumeric).take(16).map(char::from).collect(); - let password: String = (&mut rng).sample_iter(&Alphanumeric).take(32).map(char::from).collect(); - (format!("{USERNAME_PREFIX}-{suffix}"), password) -} - #[tonic::async_trait] impl ProtoP2p for ConnectionHandler { type MessageStreamStream = Pin> + Send + 'static>>; From 84b671a9f6c3492a5e633b59845e9e44a118a289 Mon Sep 17 00:00:00 2001 From: elldeeone <73735118+elldeeone@users.noreply.github.com> Date: Wed, 12 Nov 2025 22:16:52 +1100 Subject: [PATCH 4/6] Remove unused warn import in Tor manager --- kaspad/src/tor_manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kaspad/src/tor_manager.rs b/kaspad/src/tor_manager.rs index d878e48e63..535d95a860 100644 --- a/kaspad/src/tor_manager.rs +++ b/kaspad/src/tor_manager.rs @@ -9,7 +9,7 @@ use std::{ }; use hex::encode as hex_encode; -use kaspa_core::{debug, info, warn}; +use kaspa_core::{debug, info}; use thiserror::Error; use tor_interface::{ legacy_tor_client::{LegacyTorClient, LegacyTorClientConfig, TorAuth}, From e8093e1caa367b0059b564bc01f63cfb6bdde5da Mon Sep 17 00:00:00 2001 From: elldeeone <73735118+elldeeone@users.noreply.github.com> Date: Thu, 13 Nov 2025 09:48:09 +1100 Subject: [PATCH 5/6] Tor integration proxy auth + address store fix --- components/addressmanager/src/lib.rs | 17 ++- .../src/stores/address_store.rs | 6 ++ consensus/core/src/errors/config.rs | 3 + kaspad/src/args.rs | 91 ++++++++++++---- kaspad/src/daemon.rs | 64 ++++++----- protocol/flows/src/flow_context.rs | 35 +++--- protocol/flows/src/service.rs | 7 +- protocol/p2p/src/core/connection_handler.rs | 102 
++++++++++++++---- protocol/p2p/src/lib.rs | 3 +- 9 files changed, 233 insertions(+), 95 deletions(-) diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs index 1be4fb18cc..228d2fc8f1 100644 --- a/components/addressmanager/src/lib.rs +++ b/components/addressmanager/src/lib.rs @@ -422,17 +422,32 @@ mod address_store_with_cache { // We manage the cache ourselves on this level, so we disable the inner builtin cache let db_store = DbAddressesStore::new(db, CachePolicy::Empty); let mut addresses = HashMap::new(); + let mut error_count = 0usize; for result in db_store.iterator() { match result { Ok((key, entry)) => { addresses.insert(key, entry); } Err(err) => { - warn!("Failed to load address entry from store: {err}"); + error_count += 1; + if error_count <= 5 { + warn!("Failed to load address entry from store: {err}"); + } } } } + if error_count > 0 { + if error_count > 5 { + warn!("Additional {} address entries failed to load", error_count - 5); + } + match db_store.clear() { + Ok(_) => warn!("Address store reset due to incompatible entries; it will repopulate automatically"), + Err(err) => warn!("Failed to reset address store after load errors: {err}"), + } + addresses.clear(); + } + Self { db_store, addresses } } diff --git a/components/addressmanager/src/stores/address_store.rs b/components/addressmanager/src/stores/address_store.rs index ea2f0d8513..87b9570da4 100644 --- a/components/addressmanager/src/stores/address_store.rs +++ b/components/addressmanager/src/stores/address_store.rs @@ -168,3 +168,9 @@ impl AddressesStore for DbAddressesStore { self.set(key, Entry { connection_failed_count, address: entry.address }) } } + +impl DbAddressesStore { + pub fn clear(&self) -> StoreResult<()> { + self.access.delete_all(DirectDbWriter::new(&self.db)) + } +} diff --git a/consensus/core/src/errors/config.rs b/consensus/core/src/errors/config.rs index fb2656d463..1d3a4b560c 100644 --- a/consensus/core/src/errors/config.rs +++ b/consensus/core/src/errors/config.rs @@ -24,6 +24,9 @@ pub enum ConfigError { #[cfg(feature = "devnet-prealloc")] #[error("--num-prealloc-utxos has to appear with --prealloc-address and vice versa")] MissingPreallocNumOrAddress, + + #[error("Configuration: specify both --proxyuser and --proxypass")] + IncompleteProxyAuth, } pub type ConfigResult = std::result::Result; diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index d6fa0b493b..e603ac9476 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -5,11 +5,12 @@ use kaspa_consensus_core::{ }; use kaspa_core::kaspad_env::version; use kaspa_notify::address::tracker::Tracker; +use kaspa_p2p_lib::{SocksAuth, SocksProxyParams}; use kaspa_utils::networking::{ContextualNetAddress, NetAddress, NetAddressError}; use kaspa_wrpc_server::address::WrpcNetAddress; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; -use std::{ffi::OsString, fmt, fs, net::SocketAddr, path::PathBuf, str::FromStr}; +use std::{ffi::OsString, fmt, fs, path::PathBuf, str::FromStr, sync::Arc}; use toml::from_str; #[cfg(feature = "devnet-prealloc")] @@ -52,6 +53,8 @@ pub struct Args { pub listen: Option, #[serde_as(as = "Option")] pub proxy: Option, + pub proxy_user: Option, + pub proxy_pass: Option, #[serde_as(as = "Vec")] #[serde(default)] pub proxy_net: Vec, @@ -145,6 +148,8 @@ impl Default for Args { add_peers: vec![], listen: None, proxy: None, + proxy_user: None, + proxy_pass: None, proxy_net: vec![], tor_proxy: None, tor_control: None, @@ -183,16 +188,20 @@ impl Default for Args { 
impl Args { pub fn proxy_settings(&self) -> ProxySettings { let mut settings = ProxySettings::default(); - settings.default = self.proxy.clone(); + if let Some(default_proxy) = self.proxy.clone() { + settings.default = Some(ProxyConfigEntry { address: default_proxy, auth: self.proxy_auth(false) }); + } for rule in &self.proxy_net { + let entry = + ProxyConfigEntry { address: rule.address.clone(), auth: self.proxy_auth(matches!(rule.network, ProxyNetwork::Onion)) }; match rule.network { - ProxyNetwork::Ipv4 => settings.ipv4 = Some(rule.address.clone()), - ProxyNetwork::Ipv6 => settings.ipv6 = Some(rule.address.clone()), - ProxyNetwork::Onion => settings.onion = Some(rule.address.clone()), + ProxyNetwork::Ipv4 => settings.ipv4 = Some(entry), + ProxyNetwork::Ipv6 => settings.ipv6 = Some(entry), + ProxyNetwork::Onion => settings.onion = Some(entry), } } if let Some(tor_specific) = self.tor_proxy.clone() { - settings.onion = Some(tor_specific); + settings.onion = Some(ProxyConfigEntry { address: tor_specific, auth: self.proxy_auth(true) }); } settings } @@ -221,6 +230,19 @@ impl Args { AllowedNetworksChoice::Custom { allow_ipv4, allow_ipv6, allow_onion } } + fn proxy_auth(&self, requires_isolation: bool) -> SocksAuth { + match (&self.proxy_user, &self.proxy_pass) { + (Some(user), Some(pass)) => SocksAuth::Static { username: Arc::from(user.as_str()), password: Arc::from(pass.as_str()) }, + _ => { + if requires_isolation { + SocksAuth::Randomized + } else { + SocksAuth::None + } + } + } + } + pub fn apply_to_config(&self, config: &mut Config) { config.utxoindex = self.utxoindex; config.disable_upnp = self.disable_upnp || self.tor_only; @@ -373,6 +395,22 @@ pub fn cli() -> Command { .value_parser(clap::value_parser!(ContextualNetAddress)) .help("Route outbound clearnet P2P connections through the provided SOCKS5 proxy (default port: 9050)."), ) + .arg( + Arg::new("proxy-user") + .long("proxyuser") + .require_equals(true) + .value_name("USER") + .value_parser(clap::value_parser!(String)) + .help("Username for the default SOCKS proxy (applies to --proxy and network-specific overrides)."), + ) + .arg( + Arg::new("proxy-pass") + .long("proxypass") + .require_equals(true) + .value_name("PASSWORD") + .value_parser(clap::value_parser!(String)) + .help("Password for the default SOCKS proxy (requires --proxyuser)."), + ) .arg( Arg::new("proxy-net") .long("proxy-net") @@ -618,6 +656,8 @@ impl Args { add_peers: arg_match_many_unwrap_or::(&m, "add-peers", defaults.add_peers), listen: m.get_one::("listen").cloned().or(defaults.listen), proxy: m.get_one::("proxy").cloned().or(defaults.proxy.clone()), + proxy_user: m.get_one::("proxy-user").cloned().or(defaults.proxy_user.clone()), + proxy_pass: m.get_one::("proxy-pass").cloned().or(defaults.proxy_pass.clone()), proxy_net: m .get_many::("proxy-net") .map(|values| values.cloned().collect()) @@ -884,20 +924,26 @@ impl From for ProxyNetwork { } } +#[derive(Debug, Clone)] +pub struct ProxyConfigEntry { + pub address: ContextualNetAddress, + pub auth: SocksAuth, +} + #[derive(Debug, Clone, Default)] pub struct ProxySettings { - pub default: Option, - pub ipv4: Option, - pub ipv6: Option, - pub onion: Option, + pub default: Option, + pub ipv4: Option, + pub ipv6: Option, + pub onion: Option, } -#[derive(Debug, Clone, Copy, Default)] +#[derive(Debug, Clone, Default)] pub struct ResolvedProxySettings { - pub default: Option, - pub ipv4: Option, - pub ipv6: Option, - pub onion: Option, + pub default: Option, + pub ipv4: Option, + pub ipv6: Option, + pub onion: Option, } 
#[derive(Debug, Clone, Copy)] @@ -908,16 +954,17 @@ pub enum AllowedNetworksChoice { impl ProxySettings { pub fn resolve(&self, default_port: u16) -> Result { - fn normalize(addr: &ContextualNetAddress, default_port: u16) -> Result { - let net_addr: NetAddress = addr.clone().normalize(default_port); - net_addr.to_socket_addr() + fn normalize(entry: &ProxyConfigEntry, default_port: u16) -> Result { + let net_addr: NetAddress = entry.address.clone().normalize(default_port); + let addr = net_addr.to_socket_addr()?; + Ok(SocksProxyParams { address: addr, auth: entry.auth.clone() }) } Ok(ResolvedProxySettings { - default: self.default.as_ref().map(|addr| normalize(addr, default_port)).transpose()?, - ipv4: self.ipv4.as_ref().map(|addr| normalize(addr, default_port)).transpose()?, - ipv6: self.ipv6.as_ref().map(|addr| normalize(addr, default_port)).transpose()?, - onion: self.onion.as_ref().map(|addr| normalize(addr, default_port)).transpose()?, + default: self.default.as_ref().map(|entry| normalize(entry, default_port)).transpose()?, + ipv4: self.ipv4.as_ref().map(|entry| normalize(entry, default_port)).transpose()?, + ipv6: self.ipv6.as_ref().map(|entry| normalize(entry, default_port)).transpose()?, + onion: self.onion.as_ref().map(|entry| normalize(entry, default_port)).transpose()?, }) } } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 664e8fc943..5e77c757c0 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -32,7 +32,7 @@ use kaspa_database::{ }; use kaspa_grpc_server::service::GrpcService; use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; -use kaspa_p2p_lib::Hub; +use kaspa_p2p_lib::{Hub, SocksAuth, SocksProxyParams}; use kaspa_p2p_mining::rule_engine::MiningRuleEngine; use kaspa_rpc_service::service::RpcCoreService; use kaspa_txscript::caches::TxScriptCacheCounters; @@ -140,6 +140,9 @@ pub fn validate_args(args: &Args) -> ConfigResult<()> { if args.max_tracked_addresses > Tracker::MAX_ADDRESS_UPPER_BOUND { return Err(ConfigError::MaxTrackedAddressesTooHigh(Tracker::MAX_ADDRESS_UPPER_BOUND)); } + if args.proxy_user.is_some() ^ args.proxy_pass.is_some() { + return Err(ConfigError::IncompleteProxyAuth); + } Ok(()) } @@ -200,8 +203,9 @@ fn compute_tor_system_config(args: &Args) -> Option { let proxy_settings = args.proxy_settings(); let socks_source = proxy_settings .onion - .clone() - .or_else(|| proxy_settings.default.clone()) + .as_ref() + .or_else(|| proxy_settings.default.as_ref()) + .map(|entry| entry.address.clone()) .unwrap_or_else(|| ContextualNetAddress::loopback().with_port(9050)); let socks_addr = contextual_to_socket(socks_source, 9050); @@ -564,46 +568,54 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: let tor_proxy_override_addr = resolved_proxies.onion; let mut proxy_descriptions = Vec::new(); - if let Some(addr) = default_proxy_addr { - proxy_descriptions.push(format!("default={addr}")); + if let Some(entry) = default_proxy_addr.as_ref() { + proxy_descriptions.push(format!("default={}", entry.address)); } - if let Some(addr) = proxy_ipv4_addr { - proxy_descriptions.push(format!("ipv4={addr}")); + if let Some(entry) = proxy_ipv4_addr.as_ref() { + proxy_descriptions.push(format!("ipv4={}", entry.address)); } - if let Some(addr) = proxy_ipv6_addr { - proxy_descriptions.push(format!("ipv6={addr}")); + if let Some(entry) = proxy_ipv6_addr.as_ref() { + proxy_descriptions.push(format!("ipv6={}", entry.address)); } - if let Some(addr) = tor_proxy_override_addr { - 
proxy_descriptions.push(format!("onion={addr}")); + if let Some(entry) = tor_proxy_override_addr.as_ref() { + proxy_descriptions.push(format!("onion={}", entry.address)); } if !proxy_descriptions.is_empty() { info!("Configured SOCKS proxies: {}", proxy_descriptions.join(", ")); } - let tor_proxy_from_manager = tor_manager.as_ref().map(|mgr| mgr.socks_addr()); - let effective_tor_proxy = tor_proxy_from_manager.or(tor_proxy_override_addr).or(default_proxy_addr); - if let Some(proxy) = effective_tor_proxy { - info!("Effective Tor proxy: {proxy}"); + let tor_proxy_from_manager = + tor_manager.as_ref().map(|mgr| SocksProxyParams { address: mgr.socks_addr(), auth: SocksAuth::Randomized }); + let promote_tor_proxy = |entry: &SocksProxyParams| match entry.auth { + SocksAuth::None => SocksProxyParams { address: entry.address, auth: SocksAuth::Randomized }, + _ => entry.clone(), + }; + let effective_tor_proxy = tor_proxy_from_manager + .clone() + .or_else(|| tor_proxy_override_addr.clone()) + .or_else(|| default_proxy_addr.as_ref().map(|entry| promote_tor_proxy(entry))); + if let Some(proxy) = effective_tor_proxy.as_ref() { + info!("Effective Tor proxy: {}", proxy.address); if default_proxy_addr.is_none() { - default_proxy_addr = Some(proxy); + default_proxy_addr = Some(proxy.clone()); } if proxy_ipv4_addr.is_none() { - proxy_ipv4_addr = default_proxy_addr; + proxy_ipv4_addr = default_proxy_addr.clone(); } if proxy_ipv6_addr.is_none() { - proxy_ipv6_addr = default_proxy_addr; + proxy_ipv6_addr = default_proxy_addr.clone(); } let mut effective_descriptions = Vec::new(); - if let Some(addr) = default_proxy_addr { - effective_descriptions.push(format!("default={addr}")); + if let Some(entry) = default_proxy_addr.as_ref() { + effective_descriptions.push(format!("default={}", entry.address)); } - if let Some(addr) = proxy_ipv4_addr { - effective_descriptions.push(format!("ipv4={addr}")); + if let Some(entry) = proxy_ipv4_addr.as_ref() { + effective_descriptions.push(format!("ipv4={}", entry.address)); } - if let Some(addr) = proxy_ipv6_addr { - effective_descriptions.push(format!("ipv6={addr}")); + if let Some(entry) = proxy_ipv6_addr.as_ref() { + effective_descriptions.push(format!("ipv6={}", entry.address)); } - if let Some(addr) = tor_proxy_override_addr { - effective_descriptions.push(format!("onion={addr}")); + if let Some(entry) = tor_proxy_override_addr.as_ref() { + effective_descriptions.push(format!("onion={}", entry.address)); } if !effective_descriptions.is_empty() { info!("Effective SOCKS routing: {}", effective_descriptions.join(", ")); diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 6be89a0f42..5d8a3f5773 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -36,7 +36,7 @@ use kaspa_p2p_lib::{ convert::model::version::Version, make_message, pb::{kaspad_message::Payload, InvRelayBlockMessage}, - service_flags, ConnectionInitializer, Hub, KaspadHandshake, PeerKey, PeerProperties, Router, + service_flags, ConnectionInitializer, Hub, KaspadHandshake, PeerKey, PeerProperties, Router, SocksProxyParams, }; use kaspa_p2p_mining::rule_engine::MiningRuleEngine; use kaspa_utils::{ @@ -45,7 +45,6 @@ use kaspa_utils::{ }; use parking_lot::{Mutex, RwLock}; use std::collections::HashMap; -use std::net::SocketAddr; use std::time::Instant; use std::{collections::hash_map::Entry, fmt::Display}; use std::{ @@ -245,10 +244,10 @@ pub struct FlowContextInner { // Mining rule engine mining_rule_engine: Arc, - proxy: Option, - 
proxy_ipv4: Option, - proxy_ipv6: Option, - tor_proxy: Option, + proxy: Option, + proxy_ipv4: Option, + proxy_ipv6: Option, + tor_proxy: Option, tor_only: bool, onion_service: Option<(V3OnionServiceId, NetAddress)>, tor_bootstrap_rx: Mutex>>, @@ -326,10 +325,10 @@ impl FlowContext { notification_root: Arc, hub: Hub, mining_rule_engine: Arc, - proxy_default: Option, - proxy_ipv4: Option, - proxy_ipv6: Option, - tor_proxy: Option, + proxy_default: Option, + proxy_ipv4: Option, + proxy_ipv6: Option, + tor_proxy: Option, tor_only: bool, onion_service: Option<(V3OnionServiceId, u16)>, tor_bootstrap_rx: Option>, @@ -397,20 +396,20 @@ impl FlowContext { self.orphan_resolution_range } - pub fn tor_proxy(&self) -> Option { - self.tor_proxy + pub fn tor_proxy(&self) -> Option { + self.tor_proxy.clone() } - pub fn proxy(&self) -> Option { - self.proxy + pub fn proxy(&self) -> Option { + self.proxy.clone() } - pub fn proxy_ipv4(&self) -> Option { - self.proxy_ipv4 + pub fn proxy_ipv4(&self) -> Option { + self.proxy_ipv4.clone() } - pub fn proxy_ipv6(&self) -> Option { - self.proxy_ipv6 + pub fn proxy_ipv6(&self) -> Option { + self.proxy_ipv6.clone() } pub fn tor_only(&self) -> bool { diff --git a/protocol/flows/src/service.rs b/protocol/flows/src/service.rs index 0a16de7ef8..6d9d0df9cb 100644 --- a/protocol/flows/src/service.rs +++ b/protocol/flows/src/service.rs @@ -73,11 +73,8 @@ impl AsyncService for P2pService { let ipv4_proxy = self.flow_context.proxy_ipv4(); let ipv6_proxy = self.flow_context.proxy_ipv6(); let tor_proxy = self.flow_context.tor_proxy(); - let socks_proxy = if default_proxy.is_some() || ipv4_proxy.is_some() || ipv6_proxy.is_some() || tor_proxy.is_some() { - Some(SocksProxyConfig { default: default_proxy, ipv4: ipv4_proxy, ipv6: ipv6_proxy, onion: tor_proxy }) - } else { - None - }; + let proxy_config = SocksProxyConfig { default: default_proxy, ipv4: ipv4_proxy, ipv6: ipv6_proxy, onion: tor_proxy }; + let socks_proxy = if proxy_config.is_empty() { None } else { Some(proxy_config) }; let p2p_adaptor = if self.inbound_limit == 0 { Adaptor::client_only(self.flow_context.hub().clone(), self.flow_context.clone(), self.counters.clone(), socks_proxy) diff --git a/protocol/p2p/src/core/connection_handler.rs b/protocol/p2p/src/core/connection_handler.rs index 919bc486aa..d3ac05d888 100644 --- a/protocol/p2p/src/core/connection_handler.rs +++ b/protocol/p2p/src/core/connection_handler.rs @@ -12,13 +12,19 @@ use kaspa_utils_tower::{ counters::TowerConnectionCounters, middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder}, }; +use rand::{rngs::OsRng, RngCore}; +use std::fmt::Write; use std::io; use std::net::{IpAddr, SocketAddr}; use std::pin::Pin; use std::str::FromStr; -use std::sync::Arc; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, OnceLock, +}; use std::time::Duration; use thiserror::Error; +use tokio::net::TcpStream; use tokio::sync::mpsc::{channel as mpsc_channel, Sender as MpscSender}; use tokio::sync::oneshot::{channel as oneshot_channel, Sender as OneshotSender}; use tokio_socks::tcp::socks5::Socks5Stream; @@ -59,14 +65,49 @@ pub struct ConnectionHandler { socks_proxy: Option, } -#[derive(Clone, Copy, Default)] +#[derive(Clone, Debug)] +pub enum SocksAuth { + None, + Static { username: Arc, password: Arc }, + Randomized, +} + +#[derive(Clone, Debug)] +pub struct SocksProxyParams { + pub address: SocketAddr, + pub auth: SocksAuth, +} + +#[derive(Clone, Default)] pub struct SocksProxyConfig { - pub default: Option, - pub ipv4: 
Option, - pub ipv6: Option, - pub onion: Option, + pub default: Option, + pub ipv4: Option, + pub ipv6: Option, + pub onion: Option, +} + +impl SocksProxyConfig { + pub fn is_empty(&self) -> bool { + self.default.is_none() && self.ipv4.is_none() && self.ipv6.is_none() && self.onion.is_none() + } + + pub fn entry_for(&self, address: &NetAddress) -> Option { + if address.as_onion().is_some() { + return self.onion.clone().or_else(|| self.default.clone()); + } + if let Some(ip) = address.as_ip() { + return match IpAddr::from(ip) { + IpAddr::V4(_) => self.ipv4.clone().or_else(|| self.default.clone()), + IpAddr::V6(_) => self.ipv6.clone().or_else(|| self.default.clone()), + }; + } + self.default.clone() + } } +static STREAM_ISOLATION_PREFIX: OnceLock = OnceLock::new(); +static STREAM_ISOLATION_COUNTER: AtomicU64 = AtomicU64::new(0); + impl ConnectionHandler { pub(crate) fn new( hub_sender: MpscSender, @@ -119,27 +160,15 @@ impl ConnectionHandler { .connect_timeout(Duration::from_millis(Self::connect_timeout())) .tcp_keepalive(Some(Duration::from_millis(Self::keep_alive()))); - let channel = if let Some(proxy_addr) = self.socks_proxy.and_then(|cfg| { - if peer_net_address.as_onion().is_some() { - cfg.onion.or(cfg.default) - } else if let Some(ip) = peer_net_address.as_ip() { - match IpAddr::from(ip) { - IpAddr::V4(_) => cfg.ipv4.or(cfg.default), - IpAddr::V6(_) => cfg.ipv6.or(cfg.default), - } - } else { - cfg.default - } - }) { + let channel = if let Some(proxy_params) = self.socks_proxy.as_ref().and_then(|cfg| cfg.entry_for(&peer_net_address)) { let connector = service_fn(move |uri: Uri| { - let proxy_addr = proxy_addr; + let proxy_params = proxy_params.clone(); async move { let host = uri.host().ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "missing host in URI"))?.to_string(); let port = uri.port_u16().unwrap_or(80); let target = format!("{}:{}", host, port); - let stream = - Socks5Stream::connect(proxy_addr, target).await.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + let stream = connect_via_socks(proxy_params, target).await?; Ok::<_, io::Error>(TokioIo::new(stream.into_inner())) } }); @@ -262,3 +291,34 @@ impl ProtoP2p for ConnectionHandler { Ok(Response::new(Box::pin(ReceiverStream::new(outgoing_receiver).map(Ok)) as Self::MessageStreamStream)) } } + +fn random_isolation_prefix() -> String { + let mut bytes = [0u8; 8]; + OsRng.fill_bytes(&mut bytes); + let mut prefix = String::with_capacity(bytes.len() * 2 + 1); + for byte in &bytes { + let _ = write!(&mut prefix, "{:02x}", byte); + } + prefix.push('-'); + prefix +} + +fn next_stream_isolation_credentials() -> (String, String) { + let prefix = STREAM_ISOLATION_PREFIX.get_or_init(random_isolation_prefix); + let counter = STREAM_ISOLATION_COUNTER.fetch_add(1, Ordering::Relaxed); + let value = format!("{prefix}{counter}"); + (value.clone(), value) +} + +async fn connect_via_socks(params: SocksProxyParams, target: String) -> io::Result> { + let address = params.address; + let result = match params.auth { + SocksAuth::None => Socks5Stream::connect(address, target).await, + SocksAuth::Static { username, password } => Socks5Stream::connect_with_password(address, target, &username, &password).await, + SocksAuth::Randomized => { + let (username, password) = next_stream_isolation_credentials(); + Socks5Stream::connect_with_password(address, target, username.as_str(), password.as_str()).await + } + }; + result.map_err(|err| io::Error::new(io::ErrorKind::Other, err)) +} diff --git a/protocol/p2p/src/lib.rs 
b/protocol/p2p/src/lib.rs index 838a987d48..26b9430e1a 100644 --- a/protocol/p2p/src/lib.rs +++ b/protocol/p2p/src/lib.rs @@ -12,8 +12,7 @@ mod core; mod handshake; pub use crate::core::adaptor::{Adaptor, ConnectionInitializer}; -pub use crate::core::connection_handler::ConnectionError; -pub use crate::core::connection_handler::SocksProxyConfig; +pub use crate::core::connection_handler::{ConnectionError, SocksAuth, SocksProxyConfig, SocksProxyParams}; pub use crate::core::hub::Hub; pub use crate::core::payload_type::KaspadMessagePayloadType; pub use crate::core::peer::{Peer, PeerKey, PeerProperties}; From 43381d41273c691da78205cc11e41581f5f57749 Mon Sep 17 00:00:00 2001 From: elldeeone <73735118+elldeeone@users.noreply.github.com> Date: Thu, 13 Nov 2025 10:46:48 +1100 Subject: [PATCH 6/6] Refactor Tor proxy config structs --- components/addressmanager/src/lib.rs | 16 +++---- components/connectionmanager/src/lib.rs | 2 +- kaspad/src/args.rs | 9 ++-- kaspad/src/daemon.rs | 28 +++++++----- kaspad/src/tor_manager.rs | 4 +- protocol/flows/src/flow_context.rs | 48 +++++++++++++-------- protocol/p2p/src/core/connection_handler.rs | 2 +- rpc/grpc/core/src/convert/peer.rs | 4 +- utils/src/networking.rs | 2 +- 9 files changed, 67 insertions(+), 48 deletions(-) diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs index 228d2fc8f1..5b31d24425 100644 --- a/components/addressmanager/src/lib.rs +++ b/components/addressmanager/src/lib.rs @@ -138,7 +138,7 @@ impl AddressManager { fn local_addresses(&self) -> impl Iterator + '_ { match self.config.externalip { // An external IP was passed, we will try to bind that if it's valid - Some(local_net_address) if local_net_address.as_ip().map_or(false, |ip| ip.is_publicly_routable()) => { + Some(local_net_address) if local_net_address.as_ip().is_some_and(|ip| ip.is_publicly_routable()) => { info!("External address is publicly routable {}", local_net_address); return Left(iter::once(local_net_address)); } @@ -155,10 +155,10 @@ impl AddressManager { // check whatever was passed as listen address (if routable) // otherwise(listen_address === 0.0.0.0) check all interfaces let listen_address = self.config.p2p_listen_address.normalize(self.config.default_p2p_port()); - if listen_address.as_ip().map_or(false, |ip| ip.is_publicly_routable()) { + if listen_address.as_ip().is_some_and(|ip| ip.is_publicly_routable()) { info!("Publicly routable local address found: {}", listen_address); Left(Left(iter::once(listen_address))) - } else if listen_address.as_ip().map_or(false, |ip| ip.is_unspecified()) { + } else if listen_address.as_ip().is_some_and(|ip| ip.is_unspecified()) { let network_interfaces = list_afinet_netifas(); let Ok(network_interfaces) = network_interfaces else { warn!("Error getting network interfaces: {:?}", network_interfaces); @@ -187,7 +187,7 @@ impl AddressManager { info!("[UPnP] Got external ip from gateway using upnp: {ip}"); let normalized_p2p_listen_address = self.config.p2p_listen_address.normalize(self.config.default_p2p_port()); - let local_addr = if normalized_p2p_listen_address.as_ip().map_or(false, |ip| ip.is_unspecified()) { + let local_addr = if normalized_p2p_listen_address.as_ip().is_some_and(|ip| ip.is_unspecified()) { SocketAddr::new(local_ip_address::local_ip().unwrap(), normalized_p2p_listen_address.port) } else { normalized_p2p_listen_address.to_socket_addr().expect("expected listen address to be IP-based") @@ -279,11 +279,11 @@ impl AddressManager { debug!("[Address manager] skipping onion address {} 
(onion disabled)", address); return; } - if !self.allow_ipv4 && address.as_ip().map_or(false, |ip| ip.is_ipv4()) { + if !self.allow_ipv4 && address.as_ip().is_some_and(|ip| ip.is_ipv4()) { debug!("[Address manager] skipping IPv4 address {} (ipv4 disabled)", address); return; } - if !self.allow_ipv6 && address.as_ip().map_or(false, |ip| ip.is_ipv6()) { + if !self.allow_ipv6 && address.as_ip().is_some_and(|ip| ip.is_ipv6()) { debug!("[Address manager] skipping IPv6 address {} (ipv6 disabled)", address); return; } @@ -331,7 +331,7 @@ impl AddressManager { fn prune_ipv4_addresses(&mut self) { let to_remove: Vec<_> = - self.address_store.iterate_addresses().filter(|addr| addr.as_ip().map_or(false, |ip| ip.is_ipv4())).collect(); + self.address_store.iterate_addresses().filter(|addr| addr.as_ip().is_some_and(|ip| ip.is_ipv4())).collect(); for addr in to_remove { self.address_store.remove(addr); } @@ -339,7 +339,7 @@ impl AddressManager { fn prune_ipv6_addresses(&mut self) { let to_remove: Vec<_> = - self.address_store.iterate_addresses().filter(|addr| addr.as_ip().map_or(false, |ip| ip.is_ipv6())).collect(); + self.address_store.iterate_addresses().filter(|addr| addr.as_ip().is_some_and(|ip| ip.is_ipv6())).collect(); for addr in to_remove { self.address_store.remove(addr); } diff --git a/components/connectionmanager/src/lib.rs b/components/connectionmanager/src/lib.rs index 1d989bb612..7e7b9a6ee9 100644 --- a/components/connectionmanager/src/lib.rs +++ b/components/connectionmanager/src/lib.rs @@ -389,6 +389,6 @@ impl ConnectionManager { .lock() .await .iter() - .any(|(address, request)| request.is_permanent && address.as_ip().map_or(false, |addr_ip| IpAddr::from(addr_ip) == ip)) + .any(|(address, request)| request.is_permanent && address.as_ip().is_some_and(|addr_ip| IpAddr::from(addr_ip) == ip)) } } diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index e603ac9476..c6047489d8 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -188,19 +188,18 @@ impl Default for Args { impl Args { pub fn proxy_settings(&self) -> ProxySettings { let mut settings = ProxySettings::default(); - if let Some(default_proxy) = self.proxy.clone() { + if let Some(default_proxy) = self.proxy { settings.default = Some(ProxyConfigEntry { address: default_proxy, auth: self.proxy_auth(false) }); } for rule in &self.proxy_net { - let entry = - ProxyConfigEntry { address: rule.address.clone(), auth: self.proxy_auth(matches!(rule.network, ProxyNetwork::Onion)) }; + let entry = ProxyConfigEntry { address: rule.address, auth: self.proxy_auth(matches!(rule.network, ProxyNetwork::Onion)) }; match rule.network { ProxyNetwork::Ipv4 => settings.ipv4 = Some(entry), ProxyNetwork::Ipv6 => settings.ipv6 = Some(entry), ProxyNetwork::Onion => settings.onion = Some(entry), } } - if let Some(tor_specific) = self.tor_proxy.clone() { + if let Some(tor_specific) = self.tor_proxy { settings.onion = Some(ProxyConfigEntry { address: tor_specific, auth: self.proxy_auth(true) }); } settings @@ -655,7 +654,7 @@ impl Args { connect_peers: arg_match_many_unwrap_or::(&m, "connect-peers", defaults.connect_peers), add_peers: arg_match_many_unwrap_or::(&m, "add-peers", defaults.add_peers), listen: m.get_one::("listen").cloned().or(defaults.listen), - proxy: m.get_one::("proxy").cloned().or(defaults.proxy.clone()), + proxy: m.get_one::("proxy").cloned().or(defaults.proxy), proxy_user: m.get_one::("proxy-user").cloned().or(defaults.proxy_user.clone()), proxy_pass: m.get_one::("proxy-pass").cloned().or(defaults.proxy_pass.clone()), proxy_net: m diff 
--git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 5e77c757c0..6763e8b635 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -64,7 +64,10 @@ use kaspa_mining::{ monitor::MiningMonitor, MiningCounters, }; -use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; +use kaspa_p2p_flows::{ + flow_context::{FlowContext, ProxyEndpoints, TorConfig}, + service::P2pService, +}; use itertools::Itertools; use kaspa_perf_monitor::{builder::Builder as PerfMonitorBuilder, counters::CountersSnapshot}; @@ -204,8 +207,8 @@ fn compute_tor_system_config(args: &Args) -> Option { let socks_source = proxy_settings .onion .as_ref() - .or_else(|| proxy_settings.default.as_ref()) - .map(|entry| entry.address.clone()) + .or(proxy_settings.default.as_ref()) + .map(|entry| entry.address) .unwrap_or_else(|| ContextualNetAddress::loopback().with_port(9050)); let socks_addr = contextual_to_socket(socks_source, 9050); @@ -592,7 +595,7 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: let effective_tor_proxy = tor_proxy_from_manager .clone() .or_else(|| tor_proxy_override_addr.clone()) - .or_else(|| default_proxy_addr.as_ref().map(|entry| promote_tor_proxy(entry))); + .or_else(|| default_proxy_addr.as_ref().map(promote_tor_proxy)); if let Some(proxy) = effective_tor_proxy.as_ref() { info!("Effective Tor proxy: {}", proxy.address); if default_proxy_addr.is_none() { @@ -1032,6 +1035,14 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm (None, None) }; + let proxy_endpoints = ProxyEndpoints::new(default_proxy_addr.clone(), proxy_ipv4_addr.clone(), proxy_ipv6_addr.clone()); + let tor_config = TorConfig { + proxy: effective_tor_proxy.clone(), + tor_only: args.tor_only, + onion_service: onion_service_info.as_ref().map(|info| (info.id.clone(), info.virt_port)), + bootstrap_rx: tor_bootstrap_rx, + }; + let flow_context = Arc::new(FlowContext::new( consensus_manager.clone(), address_manager, @@ -1041,13 +1052,8 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm notification_root, hub.clone(), mining_rule_engine.clone(), - default_proxy_addr, - proxy_ipv4_addr, - proxy_ipv6_addr, - effective_tor_proxy, - args.tor_only, - onion_service_info.as_ref().map(|info| (info.id.clone(), info.virt_port)), - tor_bootstrap_rx, + proxy_endpoints, + tor_config, )); let tor_async_service = tor_manager.as_ref().map(|manager| { Arc::new(TorRuntimeService::new( diff --git a/kaspad/src/tor_manager.rs b/kaspad/src/tor_manager.rs index 535d95a860..6834646b1c 100644 --- a/kaspad/src/tor_manager.rs +++ b/kaspad/src/tor_manager.rs @@ -176,8 +176,8 @@ impl TorManager { return Err(TorManagerError::Control("unexpected EOF while waiting for ADD_ONION response".into())); } let trimmed = line.trim(); - if trimmed.starts_with("250-ServiceID=") { - service_id = Some(trimmed["250-ServiceID=".len()..].to_string()); + if let Some(stripped) = trimmed.strip_prefix("250-ServiceID=") { + service_id = Some(stripped.to_string()); } else if trimmed.starts_with("250 ") { break; } else if trimmed.starts_with('5') { diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 5d8a3f5773..6474f93b34 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -215,6 +215,27 @@ impl BlockEventLogger { } } +#[derive(Clone, Default)] +pub struct ProxyEndpoints { + pub default: Option, + pub ipv4: Option, + pub ipv6: Option, +} + +impl ProxyEndpoints { + pub fn new(default: Option, ipv4: Option, ipv6: Option) -> Self { + Self { default, ipv4, ipv6 } + } +} + +#[derive(Clone)] +pub struct TorConfig { + pub proxy: Option, + pub tor_only: bool, + pub onion_service: Option<(V3OnionServiceId, u16)>, + pub bootstrap_rx: Option>, +} + pub struct FlowContextInner { pub node_id: PeerId, pub consensus_manager: Arc, @@ -244,9 +265,7 @@ pub struct FlowContextInner { // Mining rule engine mining_rule_engine: Arc, - proxy: Option, - proxy_ipv4: Option, - proxy_ipv6: Option, + proxy_endpoints: ProxyEndpoints, tor_proxy: Option, tor_only: bool, onion_service: Option<(V3OnionServiceId, NetAddress)>, @@ -325,17 +344,14 @@ impl FlowContext { notification_root: Arc, hub: Hub, mining_rule_engine: Arc, - proxy_default: Option, - proxy_ipv4: Option, - proxy_ipv6: Option, - tor_proxy: Option, - tor_only: bool, - onion_service: Option<(V3OnionServiceId, u16)>, - tor_bootstrap_rx: Option>, + proxy_endpoints: ProxyEndpoints, + tor_config: TorConfig, ) -> Self { let bps_upper_bound = config.bps().upper_bound() as usize; let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (bps_upper_bound as f64).log2().ceil() as u32; + let TorConfig { proxy: tor_proxy, tor_only, onion_service, bootstrap_rx } = tor_config; + let onion_service = onion_service.and_then(|(id, port)| { let onion_host = format!("{}.onion", id); match OnionAddress::try_from(onion_host.as_str()) { @@ -377,13 +393,11 @@ impl FlowContext { max_orphans, config, mining_rule_engine, - proxy: proxy_default, - proxy_ipv4, - proxy_ipv6, + proxy_endpoints, tor_proxy, tor_only, onion_service, - tor_bootstrap_rx: Mutex::new(tor_bootstrap_rx), + tor_bootstrap_rx: Mutex::new(bootstrap_rx), }), } } @@ -401,15 +415,15 @@ impl FlowContext { } pub fn proxy(&self) -> Option { - self.proxy.clone() + self.proxy_endpoints.default.clone() } pub fn proxy_ipv4(&self) -> Option { - self.proxy_ipv4.clone() + self.proxy_endpoints.ipv4.clone() } pub fn proxy_ipv6(&self) -> Option { - self.proxy_ipv6.clone() + self.proxy_endpoints.ipv6.clone() } pub fn 
tor_only(&self) -> bool {
diff --git a/protocol/p2p/src/core/connection_handler.rs b/protocol/p2p/src/core/connection_handler.rs
index d3ac05d888..dc4863d53a 100644
--- a/protocol/p2p/src/core/connection_handler.rs
+++ b/protocol/p2p/src/core/connection_handler.rs
@@ -320,5 +320,5 @@ async fn connect_via_socks(params: SocksProxyParams, target: String) -> io::Resu
             Socks5Stream::connect_with_password(address, target, username.as_str(), password.as_str()).await
         }
     };
-    result.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
+    result.map_err(io::Error::other)
 }
diff --git a/rpc/grpc/core/src/convert/peer.rs b/rpc/grpc/core/src/convert/peer.rs
index 8662186f86..b6bbde1394 100644
--- a/rpc/grpc/core/src/convert/peer.rs
+++ b/rpc/grpc/core/src/convert/peer.rs
@@ -45,8 +45,8 @@ try_from!(item: &protowire::GetConnectedPeerInfoMessage, kaspa_rpc_core::RpcPeer
 });
 try_from!(item: &protowire::GetPeerAddressesKnownAddressMessage, kaspa_rpc_core::RpcPeerAddress, {
-    Self::from_str(&item.addr).map_err(NetAddressError::from)?
+    Self::from_str(&item.addr).map_err(RpcError::from)?
 });
 try_from!(item: &protowire::GetPeerAddressesKnownAddressMessage, kaspa_rpc_core::RpcIpAddress, {
-    Self::from_str(&item.addr).map_err(NetAddressError::from)?
+    Self::from_str(&item.addr).map_err(|err| RpcError::from(NetAddressError::from(err)))?
 });
diff --git a/utils/src/networking.rs b/utils/src/networking.rs
index f9f0eaf950..ec5b62e89e 100644
--- a/utils/src/networking.rs
+++ b/utils/src/networking.rs
@@ -438,7 +438,7 @@ impl BorshDeserialize for AddressKind {
         match variant_idx {
             0 => Ok(AddressKind::Ip(BorshDeserialize::deserialize_reader(reader)?)),
             1 => Ok(AddressKind::Onion(BorshDeserialize::deserialize_reader(reader)?)),
-            _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid address kind variant").into()),
+            _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid address kind variant")),
        }
    }
 }
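
Note for reviewers: the sketch below is not part of the patch series; it is a minimal, standalone illustration of how the SocksAuth variants added in connection_handler.rs are expected to map onto tokio-socks calls when probing a local Tor SOCKS listener. It assumes Tor on 127.0.0.1:9050 (the fallback default used by the patches); the target address and the example credential are made up, and the tokio/tokio-socks API usage should be checked against the pinned crate versions before relying on it.

use tokio_socks::tcp::socks5::Socks5Stream;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let proxy = "127.0.0.1:9050"; // assumed local Tor SOCKS port (the patches' fallback default)
    let target = "example.invalid:16111"; // hypothetical peer address, for illustration only

    // SocksAuth::None: plain SOCKS5 CONNECT with no credentials.
    let _plain = Socks5Stream::connect(proxy, target).await?;

    // SocksAuth::Static / SocksAuth::Randomized: username+password CONNECT. With Randomized,
    // kaspad derives a fresh "<hex-prefix>-<counter>" pair (username == password) per outbound
    // connection so a Tor proxy with IsolateSOCKSAuth places each peer on its own circuit.
    let credential = "0123456789abcdef-0"; // shape mirrors next_stream_isolation_credentials()
    let _isolated = Socks5Stream::connect_with_password(proxy, target, credential, credential).await?;

    Ok(())
}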