diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/Cargo.lock b/Cargo.lock index 6ed7bfd0b60..264d9ec7566 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,9 +132,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.20" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc32535569185cbcb6ad5fa64d989a47bccb9a08e27284b1f2a3ccf16e6d010" +checksum = "35d744058a9daa51a8cf22a3009607498fcf82d3cf4c5444dd8056cdf651f471" dependencies = [ "alloy-primitives", "num_enum", @@ -156,7 +156,7 @@ dependencies = [ "auto_impl", "borsh", "c-kzg", - "derive_more 2.0.1", + "derive_more 2.1.0", "either", "k256", "once_cell", @@ -249,7 +249,7 @@ dependencies = [ "auto_impl", "borsh", "c-kzg", - "derive_more 2.0.1", + "derive_more 2.1.0", "either", "serde", "serde_with", @@ -277,7 +277,7 @@ checksum = "f72cf87cda808e593381fb9f005ffa4d2475552b7a6c5ac33d087bf77d82abd0" dependencies = [ "alloy-primitives", "alloy-sol-types", - "http 1.3.1", + "http 1.4.0", "serde", "serde_json", "thiserror 2.0.17", @@ -303,7 +303,7 @@ dependencies = [ "alloy-sol-types", "async-trait", "auto_impl", - "derive_more 2.0.1", + "derive_more 2.1.0", "futures-utils-wasm", "serde", "serde_json", @@ -334,11 +334,11 @@ dependencies = [ "bytes", "cfg-if", "const-hex", - "derive_more 2.0.1", + "derive_more 2.1.0", "foldhash 0.2.0", "getrandom 0.3.4", - "hashbrown 0.16.0", - "indexmap 2.12.0", + "hashbrown 0.16.1", + "indexmap 2.12.1", "itoa", "k256", "keccak-asm", @@ -411,7 +411,7 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -522,7 +522,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -534,11 +534,11 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.12.0", + "indexmap 2.12.1", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "syn-solidity", "tiny-keccak", ] @@ -555,7 +555,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "syn-solidity", ] @@ -590,7 +590,7 @@ dependencies = [ "alloy-json-rpc", "auto_impl", "base64 0.22.1", - "derive_more 2.0.1", + "derive_more 2.1.0", "futures", "futures-utils-wasm", "parking_lot", @@ -628,7 +628,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "arrayvec", - "derive_more 2.0.1", + "derive_more 2.1.0", "nybbles", "serde", "smallvec", @@ -644,7 +644,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -827,7 +827,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -865,7 +865,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -976,7 +976,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "synstructure", ] @@ -988,7 +988,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1067,7 +1067,7 @@ checksum = 
"c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1078,7 +1078,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1107,7 +1107,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" dependencies = [ "base64 0.22.1", - "http 1.3.1", + "http 1.4.0", "log", "url", ] @@ -1120,7 +1120,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1139,7 +1139,7 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "http-body-util", "itoa", @@ -1165,7 +1165,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "http-body-util", "mime", @@ -1218,9 +1218,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" [[package]] name = "beacon_chain" @@ -1391,7 +1391,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.110", + "syn 2.0.111", "which", ] @@ -1412,15 +1412,15 @@ checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitcoin-io" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" +checksum = "2dee39a0ee5b4095224a0cfc6bf4cc1baf0f9624b96b367e53b66d974e51d953" [[package]] name = "bitcoin_hashes" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +checksum = "26ec84b80c482df901772e931a9a681e26a1b9ee2302edeff23cb30328745c8b" dependencies = [ "bitcoin-io", "hex-conservative", @@ -1557,9 +1557,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" dependencies = [ "borsh-derive", "cfg_aliases", @@ -1567,15 +1567,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1712,9 +1712,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.46" +version = "1.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97463e1064cb1b1c1384ad0a0b9c8abd0988e2a91f52606c80ef14aadb63e36" +checksum = 
"90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" dependencies = [ "find-msvc-tools", "jobserver", @@ -1871,7 +1871,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2107,6 +2107,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -2153,9 +2162,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +checksum = "5eb8a2a1cd12ab0d987a5d5e825195d372001a4094a0376319d5a0ad71c1ba0d" dependencies = [ "crc-catalog", ] @@ -2343,7 +2352,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2401,7 +2410,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2416,7 +2425,7 @@ dependencies = [ "quote", "serde", "strsim 0.11.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2438,7 +2447,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2449,7 +2458,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2509,7 +2518,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2616,7 +2625,38 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", +] + +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.111", ] [[package]] @@ -2625,31 +2665,33 @@ version = "0.99.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "derive_more" -version = 
"2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" dependencies = [ + "convert_case 0.10.0", "proc-macro2", "quote", - "syn 2.0.110", + "rustc_version 0.4.1", + "syn 2.0.111", "unicode-xid", ] @@ -2756,7 +2798,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2851,7 +2893,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3074,7 +3116,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3094,7 +3136,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3319,7 +3361,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3731,7 +3773,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3841,6 +3883,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "ghash" version = "0.5.1" @@ -3894,7 +3948,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.12.0", + "indexmap 2.12.1", "slab", "tokio", "tokio-util", @@ -3912,8 +3966,8 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http 1.3.1", - "indexmap 2.12.0", + "http 1.4.0", + "indexmap 2.12.1", "slab", "tokio", "tokio-util", @@ -3975,12 +4029,13 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "foldhash 0.2.0", "serde", + "serde_core", ] [[package]] @@ -4083,9 +4138,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-conservative" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +checksum = "fda06d18ac606267c40c04e41b9947729bf8b9efe74bd4e82b61a5f26a510b9f" dependencies = [ "arrayvec", ] @@ -4193,12 +4248,11 @@ dependencies = [ [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = 
"e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -4220,7 +4274,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.3.1", + "http 1.4.0", ] [[package]] @@ -4231,7 +4285,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "pin-project-lite", ] @@ -4364,7 +4418,7 @@ dependencies = [ "futures-channel", "futures-core", "h2 0.4.12", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "httparse", "httpdate", @@ -4382,7 +4436,7 @@ version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "http 1.3.1", + "http 1.4.0", "hyper 1.8.1", "hyper-util", "rustls 0.23.35", @@ -4424,16 +4478,16 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "hyper 1.8.1", "ipnet", @@ -4518,9 +4572,9 @@ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ "icu_collections", "icu_locale_core", @@ -4532,9 +4586,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" @@ -4621,7 +4675,7 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 1.3.1", + "http 1.4.0", "http-body-util", "hyper 1.8.1", "hyper-util", @@ -4649,7 +4703,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -4665,13 +4719,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.16.0", + "hashbrown 0.16.1", "serde", "serde_core", ] @@ -4839,9 +4893,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.82" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -4933,7 +4987,7 @@ version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin", + "spin 0.9.8", ] [[package]] @@ -4980,6 +5034,229 @@ dependencies = [ "validator_dir", ] +[[package]] +name = "lean_client" +version = "0.1.0" +dependencies = [ + "clap", + "clap_utils", + "environment", + "health_metrics", + "lean_config", + "lean_consensus", + "lean_genesis", + "lean_keystore", + "lean_network", + "lean_network_config", + "lean_store", + "lean_validator_service", + "lighthouse_version", + "logging", + "metrics", + "serde", + "slot_clock", + "store", + "task_executor", + "tempfile", + "tokio", + "tracing", + "types", + "warp", + "warp_utils", +] + +[[package]] +name = "lean_config" +version = "0.1.0" +dependencies = [ + "ethereum_ssz", + "hex", + "lean_consensus", + "lean_genesis", + "lean_keystore", + "lean_network", + "lean_network_config", + "lean_store", + "serde", + "serde_json", + "serde_yaml", + "slot_clock", + "ssz_types", + "store", + "tracing", + "tree_hash", + "types", +] + +[[package]] +name = "lean_consensus" +version = "0.1.0" +dependencies = [ + "ethereum_ssz", + "ethereum_ssz_derive", + "fixed_bytes", + "int_to_bytes", + "lazy_static", + "lean_crypto", + "metrics", + "milhouse", + "serde", + "serde_json", + "serde_yaml", + "ssz_types", + "tracing", + "tree_hash", + "tree_hash_derive", + "types", +] + +[[package]] +name = "lean_crypto" +version = "0.1.0" +dependencies = [ + "alloy-primitives", + "ethereum_ssz", + "ethereum_ssz_derive", + "lean_keystore", + "leansig", + "ssz_types", + "tree_hash", + "tree_hash_derive", + "typenum", + "types", +] + +[[package]] +name = "lean_forkchoice" +version = "0.1.0" +dependencies = [ + "ethereum_ssz", + "fixed_bytes", + "lazy_static", + "lean_consensus", + "metrics", + "parking_lot", + "store", + "tracing", + "types", +] + +[[package]] +name = "lean_genesis" +version = "0.1.0" +dependencies = [ + "serde", + "serde_yaml", + "tracing", +] + +[[package]] +name = "lean_keystore" +version = "0.1.0" +dependencies = [ + "clap", + "ethereum_ssz", + "ethereum_ssz_derive", + "leansig", + "rand 0.9.2", + "serde", + "serde_json", + "tempfile", + "thiserror 1.0.69", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "lean_network" +version = "0.1.0" +dependencies = [ + "async-trait", + "enr", + "ethereum_ssz", + "ethereum_ssz_derive", + "futures", + "lean_consensus", + "libp2p", + "libp2p-gossipsub 0.50.0", + "libp2p-identity", + "metrics", + "serde", + "serde_yaml", + "sha2 0.10.9", + "snap", + "task_executor", + "tokio", + "tracing", + "types", +] + +[[package]] +name = "lean_network_config" +version = "0.1.0" +dependencies = [ + "serde", + "serde_yaml", + "tracing", +] + +[[package]] +name = "lean_store" +version = "0.1.0" +dependencies = [ + "ethereum_ssz", + "lean_consensus", + "store", + "types", +] + +[[package]] +name = "lean_validator_service" +version = "0.1.0" +dependencies = [ + "ethereum_ssz", + "ethereum_ssz_derive", + "fixed_bytes", + "futures", + "lazy_static", + "lean_consensus", + "lean_crypto", + "lean_forkchoice", + "lean_keystore", + "lean_network", + "lean_store", + "leansig", + "metrics", + "milhouse", + "slot_clock", + "ssz_types", + "store", + "tokio", + "tracing", + "tree_hash", + "types", +] + +[[package]] +name = "leansig" +version = "0.1.0" +source = "git+https://github.com/leanEthereum/leanSig#d9610e7fbbc75197f134e065df79acc630994706" +dependencies = [ + "dashmap", + "ethereum_ssz", + "num-bigint", + "num-traits", + "p3-baby-bear", 
+ "p3-field", + "p3-koala-bear", + "p3-symmetric", + "rand 0.9.2", + "rayon", + "serde", + "sha3", + "thiserror 2.0.17", +] + [[package]] name = "leveldb" version = "0.8.6" @@ -5005,9 +5282,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.177" +version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "libloading" @@ -5055,6 +5332,7 @@ dependencies = [ "libp2p-connection-limits", "libp2p-core", "libp2p-dns", + "libp2p-gossipsub 0.49.2", "libp2p-identify", "libp2p-identity", "libp2p-mdns", @@ -5062,6 +5340,7 @@ dependencies = [ "libp2p-noise", "libp2p-plaintext", "libp2p-quic", + "libp2p-request-response", "libp2p-swarm", "libp2p-tcp", "libp2p-upnp", @@ -5135,6 +5414,36 @@ dependencies = [ "tracing", ] +[[package]] +name = "libp2p-gossipsub" +version = "0.49.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f58e37d8d6848e5c4c9e3c35c6f61133235bff2960c9c00a663b0849301221" +dependencies = [ + "async-channel 2.5.0", + "asynchronous-codec", + "base64 0.22.1", + "byteorder", + "bytes", + "either", + "fnv", + "futures", + "futures-timer", + "getrandom 0.2.16", + "hashlink 0.9.1", + "hex_fmt", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "regex", + "sha2 0.10.9", + "tracing", + "web-time", +] + [[package]] name = "libp2p-gossipsub" version = "0.50.0" @@ -5188,9 +5497,9 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3104e13b51e4711ff5738caa1fb54467c8604c2e94d607e27745bcf709068774" +checksum = "f0c7892c221730ba55f7196e98b0b8ba5e04b4155651736036628e9f73ed6fc3" dependencies = [ "asn1_der", "bs58 0.5.1", @@ -5233,6 +5542,7 @@ checksum = "805a555148522cb3414493a5153451910cb1a146c53ffbf4385708349baf62b7" dependencies = [ "futures", "libp2p-core", + "libp2p-gossipsub 0.49.2", "libp2p-identify", "libp2p-identity", "libp2p-swarm", @@ -5321,6 +5631,23 @@ dependencies = [ "tracing", ] +[[package]] +name = "libp2p-request-response" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9f1cca83488b90102abac7b67d5c36fc65bc02ed47620228af7ed002e6a1478" +dependencies = [ + "async-trait", + "futures", + "futures-bounded", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand 0.8.5", + "smallvec", + "tracing", +] + [[package]] name = "libp2p-swarm" version = "0.47.0" @@ -5351,7 +5678,7 @@ checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ "heck", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5473,6 +5800,7 @@ dependencies = [ "ethereum_hashing", "futures", "initialized_validators", + "lean_client", "lighthouse_network", "lighthouse_tracing", "lighthouse_version", @@ -5525,7 +5853,7 @@ dependencies = [ "hex", "itertools 0.10.5", "libp2p", - "libp2p-gossipsub", + "libp2p-gossipsub 0.50.0", "libp2p-mplex", "lighthouse_version", "local-ip-address", @@ -5642,14 +5970,14 @@ dependencies = [ [[package]] name = "local-ip-address" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" +checksum = 
"786c72d9739fc316a7acf9b22d9c2794ac9cb91074e9668feb04304ab7219783" dependencies = [ "libc", "neli", "thiserror 2.0.17", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5671,9 +5999,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "logging" @@ -5754,7 +6082,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5937,9 +6265,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", @@ -5975,7 +6303,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5987,25 +6315,26 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "mockito" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7760e0e418d9b7e5777c0374009ca4c93861b9066f18cb334a20ce50ab63aa48" +checksum = "7e0603425789b4a70fcc4ac4f5a46a566c116ee3e2a6b768dc623f7719c611de" dependencies = [ "assert-json-diff", "bytes", "colored", - "futures-util", - "http 1.3.1", + "futures-core", + "http 1.4.0", "http-body 1.0.1", "http-body-util", "hyper 1.8.1", "hyper-util", "log", + "pin-project-lite", "rand 0.9.2", "regex", "serde_json", @@ -6029,7 +6358,7 @@ dependencies = [ "rustc_version 0.4.1", "smallvec", "tagptr", - "uuid 1.18.1", + "uuid 1.19.0", ] [[package]] @@ -6131,27 +6460,31 @@ dependencies = [ [[package]] name = "neli" -version = "0.6.5" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" dependencies = [ + "bitflags 2.10.0", "byteorder", + "derive_builder", + "getset", "libc", "log", "neli-proc-macros", + "parking_lot", ] [[package]] name = "neli-proc-macros" -version = "0.1.4" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" dependencies = [ "either", "proc-macro2", "quote", "serde", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -6244,7 +6577,7 @@ dependencies = [ "itertools 0.10.5", "k256", "kzg", - "libp2p-gossipsub", + "libp2p-gossipsub 0.50.0", "lighthouse_network", "lighthouse_tracing", "logging", @@ -6463,7 +6796,7 @@ checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6562,7 +6895,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6615,7 +6948,7 @@ checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" dependencies = [ 
"async-trait", "bytes", - "http 1.3.1", + "http 1.4.0", "opentelemetry", "reqwest", ] @@ -6626,7 +6959,7 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" dependencies = [ - "http 1.3.1", + "http 1.4.0", "opentelemetry", "opentelemetry-http", "opentelemetry-proto", @@ -6693,6 +7026,145 @@ dependencies = [ "types", ] +[[package]] +name = "p3-baby-bear" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "p3-field", + "p3-mds", + "p3-monty-31", + "p3-poseidon2", + "p3-symmetric", + "rand 0.9.2", +] + +[[package]] +name = "p3-dft" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "itertools 0.14.0", + "p3-field", + "p3-matrix", + "p3-maybe-rayon", + "p3-util", + "spin 0.10.0", + "tracing", +] + +[[package]] +name = "p3-field" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "itertools 0.14.0", + "num-bigint", + "p3-maybe-rayon", + "p3-util", + "paste", + "rand 0.9.2", + "serde", + "tracing", +] + +[[package]] +name = "p3-koala-bear" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "p3-field", + "p3-monty-31", + "p3-poseidon2", + "p3-symmetric", + "rand 0.9.2", +] + +[[package]] +name = "p3-matrix" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "itertools 0.14.0", + "p3-field", + "p3-maybe-rayon", + "p3-util", + "rand 0.9.2", + "serde", + "tracing", + "transpose", +] + +[[package]] +name = "p3-maybe-rayon" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" + +[[package]] +name = "p3-mds" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "p3-dft", + "p3-field", + "p3-symmetric", + "p3-util", + "rand 0.9.2", +] + +[[package]] +name = "p3-monty-31" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "itertools 0.14.0", + "num-bigint", + "p3-dft", + "p3-field", + "p3-matrix", + "p3-maybe-rayon", + "p3-mds", + "p3-poseidon2", + "p3-symmetric", + "p3-util", + "paste", + "rand 0.9.2", + "serde", + "spin 0.10.0", + "tracing", + "transpose", +] + +[[package]] +name = "p3-poseidon2" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "p3-field", + "p3-mds", + "p3-symmetric", + "p3-util", + "rand 0.9.2", +] + +[[package]] +name = "p3-symmetric" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "itertools 0.14.0", + "p3-field", + "serde", +] + +[[package]] +name = "p3-util" +version = "0.3.0" +source = "git+https://github.com/Plonky3/Plonky3.git?rev=a33a312#a33a31274a5e78bb5fbe3f82ffd2c294e17fa830" +dependencies = [ + "serde", +] + [[package]] name = "pairing" version = "0.23.0" @@ -6727,7 +7199,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - 
"syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6815,9 +7287,9 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.3" +version = "2.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" +checksum = "cbcfd20a6d4eeba40179f05735784ad32bdaef05ce8e8af05f180d45bb3e7e22" dependencies = [ "memchr", "ucd-trie", @@ -6840,7 +7312,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -7013,7 +7485,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -7055,7 +7527,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -7116,7 +7588,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -7146,7 +7618,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -7169,7 +7641,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -7498,7 +7970,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -7532,16 +8004,16 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.12.24" +version = "0.12.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +checksum = "b6eff9328d40131d43bd911d42d79eb6a47312002a4daefc9e37f17e74a7701a" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "http-body-util", "hyper 1.8.1", @@ -7871,9 +8343,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" dependencies = [ "web-time", "zeroize", @@ -8173,7 +8645,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -8197,7 +8669,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -8214,15 +8686,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.16.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10574371d41b0d9b2cff89418eda27da52bcaff2cc8741db26382a77c29131f1" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.0", + "indexmap 2.12.1", "schemars 0.9.0", "schemars 
1.1.0", "serde_core", @@ -8233,14 +8705,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.16.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a72d8216842fdd57820dc78d840bef99248e35fb2554ff923319e60f2d686b" +checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -8249,7 +8721,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "itoa", "ryu", "serde", @@ -8338,9 +8810,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.6" +version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" dependencies = [ "libc", ] @@ -8375,9 +8847,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "similar" @@ -8566,6 +9038,15 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.7.3" @@ -8688,6 +9169,12 @@ dependencies = [ "zstd 0.13.3", ] +[[package]] +name = "strength_reduce" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe895eb47f22e2ddd4dabc02bce419d2e643c8e3b585c78158b349195bc24d82" + [[package]] name = "strsim" version = "0.10.0" @@ -8718,7 +9205,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -8738,7 +9225,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -8764,9 +9251,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.110" +version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", @@ -8782,7 +9269,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -8802,7 +9289,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -8920,7 +9407,7 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -8949,7 +9436,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] 
[[package]] @@ -8960,7 +9447,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -9143,7 +9630,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -9215,11 +9702,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "5d7cbc3b4b49633d57a0509303158ca50de80ae32c265093b24c414705807832" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "toml_datetime", "toml_parser", "winnow", @@ -9246,7 +9733,7 @@ dependencies = [ "base64 0.22.1", "bytes", "h2 0.4.12", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "http-body-util", "hyper 1.8.1", @@ -9273,7 +9760,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "bytes", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "http-body-util", "hyper 1.8.1", @@ -9320,7 +9807,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.0", + "indexmap 2.12.1", "pin-project-lite", "slab", "sync_wrapper", @@ -9333,14 +9820,14 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "bitflags 2.10.0", "bytes", "futures-util", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "iri-string", "pin-project-lite", @@ -9363,9 +9850,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" dependencies = [ "log", "pin-project-lite", @@ -9375,32 +9862,32 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" dependencies = [ "crossbeam-channel", - "thiserror 1.0.69", + "thiserror 2.0.17", "time", "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" dependencies = [ "once_cell", "valuable", @@ -9447,9 +9934,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" 
-version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", @@ -9466,6 +9953,16 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "transpose" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad61aed86bc3faea4300c7aee358b4c6d0c8d6ccc36524c96e4c92ccf26e77e" +dependencies = [ + "num-integer", + "strength_reduce", +] + [[package]] name = "tree_hash" version = "0.12.0" @@ -9488,7 +9985,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -9639,6 +10136,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -9720,9 +10223,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.18.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -10050,9 +10553,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", @@ -10063,9 +10566,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.55" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", "js-sys", @@ -10076,9 +10579,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10086,22 +10589,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] @@ -10135,9 +10638,9 @@ dependencies = [ [[package]] name = "web-sys" -version = 
"0.3.82" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", @@ -10303,7 +10806,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -10314,7 +10817,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -10649,9 +11152,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] @@ -10823,28 +11326,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -10864,7 +11367,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "synstructure", ] @@ -10886,7 +11389,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -10919,7 +11422,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d5d1687c764..196ea4dbd57 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,6 +62,17 @@ members = [ "crypto/kzg", "database_manager", "lcli", + "lean_client", + "lean_client/validator_service", + "lean_client/consensus", + "lean_client/lean_network", + "lean_client/forkchoice", + "lean_client/crypto", + "lean_client/store", + "lean_client/keystore", + "lean_client/common/lean_genesis", + "lean_client/common/lean_config", + "lean_client/common/lean_network_config", "lighthouse", "lighthouse/environment", "slasher", @@ -279,6 +290,21 @@ zeroize = { version = "1", features = ["zeroize_derive", "serde"] } zip = "0.6" zstd = "0.13" +# lean chain crates +lean_client = { path = "lean_client" } +lean_validator_service = { path = "lean_client/validator_service" } +lean_consensus = { path = "lean_client/consensus" } +lean_network = { path = "lean_client/lean_network" } +lean_forkchoice = { path = 
"lean_client/forkchoice" } +lean_crypto = { path = "lean_client/crypto"} +lean_store = { path = "lean_client/store" } +lean_keystore = { path = "lean_client/keystore" } +lean_genesis = { path = "lean_client/common/lean_genesis" } +lean_config = { path = "lean_client/common/lean_config" } +lean_network_config = { path = "lean_client/common/lean_network_config" } +leansig = { git = "https://github.com/leanEthereum/leanSig" } + + [profile.maxperf] inherits = "release" lto = "fat" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 46ba14f596b..556181354a3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3369,6 +3369,7 @@ impl BeaconChain { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); + let _interop_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DURATION_SECONDS); // Increment the Prometheus counter for block processing requests. metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index e6557c7a270..28ef18543ef 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -49,6 +49,18 @@ pub static BLOCK_PROCESSING_TIMES: LazyLock> = LazyLock::new(| "Full runtime of block processing", ) }); +pub static CHAIN_ONBLOCK_DURATION_SECONDS: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "chain_onblock_duration_seconds", + "Time taken to process a block in the chain's onBlock function.", + ) +}); +pub static BLOCK_PROCESSING_DURATION_SECONDS: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "block_processing_duration_seconds", + "Time taken to process a block in the state transition function.", + ) +}); pub static BLOCK_PROCESSING_BLOCK_ROOT: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_block_processing_block_root_seconds", diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index eb70147c6ef..ef61b44d665 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -18,6 +18,7 @@ use beacon_chain::{ observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, + metrics::CHAIN_ONBLOCK_DURATION_SECONDS, }; use beacon_processor::{Work, WorkEvent}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; @@ -1498,6 +1499,7 @@ impl NetworkBeaconProcessor { "fetch_blobs_gossip", ); + let _interop_timer = metrics::start_timer(&CHAIN_ONBLOCK_DURATION_SECONDS); let result = self .chain .process_block( @@ -1508,6 +1510,7 @@ impl NetworkBeaconProcessor { || Ok(()), ) .await; + drop(_interop_timer); register_process_result_metrics(&result, metrics::BlockSource::Gossip, "block"); match &result { diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index d042f8dfadc..a32c640ba8e 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -6,6 +6,7 @@ use std::path::{Path, PathBuf}; /// Names for the default directories. 
pub const DEFAULT_ROOT_DIR: &str = ".lighthouse"; pub const DEFAULT_BEACON_NODE_DIR: &str = "beacon"; +pub const DEFAULT_LEAN_NODE_DIR: &str = "lean"; pub const DEFAULT_NETWORK_DIR: &str = "network"; pub const DEFAULT_VALIDATOR_DIR: &str = "validators"; pub const DEFAULT_SECRET_DIR: &str = "secrets"; diff --git a/consensus/types/src/core/eth_spec.rs b/consensus/types/src/core/eth_spec.rs index 74795fdfc31..f0671efc600 100644 --- a/consensus/types/src/core/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -179,6 +179,9 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type BuilderPendingPaymentsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BuilderPendingWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// Lean chain + type JustificationValidators: Unsigned + Clone + Sync + Send + Debug + PartialEq; + fn default_spec() -> ChainSpec; fn spec_name() -> EthSpecId; @@ -457,8 +460,8 @@ impl EthSpec for MainnetEthSpec { type SlotsPerHistoricalRoot = U8192; type EpochsPerHistoricalVector = U65536; type EpochsPerSlashingsVector = U8192; - type HistoricalRootsLimit = U16777216; - type ValidatorRegistryLimit = U1099511627776; + type HistoricalRootsLimit = U262144; + type ValidatorRegistryLimit = U4096; type BuilderPendingPaymentsLimit = U64; // 2 * SLOTS_PER_EPOCH = 2 * 32 = 64 type BuilderPendingWithdrawalsLimit = U1048576; type MaxProposerSlashings = U16; @@ -500,7 +503,8 @@ impl EthSpec for MainnetEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; - type MaxPendingDepositsPerEpoch = U16; + type MaxPendingDepositsPerEpoch = U16; // Large enough for justification participation. If a larger size is needed, consider using a different data structure.
+ type JustificationValidators = U1073741824; type PTCSize = U512; type MaxPayloadAttestations = U4; @@ -546,6 +550,7 @@ impl EthSpec for MinimalEthSpec { type NumberOfColumns = U128; type ProposerLookaheadSlots = U16; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH type BuilderPendingPaymentsLimit = U16; // 2 * SLOTS_PER_EPOCH = 2 * 8 = 16 + type JustificationValidators = U1073741824; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -652,6 +657,7 @@ impl EthSpec for GnosisEthSpec { type ProposerLookaheadSlots = U32; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH type PTCSize = U512; type MaxPayloadAttestations = U2; + type JustificationValidators = U1073741824; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/lean_client/Cargo.toml b/lean_client/Cargo.toml new file mode 100644 index 00000000000..5368026fb58 --- /dev/null +++ b/lean_client/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "lean_client" +version = "0.1.0" +authors = ["Sigma Prime <info@sigmaprime.io>"] +edition.workspace = true + +[dependencies] +clap = { workspace = true } +clap_utils = { workspace = true } +lean_consensus = { workspace = true } +environment = { workspace = true } +health_metrics = { workspace = true } + +lean_genesis = { workspace = true } +lean_keystore = { path = "keystore" } +lean_network = { workspace = true } +lean_network_config = { workspace = true } +lean_store = { path = "store" } +lean_validator_service = { workspace = true } +lean_config = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } +metrics = { workspace = true } +serde = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true, features = ["redb"] } +task_executor = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } +types = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } + +[dev-dependencies] +tempfile = { workspace = true } diff --git a/lean_client/common/lean_config/Cargo.toml b/lean_client/common/lean_config/Cargo.toml new file mode 100644 index 00000000000..f445629126d --- /dev/null +++ b/lean_client/common/lean_config/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "lean_config" +version = "0.1.0" +edition.workspace = true + +[dependencies] +lean_consensus = { workspace = true } +lean_genesis = { workspace = true } +lean_keystore = { workspace = true } +lean_network = { workspace = true } +lean_network_config = { workspace = true } +lean_store = { workspace = true } +ethereum_ssz = "0.10.0" +tree_hash = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true, features = ["redb"] } +tracing = { workspace = true } +types = { workspace = true } +hex = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +serde = { workspace = true, features = ["derive"] } +ssz_types = { workspace = true } diff --git a/lean_client/common/lean_config/src/lib.rs b/lean_client/common/lean_config/src/lib.rs new file mode 100644 index 00000000000..43f5626e57b --- /dev/null +++ b/lean_client/common/lean_config/src/lib.rs @@ -0,0 +1,300 @@ +mod validators; + +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::Duration; + +use lean_consensus::lean_block::{LeanBlock, LeanBlockBody}; +use lean_consensus::lean_state::{LeanState, SECONDS_PER_SLOT}; +use lean_genesis::ValidatorConfig; +use lean_keystore::{DEFAULT_KEYS_DIR, KeyStore, ValidatorKeyPair}; +use lean_network::NetworkConfig; +use
lean_network_config::load_network_files; +use lean_store::LeanStore; +use slot_clock::{SlotClock, SystemTimeSlotClock}; +use ssz_types::VariableList; +use store::database::interface::BeaconNodeBackend; +use tracing::info; +use tree_hash::TreeHash; +use types::{EthSpec, Slot}; + +use validators::build_validators_from_config; + +/// Input paths and identifiers required to build the lean client runtime. +pub struct LeanClientPaths { + pub data_dir: PathBuf, + pub config_path: PathBuf, + pub validators_path: PathBuf, + pub nodes_path: PathBuf, + pub node_id: String, + pub node_key_path: PathBuf, + pub genesis_json_path: Option, +} + +/// Runtime resources required by the lean client services. +pub struct LeanClientResources { + pub slot_clock: SystemTimeSlotClock, + pub db: Arc>, + pub validator_key_pair: ValidatorKeyPair, + pub validator_index: u64, + pub keystore: KeyStore, + pub network_config: NetworkConfig, +} + +/// Load all configuration and runtime data needed to run the lean client. +pub fn initialize(paths: LeanClientPaths) -> Result, String> { + let LeanClientPaths { + data_dir, + config_path, + validators_path, + nodes_path, + node_id, + node_key_path, + genesis_json_path, + } = paths; + + std::fs::create_dir_all(&data_dir) + .map_err(|e| format!("Failed to create data directory {:?}: {}", data_dir, e))?; + + let db_path = data_dir.join("lean_db"); + let store_config = store::StoreConfig::default(); + let db = Arc::new( + BeaconNodeBackend::open(&store_config, &db_path) + .map_err(|e| format!("Failed to open database: {:?}", e))?, + ); + let lean_store = LeanStore::new(db.clone()); + + let validator_config = ValidatorConfig::load_from_file(&validators_path).map_err(|e| { + format!( + "Failed to load validator config from {:?}: {}", + validators_path, e + ) + })?; + + let validators_dir = validators_path + .parent() + .ok_or_else(|| "validators.yaml path has no parent directory".to_string())?; + let keystore_dir = validators_dir.join(DEFAULT_KEYS_DIR); + let keystore = KeyStore::new(keystore_dir.clone()); + + // Load network config first to get validators from config.yaml + let config_info = load_network_files(&config_path, &nodes_path) + .map_err(|e| format!("Failed to load network config: {}", e))?; + + // Build validators from config.yaml GENESIS_VALIDATORS instead of keystore + let validators_list = build_validators_from_config(&config_info.config_bytes)?; + + info!( + total_validators = validators_list.len(), + "Loaded validators from config.yaml" + ); + + if validators_list.is_empty() { + return Err( + "No validators found in config.yaml GENESIS_VALIDATORS. Please check configuration." 
+ .to_string(), + ); + } + + // Extract genesis_time from genesis.json if available, otherwise use default (0) + let genesis_time = if let Some(path) = &genesis_json_path { + match std::fs::read(path) { + Ok(bytes) => match serde_json::from_slice::(&bytes) { + Ok(json) => json + .get("config") + .and_then(|c| c.get("genesis_time")) + .and_then(|t| { + t.as_str() + .and_then(|s| s.parse::().ok()) + .or_else(|| t.as_u64()) + }) + .inspect(|&time| info!("Using genesis_time from genesis.json: {}", time)) + .unwrap_or(0), + Err(e) => { + info!( + "Failed to parse genesis.json: {}, using default genesis_time=0", + e + ); + 0 + } + }, + Err(e) => { + info!( + "Failed to read genesis.json: {}, using default genesis_time=0", + e + ); + 0 + } + } + } else { + 0 + }; + + // Try to load genesis state from SSZ, otherwise generate fresh + let _genesis_ssz_path = genesis_json_path + .as_ref() + .and_then(|json_path| json_path.parent().map(|parent| parent.join("genesis.ssz"))); + + // Generate fresh genesis state to align with spec logic, ignoring any existing genesis.ssz. + info!("Forcing fresh genesis state generation (ignoring genesis.ssz to align with spec)"); + + let validators = VariableList::new(validators_list) + .map_err(|e| format!("Failed to create validators list: {:?}", e))?; + + // Generate genesis state with ZERO_HASH checkpoints. + // Checkpoints will be updated to genesis_root during block processing. + let genesis_state = LeanState::::generate_genesis(genesis_time, validators); + + // Calculate the Genesis State Root + let genesis_state_root = genesis_state.tree_hash_root(); + + // Construct the Genesis Block with the populated state_root. + // Matches the spec where the Head Block Header includes the Genesis State Root. + let genesis_block = LeanBlock { + slot: genesis_state.slot, + proposer_index: genesis_state.latest_block_header.proposer_index.0, // Unwrap ValidatorIndex + parent_root: genesis_state.latest_block_header.parent_root, + state_root: genesis_state_root, + body: LeanBlockBody { + attestations: VariableList::empty(), + }, + }; + + let genesis_root = genesis_block.tree_hash_root(); + + // Save the Genesis Block as the initial Head/Safe Target. 
+ lean_store + .save_block(genesis_root, &genesis_block) + .map_err(|e| format!("Failed to save genesis block: {}", e))?; + lean_store + .save_head_root(genesis_root) + .map_err(|e| format!("Failed to set head root: {}", e))?; + lean_store + .save_safe_target(genesis_root) + .map_err(|e| format!("Failed to set safe target: {}", e))?; + + info!( + slot = genesis_state.slot.0, + genesis_time = genesis_state.config.genesis_time, + validators_count = genesis_state.validators.len(), + genesis_root = ?genesis_root, + "Initialized genesis state with validators" + ); + + lean_store + .save_state(&genesis_state) + .map_err(|e| format!("Failed to save genesis state to database: {}", e))?; + info!( + genesis_root = ?genesis_root, + "Saved genesis state to database" + ); + + let slot_clock = SystemTimeSlotClock::new( + Slot::new(0), + Duration::from_secs(genesis_state.config.genesis_time), + Duration::from_secs(SECONDS_PER_SLOT), + ); + + let validator_assignments = validator_config.validator_assignments(); + let (start_index, end_index) = validator_assignments + .get(&node_id) + .ok_or_else(|| format!("Node ID '{}' not found in validator config", node_id))?; + + if *start_index >= *end_index { + return Err(format!( + "Node ID '{}' has no validators assigned (start={}, end={})", + node_id, start_index, end_index + )); + } + + info!( + node_id = node_id, + validator_start_index = start_index, + validator_count = validator_config + .validators + .iter() + .find(|v| v.name == node_id) + .map(|v| v.count) + .unwrap_or(0), + "Found validator assignment for node" + ); + + if end_index - start_index != 1 { + return Err(format!( + "Node ID '{}' must be assigned to exactly one validator, found range [{} , {})", + node_id, start_index, end_index + )); + } + + let validator_index = *start_index; + + let validator_key_pair = keystore.load_key_pair(validator_index).map_err(|e| { + format!( + "Failed to load keystore for validator index {}: {}", + validator_index, e + ) + })?; + + info!( + validator_index, + keystore_dir = ?keystore_dir, + "Loaded XMSS key pair for validator" + ); + + let listen_port = validator_config + .validators + .iter() + .find(|v| v.name == node_id) + .and_then(|v| v.enr_fields.as_ref()) + .and_then(|e| e.quic) + .unwrap_or(9000); + + info!( + config = ?config_info, + validators = ?validators_path, + nodes = ?nodes_path, + listen_port, + node_id, + bootstrap_nodes = config_info.bootstrap_enrs.len(), + "Lean node configuration loaded" + ); + + let node_key_bytes = load_node_key(&node_key_path)?; + + let network_name = "devnet0".to_string(); + + info!(network_name = %network_name, "Resolved network name"); + + let network_config = NetworkConfig::new(listen_port, network_name.clone()) + .with_bootstrap_nodes(config_info.bootstrap_enrs) + .with_node_key(node_key_bytes); + + Ok(LeanClientResources { + slot_clock, + db, + validator_key_pair, + validator_index, + keystore, + network_config, + }) +} + +fn load_node_key(path: &Path) -> Result, String> { + let contents = std::fs::read_to_string(path) + .map_err(|e| format!("Failed to read libp2p private key from {:?}: {}", path, e))?; + let trimmed = contents.trim(); + if trimmed.is_empty() { + return Err(format!("Libp2p private key file {:?} is empty", path)); + } + let hex_str = trimmed.strip_prefix("0x").unwrap_or(trimmed); + let key_bytes = hex::decode(hex_str) + .map_err(|e| format!("Invalid hex in libp2p private key {:?}: {}", path, e))?; + if key_bytes.len() != 32 { + return Err(format!( + "Libp2p private key {:?} must be 32 bytes, found {} 
bytes", + path, + key_bytes.len() + )); + } + Ok(key_bytes) +} diff --git a/lean_client/common/lean_config/src/validators.rs b/lean_client/common/lean_config/src/validators.rs new file mode 100644 index 00000000000..b035b6ed34c --- /dev/null +++ b/lean_client/common/lean_config/src/validators.rs @@ -0,0 +1,111 @@ +use lean_consensus::validator::Validator; +use lean_keystore::ValidatorKeyPair; +use std::collections::HashMap; +use ssz_types::FixedVector; + +/// Build validator list from raw key pairs loaded from the keystore. +#[allow(dead_code)] +pub fn build_validators( + all_key_pairs: HashMap, +) -> Result, String> { + let mut validators_list = Vec::new(); + let mut sorted_indices: Vec = all_key_pairs.keys().copied().collect(); + sorted_indices.sort(); + + for validator_index in sorted_indices { + let key_pair = all_key_pairs + .get(&validator_index) + .ok_or_else(|| format!("Validator index {} not found in key pairs", validator_index))?; + + // PublicKey is already 52 bytes + let pubkey_bytes = key_pair.public_key.as_bytes().to_vec(); + + let pubkey_fixed = FixedVector::new(pubkey_bytes).map_err(|e| { + format!( + "Failed to create FixedVector from public key bytes: {:?}", + e + ) + })?; + let validator = Validator { + pubkey: pubkey_fixed, + index: validator_index, + }; + validators_list.push(validator); + } + + Ok(validators_list) +} + +/// Build validator list from hex-encoded public keys in config.yaml GENESIS_VALIDATORS field. +pub fn build_validators_from_config(config_bytes: &[u8]) -> Result, String> { + let config: serde_yaml::Value = serde_yaml::from_slice(config_bytes) + .map_err(|e| format!("Failed to parse config.yaml: {}", e))?; + + let genesis_validators = config + .get("GENESIS_VALIDATORS") + .or_else(|| config.get("genesis_validators")) + .ok_or_else(|| "GENESIS_VALIDATORS not found in config.yaml".to_string())?; + + let validators_seq = genesis_validators + .as_sequence() + .ok_or_else(|| "GENESIS_VALIDATORS must be a list".to_string())?; + + let mut validators_list = Vec::new(); + + for (index, pubkey_val) in validators_seq.iter().enumerate() { + let pubkey_hex = pubkey_val + .as_str() + .ok_or_else(|| format!("Validator {} must be a hex string", index))?; + + // Debug: Log what we're parsing + tracing::info!("Parsing validator {}: pubkey_hex={}", index, pubkey_hex); + + let pubkey_bytes = hex_to_bytes52(pubkey_hex) + .map_err(|e| format!("Failed to parse validator {} pubkey: {}", index, e))?; + + let pubkey_fixed = FixedVector::new(pubkey_bytes.to_vec()).map_err(|e| { + format!( + "Failed to create FixedVector from public key bytes: {:?}", + e + ) + })?; + + let validator = Validator { + pubkey: pubkey_fixed, + index: index as u64, + }; + validators_list.push(validator); + } + + Ok(validators_list) +} + +/// Convert hex string to 52 bytes array +fn hex_to_bytes52(hex_str: &str) -> Result<[u8; 52], String> { + let hex_str = hex_str.strip_prefix("0x").unwrap_or(hex_str); + + if hex_str.len() != 104 { + // 52 bytes * 2 hex chars per byte + return Err(format!( + "Invalid pubkey hex length: expected 104 chars, got {}", + hex_str.len() + )); + } + + let mut bytes = [0u8; 52]; + for i in 0..52 { + bytes[i] = u8::from_str_radix(&hex_str[i * 2..i * 2 + 2], 16) + .map_err(|e| format!("Invalid hex character: {}", e))?; + } + + Ok(bytes) +} + +/// Get current time in seconds since epoch. 
+#[allow(dead_code)] +pub fn current_unix_timestamp() -> Result { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_secs()) + .map_err(|e| format!("Failed to get current time: {}", e)) +} diff --git a/lean_client/common/lean_genesis/Cargo.toml b/lean_client/common/lean_genesis/Cargo.toml new file mode 100644 index 00000000000..372e5cceafe --- /dev/null +++ b/lean_client/common/lean_genesis/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "lean_genesis" +version = "0.1.0" +edition = "2024" + +[dependencies] +serde = { workspace = true, features = ["derive"] } +serde_yaml = { workspace = true } +tracing = { workspace = true } diff --git a/lean_client/common/lean_genesis/README.md b/lean_client/common/lean_genesis/README.md new file mode 100644 index 00000000000..787eb6c8b41 --- /dev/null +++ b/lean_client/common/lean_genesis/README.md @@ -0,0 +1,96 @@ +# Lean Genesis Utilities + +This crate provides utilities for parsing and managing configuration files used with PK's eth-beacon-genesis Docker tool for lean consensus genesis generation. + +## Important Note + +**Genesis state generation is handled by PK's Docker tool** (`ethpandaops/eth-beacon-genesis:pk910-leanchain`). +This crate only provides utilities for configuration file parsing. + +For complete genesis generation, use the shell script: +```bash +./scripts/lean-quickstart/generate-genesis.sh \ + --output-dir ./genesis \ + --validator-config ./validator-config.yaml +``` + +## Features + +This crate provides utilities for: + +- **Config Parsing** - Reading and writing `config.yaml` files +- **Validator Config Parsing** - Parsing `validator-config.yaml` files +- **Node Config Parsing** - Reading `nodes.yaml` files generated by PK's tool + +## Usage + +### Parsing validator-config.yaml + +```rust +use lean_genesis::ValidatorConfig; + +let config = ValidatorConfig::load_from_file("validator-config.yaml")?; +let total_validators = config.total_validator_count(); +let active_epoch = config.config.active_epoch; +``` + +### Parsing config.yaml + +```rust +use lean_genesis::ConfigYaml; + +let config = lean_genesis::load_config_yaml("config.yaml")?; +println!("Genesis time: {}", config.genesis_time); +println!("Validator count: {}", config.validator_count); +``` + +### Parsing nodes.yaml + +```rust +use lean_genesis::NodesConfig; + +let nodes = NodesConfig::load_from_file("nodes.yaml")?; +for node in nodes.nodes { + println!("Node: {} - ENR: {}", node.name, node.enr); +} +``` + +## File Formats + +### validator-config.yaml + +```yaml +shuffle: roundrobin +config: + activeEpoch: 18 + keyType: "hash-sig" +validators: + - name: "node-1" + privkey: "0x1234..." + enrFields: + ip: "127.0.0.1" + quic: 9000 + metricsPort: 8080 + count: 1 +``` + +### config.yaml (generated by PK's tool) + +```yaml +GENESIS_TIME: 1763522215 +VALIDATOR_COUNT: 3 +shuffle: roundrobin +config: + activeEpoch: 18 +``` + +## Genesis Generation Workflow + +The complete genesis generation process uses: + +1. **Hash-Sig Key Generation** - Rust implementation (`lean_keystore` crate) +2. **Config Generation** - Shell script generates `config.yaml` +3. **PK's Genesis Tool** - Docker tool generates genesis state files +4. **Node Key Files** - Shell script generates `.key` files + +See `scripts/lean-quickstart/generate-genesis.sh` for the complete implementation. 
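Before the config parsing code lands, a small self-contained sketch may help make the assignment step concrete. It shows how per-node `count` values from validator-config.yaml translate into contiguous `[start, end)` validator index ranges, mirroring the `validator_assignments` helper added in `validator_config.rs` below; `NodeCounts` and `assignments` are hypothetical names used only for illustration.

```rust
use std::collections::HashMap;

/// Hypothetical stand-in for the per-node entries in validator-config.yaml:
/// each node declares a name and how many validators it runs.
struct NodeCounts<'a> {
    name: &'a str,
    count: u64,
}

/// Assign contiguous [start, end) validator index ranges in declaration order,
/// mirroring the shape of `ValidatorConfig::validator_assignments` in this PR.
fn assignments(nodes: &[NodeCounts]) -> HashMap<String, (u64, u64)> {
    let mut out = HashMap::new();
    let mut next = 0u64;
    for node in nodes {
        out.insert(node.name.to_string(), (next, next + node.count));
        next += node.count;
    }
    out
}

fn main() {
    let nodes = [
        NodeCounts { name: "node-1", count: 1 },
        NodeCounts { name: "node-2", count: 1 },
    ];
    // node-1 -> (0, 1), node-2 -> (1, 2); the lean client additionally requires
    // exactly one validator per node (see the end_index - start_index == 1 check in lib.rs).
    for (name, range) in assignments(&nodes) {
        println!("{name}: {range:?}");
    }
}
```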
diff --git a/lean_client/common/lean_genesis/src/config.rs b/lean_client/common/lean_genesis/src/config.rs new file mode 100644 index 00000000000..f1a1eb9c4d2 --- /dev/null +++ b/lean_client/common/lean_genesis/src/config.rs @@ -0,0 +1,56 @@ +//! Configuration file utilities for genesis generation +//! +//! Provides utilities for reading and writing config.yaml files used with PK's eth-beacon-genesis tool. + +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::Path; +use tracing::info; + +/// Configuration structure matching PK's tool format +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConfigYaml { + #[serde(rename = "GENESIS_TIME")] + pub genesis_time: u64, + #[serde(rename = "VALIDATOR_COUNT")] + pub validator_count: u64, + pub shuffle: Option, + pub config: ConfigSection, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConfigSection { + #[serde(rename = "activeEpoch")] + pub active_epoch: Option, +} + +/// Loads a config.yaml file +pub fn load_config_yaml(config_path: &Path) -> Result { + let yaml_content = fs::read_to_string(config_path) + .map_err(|e| format!("Failed to read config.yaml: {}", e))?; + + let config: ConfigYaml = serde_yaml::from_str(&yaml_content) + .map_err(|e| format!("Failed to parse config.yaml: {}", e))?; + + Ok(config) +} + +/// Saves a config.yaml file +pub fn save_config_yaml(config: &ConfigYaml, output_path: &Path) -> Result<(), String> { + info!(path = ?output_path, "Saving config.yaml"); + + // Create parent directories if needed + if let Some(parent) = output_path.parent() { + fs::create_dir_all(parent) + .map_err(|e| format!("Failed to create config directory: {}", e))?; + } + + let yaml_content = serde_yaml::to_string(config) + .map_err(|e| format!("Failed to serialize config to YAML: {}", e))?; + + fs::write(output_path, yaml_content) + .map_err(|e| format!("Failed to write config.yaml: {}", e))?; + + info!(path = ?output_path, "Config.yaml saved successfully"); + Ok(()) +} diff --git a/lean_client/common/lean_genesis/src/lib.rs b/lean_client/common/lean_genesis/src/lib.rs new file mode 100644 index 00000000000..f39782f7a49 --- /dev/null +++ b/lean_client/common/lean_genesis/src/lib.rs @@ -0,0 +1,16 @@ +//! Utilities for lean consensus genesis generation +//! +//! This crate provides utilities for parsing and managing configuration files +//! used with PK's eth-beacon-genesis Docker tool. +//! +//! **Note**: Genesis state generation is handled by PK's Docker tool +//! (`ethpandaops/eth-beacon-genesis:pk910-leanchain`). See `scripts/lean-quickstart/generate-genesis.sh` +//! for the complete genesis generation workflow. + +mod config; +mod node_config; +mod validator_config; + +pub use config::*; +pub use node_config::*; +pub use validator_config::*; diff --git a/lean_client/common/lean_genesis/src/node_config.rs b/lean_client/common/lean_genesis/src/node_config.rs new file mode 100644 index 00000000000..88b007b2083 --- /dev/null +++ b/lean_client/common/lean_genesis/src/node_config.rs @@ -0,0 +1,48 @@ +//! Node configuration utilities +//! +//! Note: ENR generation is handled by PK's eth-beacon-genesis tool. +//! This module provides utilities for reading nodes.yaml files. 
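The README's `NodesConfig::load_from_file` usage implies a loader, but only `save_to_file` appears in this file as shown. A reader matching that usage might look like the sketch below; this is an assumed shape, not part of the diff, reusing the file's existing `fs`, `Path`, and `serde_yaml` imports.

```rust
impl NodesConfig {
    /// Hypothetical loader mirroring the README example; not present in the
    /// diff as shown, which only includes `save_to_file`.
    pub fn load_from_file(path: &Path) -> Result<Self, String> {
        let yaml_content = fs::read_to_string(path)
            .map_err(|e| format!("Failed to read nodes.yaml from {:?}: {}", path, e))?;
        serde_yaml::from_str(&yaml_content)
            .map_err(|e| format!("Failed to parse nodes.yaml: {}", e))
    }
}
```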
+ +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::Path; +use tracing::info; + +/// Node entry in nodes.yaml +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NodeEntry { + /// Node name + pub name: String, + /// ENR (Ethereum Node Record) string + pub enr: String, + /// Node ID (derived from ENR) + #[serde(skip_serializing_if = "Option::is_none")] + pub node_id: Option, +} + +/// Nodes configuration structure +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NodesConfig { + /// List of node entries + pub nodes: Vec, +} + +impl NodesConfig { + /// Saves nodes configuration to a YAML file + pub fn save_to_file(&self, path: &Path) -> Result<(), String> { + info!(path = ?path, "Saving nodes.yaml"); + + // Create parent directories if needed + if let Some(parent) = path.parent() { + fs::create_dir_all(parent) + .map_err(|e| format!("Failed to create nodes config directory: {}", e))?; + } + + let yaml_content = serde_yaml::to_string(self) + .map_err(|e| format!("Failed to serialize nodes config to YAML: {}", e))?; + + fs::write(path, yaml_content).map_err(|e| format!("Failed to write nodes.yaml: {}", e))?; + + Ok(()) + } +} diff --git a/lean_client/common/lean_genesis/src/validator_config.rs b/lean_client/common/lean_genesis/src/validator_config.rs new file mode 100644 index 00000000000..9adba1af582 --- /dev/null +++ b/lean_client/common/lean_genesis/src/validator_config.rs @@ -0,0 +1,142 @@ +//! Validator configuration parsing and management + +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use std::fs; +use std::path::Path; +use tracing::info; + +/// ENR fields for node configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EnrFields { + /// IP address for the node + pub ip: Option, + /// QUIC port + pub quic: Option, + /// UDP port + pub udp: Option, + /// TCP port + pub tcp: Option, +} + +/// Validator configuration entry for a single node +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ValidatorEntry { + /// Node name identifier + pub name: String, + /// Number of validators for this node + pub count: u64, + /// Private key in hex format (for node identity) + #[serde(default)] + pub privkey: String, + /// ENR fields for peer discovery + #[serde(default, rename = "enrFields")] + pub enr_fields: Option, + /// Metrics port for this node + #[serde(default, rename = "metricsPort")] + pub metrics_port: Option, +} + +/// Configuration section of validator-config.yaml +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ValidatorConfigSection { + /// Log2 of the number of active epochs (e.g., 24 means 2^24 active epochs) + #[serde(default, rename = "activeEpoch")] + pub active_epoch: Option, + /// Key type (e.g., "hash-sig") + #[serde(default, rename = "keyType")] + pub key_type: Option, +} + +/// Validator configuration structure matching lean-quickstart format +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ValidatorConfig { + /// List of validator entries, one per node + pub validators: Vec, + /// Configuration section + #[serde(default)] + pub config: Option, + /// Shuffle algorithm (optional, can be at top level) + #[serde(default)] + pub shuffle: Option, +} + +impl ValidatorConfig { + pub fn load_from_file(path: &Path) -> Result { + info!(path = ?path, "Loading validator configuration"); + + let yaml_content = fs::read_to_string(path) + .map_err(|e| format!("Failed to read validator configuration: {}", e))?; + + // Try parsing as complex format first + if let Ok(config) = 
serde_yaml::from_str::(&yaml_content) { + return Ok(config); + } + + // Try parsing as simple format (node_name -> validator_indices) + let node_map: BTreeMap> = serde_yaml::from_str(&yaml_content) + .map_err(|e| format!("Failed to parse validator configuration as both complex and simple formats: {}", e))?; + + // Convert simple format to ValidatorConfig + let validators = node_map + .into_iter() + .map(|(name, indices)| { + let count = indices.len() as u64; + ValidatorEntry { + name, + count, + privkey: String::new(), + enr_fields: None, + metrics_port: None, + } + }) + .collect(); + + Ok(ValidatorConfig { + validators, + config: None, + shuffle: None, + }) + } + + /// Saves validator configuration to a YAML file + pub fn save_to_file(&self, path: &Path) -> Result<(), String> { + info!(path = ?path, "Saving validator configuration"); + + // Create parent directories if needed + if let Some(parent) = path.parent() { + fs::create_dir_all(parent) + .map_err(|e| format!("Failed to create validator config directory: {}", e))?; + } + + let yaml_content = serde_yaml::to_string(self) + .map_err(|e| format!("Failed to serialize validator config to YAML: {}", e))?; + + fs::write(path, yaml_content) + .map_err(|e| format!("Failed to write validator-config.yaml: {}", e))?; + + Ok(()) + } + + /// Calculates the total number of validators across all nodes + pub fn total_validator_count(&self) -> u64 { + self.validators.iter().map(|v| v.count).sum() + } + + /// Generates validator index assignments per node + /// + /// Returns a map from node name to (start_index, end_index) range + pub fn validator_assignments(&self) -> std::collections::HashMap { + let mut assignments = std::collections::HashMap::new(); + let mut current_index = 0u64; + + for entry in &self.validators { + let start_index = current_index; + let end_index = current_index + entry.count; + assignments.insert(entry.name.clone(), (start_index, end_index)); + current_index = end_index; + } + + assignments + } +} diff --git a/lean_client/common/lean_network_config/Cargo.toml b/lean_client/common/lean_network_config/Cargo.toml new file mode 100644 index 00000000000..5b17cae6762 --- /dev/null +++ b/lean_client/common/lean_network_config/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "lean_network_config" +version = "0.1.0" +edition.workspace = true + +[dependencies] +serde = { workspace = true, features = ["derive"] } +serde_yaml = { workspace = true } +tracing = { workspace = true } diff --git a/lean_client/common/lean_network_config/src/bootstrap.rs b/lean_client/common/lean_network_config/src/bootstrap.rs new file mode 100644 index 00000000000..ae4da09f419 --- /dev/null +++ b/lean_client/common/lean_network_config/src/bootstrap.rs @@ -0,0 +1,111 @@ +use serde::Deserialize; +use std::fs; +use std::path::Path; +use tracing::{debug, error, warn}; + +/// Loads bootstrap nodes from a YAML file (raw bytes) +/// +/// # Arguments +/// * `path` - Path to the bootstrap_nodes.yaml file +/// +/// # Returns +/// A vector of raw ENR string bytes. Invalid entries are skipped with warnings. +pub fn load_bootstrap_nodes>(path: P) -> Result, String> { + parse_bootstrap_nodes_yaml( + &fs::read_to_string(path.as_ref()) + .map_err(|e| format!("Failed to read bootstrap nodes file: {}", e))?, + ) +} + +/// Parse bootstrap nodes from YAML content +/// +/// # Arguments +/// * `content` - YAML file contents as a string +/// +/// # Returns +/// A vector of ENR strings. Invalid entries are skipped with warnings. 
+pub fn parse_bootstrap_nodes_yaml(content: &str) -> Result, String> { + if content.trim().is_empty() { + return Err("Bootstrap nodes YAML is empty".to_string()); + } + + debug!( + "Parsing bootstrap nodes YAML, content length: {} bytes", + content.len() + ); + + #[derive(Debug, Deserialize)] + struct EnrRecord { + enr: String, + } + + #[derive(Debug, Deserialize)] + #[serde(untagged)] + enum BootstrapEntry { + Plain(String), + Record(EnrRecord), + } + + // Parse as YAML array of ENR strings or objects of the form `{ enr: "" }` + let enr_records: Vec = serde_yaml::from_str(content).map_err(|e| { + format!( + "Failed to parse bootstrap nodes YAML: {}. Content preview: {}", + e, + content.chars().take(200).collect::() + ) + })?; + + debug!( + "Parsed {} ENR records from bootstrap nodes YAML", + enr_records.len() + ); + + let mut valid_records = Vec::new(); + let mut empty_count = 0; + + for (i, entry) in enr_records.iter().enumerate() { + let enr_str = match entry { + BootstrapEntry::Plain(value) => value.trim(), + BootstrapEntry::Record(record) => record.enr.trim(), + }; + + if enr_str.is_empty() { + warn!("ENR record #{} is empty, skipping", i); + empty_count += 1; + continue; + } + + // Basic validation: ENR records should start with "enr:" + if !enr_str.starts_with("enr:") { + warn!( + "ENR record #{} does not start with 'enr:', skipping: {}", + i, enr_str + ); + continue; + } + + valid_records.push(enr_str.to_string()); + } + + if valid_records.is_empty() { + let error_msg = if empty_count == enr_records.len() { + format!("All {} ENR records are empty", enr_records.len()) + } else { + format!( + "No valid ENR records found. Total: {}, Empty: {}", + enr_records.len(), + empty_count + ) + }; + error!("No valid bootstrap nodes found. {}", error_msg); + return Err(error_msg); + } + + debug!( + "Successfully parsed {} valid ENR records ({} invalid/empty)", + valid_records.len(), + enr_records.len() - valid_records.len() + ); + + Ok(valid_records) +} diff --git a/lean_client/common/lean_network_config/src/config.rs b/lean_client/common/lean_network_config/src/config.rs new file mode 100644 index 00000000000..d208c759d6e --- /dev/null +++ b/lean_client/common/lean_network_config/src/config.rs @@ -0,0 +1,112 @@ +use crate::genesis::GenesisStateBytes; +use std::fs; +use std::path::Path; +use tracing::{debug, info}; + +/// Raw configuration bytes for a network +#[derive(Clone, Debug)] +pub struct NetworkConfigBytes { + /// Config YAML file contents as raw bytes + pub config: Vec, + /// Bootstrap nodes YAML file contents as raw bytes + pub bootstrap_nodes: Option>, +} + +impl NetworkConfigBytes { + /// Load network configuration from a directory + pub fn load_from_dir>(dir: P) -> Result { + let dir_path = dir.as_ref(); + debug!("Loading network configuration from {:?}", dir_path); + + // Load config.yaml + let config_path = dir_path.join("config.yaml"); + let config = fs::read(&config_path) + .map_err(|e| format!("Failed to read config.yaml from {:?}: {}", config_path, e))?; + + // Try to load bootstrap_nodes.yaml (optional) + let bootstrap_nodes_path = dir_path.join("bootstrap_nodes.yaml"); + let bootstrap_nodes = if bootstrap_nodes_path.exists() { + Some(fs::read(&bootstrap_nodes_path).map_err(|e| { + format!( + "Failed to read bootstrap_nodes.yaml from {:?}: {}", + bootstrap_nodes_path, e + ) + })?) 
+ } else { + None + }; + + info!("Loaded network config from {:?}", dir_path); + + Ok(NetworkConfigBytes { + config, + bootstrap_nodes, + }) + } +} + +/// Specifies a lean network configuration +#[derive(Clone, Debug)] +pub struct LeanNetworkConfig { + /// Network name (e.g., "mainnet", "holesky", "testnet") + pub name: String, + /// Raw configuration bytes + pub config_bytes: NetworkConfigBytes, + /// Genesis state bytes + pub genesis_state_bytes: Option, +} + +impl LeanNetworkConfig { + /// Create a new network configuration + pub fn new( + name: String, + config_bytes: NetworkConfigBytes, + genesis_state_bytes: Option, + ) -> Self { + Self { + name, + config_bytes, + genesis_state_bytes, + } + } + + /// Load network configuration from a directory + pub fn load_from_dir>(name: String, dir: P) -> Result { + let dir_path = dir.as_ref(); + debug!("Loading network '{}' from {:?}", name, dir_path); + + let config_bytes = NetworkConfigBytes::load_from_dir(dir_path)?; + + // Try to load genesis state + let genesis_path = dir_path.join("genesis.ssz"); + let genesis_state_bytes = if genesis_path.exists() { + let bytes = fs::read(&genesis_path).map_err(|e| { + format!("Failed to read genesis.ssz from {:?}: {}", genesis_path, e) + })?; + Some(GenesisStateBytes::Vec(bytes)) + } else { + None + }; + + Ok(Self { + name, + config_bytes, + genesis_state_bytes, + }) + } + + /// Get the config YAML as bytes + pub fn config_bytes(&self) -> &[u8] { + &self.config_bytes.config + } + + /// Get the bootstrap nodes YAML as bytes (if available) + pub fn bootstrap_nodes_bytes(&self) -> Option<&[u8]> { + self.config_bytes.bootstrap_nodes.as_deref() + } + + /// Get the genesis state as bytes (if available) + pub fn genesis_state_bytes(&self) -> Option<&[u8]> { + self.genesis_state_bytes.as_ref().map(|gs| gs.as_ref()) + } +} diff --git a/lean_client/common/lean_network_config/src/genesis.rs b/lean_client/common/lean_network_config/src/genesis.rs new file mode 100644 index 00000000000..741b8b734f4 --- /dev/null +++ b/lean_client/common/lean_network_config/src/genesis.rs @@ -0,0 +1,29 @@ +/// A simple enum to store genesis state bytes from either static or runtime sources +#[derive(Clone, PartialEq, Debug)] +pub enum GenesisStateBytes { + /// Genesis state included in the binary + Slice(&'static [u8]), + /// Genesis state loaded from filesystem at runtime + Vec(Vec), +} + +impl AsRef<[u8]> for GenesisStateBytes { + fn as_ref(&self) -> &[u8] { + match self { + GenesisStateBytes::Slice(slice) => slice, + GenesisStateBytes::Vec(vec) => vec.as_ref(), + } + } +} + +impl From<&'static [u8]> for GenesisStateBytes { + fn from(slice: &'static [u8]) -> Self { + GenesisStateBytes::Slice(slice) + } +} + +impl From> for GenesisStateBytes { + fn from(vec: Vec) -> Self { + GenesisStateBytes::Vec(vec) + } +} diff --git a/lean_client/common/lean_network_config/src/lib.rs b/lean_client/common/lean_network_config/src/lib.rs new file mode 100644 index 00000000000..d9492b62b00 --- /dev/null +++ b/lean_client/common/lean_network_config/src/lib.rs @@ -0,0 +1,26 @@ +//! Network configuration for the lean client +//! +//! This crate provides network configuration management for the lean client. +//! It defines the configuration of lean networks (e.g., testnets, mainnet). +//! +//! The crate intentionally avoids importing consensus types and only deals with +//! raw bytes and configuration data. This keeps the crate simple and decoupled from +//! the consensus layer. 
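As a quick illustration of the pattern `genesis.rs` establishes, here is a condensed, self-contained copy of the enum showing how the two variants and `AsRef<[u8]>` give compiled-in and runtime-loaded genesis bytes a uniform byte-slice view; the `genesis.ssz` read is illustrative only.

```rust
// Condensed copy of GenesisStateBytes from genesis.rs above, trimmed for a
// standalone demo (the real type also derives Clone/PartialEq and has From impls).
#[derive(Debug)]
enum GenesisStateBytes {
    Slice(&'static [u8]),
    Vec(Vec<u8>),
}

impl AsRef<[u8]> for GenesisStateBytes {
    fn as_ref(&self) -> &[u8] {
        match self {
            GenesisStateBytes::Slice(slice) => slice,
            GenesisStateBytes::Vec(vec) => vec.as_ref(),
        }
    }
}

fn main() {
    // Bytes embedded in the binary (e.g. via include_bytes!) vs. read at runtime.
    let embedded = GenesisStateBytes::Slice(&[0u8; 4]);
    let loaded = GenesisStateBytes::Vec(std::fs::read("genesis.ssz").unwrap_or_default());
    // Callers see the same &[u8] regardless of the source.
    assert_eq!(embedded.as_ref().len(), 4);
    let _ = loaded.as_ref().len();
}
```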
+ +mod bootstrap; +mod config; +mod genesis; +mod loader; + +pub use bootstrap::{load_bootstrap_nodes, parse_bootstrap_nodes_yaml}; +pub use config::{LeanNetworkConfig, NetworkConfigBytes}; +pub use genesis::GenesisStateBytes; +pub use loader::{ConfigInfo, load_network_files}; + +/// File names for network configuration +pub const CONFIG_FILE: &str = "config.yaml"; +pub const BOOTSTRAP_NODES_FILE: &str = "bootstrap_nodes.yaml"; +pub const GENESIS_STATE_FILE: &str = "genesis.ssz"; +pub const GENESIS_STATE_ZIP_FILE: &str = "genesis.ssz.zip"; + +pub const DEFAULT_NETWORK: &str = "mainnet"; diff --git a/lean_client/common/lean_network_config/src/loader.rs b/lean_client/common/lean_network_config/src/loader.rs new file mode 100644 index 00000000000..9bfabd505c9 --- /dev/null +++ b/lean_client/common/lean_network_config/src/loader.rs @@ -0,0 +1,47 @@ +use crate::bootstrap::load_bootstrap_nodes; +use std::fs; +use std::path::Path; +use tracing::info; + +/// Configuration information extracted from network files +#[derive(Clone, Debug)] +pub struct ConfigInfo { + /// Raw configuration YAML bytes + pub config_bytes: Vec, + /// ENR records from bootstrap_nodes.yaml as raw strings + pub bootstrap_enrs: Vec, +} + +/// Load network configuration and bootstrap nodes +/// +/// # Arguments +/// * `config_path` - Path to config.yaml +/// * `nodes_path` - Path to bootstrap_nodes.yaml +/// +/// # Returns +/// ConfigInfo containing raw configuration bytes and ENR strings +pub fn load_network_files>( + config_path: P, + nodes_path: P, +) -> Result { + let config_path = config_path.as_ref(); + let nodes_path = nodes_path.as_ref(); + + // Load config.yaml + let config_bytes = fs::read(config_path) + .map_err(|e| format!("Failed to read config from {:?}: {}", config_path, e))?; + + // Load bootstrap nodes ENRs + let bootstrap_enrs = load_bootstrap_nodes(nodes_path)?; + + info!( + config_path = ?config_path, + bootstrap_nodes_count = bootstrap_enrs.len(), + "Loaded network configuration files" + ); + + Ok(ConfigInfo { + config_bytes, + bootstrap_enrs, + }) +} diff --git a/lean_client/consensus/Cargo.toml b/lean_client/consensus/Cargo.toml new file mode 100644 index 00000000000..0b56f6445c5 --- /dev/null +++ b/lean_client/consensus/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "lean_consensus" +version = "0.1.0" +edition = "2024" + +[dependencies] +ethereum_ssz = "0.10.0" +ethereum_ssz_derive = "0.10.0" +fixed_bytes = { workspace = true } +int_to_bytes = { workspace = true } +lean_crypto = { workspace = true } +milhouse = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +ssz_types = { workspace = true } +metrics = { workspace = true } +lazy_static = "1.4.0" +tracing = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +types = { workspace = true } + + diff --git a/lean_client/consensus/src/attestation.rs b/lean_client/consensus/src/attestation.rs new file mode 100644 index 00000000000..8e7d4fc59f0 --- /dev/null +++ b/lean_client/consensus/src/attestation.rs @@ -0,0 +1,115 @@ +use lean_crypto::Signature; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use types::Hash256; + +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +#[derive(Debug, Clone, Encode, Decode, TreeHash)] +pub struct Attestation { + pub validator_id: u64, + pub attestation_data: AttestationData, +} + +#[derive(Debug, Clone, Encode, Decode, TreeHash)] +pub struct AttestationData { + 
pub slot: Slot, + pub head: Checkpoint, + pub target: Checkpoint, + pub source: Checkpoint, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)] +pub struct Slot(pub u64); + +impl Slot { + pub fn is_justifiable_after(self, finalized_slot: Slot) -> Result<(), String> { + // Match Zeam's logic: candidate must not be BEFORE finalized (but can be equal) + // When finalized=0 and candidate=0, delta=0 which is <= 5, so it's justifiable. + if self < finalized_slot { + return Err(format!( + "candidate slot must not be before finalized slot candidate={} finalized={}", + self.0, finalized_slot.0 + )); + } + + let delta = self.0 - finalized_slot.0; + + if delta >= 5 + && (delta.count_ones() == delta.trailing_ones() || { + let val: u64 = 4 * delta + 1; + val.count_ones() == val.trailing_ones() + && (delta.wrapping_sub(1) >> 2).count_ones() + == (delta.wrapping_sub(1) >> 2).trailing_ones() + }) + { + return Err( + "the slot is not justifiable after the finalized slot".to_string() + ); + } + Ok(()) + } +} + +impl TreeHash for Slot { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + u64::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Hash256 { + self.0.tree_hash_root() + } +} + +impl Encode for Slot { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } +} + +impl Decode for Slot { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + u64::from_ssz_bytes(bytes).map(Slot) + } +} + +#[derive(Debug, Clone, Default, Encode, Decode, TreeHash)] +pub struct Checkpoint { + pub root: Hash256, + pub slot: Slot, +} + +#[derive(Debug, Clone, Encode, Decode)] +pub struct SignedAttestation { + pub message: Attestation, + pub signature: Signature, +} diff --git a/lean_client/consensus/src/helpers.rs b/lean_client/consensus/src/helpers.rs new file mode 100644 index 00000000000..8cb787912b3 --- /dev/null +++ b/lean_client/consensus/src/helpers.rs @@ -0,0 +1,141 @@ +/// Helper functions for consensus operations. + +/// Determines if a slot can be a justification target based on the finalized slot. +/// +/// Justifiable slots follow a specific pattern based on the delta from finalized: +/// - Slots within 5 of finalized are always justifiable (delta 0-5) +/// - Perfect squares (delta = 1, 4, 9, 16, 25...) are justifiable +/// - Numbers of form n² + n (delta = 6, 12, 20, 30, 42...) 
are justifiable +/// +/// # Arguments +/// * `finalized_slot` - The slot of the latest finalized checkpoint +/// * `candidate_slot` - The slot being checked for justifiability +/// +/// # Returns +/// * `Ok(true)` if the slot can be a justification target +/// * `Ok(false)` if the slot cannot be a justification target +/// * `Err` if candidate_slot < finalized_slot +pub fn is_justifiable_slot(finalized_slot: u64, candidate_slot: u64) -> Result<bool, String> { + if candidate_slot < finalized_slot { + return Err(format!( + "candidate slot {} must be >= finalized slot {}", + candidate_slot, finalized_slot + )); + } + + let delta = candidate_slot - finalized_slot; + + // Rule 1: First 6 slots (delta 0-5) are always justifiable + if delta <= 5 { + return Ok(true); + } + + // Rule 2: Perfect squares are justifiable + // Check if sqrt(delta) is an integer + let sqrt = (delta as f64).sqrt(); + if sqrt.fract() == 0.0 { + return Ok(true); + } + + // Rule 3: Numbers of form n² + n are justifiable + // This is equivalent to checking if sqrt(delta + 0.25) has fractional part 0.5 + // Because n² + n + 0.25 = (n + 0.5)² + // So sqrt(n² + n + 0.25) = n + 0.5 + let sqrt_plus = ((delta as f64) + 0.25).sqrt(); + if (sqrt_plus.fract() - 0.5).abs() < f64::EPSILON { + return Ok(true); + } + + Ok(false) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_first_six_slots_always_justifiable() { + // Slots 0-5 after finalized are always justifiable + for delta in 0..=5 { + assert!( + is_justifiable_slot(0, delta).unwrap(), + "slot {} should be justifiable", + delta + ); + // Also test with non-zero finalized slot + assert!( + is_justifiable_slot(10, 10 + delta).unwrap(), + "slot {} (finalized=10) should be justifiable", + 10 + delta + ); + } + } + + #[test] + fn test_perfect_squares_justifiable() { + // Perfect squares: 1, 4, 9, 16, 25, 36, 49, 64, 81, 100...
+ let perfect_squares = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]; + for &delta in &perfect_squares { + assert!( + is_justifiable_slot(0, delta).unwrap(), + "slot {} (perfect square) should be justifiable", + delta + ); + } + } + + #[test] + fn test_n_squared_plus_n_justifiable() { + // n² + n pattern (derived from sqrt(delta + 0.25) having fractional part 0.5): + // n=2: 4 + 2 = 6 + // n=3: 9 + 3 = 12 + // n=4: 16 + 4 = 20 + // n=5: 25 + 5 = 30 + let n_squared_plus_n = [6, 12, 20, 30, 42, 56, 72, 90, 110]; + for &delta in &n_squared_plus_n { + assert!( + is_justifiable_slot(0, delta).unwrap(), + "slot {} (n² + n pattern) should be justifiable", + delta + ); + } + } + + #[test] + fn test_non_justifiable_slots() { + // Slots that should NOT be justifiable (not in any pattern) + let non_justifiable = [7, 8, 10, 11, 13, 14, 15, 17, 18, 19, 21, 22, 23, 24, 26]; + for &delta in &non_justifiable { + assert!( + !is_justifiable_slot(0, delta).unwrap(), + "slot {} should NOT be justifiable", + delta + ); + } + } + + #[test] + fn test_candidate_before_finalized_returns_error() { + let result = is_justifiable_slot(10, 5); + assert!(result.is_err()); + } + + #[test] + fn test_with_nonzero_finalized() { + // Test that the function works correctly with non-zero finalized slot + let finalized = 100; + + // First 6 slots after finalized + assert!(is_justifiable_slot(finalized, 100).unwrap()); // delta 0 + assert!(is_justifiable_slot(finalized, 105).unwrap()); // delta 5 + + // Perfect square: delta 9 + assert!(is_justifiable_slot(finalized, 109).unwrap()); + + // n² + n: delta 6 + assert!(is_justifiable_slot(finalized, 106).unwrap()); + + // Non-justifiable: delta 7 + assert!(!is_justifiable_slot(finalized, 107).unwrap()); + } +} diff --git a/lean_client/consensus/src/lean_block.rs b/lean_client/consensus/src/lean_block.rs new file mode 100644 index 00000000000..1a6398bb78d --- /dev/null +++ b/lean_client/consensus/src/lean_block.rs @@ -0,0 +1,127 @@ +use crate::attestation::{Attestation, Slot}; +use crate::lean_state::LeanState; +use crate::validator::ValidatorIndex; +use lean_crypto::Signature; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; +use types::{EthSpec, Hash256}; + +#[derive(Debug, Clone, Encode, Decode, TreeHash)] +pub struct LeanBlock { + pub slot: Slot, + pub proposer_index: u64, + pub parent_root: Hash256, + pub state_root: Hash256, + pub body: LeanBlockBody, +} + +#[derive(Debug, Clone, Encode, Decode, TreeHash)] +pub struct LeanBlockBody { + pub attestations: VariableList, +} + +#[derive(Debug, Clone, Encode, Decode, TreeHash)] +pub struct LeanBlockHeader { + pub slot: Slot, + pub proposer_index: ValidatorIndex, + pub parent_root: Hash256, + pub state_root: Hash256, + pub body_root: Hash256, +} + +#[derive(Debug, Clone, Encode, Decode)] +pub struct LeanBlockWithAttestation { + pub block: LeanBlock, + pub proposer_attestation: Attestation, +} + +#[derive(Debug, Clone, Encode, Decode)] +pub struct SignedLeanBlockWithAttestation { + pub message: LeanBlockWithAttestation, + pub signature: VariableList, +} + +impl SignedLeanBlockWithAttestation { + /// Verify all XMSS signatures in this signed block. + /// + /// This function ensures that every attestation included in the block + /// (both on-chain attestations from the block body and the proposer's + /// own attestation) is properly signed by the claimed validator using + /// their registered XMSS public key. 
+ /// + /// # Parameters + /// - `parent_state`: The state at the parent block, used to retrieve + /// validator public keys and verify signatures. + /// + /// # Returns + /// - `Ok(())` if all signatures are cryptographically valid + /// - `Err(String)` if signature verification fails + pub fn verify_signatures(&self, parent_state: &LeanState) -> Result<(), String> { + let block = &self.message.block; + let signatures = &self.signature; + + // Combine all attestations that need verification: + // 1. Block body attestations (from other validators) + // 2. Proposer attestation (from the block producer) + let mut all_attestations = Vec::new(); + all_attestations.extend(block.body.attestations.iter().cloned()); + all_attestations.push(self.message.proposer_attestation.clone()); + + // Verify signature count matches attestation count + if signatures.len() != all_attestations.len() { + return Err(format!( + "Number of signatures ({}) does not match number of attestations ({})", + signatures.len(), + all_attestations.len() + )); + } + + let validators = &parent_state.validators; + + // Verify each attestation signature + for (attestation, _signature) in + all_attestations.iter().zip(signatures.iter()) + { + // Identify the validator who created this attestation + let validator_id = attestation.validator_id as usize; + + // Ensure validator exists in the active set + if validator_id >= validators.len() { + return Err(format!( + "Validator index {} out of range (total validators: {})", + validator_id, + validators.len() + )); + } + + let validator = validators + .get(validator_id) + .ok_or_else(|| format!("Failed to get validator at index {}", validator_id))?; + + // Calculate the hash of the full attestation (this is what's signed) + // Note: Zeam signs the full Attestation struct (validator_id + attestation_data), + // not just the AttestationData. 
+ let message_hash = attestation.tree_hash_root(); + + // Verify the signature + let is_valid = lean_crypto::verify_signature( + validator.get_pubkey(), + message_hash.as_slice(), + _signature, + attestation.attestation_data.slot.0, + ) + .map_err(|e| format!("Signature verification error: {}", e))?; + + if !is_valid { + return Err(format!( + "Invalid signature for validator {} at slot {}", + validator_id, attestation.attestation_data.slot.0 + )); + } + } + + Ok(()) + } +} diff --git a/lean_client/consensus/src/lean_state.rs b/lean_client/consensus/src/lean_state.rs new file mode 100644 index 00000000000..ee0ec5a5166 --- /dev/null +++ b/lean_client/consensus/src/lean_state.rs @@ -0,0 +1,889 @@ +use crate::attestation::{Attestation, Checkpoint, Slot}; + +use ssz_derive::{Decode, Encode}; +use std::collections::HashMap; +use tracing::debug; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +use crate::lean_block::{ + LeanBlock, LeanBlockBody, LeanBlockHeader, SignedLeanBlockWithAttestation, +}; +use crate::validator::Validator; +use crate::validator::ValidatorIndex; + +use ssz_types::{BitList, VariableList}; +use types::{EthSpec, Hash256}; + +#[derive(TreeHash, Encode, Decode, Debug)] +pub struct LeanState { + pub config: Config, + pub slot: Slot, + pub latest_block_header: LeanBlockHeader, + pub latest_justified: Checkpoint, + pub latest_finalized: Checkpoint, + pub historical_block_hashes: VariableList, + pub justified_slots: BitList, + pub validators: VariableList, + pub justifications_roots: VariableList, + pub justifications_validators: BitList, +} + +impl LeanState { + pub fn new( + genesis_time: u64, + justified_slots: BitList, + validators: VariableList, + ) -> Self { + let genesis_config = Config { genesis_time }; + let genesis_header = LeanBlockHeader { + slot: Slot(0), + proposer_index: ValidatorIndex(0), + parent_root: Hash256::ZERO, + state_root: Hash256::ZERO, + body_root: LeanBlockBody:: { + attestations: VariableList::empty(), + } + .tree_hash_root(), + }; + + let genesis_checkpoint = Checkpoint { + root: Hash256::ZERO, + slot: Slot(0), + }; + + Self { + config: genesis_config, + slot: Slot(0), + latest_block_header: genesis_header.clone(), + latest_justified: genesis_checkpoint.clone(), + latest_finalized: genesis_checkpoint, + historical_block_hashes: VariableList::empty(), + justified_slots: justified_slots.clone(), + validators: validators.clone(), + justifications_roots: VariableList::empty(), + justifications_validators: BitList::with_capacity(0) + .expect("Failed to create justifications_validators BitList"), + } + } + + pub fn generate_genesis( + genesis_time: u64, + validators: VariableList, + ) -> Self { + let genesis_config = Config { genesis_time }; + let genesis_header = LeanBlockHeader { + slot: Slot(0), + proposer_index: ValidatorIndex(0), + parent_root: Hash256::ZERO, + state_root: Hash256::ZERO, + body_root: LeanBlockBody:: { + attestations: VariableList::empty(), + } + .tree_hash_root(), + }; + + // Initialize justified_slots as empty to align with the spec. + // Justification occurs via attestations during block processing. + let justified_slots = + BitList::with_capacity(0).expect("Failed to create justified_slots BitList"); + + // Genesis checkpoints use ZERO_HASH as root, matching the spec (mini3sf). + // The actual genesis block root is set later during block processing when + // the first block after genesis is processed. 
+ let genesis_checkpoint = Checkpoint { + root: Hash256::ZERO, + slot: Slot(0), + }; + + Self { + config: genesis_config, + slot: Slot(0), + latest_block_header: genesis_header.clone(), + latest_justified: genesis_checkpoint.clone(), + latest_finalized: genesis_checkpoint, + historical_block_hashes: VariableList::empty(), + justified_slots, + validators, + justifications_roots: VariableList::empty(), + justifications_validators: BitList::with_capacity(0) + .expect("Failed to create justifications_validators BitList"), + } + } + + pub fn is_proposer(&self, validator_index: ValidatorIndex) -> bool { + self.slot.0 % self.validators.len() as u64 == validator_index.0 + } + pub fn get_justifications( + &self, + ) -> Result>, String> { + if self.justifications_roots.is_empty() { + return Ok(HashMap::new()); + } + + let validator_count = self.validators.len(); + + self.justifications_roots + .iter() + .enumerate() + .map(|(i, root)| { + let start = i * validator_count; + let end = (i + 1) * validator_count; + + let mut justifications = BitList::::with_capacity( + validator_count, + ) + .map_err(|e| { + format!( + "Failed to create BitList with capacity {}: {:?}", + validator_count, e + ) + })?; + for (bit_idx, global_idx) in (start..end).enumerate() { + let bit_value = + self.justifications_validators + .get(global_idx) + .map_err(|e| { + format!("Failed to get bit at index {}: {:?}", global_idx, e) + })?; + justifications + .set(bit_idx, bit_value) + .map_err(|e| format!("Failed to set bit at index {}: {:?}", bit_idx, e))?; + } + + Ok((*root, justifications)) + }) + .collect() + } + pub fn with_justification( + &mut self, + root: Hash256, + validator_justifications: &BitList, + ) -> Result<(), String> { + let validator_count = self.validators.len(); + + if validator_justifications.len() < validator_count { + return Err(format!( + "Justifications vector length {} is less than validator count {}", + validator_justifications.len(), + validator_count + )); + } + + if self.justifications_roots.contains(&root) { + return Err(format!( + "Root {:?} already exists in justifications_roots", + root + )); + } + + self.justifications_roots + .push(root) + .map_err(|e| format!("Failed to append root to justifications_roots: {:?}", e))?; + + // Extend justifications_validators by creating a new BitList with larger capacity + let current_len = self.justifications_validators.len(); + let new_len = current_len + validator_count; + let mut new_justifications_validators = + BitList::::with_capacity(new_len).map_err(|e| { + format!( + "Failed to create extended justifications_validators with capacity {}: {:?}", + new_len, e + ) + })?; + + // Copy existing bits + for i in 0..current_len { + let bit = self.justifications_validators.get(i).map_err(|e| { + format!( + "Failed to get justifications_validators bit at {}: {:?}", + i, e + ) + })?; + new_justifications_validators.set(i, bit).map_err(|e| { + format!( + "Failed to copy justifications_validators bit at {}: {:?}", + i, e + ) + })?; + } + + // Append new bits from validator_justifications + for i in 0..validator_count { + let bit_value = validator_justifications.get(i).map_err(|e| { + format!( + "Failed to get bit at index {} from validator_justifications: {:?}", + i, e + ) + })?; + + new_justifications_validators + .set(current_len + i, bit_value) + .map_err(|e| { + format!( + "Failed to set bit at index {} in justifications_validators: {:?}", + current_len + i, + e + ) + })?; + } + + self.justifications_validators = new_justifications_validators; + + Ok(()) + } + 
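The flattened encoding that `get_justifications` and `with_justification` maintain can be summarized as: the vote bit for (root `i`, validator `v`) lives at index `i * num_validators + v` of `justifications_validators`, with one root per stride in `justifications_roots`. Below is a minimal model using plain `Vec`s in place of the SSZ `BitList`/`VariableList` types; the names are illustrative, not from the PR.

```rust
/// Plain-Vec model of the flat (root-major) justification bitfield layout.
struct FlatJustifications {
    num_validators: usize,
    roots: Vec<[u8; 32]>, // stands in for justifications_roots
    bits: Vec<bool>,      // stands in for justifications_validators
}

impl FlatJustifications {
    /// Read the vote bit for (root_index, validator).
    fn vote(&self, root_index: usize, validator: usize) -> Option<bool> {
        if root_index >= self.roots.len() || validator >= self.num_validators {
            return None;
        }
        self.bits.get(root_index * self.num_validators + validator).copied()
    }

    /// Append a root with one vote bit per validator (a validator-count stride).
    fn add_root(&mut self, root: [u8; 32], votes: &[bool]) {
        assert_eq!(votes.len(), self.num_validators);
        self.roots.push(root);
        self.bits.extend_from_slice(votes);
    }
}

fn main() {
    let mut j = FlatJustifications { num_validators: 3, roots: vec![], bits: vec![] };
    j.add_root([0xaa; 32], &[true, false, true]);
    j.add_root([0xbb; 32], &[false, true, false]);
    assert_eq!(j.vote(1, 1), Some(true)); // validator 1 voted for the second root
}
```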
+ /// Replaces all justifications with a new map. + /// + /// This is used after processing attestations to persist the updated justification state. + /// The map is flattened into `justifications_roots` and `justifications_validators` with + /// roots sorted lexicographically for deterministic ordering. + pub fn update_justifications( + &mut self, + justifications: HashMap>, + ) -> Result<(), String> { + let num_validators = self.validators.len(); + + // Sort roots for deterministic ordering + let mut roots: Vec<_> = justifications.keys().cloned().collect(); + roots.sort(); + + // Calculate total size needed for justifications_validators + let total_bits = roots.len() * num_validators; + + // Create new structures + let mut new_roots = VariableList::::empty(); + // Create BitList with exact capacity - with_capacity creates a list of that length + let mut new_validators = BitList::::with_capacity(total_bits) + .map_err(|e| format!("Failed to create justifications_validators BitList: {:?}", e))?; + + // Track the current index for writing bits + let mut bit_index = 0; + + for root in roots { + new_roots + .push(root) + .map_err(|e| format!("Failed to push root to justifications_roots: {:?}", e))?; + + let votes = justifications.get(&root).ok_or_else(|| { + format!("Root {:?} not found in justifications map", root) + })?; + + for i in 0..num_validators { + let bit = votes.get(i).unwrap_or(false); + new_validators.set(bit_index, bit).map_err(|e| { + format!( + "Failed to set bit at index {} (total_bits={}): {:?}", + bit_index, total_bits, e + ) + })?; + bit_index += 1; + } + } + + self.justifications_roots = new_roots; + self.justifications_validators = new_validators; + + Ok(()) + } + + pub fn process_slot(&mut self) -> Result<(), String> { + // If state_root is unpopulated (e.g., genesis state), populate it now. + // This ensures the header correctly reflects the state root for the transition to Slot 1, + // while preserving pre-populated headers from `handle_block` for subsequent blocks. + if self.latest_block_header.state_root == Hash256::ZERO { + use tree_hash::TreeHash; + let previous_state_root = self.tree_hash_root(); + self.latest_block_header.state_root = previous_state_root; + } + + Ok(()) + } + + pub fn process_slots(&mut self, target_slot: Slot) -> Result<(), String> { + if self.slot >= target_slot { + return Err(format!( + "Target slot must be in the future. Current slot: {}, target slot: {}", + self.slot.0, target_slot.0 + )); + } + + while self.slot < target_slot { + self.process_slot()?; + + self.slot = Slot(self.slot.0 + 1); + } + + Ok(()) + } + pub fn process_block_header(&mut self, block: &LeanBlock) -> Result<(), String> { + let parent_header = &self.latest_block_header; + let parent_root = parent_header.tree_hash_root(); + + debug!( + block_slot = block.slot.0, + parent_header_slot = parent_header.slot.0, + parent_header_state_root = ?parent_header.state_root, + computed_parent_root = ?parent_root, + block_parent_root = ?block.parent_root, + "Processing block header - computing parent root" + ); + + if block.slot != self.slot { + return Err(format!( + "Block slot mismatch. Expected: {}, got: {}", + self.slot.0, block.slot.0 + )); + } + + if block.slot <= parent_header.slot { + return Err(format!( + "Block is not newer than latest header. Block slot: {}, latest header slot: {}", + block.slot.0, parent_header.slot.0 + )); + } + + if !self.is_proposer(ValidatorIndex(block.proposer_index)) { + return Err(format!( + "Incorrect block proposer.
Expected proposer for slot {}, got validator {}", + self.slot.0, block.proposer_index + )); + } + + if block.parent_root != parent_root { + return Err(format!( + "Block parent root mismatch. Expected: {:?}, got: {:?}", + parent_root, block.parent_root + )); + } + + let is_genesis_parent = parent_header.slot == Slot(0); + if is_genesis_parent { + self.latest_justified.root = parent_root; + self.latest_finalized.root = parent_root; + debug!( + genesis_root = ?parent_root, + "Genesis block finalized and justified" + ); + } + + let num_empty_slots = block.slot.0 - parent_header.slot.0 - 1; + + // Add parent root to historical_block_hashes (spec: leanSpec/state.py line 260) + self.historical_block_hashes + .push(parent_root) + .map_err(|e| format!("Failed to append parent root to historical hashes: {:?}", e))?; + + // Add ZERO_HASH for each empty slot (spec: leanSpec/state.py line 260) + for _ in 0..num_empty_slots { + self.historical_block_hashes + .push(Hash256::ZERO) + .map_err(|e| format!("Failed to append ZERO_HASH for empty slot: {:?}", e))?; + } + + // Force materialization of the list by serializing and deserializing it + // This ensures pending updates are flushed before tree_hash is called + let encoded_hashes = ssz::Encode::as_ssz_bytes(&self.historical_block_hashes); + self.historical_block_hashes = ssz::Decode::from_ssz_bytes(&encoded_hashes) + .map_err(|e| format!("Failed to rematerialize historical_block_hashes: {:?}", e))?; + + debug!( + block_slot = block.slot.0, + parent_header_slot = parent_header.slot.0, + added_parent_root = ?parent_root, + num_empty_slots = num_empty_slots, + historical_hashes_len = self.historical_block_hashes.len(), + "Updated historical_block_hashes" + ); + + // Following the spec pattern: build new justified_slots by growing the BitList + // + // The spec pattern: justified_slots + [Boolean(is_genesis_parent)] + ([Boolean(False)] * num_empty_slots) + // This means justified_slots grows in parallel with historical_block_hashes. 
+ + let current_len = self.justified_slots.len(); + let num_empty_slots_usize = num_empty_slots as usize; + + // Calculate total size of new BitList + let new_len = current_len + 1 + num_empty_slots_usize; + + // Create a fresh BitList with the exact final size needed + // We must create with the final size because .set() won't extend beyond capacity + let mut new_justified_slots = BitList::with_capacity(new_len).map_err(|e| { + format!( + "Failed to create justified_slots BitList with capacity {}: {:?}", + new_len, e + ) + })?; + + // Only copy existing bits if there are any (avoid unnecessary work on first block) + if current_len > 0 { + for i in 0..current_len { + let bit_value = self.justified_slots.get(i).map_err(|e| { + format!("Failed to read justified_slots bit at index {}: {:?}", i, e) + })?; + new_justified_slots + .set(i, bit_value) + .map_err(|e| format!("Failed to copy bit at index {}: {:?}", i, e))?; + } + + debug!( + "Copied {} existing bits to new justified_slots BitList", + current_len + ); + } + + // Set the parent slot bit (justified only if it's the genesis block) + new_justified_slots + .set(current_len, is_genesis_parent) + .map_err(|e| format!("Failed to set parent bit in justified_slots: {:?}", e))?; + + debug!( + "Set parent justified bit at index {}: is_genesis_parent={}", + current_len, is_genesis_parent + ); + + // Set false for each empty slot + for i in 0..num_empty_slots_usize { + new_justified_slots + .set(current_len + 1 + i, false) + .map_err(|e| { + format!( + "Failed to set empty slot bit at index {}: {:?}", + current_len + 1 + i, + e + ) + })?; + } + + debug!( + "Set {} empty slot bits (all false) in justified_slots", + num_empty_slots_usize + ); + + self.justified_slots = new_justified_slots; + + debug!( + "Rebuilt justified_slots BitList: old_len={}, new_len={}, hist_block_hashes_len={}", + current_len, + self.justified_slots.len(), + self.historical_block_hashes.len() + ); + + self.latest_block_header = LeanBlockHeader { + slot: block.slot, + proposer_index: ValidatorIndex(block.proposer_index), + parent_root: block.parent_root, + body_root: block.body.tree_hash_root(), + state_root: Hash256::ZERO, + }; + + Ok(()) + } + pub fn process_block(&mut self, block: &LeanBlock) -> Result<(), String> { + self.process_block_header(block)?; + + self.process_attestations(&block.body.attestations)?; + + Ok(()) + } + /// Process attestations with 2/3 supermajority check for justification. + /// + /// The justification algorithm: + /// 1. Track which validators have attested to each target checkpoint + /// 2. Only justify a target when 2/3+ of validators have attested (3 * count >= 2 * total) + /// 3. 
Finalize the source when target is justified and no justifiable slots exist between them
+ pub fn process_attestations(
+     &mut self,
+     attestations: &VariableList,
+ ) -> Result<(), String> {
+     use crate::helpers::is_justifiable_slot;
+
+     let num_validators = self.validators.len();
+     if num_validators == 0 {
+         return Ok(());
+     }
+
+     // Load existing justifications into working map
+     let mut justifications = self.get_justifications()?;
+
+     for attestation in attestations.iter() {
+         let attestation_data = &attestation.attestation_data;
+         let source = &attestation_data.source;
+         let target = &attestation_data.target;
+         let validator_id = attestation.validator_id as usize;
+
+         // Validate attestation
+         if source.slot >= target.slot {
+             debug!(
+                 source_slot = source.slot.0,
+                 target_slot = target.slot.0,
+                 "Skipping attestation: source slot >= target slot"
+             );
+             continue;
+         }
+
+         let source_slot = source.slot.0 as usize;
+         let target_slot = target.slot.0 as usize;
+
+         // Check source is justified
+         let source_is_justified = if source_slot < self.justified_slots.len() {
+             self.justified_slots.get(source_slot).map_err(|e| {
+                 format!("Failed to get justified slot at index {}: {:?}", source_slot, e)
+             })?
+         } else {
+             debug!(
+                 source_slot = source_slot,
+                 justified_slots_len = self.justified_slots.len(),
+                 "Skipping attestation: source slot out of range"
+             );
+             continue;
+         };
+
+         if !source_is_justified {
+             debug!(
+                 source_slot = source_slot,
+                 "Skipping attestation: source not justified"
+             );
+             continue;
+         }
+
+         // Check target not already justified
+         let target_is_justified = if target_slot < self.justified_slots.len() {
+             self.justified_slots.get(target_slot).unwrap_or(false)
+         } else {
+             false
+         };
+
+         if target_is_justified {
+             debug!(
+                 target_slot = target_slot,
+                 "Skipping attestation: target already justified"
+             );
+             continue;
+         }
+
+         // Check roots match historical block hashes
+         if source_slot >= self.historical_block_hashes.len() {
+             continue;
+         }
+         if target_slot >= self.historical_block_hashes.len() {
+             continue;
+         }
+         if self.historical_block_hashes[source_slot] != source.root {
+             debug!(
+                 source_slot = source_slot,
+                 expected_root = ?self.historical_block_hashes[source_slot],
+                 actual_root = ?source.root,
+                 "Skipping attestation: source root mismatch"
+             );
+             continue;
+         }
+         if self.historical_block_hashes[target_slot] != target.root {
+             debug!(
+                 target_slot = target_slot,
+                 expected_root = ?self.historical_block_hashes[target_slot],
+                 actual_root = ?target.root,
+                 "Skipping attestation: target root mismatch"
+             );
+             continue;
+         }
+
+         // Check target is justifiable based on the slot pattern
+         match is_justifiable_slot(self.latest_finalized.slot.0, target.slot.0) {
+             Ok(true) => {}
+             Ok(false) => {
+                 debug!(
+                     target_slot = target.slot.0,
+                     finalized_slot = self.latest_finalized.slot.0,
+                     "Skipping attestation: target slot not justifiable"
+                 );
+                 continue;
+             }
+             Err(e) => {
+                 debug!(
+                     target_slot = target.slot.0,
+                     finalized_slot = self.latest_finalized.slot.0,
+                     error = ?e,
+                     "Skipping attestation: is_justifiable_slot error"
+                 );
+                 continue;
+             }
+         }
+
+         // Validate validator_id
+         if validator_id >= num_validators {
+             debug!(
+                 validator_id = validator_id,
+                 num_validators = num_validators,
+                 "Skipping attestation: invalid validator_id"
+             );
+             continue;
+         }
+
+         // Track this validator's vote for target
+         let target_votes = justifications.entry(target.root).or_insert_with(|| {
+             BitList::with_capacity(num_validators)
+                 .expect("Failed to create BitList for target votes")
+         });
+
+         target_votes
+             .set(validator_id, true)
+             .map_err(|e| format!("Failed to set vote for validator {}: {:?}", validator_id, e))?;
+
+         // Count votes for this target
+         let vote_count = (0..num_validators)
+             .filter(|i| target_votes.get(*i).unwrap_or(false))
+             .count();
+
+         let threshold_2_3 = (2 * num_validators + 2) / 3; // Ceiling of 2/3
+
+         debug!(
+             target_slot = target.slot.0,
+             vote_count = vote_count,
+             threshold_2_3 = threshold_2_3,
+             num_validators = num_validators,
+             validator_id = validator_id,
+             "Attestation processed"
+         );
+
+         // Check 2/3 supermajority: 3 * count >= 2 * total (equivalent to count >= 2/3 * total)
+         if 3 * vote_count >= 2 * num_validators {
+             // Extend justified_slots if needed by creating a larger BitList
+             if self.justified_slots.len() <= target_slot {
+                 let new_len = target_slot + 1;
+                 let mut new_justified_slots =
+                     BitList::with_capacity(new_len).map_err(|e| {
+                         format!(
+                             "Failed to create extended justified_slots with capacity {}: {:?}",
+                             new_len, e
+                         )
+                     })?;
+
+                 // Copy existing bits
+                 for i in 0..self.justified_slots.len() {
+                     let bit = self.justified_slots.get(i).map_err(|e| {
+                         format!("Failed to get justified_slots bit at {}: {:?}", i, e)
+                     })?;
+                     new_justified_slots.set(i, bit).map_err(|e| {
+                         format!("Failed to copy justified_slots bit at {}: {:?}", i, e)
+                     })?;
+                 }
+
+                 // Fill remaining slots with false
+                 for i in self.justified_slots.len()..new_len {
+                     new_justified_slots.set(i, false).map_err(|e| {
+                         format!("Failed to initialize justified_slots bit at {}: {:?}", i, e)
+                     })?;
+                 }
+
+                 debug!(
+                     old_len = self.justified_slots.len(),
+                     new_len = new_len,
+                     "Extended justified_slots BitList"
+                 );
+
+                 self.justified_slots = new_justified_slots;
+             }
+
+             // Justify target
+             self.justified_slots
+                 .set(target_slot, true)
+                 .map_err(|e| format!("Failed to set justified slot: {:?}", e))?;
+
+             debug!(
+                 target_slot = target.slot.0,
+                 vote_count = vote_count,
+                 threshold_2_3 = threshold_2_3,
+                 "JUSTIFIED: 2/3 supermajority reached"
+             );
+
+             if target.slot > self.latest_justified.slot {
+                 // Check if we can finalize source
+                 // Source can be finalized if there are no justifiable slots between source and target
+                 let can_finalize = self.can_finalize_source(source, target)?;
+
+                 if can_finalize {
+                     self.latest_finalized = source.clone();
+                     debug!(
+                         finalized_slot = source.slot.0,
+                         finalized_root = ?source.root,
+                         "FINALIZED: source checkpoint finalized via 2/3 supermajority"
+                     );
+                 }
+
+                 self.latest_justified = target.clone();
+                 debug!(
+                     justified_slot = target.slot.0,
+                     justified_root = ?target.root,
+                     "Latest justified updated"
+                 );
+             }
+
+             // Remove from tracking (already justified)
+             justifications.remove(&target.root);
+         }
+     }
+
+     // Persist updated justifications back to state
+     self.update_justifications(justifications)?;
+
+     Ok(())
+ }
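For reference, the two threshold forms above agree: the integer check `3 * count >= 2 * total` is exactly `count >= ceil(2 * total / 3)`, just without floating point. A standalone sketch of that arithmetic (plain integers, no chain types, checkable in isolation):

```rust
/// Integer form of the 2/3 supermajority check used above:
/// `3 * count >= 2 * total` matches `count >= ceil(2 * total / 3)`
/// for all positive totals, and avoids floating point entirely.
fn is_supermajority(count: usize, total: usize) -> bool {
    3 * count >= 2 * total
}

fn main() {
    // With 10 validators, 7 votes are enough (ceil(20 / 3) = 7)...
    assert!(is_supermajority(7, 10));
    // ...but 6 are not.
    assert!(!is_supermajority(6, 10));
    // With 3 validators, exactly 2 votes reach the threshold,
    // matching threshold_2_3 = (2 * 3 + 2) / 3 = 2.
    assert!(is_supermajority(2, 3));
}
```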
+ /// Check if source can be finalized based on target justification.
+ ///
+ /// Source can be finalized if there are no justifiable slots between source and target.
+ fn can_finalize_source(&self, source: &Checkpoint, target: &Checkpoint) -> Result<bool, String> {
+     use crate::helpers::is_justifiable_slot;
+
+     for check_slot in (source.slot.0 + 1)..target.slot.0 {
+         match is_justifiable_slot(self.latest_finalized.slot.0, check_slot) {
+             Ok(true) => return Ok(false), // Found a justifiable slot in between
+             Ok(false) => continue,
+             Err(e) => return Err(format!("is_justifiable_slot error: {}", e)),
+         }
+     }
+     Ok(true)
+ }
+
+ pub fn state_transition(
+     &mut self,
+     signed_block: &SignedLeanBlockWithAttestation,
+     validate_signatures: bool,
+ ) -> Result<(), String> {
+     if validate_signatures {
+         signed_block.verify_signatures(self)?;
+     }
+
+     let block = &signed_block.message.block;
+
+     if self.slot < block.slot {
+         self.process_slots(block.slot)?;
+     }
+
+     self.process_block(block)?;
+
+     let computed_state_root = self.tree_hash_root();
+     if block.state_root != computed_state_root {
+         return Err(format!(
+             "Invalid block state root. Expected: {:?}, got: {:?}",
+             computed_state_root, block.state_root
+         ));
+     }
+
+     Ok(())
+ }
+}
+
+/// Configuration for the lean consensus protocol.
+///
+/// This struct contains chain-specific parameters that are part of the consensus state.
+/// To match the spec's BeamStateConfig, this only contains genesis_time.
+/// Other parameters are defined as constants (SECONDS_PER_SLOT, etc.)
+#[derive(TreeHash, Encode, Decode, Debug, Clone, serde::Serialize, serde::Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Config {
+    /// Genesis time in seconds since Unix epoch.
+    pub genesis_time: u64,
+}
+
+/// Configuration for genesis generation (YAML/JSON only, not part of chain state)
+#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GenesisConfig {
+    /// Base chain configuration
+    #[serde(flatten)]
+    pub config: Config,
+    /// Log2 of the number of active epochs for XMSS keys (e.g., 24 means 2^24 active epochs).
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub active_epoch: Option<u64>,
+    /// Shuffle algorithm for validator assignment (e.g., "roundrobin").
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub shuffle: Option<String>,
+}
+
+impl From<Config> for GenesisConfig {
+    fn from(config: Config) -> Self {
+        Self {
+            config,
+            active_epoch: None,
+            shuffle: Some("roundrobin".to_string()),
+        }
+    }
+}
+
+impl From<GenesisConfig> for Config {
+    fn from(genesis_config: GenesisConfig) -> Self {
+        genesis_config.config
+    }
+}
+
+impl Config {
+    /// Returns the devnet chain configuration.
+    ///
+    /// This is the default configuration for the lean consensus devnet.
+    pub fn devnet() -> Self {
+        Self {
+            genesis_time: 0, // Default genesis time, should be set when creating genesis state
+        }
+    }
+
+    /// Calculates genesis time as current time + offset seconds.
+    ///
+    /// This allows nodes to start before genesis and sync up.
+    pub fn with_genesis_time_offset(offset_seconds: u64) -> Self {
+        let mut config = Self::devnet();
+        config.genesis_time = std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap()
+            .as_secs()
+            + offset_seconds;
+        config
+    }
+
+    /// Returns the number of seconds per forkchoice processing interval.
+    ///
+    /// This is derived from `SECONDS_PER_SLOT` and `INTERVALS_PER_SLOT`.
+    pub fn seconds_per_interval(&self) -> u64 {
+        SECONDS_PER_SLOT / INTERVALS_PER_SLOT
+    }
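With the constants defined below (`SECONDS_PER_SLOT = 4`, `INTERVALS_PER_SLOT = 4`), this works out to 1-second intervals. A minimal check of that derivation, with the constants restated locally so the snippet is self-contained:

```rust
// Interval arithmetic used by the config above (values copied from this module).
const SECONDS_PER_SLOT: u64 = 4;
const INTERVALS_PER_SLOT: u64 = 4;

fn main() {
    // One forkchoice interval lasts SECONDS_PER_SLOT / INTERVALS_PER_SLOT = 1 second,
    // so a slot is processed as four 1-second ticks.
    assert_eq!(SECONDS_PER_SLOT / INTERVALS_PER_SLOT, 1);

    // 10 seconds after genesis is tick 10: slot 2, interval 2 within that slot.
    let intervals_since_genesis = 10 / (SECONDS_PER_SLOT / INTERVALS_PER_SLOT);
    assert_eq!(intervals_since_genesis / INTERVALS_PER_SLOT, 2); // slot
    assert_eq!(intervals_since_genesis % INTERVALS_PER_SLOT, 2); // interval in slot
}
```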
+    /// Returns the number of intervals per slot.
+    ///
+    /// This is a constant value defined by the lean consensus specification.
+    pub fn intervals_per_slot(&self) -> u64 {
+        INTERVALS_PER_SLOT
+    }
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self::devnet()
+    }
+}
+
+// --- Time Parameters ---
+
+/// Number of intervals per slot for forkchoice processing.
+pub const INTERVALS_PER_SLOT: u64 = 4;
+
+/// The fixed duration of a single slot in seconds.
+pub const SECONDS_PER_SLOT: u64 = 4;
+
+/// Seconds per forkchoice processing interval.
+pub const SECONDS_PER_INTERVAL: u64 = SECONDS_PER_SLOT / INTERVALS_PER_SLOT;
+
+/// The number of slots to lookback for justification.
+pub const JUSTIFICATION_LOOKBACK_SLOTS: u64 = 3;
+
+// --- State List Length Presets ---
+
+/// The maximum number of historical block roots to store in the state.
+///
+/// With a 4-second slot, this corresponds to a history of approximately 12.1 days.
+pub const HISTORICAL_ROOTS_LIMIT: u64 = 1 << 18; // 2^18
+
+/// The maximum number of validators that can be in the registry.
+pub const VALIDATOR_REGISTRY_LIMIT: u64 = 1 << 12; // 2^12
diff --git a/lean_client/consensus/src/lib.rs b/lean_client/consensus/src/lib.rs
new file mode 100644
index 00000000000..cd94e047728
--- /dev/null
+++ b/lean_client/consensus/src/lib.rs
@@ -0,0 +1,7 @@
+pub mod attestation;
+pub use attestation::{Attestation, AttestationData, Checkpoint, SignedAttestation, Slot};
+pub mod helpers;
+pub use helpers::is_justifiable_slot;
+pub mod lean_block;
+pub mod lean_state;
+pub mod validator;
diff --git a/lean_client/consensus/src/validator.rs b/lean_client/consensus/src/validator.rs
new file mode 100644
index 00000000000..d135aedf8b5
--- /dev/null
+++ b/lean_client/consensus/src/validator.rs
@@ -0,0 +1,78 @@
+use crate::attestation::Slot;
+use ssz_derive::{Decode, Encode};
+use ssz_types::FixedVector;
+use ssz_types::typenum::U52;
+use tree_hash::TreeHash;
+use tree_hash_derive::TreeHash;
+use types::Hash256;
+
+#[derive(Clone, PartialEq, Decode, Encode, TreeHash, Debug)]
+pub struct Validator {
+    pub pubkey: FixedVector<u8, U52>,
+    pub index: u64, // Added to match the Validator struct
+}
+
+impl Validator {
+    /// Get a reference to the public key bytes.
+    pub fn get_pubkey(&self) -> &[u8] {
+        &self.pubkey[..]
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct ValidatorIndex(pub u64);
+
+impl ssz::Encode for ValidatorIndex {
+    fn is_ssz_fixed_len() -> bool {
+        <u64 as ssz::Encode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <u64 as ssz::Encode>::ssz_fixed_len()
+    }
+
+    fn ssz_bytes_len(&self) -> usize {
+        self.0.ssz_bytes_len()
+    }
+
+    fn ssz_append(&self, buf: &mut Vec<u8>) {
+        self.0.ssz_append(buf)
+    }
+}
+
+impl ssz::Decode for ValidatorIndex {
+    fn is_ssz_fixed_len() -> bool {
+        <u64 as ssz::Decode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <u64 as ssz::Decode>::ssz_fixed_len()
+    }
+
+    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
+        u64::from_ssz_bytes(bytes).map(ValidatorIndex)
+    }
+}
+
+impl ValidatorIndex {
+    pub fn is_proposer(&self, slot: Slot, num_validators: u64) -> bool {
+        slot.0 % num_validators == self.0
+    }
+}
+
+impl TreeHash for ValidatorIndex {
+    fn tree_hash_type() -> tree_hash::TreeHashType {
+        tree_hash::TreeHashType::Basic
+    }
+
+    fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding {
+        self.0.tree_hash_packed_encoding()
+    }
+
+    fn tree_hash_packing_factor() -> usize {
+        u64::tree_hash_packing_factor()
+    }
+
+    fn tree_hash_root(&self) -> Hash256 {
+        self.0.tree_hash_root()
+    }
+}
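The `is_proposer` rule above is a plain round-robin over the registry. A self-contained sanity check of it, modeling `Slot` and `ValidatorIndex` as bare `u64`s rather than the crate's newtypes:

```rust
// Round-robin proposer selection: validator (slot % num_validators) proposes.
fn is_proposer(validator_index: u64, slot: u64, num_validators: u64) -> bool {
    slot % num_validators == validator_index
}

fn main() {
    let num_validators = 4;
    // Slots 0..=7 cycle through validators 0, 1, 2, 3, 0, 1, 2, 3.
    for slot in 0..8 {
        assert!(is_proposer(slot % num_validators, slot, num_validators));
    }
    // Validator 2 proposes at slot 6, but not at slot 7.
    assert!(is_proposer(2, 6, num_validators));
    assert!(!is_proposer(2, 7, num_validators));
}
```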
diff --git a/lean_client/crypto/Cargo.toml b/lean_client/crypto/Cargo.toml
new file mode 100644
index 00000000000..2a8c7d36527
--- /dev/null
+++ b/lean_client/crypto/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "lean_crypto"
+version = "0.1.0"
+authors = ["Sigma Prime "]
+edition = "2024"
+
+[dependencies]
+alloy-primitives = { workspace = true }
+ethereum_ssz = "0.10.0"
+ethereum_ssz_derive = "0.10.0"
+ssz_types = { workspace = true }
+tree_hash = { workspace = true }
+tree_hash_derive = { workspace = true }
+typenum = { workspace = true }
+types = { workspace = true }
+leansig = { git = "https://github.com/leanEthereum/leanSig" }
+lean_keystore = { path = "../keystore" }
+
diff --git a/lean_client/crypto/src/lib.rs b/lean_client/crypto/src/lib.rs
new file mode 100644
index 00000000000..ea0c17e4acd
--- /dev/null
+++ b/lean_client/crypto/src/lib.rs
@@ -0,0 +1,2 @@
+pub mod signature;
+pub use signature::{Signature, verify_signature};
diff --git a/lean_client/crypto/src/signature.rs b/lean_client/crypto/src/signature.rs
new file mode 100644
index 00000000000..42f5365982e
--- /dev/null
+++ b/lean_client/crypto/src/signature.rs
@@ -0,0 +1,136 @@
+use ssz_derive::{Decode, Encode};
+use ssz_types::FixedVector;
+use tree_hash_derive::TreeHash;
+use typenum::*;
+
+/// XMSS signature size in bytes (3112 bytes)
+pub const SIGNATURE_SIZE: usize = 3112;
+
+/// Type alias for U3112 = U2048 + U1024 + U40
+type U3112 = Sum<Sum<U2048, U1024>, U40>;
+
+/// XMSS signature represented as fixed-size vector (3112 bytes)
+#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, TreeHash)]
+pub struct Signature {
+    bytes: FixedVector<u8, U3112>,
+}
+
+impl Signature {
+    /// Create a new signature from a 3112-byte array
+    pub fn from_bytes(bytes: [u8; SIGNATURE_SIZE]) -> Self {
+        Self {
+            bytes: FixedVector::new(bytes.to_vec())
+                .expect("Fixed vector creation should not fail for correct size"),
+        }
+    }
+
+    /// Create a signature from a byte slice (must be exactly SIGNATURE_SIZE bytes)
+    pub fn try_from_slice(bytes: &[u8]) -> Result<Self, String> {
+        if bytes.len() != SIGNATURE_SIZE {
+            return Err(format!(
+                "Invalid signature length: expected {}, got {}",
+                SIGNATURE_SIZE,
+                bytes.len()
+            ));
+        }
+        let vec = bytes.to_vec();
+        let fixed =
+            FixedVector::new(vec).map_err(|e| format!("Failed to create fixed vector: {:?}", e))?;
+        Ok(Self { bytes: fixed })
+    }
+
+    /// Get the signature as a slice
+    pub fn as_slice(&self) -> &[u8] {
+        self.bytes.as_ref()
+    }
+
+    /// Create a zero-filled signature
+    pub fn zero() -> Self {
+        Self {
+            bytes: FixedVector::new(vec![0u8; SIGNATURE_SIZE])
+                .expect("Fixed vector creation should not fail for correct size"),
+        }
+    }
+}
+
+impl Default for Signature {
+    fn default() -> Self {
+        Self::zero()
+    }
+}
+
+impl core::fmt::Display for Signature {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "0x")?;
+        for byte in self.bytes.iter().take(6) {
+            write!(f, "{:02x}", byte)?;
+        }
+        write!(f, "...({} bytes)", SIGNATURE_SIZE)?;
+        Ok(())
+    }
+}
+
+impl core::hash::Hash for Signature {
+    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
+        self.bytes.hash(state);
+    }
+}
+
+impl From<[u8; SIGNATURE_SIZE]> for Signature {
+    fn from(bytes: [u8; SIGNATURE_SIZE]) -> Self {
+        Self::from_bytes(bytes)
+    }
+}
+
+impl AsRef<[u8]> for Signature {
+    fn as_ref(&self) -> &[u8] {
+        &self.bytes
+    }
+}
+
+// SSZ Encoding/Decoding is derived from FixedVector implementation
+// TreeHash is also derived from FixedVector implementation
+
+use leansig::signature::generalized_xmss::instantiations_poseidon_top_level::lifetime_2_to_the_32::hashing_optimized::SIGTopLevelTargetSumLifetime32Dim64Base8;
+use leansig::signature::SignatureScheme;
+use leansig::serialization::Serializable;
+use leansig::MESSAGE_LENGTH;
+
+pub type LeanSigScheme = SIGTopLevelTargetSumLifetime32Dim64Base8;
+type PublicKeyType = <LeanSigScheme as SignatureScheme>::PublicKey;
+type SignatureType = <LeanSigScheme as SignatureScheme>::Signature;
+
+/// Verify an XMSS signature
+pub fn verify_signature(
+    pubkey_bytes: &[u8],
+    message: &[u8],
+    signature: &Signature,
+    epoch: u64,
+) -> Result<bool, String> {
+    if message.len() != MESSAGE_LENGTH {
+        return Err(format!(
+            "Invalid message length: expected {}, got {}",
+            MESSAGE_LENGTH,
+            message.len()
+        ));
+    }
+
+    // Convert message slice to fixed array
+    let mut message_array = [0u8; MESSAGE_LENGTH];
+    message_array.copy_from_slice(message);
+
+    // Deserialize public key
+    // The public key in Lighthouse is just the raw bytes (FixedVector)
+    // We need to parse it into the leanSig PublicKey type
+    let pk = PublicKeyType::from_bytes(pubkey_bytes)
+        .map_err(|_| "Failed to deserialize public key".to_string())?;
+
+    // Deserialize signature
+    // The signature in Lighthouse is a wrapper around the raw bytes
+    let sig = SignatureType::from_bytes(&signature.bytes[..])
+        .map_err(|_| "Failed to deserialize signature".to_string())?;
+
+    // Verify
+    let epoch32 = epoch as u32;
+    Ok(LeanSigScheme::verify(&pk, epoch32, &message_array, &sig))
+}
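A sketch of how a caller might drive `verify_signature` from the module above. The `check` helper and its inputs are hypothetical; only `Signature::try_from_slice` and `verify_signature` come from this diff, and `message` must be exactly `MESSAGE_LENGTH` bytes (e.g., a signing root):

```rust
// Hypothetical driver for `verify_signature` (not part of the diff).
// `pubkey_bytes` would come from a validator's registered XMSS public key.
use lean_crypto::{Signature, verify_signature};

fn check(pubkey_bytes: &[u8], message: &[u8], sig_bytes: &[u8], epoch: u64) -> bool {
    let signature = match Signature::try_from_slice(sig_bytes) {
        Ok(sig) => sig,
        Err(_) => return false, // wrong length
    };
    // Err means malformed inputs; Ok(false) is a well-formed but invalid signature.
    matches!(
        verify_signature(pubkey_bytes, message, &signature, epoch),
        Ok(true)
    )
}
```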
diff --git a/lean_client/forkchoice/Cargo.toml b/lean_client/forkchoice/Cargo.toml
new file mode 100644
index 00000000000..ebf5bc30dfd
--- /dev/null
+++ b/lean_client/forkchoice/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "lean_forkchoice"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+ethereum_ssz = "0.10.0"
+lean_consensus = { workspace = true }
+fixed_bytes = { workspace = true }
+parking_lot = { workspace = true }
+store = { workspace = true, features = ["redb"] }
+tracing = { workspace = true }
+metrics = { workspace = true }
+lazy_static = "1.4.0"
+types = { workspace = true }
diff --git a/lean_client/forkchoice/src/helpers.rs b/lean_client/forkchoice/src/helpers.rs
new file mode 100644
index 00000000000..65208050c41
--- /dev/null
+++ b/lean_client/forkchoice/src/helpers.rs
@@ -0,0 +1,89 @@
+use lean_consensus::attestation::{Checkpoint, SignedAttestation};
+use lean_consensus::lean_block::LeanBlock;
+use lean_consensus::lean_state::LeanState;
+use std::collections::HashMap;
+use types::{EthSpec, Hash256};
+
+pub fn get_fork_choice_head(
+    blocks: &HashMap<Hash256, LeanBlock>,
+    root: Hash256,
+    latest_attestations: &HashMap<u64, SignedAttestation>,
+    min_score: usize,
+) -> Hash256 {
+    let mut current_root = root;
+    if root == Hash256::ZERO {
+        current_root = blocks
+            .iter()
+            .min_by_key(|(_, block)| block.slot)
+            .map(|(hash, _)| *hash)
+            .unwrap_or(Hash256::ZERO);
+    }
+
+    let mut attestation_weights: HashMap<Hash256, usize> = HashMap::new();
+
+    for attestation in latest_attestations.values() {
+        let head_root = attestation.message.attestation_data.head.root;
+        // Only process attestations that point to known blocks
+        if blocks.contains_key(&head_root) {
+            // Walk up from attestation target, incrementing ancestor weights
+            let mut block_hash = head_root;
+            while let Some(block) = blocks.get(&block_hash) {
+                if let Some(root_block) = blocks.get(&current_root)
+                    && block.slot <= root_block.slot
+                {
+                    break;
+                }
+
+                *attestation_weights.entry(block_hash).or_insert(0) += 1;
+                block_hash = block.parent_root;
+            }
+        }
+    }
+
+    let mut children_map: HashMap<Hash256, Vec<Hash256>> = HashMap::new();
+
+    for (block_hash, block) in blocks.iter() {
+        if block.parent_root != Hash256::ZERO {
+            // Only include blocks that have enough attestations OR when min_score is 0
+            let weight = *attestation_weights.get(block_hash).unwrap_or(&0);
+            if min_score == 0 || weight >= min_score {
+                children_map
+                    .entry(block.parent_root)
+                    .or_default()
+                    .push(*block_hash);
+            }
+        }
+    }
+
+    let mut current = current_root;
+    loop {
+        let children = children_map.get(&current);
+
+        if children.is_none() || children.unwrap().is_empty() {
+            return current;
+        }
+
+        current = *children
+            .unwrap()
+            .iter()
+            .max_by_key(|&&block_hash| {
+                let weight = *attestation_weights.get(&block_hash).unwrap_or(&0);
+                (weight, block_hash)
+            })
+            .unwrap();
+    }
+}
+
+pub fn get_latest_justified<E: EthSpec>(
+    states: &HashMap<Hash256, LeanState<E>>,
+) -> Option<Checkpoint> {
+    if states.is_empty() {
+        return None;
+    }
+
+    let latest_state = states
+        .values()
+        .max_by_key(|state| state.latest_justified.slot)?;
+
+    Some(latest_state.latest_justified.clone())
+}
diff --git a/lean_client/forkchoice/src/lib.rs b/lean_client/forkchoice/src/lib.rs
new file mode 100644
index 00000000000..4f276161638
--- /dev/null
+++ b/lean_client/forkchoice/src/lib.rs
@@ -0,0 +1,6 @@
+pub mod helpers;
+pub mod proto_array;
+pub mod store;
+
+pub use helpers::get_fork_choice_head;
+pub use store::{AttestationTracker, ForkChoiceStore, ProtoAttestation};
diff --git a/lean_client/forkchoice/src/proto_array.rs b/lean_client/forkchoice/src/proto_array.rs
new file mode 100644
index 00000000000..f8243701767
--- /dev/null
+++ b/lean_client/forkchoice/src/proto_array.rs
@@ -0,0 +1,451 @@
+use fixed_bytes::FixedBytesExtended;
+use lean_consensus::attestation::Slot;
+use std::collections::HashMap;
+use types::Hash256;
+
+/// Errors returned by [`ProtoArray`].
+#[derive(Debug, PartialEq, Eq)]
+pub enum ProtoArrayError {
+    /// Attempted to register a block whose parent is unknown.
+    UnknownParent {
+        block_root: Hash256,
+        parent_root: Hash256,
+    },
+    /// Attempted to update or query an unknown block root.
+    UnknownBlock(Hash256),
+    /// Attempted to access an invalid node index.
+    InvalidNodeIndex(usize),
+}
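A minimal usage sketch of the proto-array defined in this file, mirroring the unit tests further down (the 32-byte roots here are arbitrary test values):

```rust
// Usage sketch for ProtoArray (types from this diff).
use lean_consensus::attestation::Slot;
use lean_forkchoice::proto_array::{ProtoArray, ProtoArrayError};
use types::Hash256;

fn main() -> Result<(), ProtoArrayError> {
    let genesis = Hash256::from([0u8; 32]);
    let block_a = Hash256::from([1u8; 32]);
    let mut proto_array = ProtoArray::new(genesis, Slot(0));

    // Children must be registered after their parent...
    proto_array.on_block(block_a, Slot(1), genesis)?;

    // ...otherwise registration fails with UnknownParent.
    let orphan = Hash256::from([9u8; 32]);
    let missing_parent = Hash256::from([8u8; 32]);
    assert!(matches!(
        proto_array.on_block(orphan, Slot(2), missing_parent),
        Err(ProtoArrayError::UnknownParent { .. })
    ));

    // One vote on block_a also weights its ancestors, so the head moves to it.
    proto_array.add_weight(block_a, 1)?;
    assert_eq!(proto_array.find_head(genesis)?, block_a);
    Ok(())
}
```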
+/// Node metadata tracked by the proto-array.
+#[derive(Debug, Clone)]
+struct ProtoNode {
+    slot: Slot,
+    root: Hash256,
+    parent: Option<usize>,
+    children: Vec<usize>,
+    weight: i64,
+    best_child: Option<usize>,
+    best_descendant: Option<usize>,
+}
+
+impl ProtoNode {
+    fn new(slot: Slot, root: Hash256, parent: Option<usize>) -> Self {
+        Self {
+            slot,
+            root,
+            parent,
+            children: Vec::new(),
+            weight: 0,
+            best_child: None,
+            best_descendant: None,
+        }
+    }
+}
+
+/// Proto-array implementation that mirrors the spec's fork-choice structure.
+///
+/// Tracks parent/child relationships, cumulative weights, and best-child/best-descendant
+/// metadata. Weight updates are propagated to ancestors so fork choice can walk the
+/// heaviest subtree starting from a justified root.
+#[derive(Debug, Default)]
+pub struct ProtoArray {
+    nodes: Vec<ProtoNode>,
+    indices: HashMap<Hash256, usize>,
+}
+
+impl ProtoArray {
+    /// Create a proto-array initialized with the genesis block.
+    pub fn new(genesis_root: Hash256, genesis_slot: Slot) -> Self {
+        let mut indices = HashMap::new();
+        indices.insert(genesis_root, 0);
+
+        Self {
+            nodes: vec![ProtoNode::new(genesis_slot, genesis_root, None)],
+            indices,
+        }
+    }
+
+    /// Returns `true` if the proto array already contains `root`.
+    pub fn contains(&self, root: &Hash256) -> bool {
+        self.indices.contains_key(root)
+    }
+
+    fn node_index(&self, root: &Hash256) -> Result<usize, ProtoArrayError> {
+        self.indices
+            .get(root)
+            .copied()
+            .ok_or(ProtoArrayError::UnknownBlock(*root))
+    }
+
+    /// Registers a new block in the proto array.
+    pub fn on_block(
+        &mut self,
+        block_root: Hash256,
+        block_slot: Slot,
+        parent_root: Hash256,
+    ) -> Result<(), ProtoArrayError> {
+        if self.contains(&block_root) {
+            return Ok(());
+        }
+
+        let parent_index =
+            if parent_root == Hash256::zero() {
+                None
+            } else {
+                Some(self.indices.get(&parent_root).copied().ok_or(
+                    ProtoArrayError::UnknownParent {
+                        block_root,
+                        parent_root,
+                    },
+                )?)
+            };
+
+        let node_index = self.nodes.len();
+        let node = ProtoNode::new(block_slot, block_root, parent_index);
+
+        if let Some(parent_idx) = parent_index {
+            self.nodes[parent_idx].children.push(node_index);
+        }
+
+        self.indices.insert(block_root, node_index);
+        self.nodes.push(node);
+
+        // Update metadata for this node and its ancestors.
+        self.update_best_paths(node_index)?;
+        let mut current = parent_index;
+        while let Some(idx) = current {
+            self.update_best_paths(idx)?;
+            current = self.nodes[idx].parent;
+        }
+
+        Ok(())
+    }
+
+    /// Adds `weight` to `block_root` and all of its ancestors.
+    pub fn add_weight(&mut self, block_root: Hash256, weight: u64) -> Result<(), ProtoArrayError> {
+        self.apply_weight_delta(block_root, weight as i64)
+    }
+
+    /// Removes `weight` from `block_root` and its ancestors, saturating at zero.
+    pub fn remove_weight(
+        &mut self,
+        block_root: Hash256,
+        weight: u64,
+    ) -> Result<(), ProtoArrayError> {
+        self.apply_weight_delta(block_root, -(weight as i64))
+    }
+
+    /// Returns the canonical head when starting at `start_root`.
+    ///
+    /// Traversal selects the child whose subtree has the highest weight. Ties fall back to the
+    /// highest slot, then lexicographically greatest root for stability.
+    pub fn find_head(&self, start_root: Hash256) -> Result<Hash256, ProtoArrayError> {
+        let mut index = self.node_index(&start_root)?;
+
+        loop {
+            let node = self
+                .nodes
+                .get(index)
+                .ok_or(ProtoArrayError::UnknownBlock(start_root))?;
+
+            let Some(best_child_idx) = node.best_child else { return Ok(node.root) };
+
+            let next = self.nodes[best_child_idx]
+                .best_descendant
+                .unwrap_or(best_child_idx);
+            index = next;
+        }
+    }
+
+    /// Returns the total number of tracked nodes.
+    pub fn len(&self) -> usize {
+        self.nodes.len()
+    }
+
+    /// Returns `true` if the proto array is empty.
+    pub fn is_empty(&self) -> bool {
+        self.nodes.is_empty()
+    }
+
+    fn apply_weight_delta(
+        &mut self,
+        block_root: Hash256,
+        delta: i64,
+    ) -> Result<(), ProtoArrayError> {
+        let mut index = self.node_index(&block_root)?;
+
+        loop {
+            let node = self
+                .nodes
+                .get_mut(index)
+                .ok_or(ProtoArrayError::InvalidNodeIndex(index))?;
+
+            node.weight = (node.weight + delta).max(0);
+
+            self.update_best_paths(index)?;
+
+            match self.nodes[index].parent {
+                Some(parent_index) => index = parent_index,
+                None => break,
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Applies weight deltas to all nodes and propagates to parents.
+    ///
+    /// The deltas array must have the same length as the number of nodes.
+    /// After applying deltas, recalculates best_child and best_descendant for
+    /// all nodes, optionally filtering by a cutoff weight.
+    pub fn apply_deltas(
+        &mut self,
+        deltas: &mut [i64],
+        cutoff_weight: u64,
+    ) -> Result<(), ProtoArrayError> {
+        if deltas.len() != self.nodes.len() {
+            return Err(ProtoArrayError::InvalidNodeIndex(deltas.len()));
+        }
+
+        // Forward pass: apply deltas and propagate to parents (iterate backwards)
+        for i in (0..self.nodes.len()).rev() {
+            let node_delta = deltas[i];
+            self.nodes[i].weight += node_delta;
+
+            if let Some(parent_idx) = self.nodes[i].parent {
+                deltas[parent_idx] += node_delta;
+            }
+        }
+
+        // Backward pass: recalculate best_child and best_descendant with cutoff
+        for i in (0..self.nodes.len()).rev() {
+            self.update_best_paths_with_cutoff(i, cutoff_weight)?;
+        }
+
+        Ok(())
+    }
+
+    /// Returns the canonical head when starting at `start_root`, with a minimum weight cutoff.
+    ///
+    /// Only considers nodes whose weight is >= cutoff_weight. This is used for
+    /// safe target computation where we require 2/3 supermajority.
+    pub fn find_head_with_cutoff(
+        &self,
+        start_root: Hash256,
+        cutoff_weight: u64,
+    ) -> Result<Hash256, ProtoArrayError> {
+        let mut index = self.node_index(&start_root)?;
+
+        loop {
+            let node = self
+                .nodes
+                .get(index)
+                .ok_or(ProtoArrayError::InvalidNodeIndex(index))?;
+
+            // Check if current node meets cutoff
+            if (node.weight as u64) < cutoff_weight {
+                // Current node doesn't meet cutoff, return parent or genesis
+                return Ok(node.root);
+            }
+
+            let Some(best_child_idx) = node.best_child else {
+                return Ok(node.root);
+            };
+
+            let best_child = self
+                .nodes
+                .get(best_child_idx)
+                .ok_or(ProtoArrayError::InvalidNodeIndex(best_child_idx))?;
+
+            // Check if best child meets cutoff
+            if (best_child.weight as u64) < cutoff_weight {
+                return Ok(node.root);
+            }
+
+            let next = self.nodes[best_child_idx]
+                .best_descendant
+                .unwrap_or(best_child_idx);
+
+            // Check if best descendant meets cutoff
+            if let Some(desc) = self.nodes.get(next) {
+                if (desc.weight as u64) < cutoff_weight {
+                    return Ok(best_child.root);
+                }
+            }
+
+            index = next;
+        }
+    }
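The `cutoff_weight` passed to `find_head_with_cutoff` for a 2/3 safe target would plausibly be the smallest supermajority weight; the caller wiring is an assumption here, but the ceiling arithmetic matches the `threshold_2_3` formula used in `process_attestations`:

```rust
// Smallest weight that constitutes a 2/3 supermajority: ceil(2n / 3).
// (Only the arithmetic is fixed by this diff; how a caller feeds it into
// `find_head_with_cutoff` is an assumption.)
fn supermajority_cutoff(num_validators: u64) -> u64 {
    (2 * num_validators + 2) / 3
}

fn main() {
    assert_eq!(supermajority_cutoff(10), 7);   // ceil(20 / 3)
    assert_eq!(supermajority_cutoff(3), 2);    // ceil(6 / 3)
    assert_eq!(supermajority_cutoff(100), 67); // ceil(200 / 3)
}
```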
+    /// Returns the index of a block root, if it exists.
+    pub fn get_index(&self, root: &Hash256) -> Option<usize> {
+        self.indices.get(root).copied()
+    }
+
+    fn update_best_paths(&mut self, index: usize) -> Result<(), ProtoArrayError> {
+        self.update_best_paths_with_cutoff(index, 0)
+    }
+
+    fn update_best_paths_with_cutoff(
+        &mut self,
+        index: usize,
+        _cutoff_weight: u64,
+    ) -> Result<(), ProtoArrayError> {
+        let (best_child, best_descendant) = self.compute_best_paths(index)?;
+
+        let node = self
+            .nodes
+            .get_mut(index)
+            .ok_or(ProtoArrayError::InvalidNodeIndex(index))?;
+        node.best_child = best_child;
+        node.best_descendant = best_descendant;
+        Ok(())
+    }
+
+    fn compute_best_paths(
+        &self,
+        index: usize,
+    ) -> Result<(Option<usize>, Option<usize>), ProtoArrayError> {
+        let node = self
+            .nodes
+            .get(index)
+            .ok_or(ProtoArrayError::InvalidNodeIndex(index))?;
+
+        let mut best_child: Option<usize> = None;
+        let mut best_descendant: Option<usize> = None;
+
+        for &child_idx in &node.children {
+            let child = self
+                .nodes
+                .get(child_idx)
+                .ok_or(ProtoArrayError::InvalidNodeIndex(child_idx))?;
+
+            let candidate_idx = child.best_descendant.unwrap_or(child_idx);
+            let candidate = self
+                .nodes
+                .get(candidate_idx)
+                .ok_or(ProtoArrayError::InvalidNodeIndex(candidate_idx))?;
+
+            match best_child {
+                None => {
+                    best_child = Some(child_idx);
+                    best_descendant = Some(candidate_idx);
+                }
+                Some(current_child_idx) => {
+                    let current_candidate_idx = best_descendant.unwrap_or(current_child_idx);
+                    let current_candidate = self
+                        .nodes
+                        .get(current_candidate_idx)
+                        .ok_or(ProtoArrayError::InvalidNodeIndex(current_candidate_idx))?;
+                    let current_key = (
+                        self.nodes[current_child_idx].weight,
+                        current_candidate.slot.0,
+                        current_candidate.root,
+                    );
+
+                    let candidate_key = (
+                        self.nodes[child_idx].weight,
+                        candidate.slot.0,
+                        candidate.root,
+                    );
+
+                    if candidate_key > current_key {
+                        best_child = Some(child_idx);
+                        best_descendant = Some(candidate_idx);
+                    }
+                }
+            }
+        }
+
+        Ok((best_child, best_descendant))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn hash(i: u8) -> Hash256 {
+        let mut bytes = [0u8; 32];
+        bytes[31] = i;
+        Hash256::from(bytes)
+    }
+
+    #[test]
+    fn can_register_blocks() {
+        let genesis = hash(0);
+        let mut proto_array = ProtoArray::new(genesis, Slot(0));
+        assert_eq!(proto_array.len(), 1);
+
+        let child = hash(1);
+        proto_array
+            .on_block(child, Slot(1), genesis)
+            .expect("child added");
+        assert!(proto_array.contains(&child));
+        assert_eq!(proto_array.len(), 2);
+    }
+
+    #[test]
+    fn rejects_unknown_parent() {
+        let genesis = hash(0);
+        let mut proto_array = ProtoArray::new(genesis, Slot(0));
+
+        let err = proto_array
+            .on_block(hash(2), Slot(1), hash(1))
+            .expect_err("missing parent");
+
+        assert_eq!(
+            err,
+            ProtoArrayError::UnknownParent {
+                block_root: hash(2),
+                parent_root: hash(1)
+            }
+        );
+    }
+
+    #[test]
+    fn weight_updates_affect_ancestors() {
+        let genesis = hash(0);
+        let mut proto_array = ProtoArray::new(genesis, Slot(0));
+        let a = hash(1);
+        let b = hash(2);
+
+        proto_array
+            .on_block(a, Slot(1), genesis)
+            .expect("add block a");
+        proto_array.on_block(b, Slot(2), a).expect("add block b");
+
+        proto_array
+            .add_weight(b, 2)
+            .expect("weight applied to chain");
+
+        let head = proto_array.find_head(genesis).expect("head");
+        assert_eq!(head, b);
+    }
+
+    #[test]
+    fn remove_weight_walks_parents() {
+        let genesis = hash(0);
+        let mut proto_array = ProtoArray::new(genesis, Slot(0));
+        let a = hash(1);
+
+        proto_array
+            .on_block(a, Slot(1), genesis)
+            .expect("add block a");
+        proto_array.add_weight(a, 
5).expect("weight"); + proto_array.remove_weight(a, 5).expect("remove weight"); + + let head = proto_array.find_head(genesis).expect("head"); + assert_eq!(head, a, "head stays on last child when weights equal"); + } +} diff --git a/lean_client/forkchoice/src/store.rs b/lean_client/forkchoice/src/store.rs new file mode 100644 index 00000000000..06451805360 --- /dev/null +++ b/lean_client/forkchoice/src/store.rs @@ -0,0 +1,303 @@ +//! Fork choice store and attestation tracking structures. +//! +//! This module provides the core data structures for fork choice operations. + +use lean_consensus::attestation::{Checkpoint, SignedAttestation, Slot}; + +/// Fork choice store that tracks timing and checkpoint state. +#[derive(Debug, Clone)] +pub struct ForkChoiceStore { + /// Current time in intervals since genesis. + pub time_intervals: u64, + /// Current time in slots (derived from intervals). + pub time_slots: u64, + /// Latest justified checkpoint. + pub latest_justified: Checkpoint, + /// Latest finalized checkpoint. + pub latest_finalized: Checkpoint, +} + +impl ForkChoiceStore { + /// Number of intervals per slot. + pub const INTERVALS_PER_SLOT: u64 = 4; + + /// Creates a new ForkChoiceStore with the given anchor checkpoint. + pub fn new(anchor_checkpoint: Checkpoint) -> Self { + Self { + time_intervals: 0, + time_slots: 0, + latest_justified: anchor_checkpoint.clone(), + latest_finalized: anchor_checkpoint, + } + } + + /// Creates a ForkChoiceStore initialized at a specific slot. + pub fn at_slot(slot: u64, anchor_checkpoint: Checkpoint) -> Self { + Self { + time_intervals: slot * Self::INTERVALS_PER_SLOT, + time_slots: slot, + latest_justified: anchor_checkpoint.clone(), + latest_finalized: anchor_checkpoint, + } + } + + /// Updates the justified and finalized checkpoints if they are newer. + pub fn update(&mut self, justified: Checkpoint, finalized: Checkpoint) { + if justified.slot > self.latest_justified.slot { + self.latest_justified = justified; + } + if finalized.slot > self.latest_finalized.slot { + self.latest_finalized = finalized; + } + } + + /// Returns the current interval within the slot (0-3). + pub fn current_interval(&self) -> u64 { + self.time_intervals % Self::INTERVALS_PER_SLOT + } + + /// Advances time by one interval. + /// + /// Returns the new interval number within the slot (0-3). + pub fn tick(&mut self) -> u64 { + self.time_intervals += 1; + let interval = self.current_interval(); + + if interval == 0 { + self.time_slots += 1; + } + + interval + } + + /// Advances to a specific time in intervals. + pub fn advance_to(&mut self, target_intervals: u64) { + while self.time_intervals < target_intervals { + self.tick(); + } + } +} + +/// Attestation metadata for fork choice tracking. +/// +/// This is a lightweight representation of an attestation's position in the proto array. +#[derive(Debug, Clone, Default)] +pub struct ProtoAttestation { + /// Index of the attested block in the proto array. + pub index: usize, + /// Slot of the attestation. + pub slot: Slot, + /// The full signed attestation (optional, for block inclusion). + pub attestation: Option, +} + +impl ProtoAttestation { + /// Creates a new ProtoAttestation. + pub fn new(index: usize, slot: Slot, attestation: Option) -> Self { + Self { + index, + slot, + attestation, + } + } +} + +/// Tracks attestation state for a single validator. 
+/// +/// Distinguishes between: +/// - `latest_known`: On-chain attestations (included in a block) +/// - `latest_new`: Gossip attestations (not yet included in a block) +/// - `applied_index`: The last index whose weight was applied to fork choice +#[derive(Debug, Clone, Default)] +pub struct AttestationTracker { + /// Index of the last attestation whose weight was applied to fork choice. + /// Used to compute weight deltas. + pub applied_index: Option, + + /// Latest on-chain attestation (included in a block). + /// These contribute to fork choice immediately. + pub latest_known: Option, + + /// Latest gossip attestation (not yet included in a block). + /// These are promoted to `latest_known` at specific intervals. + pub latest_new: Option, +} + +impl AttestationTracker { + /// Creates a new empty AttestationTracker. + pub fn new() -> Self { + Self::default() + } + + /// Updates the tracker with an on-chain attestation. + /// + /// If the new attestation is newer than the current `latest_known`, it replaces it. + /// If it's also newer than `latest_new`, clears `latest_new`. + pub fn on_block_attestation(&mut self, proto_att: ProtoAttestation) { + let new_slot = proto_att.slot; + + // Update latest_known if this is newer + let should_update_known = self + .latest_known + .as_ref() + .map(|k| new_slot > k.slot) + .unwrap_or(true); + + if should_update_known { + self.latest_known = Some(proto_att); + } + + // Clear latest_new if the on-chain version is newer + if let Some(ref new_att) = self.latest_new { + if new_slot > new_att.slot { + self.latest_new = None; + } + } + } + + /// Updates the tracker with a gossip attestation. + /// + /// Only updates if the new attestation is newer than the current `latest_new`. + pub fn on_gossip_attestation(&mut self, proto_att: ProtoAttestation) { + let new_slot = proto_att.slot; + + let should_update = self + .latest_new + .as_ref() + .map(|n| new_slot > n.slot) + .unwrap_or(true); + + if should_update { + self.latest_new = Some(proto_att); + } + } + + /// Promotes `latest_new` to `latest_known`. + /// + /// Called at specific intervals to accept pending gossip attestations. + pub fn accept_new(&mut self) { + if let Some(new_att) = self.latest_new.take() { + self.latest_known = Some(new_att); + } + } + + /// Returns the attestation to use for weight calculation. + /// + /// If `from_known` is true, returns `latest_known`, otherwise returns `latest_new`. 
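+    /// (Weight application typically reads `latest_known`; pending gossip votes
+    /// are read via `latest_new` until `accept_new` promotes them.)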
+ pub fn get_attestation(&self, from_known: bool) -> Option<&ProtoAttestation> { + if from_known { + self.latest_known.as_ref() + } else { + self.latest_new.as_ref() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use fixed_bytes::FixedBytesExtended; + use types::Hash256; + + fn make_checkpoint(slot: u64) -> Checkpoint { + Checkpoint { + root: Hash256::zero(), + slot: Slot(slot), + } + } + + #[test] + fn test_fork_choice_store_new() { + let checkpoint = make_checkpoint(0); + let store = ForkChoiceStore::new(checkpoint.clone()); + + assert_eq!(store.time_intervals, 0); + assert_eq!(store.time_slots, 0); + assert_eq!(store.latest_justified.slot, checkpoint.slot); + assert_eq!(store.latest_finalized.slot, checkpoint.slot); + } + + #[test] + fn test_fork_choice_store_tick() { + let checkpoint = make_checkpoint(0); + let mut store = ForkChoiceStore::new(checkpoint); + + // Tick through one full slot + assert_eq!(store.tick(), 1); // interval 1 + assert_eq!(store.time_slots, 0); + + assert_eq!(store.tick(), 2); // interval 2 + assert_eq!(store.time_slots, 0); + + assert_eq!(store.tick(), 3); // interval 3 + assert_eq!(store.time_slots, 0); + + assert_eq!(store.tick(), 0); // interval 0 of next slot + assert_eq!(store.time_slots, 1); + } + + #[test] + fn test_fork_choice_store_update() { + let checkpoint = make_checkpoint(0); + let mut store = ForkChoiceStore::new(checkpoint); + + // Update with newer checkpoints + let justified = make_checkpoint(5); + let finalized = make_checkpoint(3); + store.update(justified.clone(), finalized.clone()); + + assert_eq!(store.latest_justified.slot.0, 5); + assert_eq!(store.latest_finalized.slot.0, 3); + + // Update with older checkpoints (should not change) + let old_justified = make_checkpoint(2); + let old_finalized = make_checkpoint(1); + store.update(old_justified, old_finalized); + + assert_eq!(store.latest_justified.slot.0, 5); + assert_eq!(store.latest_finalized.slot.0, 3); + } + + #[test] + fn test_attestation_tracker_on_block() { + let mut tracker = AttestationTracker::new(); + + // First on-chain attestation + let att1 = ProtoAttestation::new(1, Slot(5), None); + tracker.on_block_attestation(att1); + + assert!(tracker.latest_known.is_some()); + assert_eq!(tracker.latest_known.as_ref().unwrap().slot.0, 5); + + // Newer on-chain attestation replaces it + let att2 = ProtoAttestation::new(2, Slot(10), None); + tracker.on_block_attestation(att2); + + assert_eq!(tracker.latest_known.as_ref().unwrap().slot.0, 10); + + // Older on-chain attestation does not replace + let att3 = ProtoAttestation::new(3, Slot(7), None); + tracker.on_block_attestation(att3); + + assert_eq!(tracker.latest_known.as_ref().unwrap().slot.0, 10); + } + + #[test] + fn test_attestation_tracker_accept_new() { + let mut tracker = AttestationTracker::new(); + + // Add gossip attestation + let att = ProtoAttestation::new(1, Slot(5), None); + tracker.on_gossip_attestation(att); + + assert!(tracker.latest_new.is_some()); + assert!(tracker.latest_known.is_none()); + + // Accept new + tracker.accept_new(); + + assert!(tracker.latest_new.is_none()); + assert!(tracker.latest_known.is_some()); + assert_eq!(tracker.latest_known.as_ref().unwrap().slot.0, 5); + } +} diff --git a/lean_client/keystore/Cargo.toml b/lean_client/keystore/Cargo.toml new file mode 100644 index 00000000000..3cc38166b01 --- /dev/null +++ b/lean_client/keystore/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "lean_keystore" +version = "0.1.0" +edition = "2024" + +# Binary commented out - using library functions 
directly +# [[bin]] +# name = "generate_keys" +# path = "src/bin/generate_keys.rs" + +[dependencies] +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +clap = { workspace = true, features = ["derive"] } +thiserror = "1" +leansig = { git = "https://github.com/leanEthereum/leanSig" } +ethereum_ssz = "0.10.0" +ethereum_ssz_derive = "0.10.0" +rand = "0.9" + +[dev-dependencies] +tempfile = { workspace = true } diff --git a/lean_client/keystore/src/codec.rs b/lean_client/keystore/src/codec.rs new file mode 100644 index 00000000000..a3c3941bed9 --- /dev/null +++ b/lean_client/keystore/src/codec.rs @@ -0,0 +1,18 @@ +/// Re-export leanSig's serialization traits for backwards compatibility +pub use leansig::serialization::Serializable; + +/// Encode a value using leanSig's canonical serialization. +pub fn encode_to_vec(value: &T) -> Vec +where + T: Serializable, +{ + value.to_bytes() +} + +/// Decode a value using leanSig's canonical serialization. +pub fn decode_from_slice(bytes: &[u8]) -> Result +where + T: Serializable, +{ + T::from_bytes(bytes) +} diff --git a/lean_client/keystore/src/key_generation.rs b/lean_client/keystore/src/key_generation.rs new file mode 100644 index 00000000000..867752b0ef3 --- /dev/null +++ b/lean_client/keystore/src/key_generation.rs @@ -0,0 +1,232 @@ +//! Key generation functionality using hash-sig Rust crate + +use crate::key_storage::{KeyStore, KeyStoreError, ValidatorKeyPair, PublicKey, PrivateKey}; +use crate::HashSigKeyConfig; +use leansig::signature::generalized_xmss::instantiations_poseidon_top_level::lifetime_2_to_the_32::hashing_optimized::SIGTopLevelTargetSumLifetime32Dim64Base8; +use leansig::signature::SignatureScheme; +use leansig::serialization::Serializable; +use rand::{rngs::StdRng, SeedableRng}; +use std::path::PathBuf; +use std::convert::TryInto; +use tracing::{debug, info}; + +/// Error types for key generation operations +#[derive(Debug, thiserror::Error)] +pub enum KeyGenerationError { + #[error("Key generation failed: {0}")] + GenerationFailed(String), + #[error("Key storage error: {0}")] + StorageError(#[from] KeyStoreError), + #[error("Invalid configuration: {0}")] + InvalidConfig(String), + #[error("Hash-sig error: {0}")] + HashSigError(String), +} + +/// Configuration for key generation +#[derive(Debug, Clone)] +pub struct KeyGenerationConfig { + /// Number of validators to generate keys for + pub num_validators: u64, + /// Log2 of the number of active epochs + pub log_num_active_epochs: u64, + /// Output directory for generated keys + pub output_dir: PathBuf, +} + +impl From<&HashSigKeyConfig> for KeyGenerationConfig { + fn from(config: &HashSigKeyConfig) -> Self { + Self { + num_validators: config.num_validators, + log_num_active_epochs: config.log_num_active_epochs, + output_dir: config.output_dir.clone(), + } + } +} + +/// Generates hash-sig validator keys using the hash-sig Rust crate +/// +/// This function: +/// 1. Creates a GeneralizedXmssScheme with the specified parameters +/// 2. Generates key pairs for each validator +/// 3. Stores the generated keys using KeyStore +/// 4. 
Returns all generated key pairs
+///
+/// The scheme used is: SIGTopLevelTargetSumLifetime32Dim64Base8
+/// - Total lifetime: 2^32 epochs
+/// - Active epochs: 2^log_num_active_epochs
+pub fn generate_keys(
+    config: &KeyGenerationConfig,
+) -> Result<Vec<ValidatorKeyPair>, KeyGenerationError> {
+    // Validate configuration
+    if config.num_validators == 0 {
+        return Err(KeyGenerationError::InvalidConfig(
+            "Number of validators must be greater than 0".to_string(),
+        ));
+    }
+
+    if config.log_num_active_epochs == 0 {
+        return Err(KeyGenerationError::InvalidConfig(
+            "log_num_active_epochs must be greater than 0".to_string(),
+        ));
+    }
+
+    info!(
+        num_validators = config.num_validators,
+        log_num_active_epochs = config.log_num_active_epochs,
+        output_dir = ?config.output_dir,
+        "Generating hash-sig validator keys using hash-sig crate"
+    );
+
+    // Ensure output directory exists
+    std::fs::create_dir_all(&config.output_dir).map_err(|e| {
+        KeyGenerationError::GenerationFailed(format!("Failed to create output directory: {}", e))
+    })?;
+
+    // Use SIGTopLevelTargetSumLifetime32Dim64Base8 scheme
+    // - LOG_LIFETIME = 32 (total lifetime: 2^32 epochs)
+    // - LOG_NUM_ACTIVE_EPOCHS = log_num_active_epochs (active epochs: 2^log_num_active_epochs)
+    // - DIM = 64
+    // - BASE = 8
+
+    info!(
+        "Generating XMSS keys with LOG_LIFETIME=32, LOG_NUM_ACTIVE_EPOCHS={}",
+        config.log_num_active_epochs
+    );
+
+    let mut key_pairs = Vec::new();
+    let keystore = KeyStore::new(config.output_dir.clone());
+
+    // Calculate number of active epochs (2^log_num_active_epochs)
+    let num_active_epochs = 1u64 << config.log_num_active_epochs;
+
+    // Generate key pairs for each validator
+    for validator_index in 0..config.num_validators {
+        debug!(validator_index, "Generating key pair");
+
+        // Generate keys using hash-sig crate
+        let key_pair = generate_xmss_key_pair(validator_index, num_active_epochs)?;
+
+        // Save to keystore
+        keystore.save_key_pair(validator_index, &key_pair)?;
+        key_pairs.push(key_pair);
+    }
+
+    info!(
+        generated_count = key_pairs.len(),
+        "Key generation completed successfully"
+    );
+
+    Ok(key_pairs)
+}
+
+/// Generates keys synchronously (same as regular version)
+pub fn generate_keys_sync(
+    config: &KeyGenerationConfig,
+) -> Result<Vec<ValidatorKeyPair>, KeyGenerationError> {
+    generate_keys(config)
+}
+
+/// Generates an XMSS key pair for a validator using the hash-sig crate
+fn generate_xmss_key_pair(
+    validator_index: u64,
+    num_active_epochs: u64,
+) -> Result<ValidatorKeyPair, KeyGenerationError> {
+    // Deterministic RNG seeded from the validator index. This makes key
+    // generation reproducible for devnets, but it is NOT cryptographically
+    // secure; production deployments need a proper entropy source.
+    let mut rng = StdRng::from_seed(
+        validator_index.to_le_bytes().repeat(4)[..32]
+            .try_into()
+            .unwrap(),
+    );
+
+    // Generate key pair using the SIGTopLevelTargetSumLifetime32Dim64Base8 scheme
+    // Activation epoch starts at 0, and the key is active for num_active_epochs epochs
+    let (public_key_raw, secret_key_raw) =
+        SIGTopLevelTargetSumLifetime32Dim64Base8::key_gen(&mut rng, 0, num_active_epochs as usize);
+
+    let _public_key_bytes = public_key_raw.to_bytes();
+    let _private_key_bytes = secret_key_raw.to_bytes();
+
+    let public_key_json = serde_json::to_string(&public_key_raw).map_err(|e| {
+        KeyGenerationError::HashSigError(format!(
+            "Failed to serialize lean-sig public key to JSON: {}",
+            e
+        ))
+    })?;
+    let private_key_json = serde_json::to_string(&secret_key_raw).map_err(|e| {
+        KeyGenerationError::HashSigError(format!(
+            "Failed to serialize lean-sig secret key to 
JSON: {}", + e + )) + })?; + + let public_key: PublicKey = serde_json::from_str(&public_key_json).map_err(|e| { + KeyGenerationError::HashSigError(format!( + "Failed to convert lean-sig public key into lean format: {}", + e + )) + })?; + let private_key: PrivateKey = serde_json::from_str(&private_key_json).map_err(|e| { + KeyGenerationError::HashSigError(format!( + "Failed to convert lean-sig secret key into lean format: {}", + e + )) + })?; + + debug!( + validator_index, + num_active_epochs, "Generated XMSS key pair" + ); + + Ok(ValidatorKeyPair::with_serialized( + public_key, + private_key, + public_key_json, + private_key_json, + )) +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn test_key_generation_config_from_hash_sig_config() { + let hash_sig_config = HashSigKeyConfig::new(10, 24); + let gen_config = KeyGenerationConfig::from(&hash_sig_config); + + assert_eq!(gen_config.num_validators, 10); + assert_eq!(gen_config.log_num_active_epochs, 24); + } + + #[test] + fn test_invalid_config_zero_validators() { + let temp_dir = TempDir::new().unwrap(); + let config = KeyGenerationConfig { + num_validators: 0, + log_num_active_epochs: 24, + output_dir: temp_dir.path().to_path_buf(), + }; + + let result = generate_keys(&config); + + assert!(matches!(result, Err(KeyGenerationError::InvalidConfig(_)))); + } + + #[test] + fn test_invalid_config_zero_active_epochs() { + let temp_dir = TempDir::new().unwrap(); + let config = KeyGenerationConfig { + num_validators: 10, + log_num_active_epochs: 0, + output_dir: temp_dir.path().to_path_buf(), + }; + + let result = generate_keys(&config); + + assert!(matches!(result, Err(KeyGenerationError::InvalidConfig(_)))); + } +} diff --git a/lean_client/keystore/src/key_storage.rs b/lean_client/keystore/src/key_storage.rs new file mode 100644 index 00000000000..fc3784e3c5f --- /dev/null +++ b/lean_client/keystore/src/key_storage.rs @@ -0,0 +1,518 @@ +//! 
Key storage and retrieval functionality + +use leansig::signature::generalized_xmss::instantiations_poseidon_top_level::lifetime_2_to_the_32::hashing_optimized::SIGTopLevelTargetSumLifetime32Dim64Base8; +use leansig::signature::SignatureScheme; +use leansig::serialization::Serializable; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fs; +use std::path::{Path, PathBuf}; +use tracing::{debug, error, info, warn}; + +type HashSigScheme = SIGTopLevelTargetSumLifetime32Dim64Base8; +type HashSigSecretKey = ::SecretKey; +type HashSigPublicKey = ::PublicKey; + +/// Error types for key storage operations +#[derive(Debug, thiserror::Error)] +pub enum KeyStoreError { + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + #[error("JSON serialization error: {0}")] + Json(#[from] serde_json::Error), + #[error("Key not found for validator {0}")] + KeyNotFound(u64), + #[error("Invalid key file format: {0}")] + InvalidFormat(String), + #[error("Key store directory error: {0}")] + DirectoryError(String), +} + +/// Public key structure for XMSS keys (52 bytes) +/// Stores the public key as 52 raw bytes: root (32 bytes) + parameters (20 bytes) +#[derive(Debug, Clone)] +pub struct PublicKey { + /// Raw 52-byte public key (32 bytes root + 20 bytes parameters) + pub bytes: [u8; 52], +} + +impl Serialize for PublicKey { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + // Serialize as legacy format for compatibility + #[derive(Serialize)] + struct LegacyPublicKey { + root: Vec, + parameter: Vec, + } + + let root: Vec = self.bytes[0..32] + .chunks(4) + .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]])) + .collect(); + + let parameter: Vec = self.bytes[32..52] + .chunks(4) + .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]])) + .collect(); + + let legacy = LegacyPublicKey { root, parameter }; + legacy.serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for PublicKey { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + // Try to deserialize as legacy format (root + parameter) + #[derive(Deserialize)] + struct LegacyPublicKey { + root: Vec, + parameter: Vec, + } + + let legacy = LegacyPublicKey::deserialize(deserializer)?; + + if legacy.root.len() != 8 || legacy.parameter.len() != 5 { + return Err(serde::de::Error::custom(format!( + "Invalid public key: expected 8 root values and 5 parameter values, got {} and {}", + legacy.root.len(), + legacy.parameter.len() + ))); + } + + let mut bytes = [0u8; 52]; + + // Encode root (8 u32 = 32 bytes) in little-endian + for (i, &val) in legacy.root.iter().enumerate() { + bytes[i * 4..(i + 1) * 4].copy_from_slice(&val.to_le_bytes()); + } + + // Encode parameter (5 u32 = 20 bytes) in little-endian + for (i, &val) in legacy.parameter.iter().enumerate() { + bytes[32 + i * 4..32 + (i + 1) * 4].copy_from_slice(&val.to_le_bytes()); + } + + Ok(PublicKey { bytes }) + } +} + +impl PublicKey { + /// Create a public key from raw 52 bytes + pub fn from_bytes(bytes: [u8; 52]) -> Self { + Self { bytes } + } + + /// Convert to leansig HashSigPublicKey for verification + pub fn to_hashsig(&self) -> Result { + // The bytes are already in the correct format for leansig + HashSigPublicKey::from_bytes(&self.bytes) + .map_err(|e| format!("Failed to parse bytes into lean-sig public key: {:?}", e)) + } + + /// Get the raw 52 bytes + pub fn as_bytes(&self) -> &[u8; 52] { + &self.bytes + } +} + +/// XMSS tree layer structure +#[derive(Debug, 
+
+/// XMSS tree layer structure
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TreeLayer {
+    pub start_index: u64,
+    pub nodes: Vec<Vec<u8>>,
+}
+
+/// XMSS tree structure for managing tree state
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct XmssTree {
+    pub depth: u32,
+    pub lowest_layer: u32,
+    pub layers: Vec<TreeLayer>,
+}
+
+/// Private key structure for XMSS keys
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PrivateKey {
+    /// PRF (Pseudo-Random Function) key
+    pub prf_key: Vec<u8>,
+    /// Parameter values for the XMSS key
+    pub parameter: Vec<u32>,
+    /// Activation epoch for the key
+    pub activation_epoch: u64,
+    /// Number of active epochs
+    pub num_active_epochs: u64,
+    /// Top tree structure
+    pub top_tree: XmssTree,
+}
+
+/// Validator key pair (public and private keys)
+#[derive(Debug, Clone)]
+pub struct ValidatorKeyPair {
+    /// Public key
+    pub public_key: PublicKey,
+    /// Private key
+    pub private_key: PrivateKey,
+    /// Original JSON for the public key
+    public_key_json: String,
+    /// Original JSON for the private key
+    private_key_json: String,
+}
+
+impl ValidatorKeyPair {
+    /// Creates a new key pair from XMSS key structures
+    pub fn new(public_key: PublicKey, private_key: PrivateKey) -> Self {
+        let public_key_json =
+            serde_json::to_string(&public_key).expect("PublicKey should serialize to JSON");
+        let private_key_json =
+            serde_json::to_string(&private_key).expect("PrivateKey should serialize to JSON");
+
+        Self::with_serialized(public_key, private_key, public_key_json, private_key_json)
+    }
+
+    /// Construct from explicit JSON representations.
+    pub fn with_serialized(
+        public_key: PublicKey,
+        private_key: PrivateKey,
+        public_key_json: String,
+        private_key_json: String,
+    ) -> Self {
+        Self {
+            public_key,
+            private_key,
+            public_key_json,
+            private_key_json,
+        }
+    }
+
+    /// Gets the public key bytes
+    pub fn public_key_bytes(&self) -> &[u8; 52] {
+        &self.public_key.bytes
+    }
+
+    /// Returns the raw JSON for the public key.
+    pub fn public_key_json(&self) -> &str {
+        &self.public_key_json
+    }
+
+    /// Returns the raw JSON for the private key.
+    pub fn private_key_json(&self) -> &str {
+        &self.private_key_json
+    }
+
+    /// Gets the private key PRF key
+    pub fn private_key_prf(&self) -> &[u8] {
+        &self.private_key.prf_key
+    }
+
+    /// Gets the activation epoch
+    pub fn activation_epoch(&self) -> u64 {
+        self.private_key.activation_epoch
+    }
+
+    /// Gets the number of active epochs
+    pub fn num_active_epochs(&self) -> u64 {
+        self.private_key.num_active_epochs
+    }
+
+    /// Decode the hash-sig public key into its native representation.
+    pub fn hashsig_public_key(&self) -> Result<HashSigPublicKey, String> {
+        serde_json::from_str(self.public_key_json()).map_err(|e| {
+            format!(
+                "Failed to parse hash-sig public key JSON for validator: {}",
+                e
+            )
+        })
+    }
+
+    /// Decode the hash-sig secret key into its native representation.
+    pub fn hashsig_secret_key(&self) -> Result<HashSigSecretKey, String> {
+        serde_json::from_str(self.private_key_json()).map_err(|e| {
+            format!(
+                "Failed to parse hash-sig secret key JSON for validator: {}",
+                e
+            )
+        })
+    }
+}
+
+/// Key store for managing validator keys
+pub struct KeyStore {
+    /// Base directory for key storage
+    base_dir: PathBuf,
+}
+
+impl KeyStore {
+    /// Creates a new key store at the specified directory
+    pub fn new(base_dir: PathBuf) -> Self {
+        Self { base_dir }
+    }
+
+    /// Creates the key store directory if it doesn't exist
+    pub fn ensure_directory(&self) -> Result<(), KeyStoreError> {
+        fs::create_dir_all(&self.base_dir).map_err(|e| {
+            KeyStoreError::DirectoryError(format!("Failed to create directory: {}", e))
+        })?;
+        Ok(())
+    }
+
+    /// Saves a validator key pair to disk
+    ///
+    /// Creates two files:
+    /// - `validator_{index}_pk.json` - Public key
+    /// - `validator_{index}_sk.json` - Private key
+    pub fn save_key_pair(
+        &self,
+        validator_index: u64,
+        key_pair: &ValidatorKeyPair,
+    ) -> Result<(), KeyStoreError> {
+        self.ensure_directory()?;
+
+        // Save public key
+        let public_key_path = self
+            .base_dir
+            .join(format!("validator_{}_pk.json", validator_index));
+        let public_key_json = serde_json::to_string_pretty(&key_pair.public_key)?;
+        fs::write(&public_key_path, public_key_json)?;
+        debug!(?public_key_path, "Saved public key");
+
+        // Save private key
+        let private_key_path = self
+            .base_dir
+            .join(format!("validator_{}_sk.json", validator_index));
+        let private_key_json = serde_json::to_string_pretty(&key_pair.private_key)?;
+        fs::write(&private_key_path, private_key_json)?;
+        debug!(?private_key_path, "Saved private key");
+
+        Ok(())
+    }
+
+    /// Loads a validator key pair from disk
+    ///
+    /// Reads both public and private key files for the given validator index.
+    pub fn load_key_pair(&self, validator_index: u64) -> Result<ValidatorKeyPair, KeyStoreError> {
+        // Load public key
+        let public_key_path = self
+            .base_dir
+            .join(format!("validator_{}_pk.json", validator_index));
+        if !public_key_path.exists() {
+            return Err(KeyStoreError::KeyNotFound(validator_index));
+        }
+
+        let public_key_json = fs::read_to_string(&public_key_path)?;
+        let public_key: PublicKey = serde_json::from_str(&public_key_json).map_err(|e| {
+            KeyStoreError::InvalidFormat(format!("Invalid public key format: {}", e))
+        })?;
+
+        // Load private key
+        let private_key_path = self
+            .base_dir
+            .join(format!("validator_{}_sk.json", validator_index));
+        if !private_key_path.exists() {
+            return Err(KeyStoreError::KeyNotFound(validator_index));
+        }
+
+        let private_key_json = fs::read_to_string(&private_key_path)?;
+        let private_key: PrivateKey = serde_json::from_str(&private_key_json).map_err(|e| {
+            KeyStoreError::InvalidFormat(format!("Invalid private key format: {}", e))
+        })?;
+
+        info!(validator_index, "Loaded XMSS key pair for validator");
+
+        Ok(ValidatorKeyPair::with_serialized(
+            public_key,
+            private_key,
+            public_key_json,
+            private_key_json,
+        ))
+    }
+
+    /// Loads all key pairs from the key store directory
+    ///
+    /// Scans the directory for `validator_*_pk.json` files and loads
+    /// the corresponding key pairs.
+    pub fn load_all_key_pairs(&self) -> Result<HashMap<u64, ValidatorKeyPair>, KeyStoreError> {
+        self.ensure_directory()?;
+
+        let mut key_pairs = HashMap::new();
+
+        // Read directory entries
+        let entries = fs::read_dir(&self.base_dir)?;
+
+        // Collect all public key files
+        let mut public_key_files = Vec::new();
+        for entry in entries {
+            let entry = entry?;
+            let file_name = entry.file_name();
+            let file_name_str = file_name.to_string_lossy();
+
+            if file_name_str.starts_with("validator_") && file_name_str.ends_with("_pk.json") {
+                // Extract validator index from filename: validator_{index}_pk.json
+                let index_str = file_name_str
+                    .strip_prefix("validator_")
+                    .and_then(|s| s.strip_suffix("_pk.json"))
+                    .ok_or_else(|| {
+                        KeyStoreError::InvalidFormat(format!(
+                            "Invalid filename format: {}",
+                            file_name_str
+                        ))
+                    })?;
+
+                let validator_index = index_str.parse::<u64>().map_err(|e| {
+                    KeyStoreError::InvalidFormat(format!(
+                        "Invalid validator index in filename: {}",
+                        e
+                    ))
+                })?;
+
+                public_key_files.push(validator_index);
+            }
+        }
+
+        // Load key pairs for each found public key file
+        for validator_index in public_key_files {
+            match self.load_key_pair(validator_index) {
+                Ok(key_pair) => {
+                    key_pairs.insert(validator_index, key_pair);
+                }
+                Err(e) => {
+                    warn!(
+                        validator_index,
+                        error = %e,
+                        "Failed to load key pair, skipping"
+                    );
+                }
+            }
+        }
+
+        info!(
+            loaded_count = key_pairs.len(),
+            ?self.base_dir,
+            "Loaded key pairs from keystore"
+        );
+
+        Ok(key_pairs)
+    }
+
+    /// Checks if a key pair exists for the given validator index
+    pub fn key_pair_exists(&self, validator_index: u64) -> bool {
+        let public_key_path = self
+            .base_dir
+            .join(format!("validator_{}_pk.json", validator_index));
+        let private_key_path = self
+            .base_dir
+            .join(format!("validator_{}_sk.json", validator_index));
+        public_key_path.exists() && private_key_path.exists()
+    }
+
+    /// Gets the number of key pairs stored in the keystore
+    pub fn count_key_pairs(&self) -> Result<usize, KeyStoreError> {
+        let key_pairs = self.load_all_key_pairs()?;
+        Ok(key_pairs.len())
+    }
+
+    /// Gets the base directory path
+    pub fn base_dir(&self) -> &Path {
+        &self.base_dir
+    }
+
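A minimal usage sketch of the `KeyStore` API above (hypothetical, not part of the diff): check for a key on disk, load it, then enumerate everything in the directory. Generation and saving of `key_pair` are assumed to have happened elsewhere.

```rust
// Hypothetical consumer of the KeyStore above.
fn keystore_usage() -> Result<(), KeyStoreError> {
    let store = KeyStore::new(std::path::PathBuf::from("hash-sig-keys"));
    if store.key_pair_exists(0) {
        let pair = store.load_key_pair(0)?;
        println!("validator 0 activates at epoch {}", pair.activation_epoch());
    }
    // Invalid files are skipped with warnings rather than failing the whole load.
    let all = store.load_all_key_pairs()?;
    println!("loaded {} key pairs", all.len());
    Ok(())
}
```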
+    /// Loads a public key from disk for verification purposes
+    ///
+    /// Reads the public key file for the given validator index.
+    /// Automatically converts from legacy format (root + parameter) to 52-byte format.
+    pub fn load_public_key(&self, validator_index: u64) -> Result<PublicKey, KeyStoreError> {
+        let public_key_path = self
+            .base_dir
+            .join(format!("validator_{}_pk.json", validator_index));
+        if !public_key_path.exists() {
+            return Err(KeyStoreError::KeyNotFound(validator_index));
+        }
+
+        let public_key_json = fs::read_to_string(&public_key_path)?;
+        let public_key: PublicKey = serde_json::from_str(&public_key_json).map_err(|e| {
+            KeyStoreError::InvalidFormat(format!("Invalid public key format: {}", e))
+        })?;
+
+        Ok(public_key)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use tempfile::TempDir;
+
+    fn create_test_key_pair() -> ValidatorKeyPair {
+        // Create 52-byte public key (32 bytes root + 20 bytes parameter)
+        let mut bytes = [0u8; 52];
+        for i in 0..8 {
+            bytes[i * 4..(i + 1) * 4].copy_from_slice(&(i as u32 + 1).to_le_bytes());
+        }
+        for i in 0..5 {
+            bytes[32 + i * 4..32 + (i + 1) * 4].copy_from_slice(&(i as u32 + 9).to_le_bytes());
+        }
+
+        let public_key = PublicKey::from_bytes(bytes);
+
+        let private_key = PrivateKey {
+            prf_key: vec![1, 2, 3, 4, 5],
+            parameter: vec![9, 10, 11, 12, 13],
+            activation_epoch: 0,
+            num_active_epochs: 262144,
+            top_tree: XmssTree {
+                depth: 32,
+                lowest_layer: 16,
+                layers: vec![],
+            },
+        };
+
+        ValidatorKeyPair::new(public_key, private_key)
+    }
+
+    #[test]
+    fn test_save_and_load_key_pair() {
+        let temp_dir = TempDir::new().unwrap();
+        let keystore = KeyStore::new(temp_dir.path().to_path_buf());
+
+        let key_pair = create_test_key_pair();
+
+        // Save key pair
+        keystore.save_key_pair(0, &key_pair).unwrap();
+
+        // Load key pair
+        let loaded = keystore.load_key_pair(0).unwrap();
+
+        assert_eq!(loaded.public_key_bytes().len(), 52);
+        assert_eq!(loaded.private_key_prf(), &[1, 2, 3, 4, 5]);
+        assert_eq!(loaded.activation_epoch(), 0);
+    }
+
+    #[test]
+    fn test_key_pair_not_found() {
+        let temp_dir = TempDir::new().unwrap();
+        let keystore = KeyStore::new(temp_dir.path().to_path_buf());
+
+        let result = keystore.load_key_pair(999);
+        assert!(matches!(result, Err(KeyStoreError::KeyNotFound(999))));
+    }
+
+    #[test]
+    fn test_load_all_key_pairs() {
+        let temp_dir = TempDir::new().unwrap();
+        let keystore = KeyStore::new(temp_dir.path().to_path_buf());
+
+        // Save multiple key pairs
+        for i in 0..5 {
+            let key_pair = create_test_key_pair();
+            keystore.save_key_pair(i, &key_pair).unwrap();
+        }
+
+        // Load all key pairs
+        let all_pairs = keystore.load_all_key_pairs().unwrap();
+        assert_eq!(all_pairs.len(), 5);
+    }
+}
diff --git a/lean_client/keystore/src/lib.rs b/lean_client/keystore/src/lib.rs
new file mode 100644
index 00000000000..6ef7dc296c4
--- /dev/null
+++ b/lean_client/keystore/src/lib.rs
@@ -0,0 +1,95 @@
+//! Keystore for managing hash-sig (XMSS) validator keys
+//!
+//! This module provides functionality for generating, storing, and retrieving
+//! hash-sig validator keys used in lean consensus.
+//!
+//! Key generation uses the `b-wagn/hash-sig` Rust crate
+//!
with scheme: SIGTopLevelTargetSumLifetime32Dim64Base8 + +pub mod codec; +mod key_generation; +mod key_storage; + +pub use key_generation::{ + KeyGenerationConfig, KeyGenerationError, generate_keys, generate_keys_sync, +}; +pub use key_storage::{KeyStore, KeyStoreError, PrivateKey, PublicKey, ValidatorKeyPair}; + +use std::path::PathBuf; + +/// Hash-sig key scheme used for validator keys +pub const KEY_SCHEME: &str = "SIGTopLevelTargetSumLifetime32Dim64Base8"; + +/// Hash-sig crate repository +pub const HASH_SIG_CRATE: &str = "https://github.com/b-wagn/hash-sig"; + +/// Default directory name for storing hash-sig keys +pub const DEFAULT_KEYS_DIR: &str = "hash-sig-keys"; + +/// Configuration for hash-sig key generation +#[derive(Debug, Clone)] +pub struct HashSigKeyConfig { + /// Number of validators to generate keys for + pub num_validators: u64, + /// Log2 of the number of active epochs (e.g., 24 means 2^24 active epochs) + pub log_num_active_epochs: u64, + /// Output directory for generated keys + pub output_dir: PathBuf, +} + +impl Default for HashSigKeyConfig { + fn default() -> Self { + Self { + num_validators: 0, + log_num_active_epochs: 24, // Default: 2^24 active epochs + output_dir: PathBuf::from(DEFAULT_KEYS_DIR), + } + } +} + +impl HashSigKeyConfig { + /// Creates a new configuration with the specified number of validators + pub fn new(num_validators: u64, log_num_active_epochs: u64) -> Self { + Self { + num_validators, + log_num_active_epochs, + ..Default::default() + } + } + + /// Sets the output directory for generated keys + pub fn with_output_dir(mut self, output_dir: PathBuf) -> Self { + self.output_dir = output_dir; + self + } + + /// Generates keys and stores them using this configuration + /// + /// This is a convenience method that: + /// 1. Generates keys using the hash-sig Rust crate + /// 2. Stores them in the configured output directory + /// 3. 
Returns all generated key pairs
+    pub fn generate_and_store(&self) -> Result<Vec<ValidatorKeyPair>, KeyGenerationError> {
+        let gen_config = KeyGenerationConfig::from(self);
+        generate_keys(&gen_config)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_hash_sig_key_config_default() {
+        let config = HashSigKeyConfig::default();
+        assert_eq!(config.num_validators, 0);
+        assert_eq!(config.log_num_active_epochs, 24);
+    }
+
+    #[test]
+    fn test_hash_sig_key_config_new() {
+        let config = HashSigKeyConfig::new(10, 20);
+        assert_eq!(config.num_validators, 10);
+        assert_eq!(config.log_num_active_epochs, 20);
+    }
+}
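A hypothetical end-to-end use of the builder above (not part of the diff): keys for 4 validators with 2^20 active epochs, written under `./hash-sig-keys`. The `Vec` return type follows the reconstructed signature of `generate_and_store`.

```rust
// Sketch only; assumes generate_and_store returns the generated pairs.
fn generate_and_store_example() -> Result<(), KeyGenerationError> {
    let key_pairs = HashSigKeyConfig::new(4, 20)
        .with_output_dir(std::path::PathBuf::from("hash-sig-keys"))
        .generate_and_store()?;
    println!("generated {} key pairs", key_pairs.len());
    Ok(())
}
```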
diff --git a/lean_client/lean_network/Cargo.toml b/lean_client/lean_network/Cargo.toml
new file mode 100644
index 00000000000..9814c7e3cca
--- /dev/null
+++ b/lean_client/lean_network/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "lean_network"
+version = "0.1.0"
+edition.workspace = true
+
+[dependencies]
+async-trait = "0.1"
+enr = { version = "0.13", features = ["ed25519", "k256"] }
+ethereum_ssz_derive = "0.10.0"
+futures = { workspace = true }
+gossipsub = { workspace = true }
+lean_consensus = { workspace = true }
+ethereum_ssz = "0.10.0"
+libp2p-identity = { version = "0.2", features = ["secp256k1", "ed25519"] }
+serde = { workspace = true, features = ["derive"] }
+serde_yaml = { workspace = true }
+sha2 = "0.10"
+snap = "1.1"
+task_executor = { workspace = true }
+tokio = { workspace = true }
+tracing = { workspace = true }
+types = { workspace = true }
+metrics = { workspace = true }
+
+
+[dependencies.libp2p]
+version = "0.56"
+default-features = false
+features = [
+    "gossipsub",
+    "identify",
+    "tcp",
+    "tokio",
+    "macros",
+    "quic",
+    "mdns",
+    "request-response",
+]
+
diff --git a/lean_client/lean_network/src/bootstrap.rs b/lean_client/lean_network/src/bootstrap.rs
new file mode 100644
index 00000000000..ed58ff779fe
--- /dev/null
+++ b/lean_client/lean_network/src/bootstrap.rs
@@ -0,0 +1,184 @@
+use libp2p::multiaddr::Protocol;
+use libp2p::{Multiaddr, PeerId};
+use libp2p_identity::{PublicKey, ed25519, secp256k1};
+use std::net::IpAddr;
+use std::path::Path;
+use tracing::{debug, error, warn};
+
+/// Loads bootstrap nodes from nodes.yaml using ENR records
+///
+/// # Arguments
+/// * `nodes_path` - Path to the nodes.yaml file containing ENR records
+///
+/// # Returns
+/// A vector of multiaddr strings for each bootstrap node. Invalid entries are skipped with warnings.
+pub fn load_bootstrap_nodes<P: AsRef<Path>>(nodes_path: P) -> Result<Vec<String>, String> {
+    let path = nodes_path.as_ref();
+
+    // Check if file exists
+    if !path.exists() {
+        return Err(format!("nodes.yaml file does not exist at {:?}", path));
+    }
+
+    let content = std::fs::read_to_string(path)
+        .map_err(|e| format!("Failed to read nodes.yaml file: {}", e))?;
+
+    if content.trim().is_empty() {
+        return Err(format!("nodes.yaml file at {:?} is empty", path));
+    }
+
+    debug!(
+        "Reading nodes.yaml from {:?}, content length: {} bytes",
+        path,
+        content.len()
+    );
+
+    // Parse as YAML array of ENR strings
+    let enr_records: Vec<String> = serde_yaml::from_str(&content).map_err(|e| {
+        format!(
+            "Failed to parse nodes.yaml as YAML: {}. Content preview: {}",
+            e,
+            content.chars().take(200).collect::<String>()
+        )
+    })?;
+
+    debug!("Parsed {} ENR records from nodes.yaml", enr_records.len());
+
+    let mut multiaddrs = Vec::new();
+    let mut parse_errors = Vec::new();
+
+    for (i, enr_str) in enr_records.iter().enumerate() {
+        if enr_str.is_empty() {
+            warn!("ENR record #{} is empty, skipping", i);
+            continue;
+        }
+
+        match parse_enr_to_multiaddr(enr_str) {
+            Ok(multiaddr) => {
+                let multiaddr_str = multiaddr.to_string();
+                debug!("Loaded bootstrap node #{} from ENR: {}", i, multiaddr_str);
+                multiaddrs.push(multiaddr_str);
+            }
+            Err(e) => {
+                let error_msg = format!("Failed to parse ENR record #{}: {}", i, e);
+                warn!("{}", error_msg);
+                parse_errors.push((i, error_msg));
+            }
+        }
+    }
+
+    if multiaddrs.is_empty() {
+        let error_details = if !parse_errors.is_empty() {
+            format!(
+                "All {} ENR records failed to parse. First error: {}",
+                enr_records.len(),
+                parse_errors[0].1
+            )
+        } else {
+            "No ENR records found in file".to_string()
+        };
+        error!(
+            "No valid bootstrap nodes found in {:?}. {}",
+            path, error_details
+        );
+        return Err(format!(
+            "No valid bootstrap nodes found in {:?}. {}",
+            path, error_details
+        ));
+    } else {
+        debug!(
+            "Loaded {} bootstrap nodes from {:?} ({} failed to parse)",
+            multiaddrs.len(),
+            path,
+            parse_errors.len()
+        );
+    }
+
+    Ok(multiaddrs)
+}
+
+/// Parses an ENR record string to extract multiaddr
+/// ENR records are base64-encoded and contain IP/port information
+///
+/// This follows the same pattern used elsewhere in Lighthouse:
+/// - Uses standard ENR fields: ip4/ip6 for IP addresses
+/// - Uses get_decodable("quic") for QUIC port
+/// - Falls back to udp4/udp6 if quic port is not available
+pub fn parse_enr_to_multiaddr(enr_str: &str) -> Result<Multiaddr, String> {
+    // ENR format: enr:-IW4Q...
+    let enr = enr_str
+        .parse::<enr::Enr<enr::CombinedKey>>()
+        .map_err(|e| format!("Failed to parse ENR string: {:?}", e))?;
+
+    debug!("Parsed ENR: seq={}, id={:?}", enr.seq(), enr.id());
+
+    // Extract IP address - use standard ENR fields
+    let ip = enr
+        .ip4()
+        .map(IpAddr::V4)
+        .or_else(|| enr.ip6().map(IpAddr::V6))
+        .ok_or_else(|| {
+            let has_ip4 = enr.ip4().is_some();
+            let has_ip6 = enr.ip6().is_some();
+            format!("ENR has no IP address (ip4: {}, ip6: {})", has_ip4, has_ip6)
+        })?;
+
+    // Extract QUIC port using get_decodable
+    // The "quic" key is decoded as u16 automatically by get_decodable
+    let port = enr
+        .get_decodable::<u16>("quic")
+        .and_then(Result::ok)
+        .or_else(|| enr.udp4())
+        .or_else(|| enr.udp6())
+        .ok_or_else(|| {
+            let has_udp4 = enr.udp4().is_some();
+            let has_udp6 = enr.udp6().is_some();
+            let has_quic = enr.get_decodable::<u16>("quic").is_some();
+            format!(
+                "ENR has no UDP/QUIC port (udp4: {}, udp6: {}, quic: {})",
+                has_udp4, has_udp6, has_quic
+            )
+        })?;
+
+    // Extract peer ID from ENR public key
+    let peer_id = extract_peer_id_from_enr(&enr)?;
+
+    // Build multiaddr with peer ID (matching format: /ip4/127.0.0.1/udp/9000/quic-v1/p2p/{peer_id})
+    let mut multiaddr = match ip {
+        IpAddr::V4(ipv4) => format!("/ip4/{}/udp/{}/quic-v1", ipv4, port)
+            .parse::<Multiaddr>()
+            .map_err(|e| format!("Failed to construct multiaddr: {}", e))?,
+        IpAddr::V6(ipv6) => format!("/ip6/{}/udp/{}/quic-v1", ipv6, port)
+            .parse::<Multiaddr>()
+            .map_err(|e| format!("Failed to construct multiaddr: {}", e))?,
+    };
+
+    // Add peer ID to multiaddr (as shown in validator-config.yaml comments)
+    // Format: /ip4/127.0.0.1/udp/9000/quic-v1/p2p/{peer_id}
+    multiaddr.push(Protocol::P2p(peer_id));
+
+    Ok(multiaddr)
+}
+
+/// Extracts the libp2p PeerId from an ENR's public key
+/// This matches the implementation in network_utils::enr_ext::EnrExt::peer_id
+fn extract_peer_id_from_enr(enr: &enr::Enr<enr::CombinedKey>) -> Result<PeerId, String> {
+    let public_key = enr.public_key();
+    let peer_id = match public_key {
+        enr::CombinedPublicKey::Secp256k1(pk) => {
+            let pk_bytes = pk.to_sec1_bytes();
+            let libp2p_pk: PublicKey = secp256k1::PublicKey::try_from_bytes(&pk_bytes)
+                .map_err(|e| format!("Failed to parse secp256k1 public key: {:?}", e))?
+                .into();
+            PeerId::from_public_key(&libp2p_pk)
+        }
+        enr::CombinedPublicKey::Ed25519(pk) => {
+            let pk_bytes = pk.to_bytes();
+            let libp2p_pk: PublicKey = ed25519::PublicKey::try_from_bytes(&pk_bytes)
+                .map_err(|e| format!("Failed to parse ed25519 public key: {:?}", e))?
+                .into();
+            PeerId::from_public_key(&libp2p_pk)
+        }
+    };
+    Ok(peer_id)
+}
diff --git a/lean_client/lean_network/src/config.rs b/lean_client/lean_network/src/config.rs
new file mode 100644
index 00000000000..e2835921c77
--- /dev/null
+++ b/lean_client/lean_network/src/config.rs
@@ -0,0 +1,44 @@
+/// Network configuration for the lean client
+#[derive(Debug, Clone)]
+pub struct NetworkConfig {
+    /// Port to listen on for network connections
+    pub listen_port: u16,
+    /// Human-readable network name used in gossipsub topics
+    pub network_name: String,
+    /// Bootstrap nodes
+    pub bootstrap_nodes: Vec<String>,
+    /// Raw libp2p private key bytes (secp256k1). Optional for generated identity.
+    pub node_key: Option<Vec<u8>>,
+}
+
+impl Default for NetworkConfig {
+    fn default() -> Self {
+        Self {
+            listen_port: 9000,
+            network_name: "lean".to_string(),
+            bootstrap_nodes: vec![],
+            node_key: None,
+        }
+    }
+}
+
+impl NetworkConfig {
+    pub fn new(listen_port: u16, network_name: impl Into<String>) -> Self {
+        Self {
+            listen_port,
+            network_name: network_name.into(),
+            bootstrap_nodes: vec![],
+            node_key: None,
+        }
+    }
+
+    pub fn with_bootstrap_nodes(mut self, bootstrap_nodes: Vec<String>) -> Self {
+        self.bootstrap_nodes = bootstrap_nodes;
+        self
+    }
+
+    pub fn with_node_key(mut self, node_key: Vec<u8>) -> Self {
+        self.node_key = Some(node_key);
+        self
+    }
+}
diff --git a/lean_client/lean_network/src/lib.rs b/lean_client/lean_network/src/lib.rs
new file mode 100644
index 00000000000..fd9eccebff8
--- /dev/null
+++ b/lean_client/lean_network/src/lib.rs
@@ -0,0 +1,17 @@
+mod bootstrap;
+mod config;
+mod service;
+mod topics;
+mod metrics;
+
+
+pub use bootstrap::load_bootstrap_nodes;
+pub use config::NetworkConfig;
+pub use service::{NetworkMessage, NetworkService};
+pub use topics::{Topic, get_topics};
+pub mod rpc;
+pub mod status;
+mod peer_manager;
+pub use libp2p::PeerId;
+pub use rpc::{BlocksByRootRequest, RPCRequest, RPCResponse};
+pub use status::{LeanStatusProtocol, StatusMessage};
diff --git a/lean_client/lean_network/src/metrics.rs b/lean_client/lean_network/src/metrics.rs
new file mode 100644
index 00000000000..7f16b4ddfd6
--- /dev/null
+++ b/lean_client/lean_network/src/metrics.rs
@@ -0,0 +1,32 @@
+pub use metrics::*;
+use std::sync::LazyLock;
+
+/* P2P Metrics */
+pub static LEAN_P2P_PEERS: LazyLock<Result<IntGauge>> = LazyLock::new(|| {
+    try_create_int_gauge("lean_p2p_peers", "Total number of connected peers")
+});
+
+pub static LEAN_P2P_MESSAGES_RECEIVED_TOTAL: LazyLock<Result<IntCounterVec>> = LazyLock::new(|| {
+    try_create_int_counter_vec(
+        "lean_p2p_messages_received_total",
+        "Total number of gossip messages received",
+        &["topic"],
+    )
+});
+
+pub static LEAN_P2P_MESSAGES_PUBLISHED_TOTAL: LazyLock<Result<IntCounterVec>> = LazyLock::new(|| {
+    try_create_int_counter_vec(
+        "lean_p2p_messages_published_total",
+        "Total number of gossip messages published",
+        &["topic"],
+    )
+});
+
+pub static LEAN_P2P_TO_VALIDATOR_DROPPED_TOTAL: LazyLock<Result<IntCounterVec>> =
+    LazyLock::new(|| {
+        try_create_int_counter_vec(
+            "lean_p2p_to_validator_dropped_total",
+            "Total number of network->validator messages dropped (e.g., channel full)",
+            &["reason", "kind"],
+        )
+    });
diff --git a/lean_client/lean_network/src/peer_manager.rs b/lean_client/lean_network/src/peer_manager.rs
new file mode 100644
index 00000000000..023973cac69
--- /dev/null
+++ b/lean_client/lean_network/src/peer_manager.rs
@@ -0,0 +1,181 @@
+use crate::status::StatusMessage;
+use libp2p::{Multiaddr, PeerId};
+use std::collections::HashMap;
+use std::time::{Duration, Instant};
+use tokio::sync::mpsc;
+use tracing::{debug, warn};
+use types::Hash256;
+
+/// Events observed by the network service and forwarded to the peer manager.
+#[derive(Debug, Clone)]
+pub enum PeerEvent {
+    Connected(PeerId),
+    Disconnected(PeerId),
+    StatusReceived(PeerId, StatusMessage),
+}
+
+/// Commands issued by the peer manager for the network service to execute.
+#[derive(Debug, Clone)]
+pub enum PeerCommand {
+    Dial(Multiaddr),
+}
+
+#[derive(Debug, Clone)]
+struct PeerRecord {
+    connected: bool,
+    attempts: u32,
+    next_dial_at: Instant,
+    latest_status: Option<StatusMessage>,
+}
+
+impl PeerRecord {
+    fn new(now: Instant) -> Self {
+        Self {
+            connected: false,
+            attempts: 0,
+            next_dial_at: now,
+            latest_status: None,
+        }
+    }
+}
+
+/// Async peer manager.
+///
+/// Responsibilities:
+/// - Track bootstrap peers and reconnect with exponential backoff.
+/// - Track the latest received head (via status messages) per peer.
+/// - Provide dial commands to the network service.
+pub struct PeerManager {
+    /// Bootstrap peers we actively try to keep connected to.
+    bootstrap: HashMap<PeerId, Multiaddr>,
+    /// Per-peer state (includes latest status).
+    peers: HashMap<PeerId, PeerRecord>,
+    base_backoff: Duration,
+    max_backoff: Duration,
+    /// Receive events from network service.
+    event_rx: mpsc::Receiver<PeerEvent>,
+    /// Send dial commands to network service.
+    cmd_tx: mpsc::Sender<PeerCommand>,
+}
+
+impl PeerManager {
+    pub fn new(
+        bootstrap: Vec<(PeerId, Multiaddr)>,
+        base_backoff: Duration,
+        max_backoff: Duration,
+        event_rx: mpsc::Receiver<PeerEvent>,
+        cmd_tx: mpsc::Sender<PeerCommand>,
+    ) -> Self {
+        let now = Instant::now();
+        let mut bootstrap_map = HashMap::new();
+        let mut peers = HashMap::new();
+
+        for (peer_id, addr) in bootstrap {
+            bootstrap_map.insert(peer_id, addr);
+            peers.entry(peer_id).or_insert_with(|| PeerRecord::new(now));
+        }
+
+        Self {
+            bootstrap: bootstrap_map,
+            peers,
+            base_backoff,
+            max_backoff,
+            event_rx,
+            cmd_tx,
+        }
+    }
+
+    pub async fn run(mut self) {
+        loop {
+            let now = Instant::now();
+            let next_wake = self.next_wake(now);
+            tokio::select! {
+                _ = tokio::time::sleep_until(tokio::time::Instant::from_std(next_wake)) => {
+                    self.dial_due(now).await;
+                }
+                maybe_ev = self.event_rx.recv() => {
+                    match maybe_ev {
+                        Some(ev) => self.on_event(ev),
+                        None => return, // network service dropped sender
+                    }
+                }
+            }
+        }
+    }
+
+    fn next_wake(&self, now: Instant) -> Instant {
+        let mut next = now + self.base_backoff;
+        for (peer_id, _addr) in &self.bootstrap {
+            if let Some(rec) = self.peers.get(peer_id) {
+                if !rec.connected && rec.next_dial_at < next {
+                    next = rec.next_dial_at;
+                }
+            }
+        }
+        next
+    }
+
+    fn on_event(&mut self, ev: PeerEvent) {
+        let now = Instant::now();
+        match ev {
+            PeerEvent::Connected(peer_id) => {
+                let rec = self.peers.entry(peer_id).or_insert_with(|| PeerRecord::new(now));
+                rec.connected = true;
+                rec.attempts = 0;
+                rec.next_dial_at = now + self.base_backoff;
+            }
+            PeerEvent::Disconnected(peer_id) => {
+                let rec = self.peers.entry(peer_id).or_insert_with(|| PeerRecord::new(now));
+                rec.connected = false;
+                rec.next_dial_at = now; // dial ASAP on next tick
+            }
+            PeerEvent::StatusReceived(peer_id, status) => {
+                let rec = self.peers.entry(peer_id).or_insert_with(|| PeerRecord::new(now));
+                rec.latest_status = Some(status);
+            }
+        }
+    }
+
+    async fn dial_due(&mut self, now: Instant) {
+        for (peer_id, addr) in self.bootstrap.clone() {
+            let Some(rec) = self.peers.get_mut(&peer_id) else { continue };
+            if rec.connected || now < rec.next_dial_at {
+                continue;
+            }
+
+            // Issue dial.
+            if let Err(e) = self.cmd_tx.send(PeerCommand::Dial(addr.clone())).await {
+                warn!("PeerManager failed to send dial command: {}", e);
+                return;
+            }
+
+            // Exponential backoff scheduling.
+            rec.attempts = rec.attempts.saturating_add(1);
+            let shift = rec.attempts.min(30); // cap to avoid overflow in u32 multiplier
+            let mult = 1u32.checked_shl(shift).unwrap_or(u32::MAX);
+            let mut delay = self.base_backoff.saturating_mul(mult);
+            if delay > self.max_backoff {
+                delay = self.max_backoff;
+            }
+            rec.next_dial_at = now + delay;
+
+            debug!(
+                peer = ?peer_id,
+                attempts = rec.attempts,
+                next_in_secs = delay.as_secs(),
+                "Scheduled next dial attempt"
+            );
+        }
+    }
+
+    /// Returns the latest head root/slot we've heard from this peer (via Status).
+    #[allow(dead_code)]
+    pub fn latest_head(&self, peer_id: &PeerId) -> Option<(u64, Hash256)> {
+        self.peers
+            .get(peer_id)
+            .and_then(|r| r.latest_status.as_ref())
+            .map(|s| (s.head_slot, s.head_root))
+    }
+}
+
+
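The dial scheduler above yields delays of `base * 2^attempts`, with the shift capped at 30 to avoid `u32` overflow and the result clamped to `max_backoff`. A standalone sketch of that progression, assuming the 1s/60s values `service.rs` passes to `PeerManager::new`:

```rust
use std::time::Duration;

// delay = base * 2^attempts, shift capped at 30, clamped to `max`.
fn backoff(base: Duration, max: Duration, attempts: u32) -> Duration {
    let mult = 1u32.checked_shl(attempts.min(30)).unwrap_or(u32::MAX);
    base.saturating_mul(mult).min(max)
}

// With base = 1s and max = 60s: 2s, 4s, 8s, 16s, 32s, then 60s from the
// sixth failed attempt onward.
```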
diff --git a/lean_client/lean_network/src/rpc.rs b/lean_client/lean_network/src/rpc.rs
new file mode 100644
index 00000000000..82d06a285ca
--- /dev/null
+++ b/lean_client/lean_network/src/rpc.rs
@@ -0,0 +1,189 @@
+use async_trait::async_trait;
+use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
+use lean_consensus::lean_block::SignedLeanBlockWithAttestation;
+use libp2p::request_response::Codec;
+use snap::raw::{Decoder, Encoder};
+use ssz::{Decode, Encode};
+use ssz_derive::{Decode, Encode};
+use std::io;
+use std::marker::PhantomData;
+use types::{EthSpec, Hash256};
+
+// Protocol definition
+#[derive(Debug, Clone)]
+pub struct LeanBlocksByRootProtocol;
+
+impl AsRef<str> for LeanBlocksByRootProtocol {
+    fn as_ref(&self) -> &str {
+        "/leanconsensus/req/lean_blocks_by_root/1/ssz_snappy"
+    }
+}
+
+// Request/Response types
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum RPCRequest {
+    BlocksByRoot(BlocksByRootRequest),
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
+pub struct BlocksByRootRequest {
+    pub block_roots: Vec<Hash256>,
+}
+
+#[derive(Debug, Clone)]
+pub enum RPCResponse<E: EthSpec> {
+    BlocksByRoot(SignedLeanBlockWithAttestation<E>),
+}
+
+// Codec implementation
+#[derive(Clone)]
+pub struct SSZSnappyCodec<E: EthSpec> {
+    phantom: PhantomData<E>,
+}
+
+impl<E: EthSpec> SSZSnappyCodec<E> {
+    pub fn new() -> Self {
+        Self {
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl<E: EthSpec> Default for SSZSnappyCodec<E> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[async_trait]
+impl<E: EthSpec> Codec for SSZSnappyCodec<E> {
+    type Protocol = LeanBlocksByRootProtocol;
+    type Request = RPCRequest;
+    type Response = RPCResponse<E>;
+
+    async fn read_request<T>(
+        &mut self,
+        _: &LeanBlocksByRootProtocol,
+        io: &mut T,
+    ) -> io::Result<Self::Request>
+    where
+        T: AsyncRead + Unpin + Send,
+    {
+        let bytes = read_ssz_snappy(io).await?;
+        let request = BlocksByRootRequest::from_ssz_bytes(&bytes)
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("{:?}", e)))?;
+        Ok(RPCRequest::BlocksByRoot(request))
+    }
+
+    async fn read_response<T>(
+        &mut self,
+        _: &LeanBlocksByRootProtocol,
+        io: &mut T,
+    ) -> io::Result<Self::Response>
+    where
+        T: AsyncRead + Unpin + Send,
+    {
+        let bytes = read_ssz_snappy(io).await?;
+        let response = SignedLeanBlockWithAttestation::from_ssz_bytes(&bytes)
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("{:?}", e)))?;
+        Ok(RPCResponse::BlocksByRoot(response))
+    }
+
+    async fn write_request<T>(
+        &mut self,
+        _: &LeanBlocksByRootProtocol,
+        io: &mut T,
+        request: Self::Request,
+    ) -> io::Result<()>
+    where
+        T: AsyncWrite + Unpin + Send,
+    {
+        match request {
+            RPCRequest::BlocksByRoot(req) => write_ssz_snappy(io, &req).await,
+        }
+    }
+
+    async fn write_response<T>(
+        &mut self,
+        _: &LeanBlocksByRootProtocol,
+        io: &mut T,
+        response: Self::Response,
+    ) -> io::Result<()>
+    where
+        T: AsyncWrite + Unpin + Send,
+    {
+        match response {
+            RPCResponse::BlocksByRoot(resp) => write_ssz_snappy(io, &resp).await,
+        }
+    }
+}
+
+// Helper functions for reading/writing SSZ-Snappy (varint length prefix + raw snappy body).
+// Note: this is a simplified implementation; a production version should also enforce
+// maximum size limits on the decoded length.
+async fn read_ssz_snappy<T: AsyncRead + Unpin + Send>(io: &mut T) -> io::Result<Vec<u8>> {
+    let mut byte = [0u8; 1];
+    let mut len: u64 = 0;
+    let mut shift = 0;
+
+    // Read varint length (max varint size is 10 bytes for a 64-bit int)
+    loop {
+        io.read_exact(&mut byte).await?;
+        let b = byte[0] as u64;
+        len |= (b & 0x7f) << shift;
+        if b & 0x80 == 0 {
+            break;
+        }
+        shift += 7;
+        if shift > 63 {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "Varint too long",
+            ));
+        }
+    }
+
+    // Read compressed data
+    let mut compressed = vec![0u8; len as usize];
+    io.read_exact(&mut compressed).await?;
+
+    // Decompress
+    let mut decoder = Decoder::new();
+    decoder
+        .decompress_vec(&compressed)
+        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("Snappy error: {:?}", e)))
+}
+
+async fn write_ssz_snappy<T: AsyncWrite + Unpin + Send, V: Encode>(
+    io: &mut T,
+    value: &V,
+) -> io::Result<()> {
+    let ssz_bytes = value.as_ssz_bytes();
+
+    // Compress
+    let mut encoder = Encoder::new();
+    let compressed = encoder.compress_vec(&ssz_bytes).map_err(|e| {
+        io::Error::new(io::ErrorKind::InvalidData, format!("Snappy error: {:?}", e))
+    })?;
+
+    // Write varint length
+    let mut len_buf = [0u8; 10];
+    let mut len = compressed.len() as u64;
+    let mut i = 0;
+    loop {
+        if len & !0x7f == 0 {
+            len_buf[i] = len as u8;
+            i += 1;
+            break;
+        } else {
+            len_buf[i] = (len & 0x7f | 0x80) as u8;
+            len >>= 7;
+            i += 1;
+        }
+    }
+    io.write_all(&len_buf[0..i]).await?;
+
+    // Write compressed data
+    io.write_all(&compressed).await?;
+    Ok(())
+}
diff --git a/lean_client/lean_network/src/service.rs b/lean_client/lean_network/src/service.rs
new file mode 100644
index 00000000000..2e36f04c01e
--- /dev/null
+++ b/lean_client/lean_network/src/service.rs
@@ -0,0 +1,643 @@
+use crate::config::NetworkConfig;
+use crate::rpc::{LeanBlocksByRootProtocol, RPCRequest, RPCResponse, SSZSnappyCodec};
+use crate::status::{LeanStatusProtocol, StatusMessage, StatusSnappyCodec};
+use crate::topics::{self, Topic};
+use crate::peer_manager::{PeerCommand, PeerEvent, PeerManager};
+use futures::StreamExt;
+use lean_consensus::attestation::SignedAttestation;
+use lean_consensus::lean_block::SignedLeanBlockWithAttestation;
+use libp2p::identity::{self, Keypair};
+use libp2p::{
+    Multiaddr, PeerId, Swarm, Transport,
+    gossipsub::{self, MessageId},
+    request_response::{self, ProtocolSupport},
+    swarm::{NetworkBehaviour, SwarmEvent},
+};
+use sha2::{Digest, Sha256};
+use snap::raw::{Decoder as RawDecoder, Encoder as RawEncoder};
+use ssz::{Decode, Encode};
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::sync::mpsc;
+use tokio::sync::mpsc::error::TrySendError;
+use tracing::{debug, info, trace, warn};
+use types::EthSpec;
+use crate::metrics;
+
+
+/// Domain prefix for valid snappy-compressed messages per Eth2 networking spec
+/// This is prepended to message data before hashing to create unique message IDs
+const MESSAGE_DOMAIN_VALID_SNAPPY: &[u8] = &[0x01, 0x00, 0x00, 0x00];
+
+#[derive(NetworkBehaviour)]
+pub struct LeanBehaviour<E: EthSpec> {
+    gossipsub: gossipsub::Behaviour,
+    req_resp: request_response::Behaviour<SSZSnappyCodec<E>>,
+    status_req_resp: request_response::Behaviour<StatusSnappyCodec>,
+}
+
+/// Messages received from the network that need to be processed
+pub enum NetworkMessage<E: EthSpec> {
+    /// Signed attestation received from network (peer_id is Some for network gossip)
+    Attestation(Option<PeerId>, Arc<SignedAttestation>),
+    /// Block received from network or to be published (peer_id is None for local)
+    Block(Option<PeerId>, Arc<SignedLeanBlockWithAttestation<E>>),
+    /// A peer connected (used to trigger sync/status handshake)
+    PeerConnected(PeerId),
+    /// A peer disconnected (used for status display / heuristics)
+    PeerDisconnected(PeerId),
+    /// Status response received from a peer.
+    Status(PeerId, StatusMessage),
+    /// Update the cached local status used for responding to inbound status requests.
+    UpdateLocalStatus(StatusMessage),
+    /// Request to send an RPC request to a peer
+    SendRequest {
+        peer_id: PeerId,
+        request: RPCRequest,
+    },
+    /// Request to send a status request to a peer
+    SendStatusRequest {
+        peer_id: PeerId,
+        status: StatusMessage,
+    },
+}
+
+pub struct NetworkService<E: EthSpec> {
+    swarm: Swarm<LeanBehaviour<E>>,
+    /// Messages from network -> validator.
+    ///
+    /// This is bounded (created by the lean client) so we must avoid unbounded buffering here.
+    /// The swarm event loop must not await on backpressure, so we `try_send` and drop on overflow.
+    network_recv: mpsc::Sender<NetworkMessage<E>>,
+    /// Messages from validator -> network.
+    network_send: mpsc::Receiver<NetworkMessage<E>>,
+    /// Peer manager event sender (network -> peer manager).
+    peer_manager_evt_tx: mpsc::Sender<PeerEvent>,
+    /// Peer manager command receiver (peer manager -> network).
+    peer_manager_cmd_rx: mpsc::Receiver<PeerCommand>,
+    /// Peer manager instance (spawned when `start()` is called).
+    peer_manager: Option<PeerManager>,
+    /// Network name used for topic encoding
+    network_name: String,
+    /// Cached local status (updated by validator service) used to reply to inbound status requests.
+    local_status: StatusMessage,
+}
+
+impl<E: EthSpec> NetworkService<E> {
+    pub fn new(
+        config: NetworkConfig,
+        network_recv: mpsc::Sender<NetworkMessage<E>>,
+        network_send: mpsc::Receiver<NetworkMessage<E>>,
+    ) -> Result<Self, Box<dyn std::error::Error>> {
+        let NetworkConfig {
+            listen_port,
+            bootstrap_nodes: bootstrap_node_strings,
+            node_key,
+            network_name,
+        } = config;
+
+        let local_key = match node_key {
+            Some(mut key_bytes) => {
+                if key_bytes.len() != 32 {
+                    return Err(format!(
+                        "Libp2p private key must be 32 bytes, got {} bytes",
+                        key_bytes.len()
+                    )
+                    .into());
+                }
+                let secret = identity::secp256k1::SecretKey::try_from_bytes(&mut key_bytes[..])
+                    .map_err(|e| {
+                        format!("Failed to parse libp2p secp256k1 private key: {:?}", e)
+                    })?;
+                let kp: identity::secp256k1::Keypair = secret.into();
+                Keypair::from(kp)
+            }
+            None => identity::Keypair::generate_ed25519(),
+        };
+        let local_peer_id = PeerId::from(local_key.public());
+
+        info!("Local peer id: {:?}", local_peer_id);
+
+        // Use QUIC transport (handles multiplexing natively)
+        let transport = libp2p::quic::tokio::Transport::new(libp2p::quic::Config::new(&local_key))
+            .map(|(peer_id, conn), _| (peer_id, libp2p::core::muxing::StreamMuxerBox::new(conn)))
+            .boxed();
+
+        let gossipsub_config = gossipsub::ConfigBuilder::default()
+            .heartbeat_interval(Duration::from_secs(1))
+            // Zeam (and the other lean clients) use anonymous gossipsub messages.
+            // If we publish signed gossipsub messages, peers configured for anonymous validation
+            // may drop our messages.
+ .validation_mode(gossipsub::ValidationMode::Anonymous) + .message_id_fn(|message: &gossipsub::Message| { + // Use SHA256 hash of topic + data with domain prefix for message ID + // This matches the Eth2 networking spec for message deduplication + let topic_bytes = message.topic.as_str().as_bytes(); + let mut digest = vec![]; + digest.extend_from_slice(MESSAGE_DOMAIN_VALID_SNAPPY); + digest.extend_from_slice(&topic_bytes.len().to_le_bytes()); + digest.extend_from_slice(topic_bytes); + digest.extend_from_slice(&message.data); + + let hash = Sha256::digest(&digest); + MessageId::from(&hash[..20]) + }) + .build() + .map_err(|e| format!("Failed to build gossipsub config: {}", e))?; + + let gossipsub = gossipsub::Behaviour::new( + // Match Zeam: publish anonymous gossipsub messages for interoperability. + gossipsub::MessageAuthenticity::Anonymous, + gossipsub_config, + ) + .map_err(|e| format!("Failed to create gossipsub behaviour: {}", e))?; + + let req_resp = request_response::Behaviour::new( + vec![(LeanBlocksByRootProtocol, ProtocolSupport::Full)], + request_response::Config::default(), + ); + + // Status is used to learn a peer's head after downtime and trigger backfill. + // We also support inbound requests so peers like Zeam don't disconnect on status handshake. + let status_req_resp = request_response::Behaviour::new( + vec![(LeanStatusProtocol, ProtocolSupport::Full)], + request_response::Config::default(), + ); + + let behaviour = LeanBehaviour { + gossipsub, + req_resp, + status_req_resp, + }; + + let mut swarm = Swarm::new( + transport, + behaviour, + local_peer_id, + libp2p::swarm::Config::with_tokio_executor() + .with_idle_connection_timeout(Duration::from_secs(60)), + ); + + let listen_addr = format!("/ip4/0.0.0.0/udp/{}/quic-v1", listen_port) + .parse() + .map_err(|e| format!("Invalid listen address: {}", e))?; + + swarm.listen_on(listen_addr)?; + + let encoded_topics = topics::get_topics(&network_name); + info!("Subscribing to gossipsub topics: {:?}", encoded_topics); + for topic_str in encoded_topics.iter() { + let topic = gossipsub::IdentTopic::new(topic_str.clone()); + swarm + .behaviour_mut() + .gossipsub + .subscribe(&topic) + .map_err(|e| format!("Failed to subscribe to topic {}: {}", topic_str, e))?; + } + + // Collect bootstrap peers for the peer manager. 
+        let mut bootstrap_peers: Vec<(PeerId, Multiaddr)> = Vec::new();
+        for bootstrap_addr_raw in bootstrap_node_strings.iter() {
+            let bootstrap_addr_str = bootstrap_addr_raw.trim();
+
+            let multiaddr = if bootstrap_addr_str.starts_with("enr:") {
+                match crate::bootstrap::parse_enr_to_multiaddr(bootstrap_addr_str) {
+                    Ok(multiaddr) => multiaddr,
+                    Err(e) => {
+                        warn!("Invalid bootstrap ENR {}: {}", bootstrap_addr_str, e);
+                        continue;
+                    }
+                }
+            } else {
+                match bootstrap_addr_str.parse::<Multiaddr>() {
+                    Ok(multiaddr) => multiaddr,
+                    Err(e) => {
+                        warn!(
+                            "Invalid bootstrap node address {}: {}",
+                            bootstrap_addr_str, e
+                        );
+                        continue;
+                    }
+                }
+            };
+
+            if let Some(peer_in_addr) = peer_id_from_multiaddr(multiaddr.clone())
+                && peer_in_addr != local_peer_id
+            {
+                bootstrap_peers.push((peer_in_addr, multiaddr.clone()));
+            }
+        }
+
+        info!("Initialized {} bootstrap peers for peer manager", bootstrap_peers.len());
+
+        let (peer_manager_evt_tx, peer_manager_evt_rx) = mpsc::channel(64);
+        let (peer_manager_cmd_tx, peer_manager_cmd_rx) = mpsc::channel(64);
+        let peer_manager = PeerManager::new(
+            bootstrap_peers,
+            Duration::from_secs(1),
+            Duration::from_secs(60),
+            peer_manager_evt_rx,
+            peer_manager_cmd_tx,
+        );
+
+        Ok(Self {
+            swarm,
+            network_recv,
+            network_send,
+            peer_manager_evt_tx,
+            peer_manager_cmd_rx,
+            peer_manager: Some(peer_manager),
+            network_name,
+            local_status: StatusMessage {
+                finalized_root: types::Hash256::ZERO,
+                finalized_slot: 0,
+                head_root: types::Hash256::ZERO,
+                head_slot: 0,
+            },
+        })
+    }
+
+    fn try_forward_to_validator(&self, msg: NetworkMessage<E>, kind: &'static str) {
+        match self.network_recv.try_send(msg) {
+            Ok(()) => {}
+            Err(TrySendError::Full(_msg)) => {
+                // Drop on overflow to avoid unbounded memory growth.
+                metrics::inc_counter_vec(
+                    &*metrics::LEAN_P2P_TO_VALIDATOR_DROPPED_TOTAL,
+                    &["channel_full", kind],
+                );
+            }
+            Err(TrySendError::Closed(_msg)) => {
+                warn!("Validator channel closed; dropping network message");
+                metrics::inc_counter_vec(
+                    &*metrics::LEAN_P2P_TO_VALIDATOR_DROPPED_TOTAL,
+                    &["channel_closed", kind],
+                );
+            }
+        }
+    }
+
+    /// Decode message based on topic and create appropriate NetworkMessage
+    /// Messages are expected to be snappy-compressed
+    fn decode_network_message(
+        &self,
+        topic: &str,
+        data: &[u8],
+        peer_id: PeerId,
+    ) -> Option<NetworkMessage<E>> {
+        let Some(base_topic) = topics::parse_topic_name(topic, &self.network_name) else {
+            debug!("Unknown topic format: {}", topic);
+            return None;
+        };
+
+        metrics::inc_counter_vec(&*metrics::LEAN_P2P_MESSAGES_RECEIVED_TOTAL, &[topic]);
+
+
+        // Decompress snappy-compressed message
+        let decompressed = match self.decompress_snappy(data) {
+            Ok(d) => d,
+            Err(e) => {
+                warn!("Failed to decompress snappy message: {}", e);
+                return None;
+            }
+        };
+
+        match base_topic {
+            Topic::Block => match SignedLeanBlockWithAttestation::from_ssz_bytes(&decompressed) {
+                Ok(block) => {
+                    debug!("Successfully decoded lean block from network");
+                    Some(NetworkMessage::Block(Some(peer_id), Arc::new(block)))
+                }
+                Err(e) => {
+                    warn!("Failed to decode lean block: {:?}", e);
+                    None
+                }
+            },
+            Topic::Attestation => match SignedAttestation::from_ssz_bytes(&decompressed) {
+                Ok(signed_attestation) => {
+                    debug!("Successfully decoded signed lean attestation from network");
+                    Some(NetworkMessage::Attestation(
+                        Some(peer_id),
+                        Arc::new(signed_attestation),
+                    ))
+                }
+                Err(e) => {
+                    warn!("Failed to decode signed lean attestation: {:?}", e);
+                    None
+                }
+            },
+        }
+    }
+
+    /// Decompress raw (unframed) snappy-compressed data
+    /// Gossipsub uses raw snappy format per Eth2 networking spec
+    fn decompress_snappy(&self, data: &[u8]) -> Result<Vec<u8>, String> {
+        let mut decoder = RawDecoder::new();
+        decoder
+            .decompress_vec(data)
+            .map_err(|e| format!("Snappy decompression failed: {}", e))
+    }
+
+    /// Compress data using raw (unframed) snappy compression
+    /// Gossipsub uses raw snappy format per Eth2 networking spec
+    fn compress_snappy(&self, data: &[u8]) -> Result<Vec<u8>, String> {
+        let mut encoder = RawEncoder::new();
+        encoder
+            .compress_vec(data)
+            .map_err(|e| format!("Snappy compression failed: {}", e))
+    }
+
+    pub async fn start(mut self) {
+        info!("Network service started");
+        // Start the async peer manager (handles exponential backoff and dialing).
+        if let Some(pm) = self.peer_manager.take() {
+            tokio::spawn(pm.run());
+        }
+
+        loop {
+            tokio::select! {
+                // Peer manager dial commands.
+                Some(cmd) = self.peer_manager_cmd_rx.recv() => {
+                    match cmd {
+                        PeerCommand::Dial(addr) => {
+                            if let Err(e) = self.swarm.dial(addr.clone()) {
+                                warn!("Failed to dial {}: {:?}", addr, e);
+                            } else {
+                                debug!("Dialing peer via peer manager: {}", addr);
+                            }
+                        }
+                    }
+                }
+
+                // Handle messages to publish or requests to send
+                Some(msg) = self.network_send.recv() => {
+                    self.handle_command(msg).await;
+                }
+
+                // Handle swarm events
+                event = self.swarm.select_next_some() => {
+                    match event {
+                        SwarmEvent::NewListenAddr { address, .. } => {
+                            info!("Listening on: {:?}", address);
+                        }
+                        SwarmEvent::Behaviour(event) => {
+                            match event {
+                                LeanBehaviourEvent::Gossipsub(gossipsub::Event::Message {
+                                    propagation_source,
+                                    message,
+                                    message_id,
+                                }) => {
+                                    debug!(
+                                        "Received gossipsub message from {:?} on topic {:?}, message_id: {:?}",
+                                        propagation_source, message.topic, message_id
+                                    );
+
+                                    // Decode the message based on topic
+                                    if let Some(network_msg) =
+                                        self.decode_network_message(message.topic.as_str(), &message.data, propagation_source)
+                                    {
+                                        match &network_msg {
+                                            NetworkMessage::Attestation(_, _) => {
+                                                info!("Forwarding attestation to attestation service");
+                                            }
+                                            NetworkMessage::Block(_, _) => {
+                                                info!("Forwarding block to attestation service");
+                                            }
+                                            NetworkMessage::PeerConnected(_) => {}
+                                            NetworkMessage::PeerDisconnected(_) => {}
+                                            NetworkMessage::Status(_, _) => {}
+                                            NetworkMessage::UpdateLocalStatus(_) => {}
+                                            NetworkMessage::SendRequest { .. } => {
+                                                warn!("Received SendRequest from network decode (unexpected)");
+                                            }
+                                            NetworkMessage::SendStatusRequest { .. } => {
+                                                warn!("Received SendStatusRequest from network decode (unexpected)");
+                                            }
+                                        }
+
+                                        let kind = match &network_msg {
+                                            NetworkMessage::Attestation(_, _) => "attestation",
+                                            NetworkMessage::Block(_, _) => "block",
+                                            NetworkMessage::PeerConnected(_) => "peer_connected",
+                                            NetworkMessage::PeerDisconnected(_) => "peer_disconnected",
+                                            NetworkMessage::Status(_, _) => "status",
+                                            NetworkMessage::UpdateLocalStatus(_) => "update_local_status",
+                                            NetworkMessage::SendRequest { .. } => "send_request",
+                                            NetworkMessage::SendStatusRequest { ..
} => "send_status_request", + }; + self.try_forward_to_validator(network_msg, kind); + } + } + LeanBehaviourEvent::Gossipsub(gossipsub::Event::Subscribed { peer_id, topic }) => { + debug!("Peer {:?} subscribed to topic: {:?}", peer_id, topic); + } + LeanBehaviourEvent::Gossipsub(gossipsub::Event::Unsubscribed { peer_id, topic }) => { + debug!("Peer {:?} unsubscribed from topic: {:?}", peer_id, topic); + } + LeanBehaviourEvent::Gossipsub(other_gossipsub_event) => { + trace!("Gossipsub behaviour event: {:?}", other_gossipsub_event); + } + LeanBehaviourEvent::ReqResp(request_response::Event::Message { + peer, + message: request_response::Message::Response { response, .. }, + .. + }) => { + match response { + RPCResponse::BlocksByRoot(block) => { + info!( + slot = block.message.block.slot.0, + peer = ?peer, + "Received BlocksByRoot response" + ); + self.try_forward_to_validator( + NetworkMessage::Block(Some(peer), Arc::new(block)), + "block_rpc", + ); + } + } + } + LeanBehaviourEvent::StatusReqResp(request_response::Event::Message { + peer, + message, + .. + }) => { + match message { + request_response::Message::Response { response, .. } => { + debug!( + peer = ?peer, + head_slot = response.head_slot, + finalized_slot = response.finalized_slot, + "Received Status response" + ); + let _ = self + .peer_manager_evt_tx + .try_send(PeerEvent::StatusReceived(peer, response.clone())); + self.try_forward_to_validator( + NetworkMessage::Status(peer, response), + "status", + ); + } + request_response::Message::Request { + request, channel, .. + } => { + debug!( + peer = ?peer, + req_head_slot = request.head_slot, + req_finalized_slot = request.finalized_slot, + "Received Status request" + ); + let resp = self.local_status.clone(); + if let Err(e) = self + .swarm + .behaviour_mut() + .status_req_resp + .send_response(channel, resp) + { + warn!("Failed to send status response: {:?}", e); + } + } + } + } + LeanBehaviourEvent::ReqResp(other_event) => { + trace!("ReqResp behaviour event: {:?}", other_event); + } + LeanBehaviourEvent::StatusReqResp(other_event) => { + trace!("StatusReqResp behaviour event: {:?}", other_event); + } + } + } + SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + .. + } => { + info!( + "Connection established with peer: {:?} at {:?}", + peer_id, endpoint + ); + metrics::inc_gauge(&*metrics::LEAN_P2P_PEERS); + let _ = self.peer_manager_evt_tx.try_send(PeerEvent::Connected(peer_id)); + + // Notify validator service so it can trigger sync/status handshake. + self.try_forward_to_validator(NetworkMessage::PeerConnected(peer_id), "peer_connected"); + } + SwarmEvent::ConnectionClosed { + peer_id, cause, .. + } => { + debug!("Connection closed with peer: {:?}, cause: {:?}", peer_id, cause); + metrics::dec_gauge(&*metrics::LEAN_P2P_PEERS); + // Notify validator service (for display / peer tracking). + self.try_forward_to_validator(NetworkMessage::PeerDisconnected(peer_id), "peer_disconnected"); + let _ = self.peer_manager_evt_tx.try_send(PeerEvent::Disconnected(peer_id)); + } + + + SwarmEvent::IncomingConnection { .. } => { + debug!("Incoming connection"); + } + SwarmEvent::IncomingConnectionError { error, .. } => { + warn!("Incoming connection error: {:?}", error); + } + SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => { + warn!("Outgoing connection error to {:?}: {:?}", peer_id, error); + if let Some(peer_id) = peer_id { + let _ = self.peer_manager_evt_tx.try_send(PeerEvent::Disconnected(peer_id)); + } + } + SwarmEvent::Dialing { .. 
} => {
+                            debug!("Dialing peer");
+                        }
+                        _ => {}
+                    }
+                }
+            }
+        }
+    }
+
+    /// Handles a command from the validator service (publish message or send request)
+    async fn handle_command(&mut self, msg: NetworkMessage<E>) {
+        match msg {
+            NetworkMessage::SendRequest { peer_id, request } => {
+                let request_id = self
+                    .swarm
+                    .behaviour_mut()
+                    .req_resp
+                    .send_request(&peer_id, request);
+                debug!(?peer_id, ?request_id, "Sent RPC request");
+            }
+            NetworkMessage::SendStatusRequest { peer_id, status } => {
+                let request_id = self
+                    .swarm
+                    .behaviour_mut()
+                    .status_req_resp
+                    .send_request(&peer_id, status);
+                debug!(?peer_id, ?request_id, "Sent status request");
+            }
+            NetworkMessage::UpdateLocalStatus(status) => {
+                self.local_status = status;
+            }
+            NetworkMessage::Attestation(_, signed_attestation) => {
+                info!(
+                    slot = signed_attestation.message.attestation_data.slot.0,
+                    validator_id = signed_attestation.message.validator_id,
+                    "Publishing attestation to network"
+                );
+                self.publish_gossip_data(Topic::Attestation, signed_attestation.as_ssz_bytes());
+            }
+            NetworkMessage::Block(_, block) => {
+                info!(
+                    slot = block.message.block.slot.0,
+                    "Publishing block to network"
+                );
+                self.publish_gossip_data(Topic::Block, block.as_ssz_bytes());
+            }
+            NetworkMessage::PeerConnected(_) | NetworkMessage::Status(_, _) => {
+                // Not commands for the network service.
+            }
+            NetworkMessage::PeerDisconnected(_) => {}
+        }
+    }
+
+    /// Publishes data to the gossipsub network
+    fn publish_gossip_data(&mut self, topic_variant: Topic, data: Vec<u8>) {
+        // Compress data using snappy before publishing (required by Eth2 networking spec)
+        let compressed_data = match self.compress_snappy(&data) {
+            Ok(compressed) => compressed,
+            Err(e) => {
+                warn!("Failed to compress message for publishing: {}", e);
+                return;
+            }
+        };
+
+        // Create gossipsub topic
+        let encoded_topic = topics::encode_topic(&self.network_name, topic_variant);
+        let topic = gossipsub::IdentTopic::new(encoded_topic.clone());
+
+        // Publish to gossipsub
+        if let Err(e) = self
+            .swarm
+            .behaviour_mut()
+            .gossipsub
+            .publish(topic, compressed_data)
+        {
+            warn!(
+                "Failed to publish message to gossipsub topic {}: {:?}",
+                encoded_topic, e
+            );
+        } else {
+            debug!("Successfully published message to topic: {}", encoded_topic);
+            metrics::inc_counter_vec(&*metrics::LEAN_P2P_MESSAGES_PUBLISHED_TOTAL, &[&encoded_topic]);
+        }
+    }
+
+    // Bootstrap dialing/retry is handled by `PeerManager`.
+}
+fn peer_id_from_multiaddr(mut addr: Multiaddr) -> Option<PeerId> {
+    if let Some(libp2p::multiaddr::Protocol::P2p(mh)) = addr.pop() {
+        PeerId::from_multihash(mh.into()).ok()
+    } else {
+        None
+    }
+}
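A standalone sketch of the message-id derivation configured in the gossipsub builder above: SHA256 over the snappy domain prefix, the topic length (as little-endian `usize` bytes), the topic string, and the message data, truncated to 20 bytes. This mirrors the closure in the diff; names are illustrative.

```rust
use sha2::{Digest, Sha256};

// message_id = SHA256(domain || topic_len_le || topic || data)[..20]
fn message_id(topic: &str, data: &[u8]) -> Vec<u8> {
    const MESSAGE_DOMAIN_VALID_SNAPPY: &[u8] = &[0x01, 0x00, 0x00, 0x00];
    let mut digest = Vec::new();
    digest.extend_from_slice(MESSAGE_DOMAIN_VALID_SNAPPY);
    digest.extend_from_slice(&topic.len().to_le_bytes());
    digest.extend_from_slice(topic.as_bytes());
    digest.extend_from_slice(data);
    Sha256::digest(&digest)[..20].to_vec()
}
```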
diff --git a/lean_client/lean_network/src/status.rs b/lean_client/lean_network/src/status.rs
new file mode 100644
index 00000000000..da26dff2d32
--- /dev/null
+++ b/lean_client/lean_network/src/status.rs
@@ -0,0 +1,152 @@
+use async_trait::async_trait;
+use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
+use libp2p::request_response::Codec;
+use snap::raw::{Decoder, Encoder};
+use ssz::{Decode, Encode};
+use ssz_derive::{Decode, Encode};
+use std::io;
+use types::Hash256;
+
+/// Request/response protocol for exchanging node status (head/finalized checkpoints).
+///
+/// This is used to bootstrap syncing after downtime by learning a peer's head root/slot.
+#[derive(Debug, Clone)]
+pub struct LeanStatusProtocol;
+
+impl AsRef<str> for LeanStatusProtocol {
+    fn as_ref(&self) -> &str {
+        "/leanconsensus/req/status/1/ssz_snappy"
+    }
+}
+
+/// Minimal status message used by the lean network.
+///
+/// Mirrors what Zeam uses in its status exchange: head + finalized checkpoints.
+#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
+pub struct StatusMessage {
+    // NOTE: Field order matters for SSZ. This order matches Zeam's `types.Status`:
+    // finalized_root, finalized_slot, head_root, head_slot.
+    pub finalized_root: Hash256,
+    pub finalized_slot: u64,
+    pub head_root: Hash256,
+    pub head_slot: u64,
+}
+
+/// SSZ-snappy codec for Status messages.
+#[derive(Clone, Default)]
+pub struct StatusSnappyCodec;
+
+#[async_trait]
+impl Codec for StatusSnappyCodec {
+    type Protocol = LeanStatusProtocol;
+    type Request = StatusMessage;
+    type Response = StatusMessage;
+
+    async fn read_request<T>(
+        &mut self,
+        _: &LeanStatusProtocol,
+        io: &mut T,
+    ) -> io::Result<Self::Request>
+    where
+        T: AsyncRead + Unpin + Send,
+    {
+        let bytes = read_ssz_snappy(io).await?;
+        StatusMessage::from_ssz_bytes(&bytes)
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("{:?}", e)))
+    }
+
+    async fn read_response<T>(
+        &mut self,
+        _: &LeanStatusProtocol,
+        io: &mut T,
+    ) -> io::Result<Self::Response>
+    where
+        T: AsyncRead + Unpin + Send,
+    {
+        let bytes = read_ssz_snappy(io).await?;
+        StatusMessage::from_ssz_bytes(&bytes)
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("{:?}", e)))
+    }
+
+    async fn write_request<T>(
+        &mut self,
+        _: &LeanStatusProtocol,
+        io: &mut T,
+        request: Self::Request,
+    ) -> io::Result<()>
+    where
+        T: AsyncWrite + Unpin + Send,
+    {
+        write_ssz_snappy(io, &request).await
+    }
+
+    async fn write_response<T>(
+        &mut self,
+        _: &LeanStatusProtocol,
+        io: &mut T,
+        response: Self::Response,
+    ) -> io::Result<()>
+    where
+        T: AsyncWrite + Unpin + Send,
+    {
+        write_ssz_snappy(io, &response).await
+    }
+}
+
+// Helper functions for reading/writing SSZ-Snappy frames (varint length prefix + raw snappy body)
+async fn read_ssz_snappy<T: AsyncRead + Unpin + Send>(io: &mut T) -> io::Result<Vec<u8>> {
+    let mut byte = [0u8; 1];
+    let mut len: u64 = 0;
+    let mut shift = 0;
+
+    loop {
+        io.read_exact(&mut byte).await?;
+        let b = byte[0] as u64;
+        len |= (b & 0x7f) << shift;
+        if b & 0x80 == 0 {
+            break;
+        }
+        shift += 7;
+        if shift > 63 {
+            return Err(io::Error::new(io::ErrorKind::InvalidData, "Varint too long"));
+        }
+    }
+
+    let mut compressed = vec![0u8; len as usize];
+    io.read_exact(&mut compressed).await?;
+
+    let mut decoder = Decoder::new();
+    decoder
+        .decompress_vec(&compressed)
+        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("Snappy error: {:?}", e)))
+}
+
+async fn write_ssz_snappy<T: AsyncWrite + Unpin + Send, V: Encode>(
+    io: &mut T,
+    value: &V,
+) -> io::Result<()> {
+    let ssz_bytes = value.as_ssz_bytes();
+
+    let mut encoder = Encoder::new();
+    let compressed = encoder
+        .compress_vec(&ssz_bytes)
+        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, format!("Snappy error: {:?}", e)))?;
+
+    // Write varint length
+    let mut len_buf = [0u8; 10];
+    let mut len = compressed.len() as u64;
+    let mut i = 0;
+    loop {
+        if len & !0x7f == 0 {
+            len_buf[i] = len as u8;
+            i += 1;
+            break;
+        } else {
+            len_buf[i] = (len & 0x7f | 0x80) as u8;
+            len >>= 7;
+            i += 1;
+        }
+    }
+    io.write_all(&len_buf[0..i]).await?;
+    io.write_all(&compressed).await?;
+    Ok(())
+}
+
+
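Both codecs in this crate share the same framing: an unsigned-LEB128 varint over the *compressed* length, followed by a raw (unframed) snappy body. A self-contained round-trip sketch of the write side, under the assumption that the `snap` crate is in scope as in the diff:

```rust
// Frame a payload the way write_ssz_snappy does: varint(len) || snappy(body).
fn frame(payload: &[u8]) -> Vec<u8> {
    let mut encoder = snap::raw::Encoder::new();
    let body = encoder.compress_vec(payload).expect("snappy compression");
    let mut out = Vec::new();
    let mut len = body.len() as u64;
    loop {
        if len & !0x7f == 0 {
            out.push(len as u8);
            break;
        }
        out.push((len & 0x7f | 0x80) as u8);
        len >>= 7;
    }
    out.extend_from_slice(&body);
    out
}
```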
@@ -0,0 +1,83 @@
+/// Gossipsub topic names for the lean client network
+///
+/// These topics are defined in the lean specification and correspond to the message types
+/// that can be propagated across the network.
+/// See: leanSpec/src/lean_spec/subspecs/networking/gossipsub/topic.py
+use std::fmt;
+use std::str::FromStr;
+
+/// Topic prefix matching the spec's lean network implementation.
+pub const TOPIC_PREFIX: &str = "leanconsensus";
+
+/// Gossip encoding string used for SSZ + Snappy payloads.
+pub const ENCODING: &str = "ssz_snappy";
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum Topic {
+    Block,
+    Attestation,
+}
+
+impl Topic {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            Topic::Block => "block",
+            Topic::Attestation => "attestation",
+        }
+    }
+
+    pub fn iter() -> impl Iterator<Item = Topic> {
+        [Topic::Block, Topic::Attestation].into_iter()
+    }
+}
+
+impl fmt::Display for Topic {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.as_str())
+    }
+}
+
+impl FromStr for Topic {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "block" => Ok(Topic::Block),
+            "attestation" => Ok(Topic::Attestation),
+            _ => Err(format!("Unknown topic: {}", s)),
+        }
+    }
+}
+
+/// Builds the fully-qualified gossipsub topic string for the given base topic.
+pub fn encode_topic(network_name: &str, topic: Topic) -> String {
+    format!("/{}/{}/{}/{}", TOPIC_PREFIX, network_name, topic, ENCODING)
+}
+
+/// Returns a list of encoded gossipsub topics for the provided network name.
+pub fn get_topics(network_name: &str) -> Vec<String> {
+    Topic::iter()
+        .map(|topic| encode_topic(network_name, topic))
+        .collect()
+}
+
+/// Attempts to parse a fully-qualified topic string, returning the base topic if it matches the
+/// expected prefix, network, and encoding.
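+///
+/// A round-trip sketch (network name illustrative):
+/// ```ignore
+/// let topic = encode_topic("devnet0", Topic::Block);
+/// assert_eq!(topic, "/leanconsensus/devnet0/block/ssz_snappy");
+/// assert_eq!(parse_topic_name(&topic, "devnet0"), Some(Topic::Block));
+/// ```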
+pub fn parse_topic_name(topic: &str, expected_network: &str) -> Option<Topic> {
+    let trimmed = topic.trim_start_matches('/');
+    let mut iter = trimmed.split('/');
+    let prefix = iter.next()?;
+    let network = iter.next()?;
+    let name = iter.next()?;
+    let encoding = iter.next()?;
+
+    if prefix == TOPIC_PREFIX
+        && network == expected_network
+        && encoding == ENCODING
+        && iter.next().is_none()
+    {
+        Topic::from_str(name).ok()
+    } else {
+        None
+    }
+}
diff --git a/lean_client/src/cli.rs b/lean_client/src/cli.rs
new file mode 100644
index 00000000000..13a8a121805
--- /dev/null
+++ b/lean_client/src/cli.rs
@@ -0,0 +1,109 @@
+pub use clap::{FromArgMatches, Parser};
+
+use clap_utils::get_color_style;
+use serde::{Deserialize, Serialize};
+use std::net::IpAddr;
+use std::path::PathBuf;
+
+#[derive(Parser, Clone, Deserialize, Serialize, Debug)]
+#[clap(
+    name = "lean_validator_node",
+    visible_aliases = &["l", "ln", "lean-node"],
+    about = "A lean node that follows the lean consensus chain for Ethereum",
+    styles = get_color_style(),
+    next_line_help = true,
+    term_width = 80,
+    disable_help_flag = true,
+    disable_help_subcommand = true,
+    display_order = 0,
+)]
+pub struct LeanNode {
+    #[clap(
+        long,
+        value_name = "CONFIG_YAML",
+        help = "Path to the chain config.yaml file",
+        display_order = 0
+    )]
+    pub config: PathBuf,
+
+    #[clap(
+        long,
+        value_name = "VALIDATOR_CONFIG_YAML",
+        help = "Path to the validator-config.yaml file",
+        display_order = 1
+    )]
+    pub validators: PathBuf,
+
+    #[clap(
+        long,
+        value_name = "NODES_YAML",
+        help = "Path to the nodes.yaml file for bootnodes",
+        display_order = 2
+    )]
+    pub nodes: PathBuf,
+
+    #[clap(
+        long,
+        value_name = "NODE_ID",
+        help = "Node identifier (e.g., 'lighthouse_0')",
+        display_order = 3
+    )]
+    pub node_id: String,
+
+    #[clap(
+        long,
+        value_name = "PRIVATE_KEY",
+        help = "Path to the hex-encoded secp256k1 libp2p private key",
+        display_order = 4
+    )]
+    pub private_key: PathBuf,
+
+    #[clap(
+        long,
+        value_name = "SOCKET_PORT",
+        help = "P2P socket port (QUIC)",
+        display_order = 5
+    )]
+    pub socket_port: u16,
+
+    #[clap(
+        long,
+        value_name = "GENESIS_JSON",
+        help = "Path to the genesis.json file",
+        display_order = 6
+    )]
+    pub genesis_json: Option<PathBuf>,
+
+    #[clap(
+        long,
+        help = "Enable the metrics HTTP server",
+        display_order = 100
+    )]
+    pub metrics: bool,
+
+    #[clap(
+        long,
+        value_name = "ADDRESS",
+        help = "Listen address for the metrics HTTP server",
+        default_value = "127.0.0.1",
+        display_order = 101
+    )]
+    pub metrics_address: IpAddr,
+
+    #[clap(
+        long,
+        value_name = "PORT",
+        help = "Listen port for the metrics HTTP server",
+        default_value = "5064",
+        display_order = 102
+    )]
+    pub metrics_port: u16,
+
+    #[clap(
+        long,
+        value_name = "ORIGIN",
+        help = "CORS allow origin for the metrics HTTP server",
+        display_order = 103
+    )]
+    pub metrics_allow_origin: Option<String>,
+}
diff --git a/lean_client/src/config.rs b/lean_client/src/config.rs
new file mode 100644
index 00000000000..de523ccb5fd
--- /dev/null
+++ b/lean_client/src/config.rs
@@ -0,0 +1,74 @@
+use crate::cli::LeanNode;
+use lean_config::LeanClientPaths;
+use std::net::IpAddr;
+use std::path::PathBuf;
+
+/// Configuration for the HTTP metrics server.
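+///
+/// The defaults below bind 127.0.0.1:5064 with no CORS origin; a construction
+/// sketch (values illustrative):
+/// ```ignore
+/// let metrics = MetricsConfig { enabled: true, ..MetricsConfig::default() };
+/// ```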
+#[derive(Debug, Clone)]
+pub struct MetricsConfig {
+    pub enabled: bool,
+    pub listen_addr: IpAddr,
+    pub listen_port: u16,
+    pub allow_origin: Option<String>,
+}
+
+impl Default for MetricsConfig {
+    fn default() -> Self {
+        Self {
+            enabled: false,
+            listen_addr: "127.0.0.1".parse().unwrap(),
+            listen_port: 5064,
+            allow_origin: None,
+        }
+    }
+}
+
+/// Runtime configuration derived from CLI arguments for the lean client.
+#[derive(Debug, Clone)]
+pub struct Config {
+    pub data_dir: PathBuf,
+    pub config_path: PathBuf,
+    pub validators_path: PathBuf,
+    pub nodes_path: PathBuf,
+    pub node_id: String,
+    pub private_key: PathBuf,
+    pub socket_port: u16,
+    pub genesis_json_path: Option<PathBuf>,
+    pub metrics: MetricsConfig,
+}
+
+impl Config {
+    /// Build the runtime configuration from CLI inputs and the resolved data directory.
+    pub fn from_cli(cli: LeanNode, data_dir: PathBuf) -> Self {
+        Self {
+            data_dir,
+            config_path: cli.config,
+            validators_path: cli.validators,
+            nodes_path: cli.nodes,
+            node_id: cli.node_id,
+            private_key: cli.private_key,
+            socket_port: cli.socket_port,
+            genesis_json_path: cli.genesis_json,
+            metrics: MetricsConfig {
+                enabled: cli.metrics,
+                listen_addr: cli.metrics_address,
+                listen_port: cli.metrics_port,
+                allow_origin: cli.metrics_allow_origin,
+            },
+        }
+    }
+}
+
+impl From<Config> for LeanClientPaths {
+    fn from(value: Config) -> Self {
+        LeanClientPaths {
+            data_dir: value.data_dir,
+            config_path: value.config_path,
+            validators_path: value.validators_path,
+            nodes_path: value.nodes_path,
+            node_id: value.node_id,
+            node_key_path: value.private_key,
+            genesis_json_path: value.genesis_json_path,
+        }
+    }
+}
diff --git a/lean_client/src/http_metrics.rs b/lean_client/src/http_metrics.rs
new file mode 100644
index 00000000000..5d8e8b2322c
--- /dev/null
+++ b/lean_client/src/http_metrics.rs
@@ -0,0 +1,95 @@
+use crate::config::MetricsConfig;
+use lighthouse_version::version_with_platform;
+use logging::crit;
+use std::future::Future;
+use std::net::SocketAddr;
+use tracing::info;
+use warp::{Filter, http::Response};
+
+#[derive(Debug)]
+pub enum Error {
+    Warp(warp::Error),
+    Other(String),
+}
+
+impl From<warp::Error> for Error {
+    fn from(e: warp::Error) -> Self {
+        Error::Warp(e)
+    }
+}
+
+impl From<String> for Error {
+    fn from(e: String) -> Self {
+        Error::Other(e)
+    }
+}
+
+/// Creates a server that will serve Prometheus metrics.
+pub fn serve(
+    config: MetricsConfig,
+    shutdown: impl Future<Output = ()> + Send + Sync + 'static,
+) -> Result<(SocketAddr, impl Future<Output = ()>), Error> {
+    let cors_builder = {
+        let builder = warp::cors()
+            .allow_method("GET")
+            .allow_headers(vec!["Content-Type"]);
+
+        warp_utils::cors::set_builder_origins(
+            builder,
+            config.allow_origin.as_deref(),
+            (config.listen_addr, config.listen_port),
+        )?
+    };
+
+    if !config.enabled {
+        crit!("Cannot start disabled metrics HTTP server");
+        return Err(Error::Other(
+            "A disabled metrics server should not be started".to_string(),
+        ));
+    }
+
+    let routes = warp::get()
+        .and(warp::path("metrics"))
+        .and_then(|| async move {
+            let mut buffer = String::new();
+            let encoder = metrics::TextEncoder::new();
+
+            // Scrape health metrics (CPU, Memory, etc.)
+            health_metrics::metrics::scrape_health_metrics();
+
+            // metrics::gather() returns Vec<MetricFamily>, which is what we want to encode.
+
+            match encoder.encode_utf8(&metrics::gather(), &mut buffer) {
+                Ok(()) => Ok::<_, warp::Rejection>(
+                    Response::builder()
+                        .status(200)
+                        .header("Content-Type", "text/plain")
+                        .body(buffer)
+                        .unwrap()
+                ),
+                Err(e) => Ok::<_, warp::Rejection>(
+                    Response::builder()
+                        .status(500)
+                        .header("Content-Type", "text/plain")
+                        .body(format!("Unable to gather metrics: {:?}", e))
+                        .unwrap()
+                ),
+            }
+        })
+        .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
+        .with(cors_builder.build());
+
+    let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown(
+        SocketAddr::new(config.listen_addr, config.listen_port),
+        async {
+            shutdown.await;
+        },
+    )?;
+
+    info!(
+        listen_address = listening_socket.to_string(),
+        "Metrics HTTP server started"
+    );
+
+    Ok((listening_socket, server))
+}
diff --git a/lean_client/src/lib.rs b/lean_client/src/lib.rs
new file mode 100644
index 00000000000..bbd08ced23e
--- /dev/null
+++ b/lean_client/src/lib.rs
@@ -0,0 +1,121 @@
+pub mod cli;
+pub mod config;
+pub mod http_metrics;
+
+pub use config::Config;
+
+use std::sync::Arc;
+
+use crate::config::Config as LeanClientConfig;
+use crate::config::MetricsConfig;
+use environment::RuntimeContext;
+use lean_config::{LeanClientPaths, initialize as load_runtime};
+use lean_keystore::{KeyStore, ValidatorKeyPair};
+use lean_network::{NetworkConfig, NetworkService};
+use lean_validator_service::ValidatorService;
+use slot_clock::SystemTimeSlotClock;
+use store::database::interface::BeaconNodeBackend as LeanBackend;
+use tokio::sync::mpsc;
+use tracing::info;
+use types::EthSpec;
+
+/// Capacity for messages flowing from `NetworkService` -> `ValidatorService`.
+///
+/// This is intentionally bounded to avoid unbounded memory growth under gossip storms.
+const NETWORK_TO_VALIDATOR_CHANNEL_CAPACITY: usize = 1024;
+
+/// Capacity for messages flowing from `ValidatorService` -> `NetworkService`.
+///
+/// These are control-plane messages (publish, requests, status updates) and should be relatively
+/// low volume. Keeping this bounded helps prevent memory growth in pathological cases.
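+///
+/// If the buffer fills, `tokio::sync::mpsc::Sender::send` awaits free capacity,
+/// applying back-pressure to the sender instead of growing memory.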
+const VALIDATOR_TO_NETWORK_CHANNEL_CAPACITY: usize = 256;
+
+pub struct ProductionLeanClient<E: EthSpec> {
+    context: RuntimeContext<E>,
+    slot_clock: SystemTimeSlotClock,
+    db: Arc<LeanBackend<E>>,
+    validator_key_pair: Option<ValidatorKeyPair>,
+    validator_index: u64,
+    keystore: Option<KeyStore>,
+    network_config: NetworkConfig,
+    metrics_config: MetricsConfig,
+}
+
+impl<E: EthSpec> ProductionLeanClient<E> {
+    pub async fn new(context: RuntimeContext<E>, config: LeanClientConfig) -> Result<Self, String> {
+        let metrics_config = config.metrics.clone();
+        let resources = load_runtime::<E>(LeanClientPaths::from(config))?;
+
+        info!("Lean client runtime resources prepared");
+
+        Ok(Self {
+            context,
+            slot_clock: resources.slot_clock,
+            db: resources.db,
+            validator_key_pair: Some(resources.validator_key_pair),
+            validator_index: resources.validator_index,
+            keystore: Some(resources.keystore),
+            network_config: resources.network_config,
+            metrics_config,
+        })
+    }
+
+    pub async fn start_service(&mut self) -> Result<(), String> {
+        let (network_recv_tx, network_recv_rx) =
+            mpsc::channel(NETWORK_TO_VALIDATOR_CHANNEL_CAPACITY);
+        let (network_send_tx, network_send_rx) =
+            mpsc::channel(VALIDATOR_TO_NETWORK_CHANNEL_CAPACITY);
+
+        info!("Starting network service");
+        let network_service = NetworkService::<E>::new(
+            self.network_config.clone(),
+            network_recv_tx,
+            network_send_rx,
+        )
+        .map_err(|e| format!("Failed to create network service: {}", e))?;
+        self.context
+            .executor
+            .clone_with_name("lean_network_service".into())
+            .spawn(network_service.start(), "network_service");
+
+        info!("Starting validator service");
+        let validator_key_pair = self
+            .validator_key_pair
+            .take()
+            .ok_or_else(|| "Validator key pair not loaded".to_string())?;
+        let keystore = self
+            .keystore
+            .take()
+            .ok_or_else(|| "Keystore not initialized".to_string())?;
+
+        let validator_service = ValidatorService::new(
+            self.slot_clock.clone(),
+            network_recv_rx,
+            network_send_tx,
+            self.db.clone(),
+            self.validator_index,
+            validator_key_pair,
+            keystore,
+        )?;
+
+        self.context
+            .executor
+            .clone_with_name("lean_validator_service".into())
+            .spawn(validator_service.run(), "validator_service");
+
+        if self.metrics_config.enabled {
+            let (listen_addr, server) = http_metrics::serve(
+                self.metrics_config.clone(),
+                self.context.executor.exit(),
+            )
+            .map_err(|e| format!("Failed to start metrics server: {:?}", e))?;
+
+            self.context
+                .executor
+                .spawn_without_exit(server, "http_metrics");
+            info!(%listen_addr, "Metrics server started");
+        }
+
+        Ok(())
+    }
+}
diff --git a/lean_client/store/Cargo.toml b/lean_client/store/Cargo.toml
new file mode 100644
index 00000000000..20649461917
--- /dev/null
+++ b/lean_client/store/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "lean_store"
+version = "0.1.0"
+authors = ["Sigma Prime <contact@sigmaprime.io>"]
+edition.workspace = true
+
+[dependencies]
+ethereum_ssz = "0.10.0"
+lean_consensus = { workspace = true }
+store = { workspace = true, features = ["redb"] }
+types = { workspace = true }
diff --git a/lean_client/store/src/lib.rs b/lean_client/store/src/lib.rs
new file mode 100644
index 00000000000..e5eecf1d66f
--- /dev/null
+++ b/lean_client/store/src/lib.rs
@@ -0,0 +1,769 @@
+use lean_consensus::attestation::SignedAttestation;
+use lean_consensus::lean_block::LeanBlock;
+use lean_consensus::lean_state::LeanState;
+use ssz::{Decode, Encode};
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::marker::PhantomData;
+use std::sync::Arc;
+use store::{DBColumn, KeyValueStore};
+use types::{EthSpec, Hash256};
+
+/// Rolling block metadata record stored in
`StorageKey::BlockMetaIndex`. +/// +/// SSZ is not used here; we store fixed-size binary records for compactness: +/// - slot (u64 LE) +/// - block_root (32 bytes) +/// - parent_root (32 bytes) +#[derive(Clone, Copy, Debug)] +struct BlockMetaEntry { + slot: u64, + root: Hash256, + parent_root: Hash256, +} + +/// Rolling state metadata record stored in `StorageKey::StateMetaIndex`. +/// +/// Fixed-size binary records: +/// - slot (u64 LE) +/// - block_root (32 bytes) +#[derive(Clone, Copy, Debug)] +struct StateMetaEntry { + slot: u64, + block_root: Hash256, +} + +// Keep meta indices bounded to avoid unbounded allocations and steadily increasing RSS. +// These are "best effort" buffers used for pruning/loading; pruning still enforces the real windows. +const BLOCK_META_INDEX_MAX_ENTRIES: usize = 4096; +const STATE_META_INDEX_MAX_ENTRIES: usize = 4096; + +/// Storage key definitions for the lean client +#[derive(Debug, Clone, Copy)] +enum StorageKey { + /// Single-item keys + State, + HeadRoot, + SafeTarget, + /// Prefix-based keys with u64 suffix + Block, + /// Prefix-based keys with Hash256 suffix (per-root state) + StateByRoot, + Attestation, + NewAttestation, + /// Index keys + /// Legacy: list of all block roots ever seen. Kept for backward compat/migration only. + BlockRootsIndex, + /// Rolling block metadata for pruning/loading: (slot, block_root, parent_root) records. + BlockMetaIndex, + /// Rolling state metadata for pruning: (slot, state_root_keyed_by_block_root) records. + StateMetaIndex, + ValidatorIndices, + NewValidatorIndices, +} + +impl StorageKey { + /// Get the key bytes, optionally with a u64 suffix + fn key(&self) -> Vec { + match self { + StorageKey::State => b"lean_state".to_vec(), + StorageKey::HeadRoot => b"head_root".to_vec(), + StorageKey::SafeTarget => b"safe_target".to_vec(), + StorageKey::Block => b"block_".to_vec(), + StorageKey::StateByRoot => b"lean_state_".to_vec(), + StorageKey::Attestation => b"attestation_".to_vec(), + StorageKey::NewAttestation => b"new_attestation_".to_vec(), + StorageKey::BlockRootsIndex => b"block_roots_index".to_vec(), + StorageKey::BlockMetaIndex => b"block_meta_index".to_vec(), + StorageKey::StateMetaIndex => b"state_meta_index".to_vec(), + StorageKey::ValidatorIndices => b"validator_indices".to_vec(), + StorageKey::NewValidatorIndices => b"new_validator_indices".to_vec(), + } + } + + /// Get key with u64 suffix (for indexed items) + fn key_with_id(&self, id: u64) -> Vec { + let mut key = self.key(); + key.extend_from_slice(&id.to_le_bytes()); + key + } + + /// Get key with Hash256 suffix (for blocks) + fn key_with_hash(&self, hash: &Hash256) -> Vec { + let mut key = self.key(); + key.extend_from_slice(&hash.0); + key + } +} + +/// Store for managing lean client database operations +pub struct LeanStore> { + db: Arc, + _phantom: PhantomData, +} + +impl> LeanStore { + /// Creates a new store with the provided database + pub fn new(db: Arc) -> Self { + Self { + db, + _phantom: PhantomData, + } + } + + // ============ State Management ============ + + /// Fetches the lean state from the database + pub fn fetch_state(&self) -> Result>, String> { + self.fetch_single_item(StorageKey::State, DBColumn::BeaconMeta) + } + + /// Saves the lean state to the database + pub fn save_state(&self, state: &LeanState) -> Result<(), String> { + self.save_single_item(StorageKey::State, DBColumn::BeaconMeta, state) + } + + /// Fetches a state by the block root it was computed for. 
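+    ///
+    /// This is the read side of `save_state_by_root` below: entries live in
+    /// `DBColumn::BeaconState`, keyed by `StorageKey::StateByRoot` plus the block root.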
+ pub fn fetch_state_by_root(&self, block_root: Hash256) -> Result>, String> { + let key = StorageKey::StateByRoot.key_with_hash(&block_root); + self.fetch_with_key(DBColumn::BeaconState, &key) + } + + /// Saves a state keyed by the block root it was computed for. + pub fn save_state_by_root( + &self, + block_root: Hash256, + state: &LeanState, + ) -> Result<(), String> { + let key = StorageKey::StateByRoot.key_with_hash(&block_root); + let bytes = state.as_ssz_bytes(); + self.db + .put_bytes(DBColumn::BeaconState, &key, &bytes) + .map_err(|e| format!("Failed to save state by root: {:?}", e))?; + + self.append_state_meta(block_root, state.slot.0)?; + Ok(()) + } + + // ============ Block Management ============ + + /// Saves a block to the database by its root and updates the rolling block meta index. + pub fn save_block(&self, block_root: Hash256, block: &LeanBlock) -> Result<(), String> { + let key = StorageKey::Block.key_with_hash(&block_root); + // Avoid duplicate index entries and expensive index rewrites. + if self + .db + .key_exists(DBColumn::BeaconBlock, &key) + .map_err(|e| format!("Failed to check block existence: {:?}", e))? + { + return Ok(()); + } + let bytes = block.as_ssz_bytes(); + self.db + .put_bytes(DBColumn::BeaconBlock, &key, &bytes) + .map_err(|e| format!("Failed to save block: {:?}", e))?; + + // Update rolling block meta index (used for pruning/loading). + self.append_block_meta(block_root, block.slot.0, block.parent_root)?; + Ok(()) + } + + /// Fetches a block from the database by its root + pub fn fetch_block(&self, block_root: Hash256) -> Result>, String> { + let key = StorageKey::Block.key_with_hash(&block_root); + self.fetch_with_key(DBColumn::BeaconBlock, &key) + } + + /// Checks if a block exists in the database + pub fn block_exists(&self, block_root: Hash256) -> Result { + let key = StorageKey::Block.key_with_hash(&block_root); + self.db + .key_exists(DBColumn::BeaconBlock, &key) + .map_err(|e| format!("Failed to check block existence: {:?}", e)) + } + + /// Loads blocks from the database using the rolling block meta index. + /// + /// If the meta index doesn't exist yet (older DB), falls back to the legacy block roots index. + pub fn load_all_blocks(&self) -> Result>, String> { + let mut block_roots: Vec = self + .load_block_meta_index()? + .into_iter() + .map(|m| m.root) + .collect(); + + if block_roots.is_empty() { + // Backward compat: old DBs only have the legacy index. + block_roots = self.load_hash256_index(StorageKey::BlockRootsIndex)?; + // Best-effort migrate: populate meta index from legacy roots by reading blocks. + self.migrate_block_meta_index_from_roots(&block_roots)?; + } + let mut blocks = HashMap::new(); + + for block_root in block_roots { + if let Some(block) = self.fetch_block(block_root)? { + blocks.insert(block_root, block); + } + } + + Ok(blocks) + } + + /// Prune old blocks from the DB, keeping a rolling window by slot. + /// + /// - Keeps all blocks with `slot >= min_slot_to_keep` + /// - Also keeps any roots in `always_keep_roots` and their ancestors (while those ancestors exist in DB) + /// - Deletes pruned blocks from `DBColumn::BeaconBlock` + /// - Rewrites the `BlockRootsIndex` to only include kept roots + /// + /// Returns the number of deleted blocks. 
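+    ///
+    /// A usage sketch (the chain layer calls this with a 1024-slot retention
+    /// window; values illustrative):
+    /// ```ignore
+    /// let keep = HashSet::from([head_root, safe_target_root]);
+    /// let deleted = store.prune_blocks_older_than(slot.saturating_sub(1024), &keep)?;
+    /// ```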
+ pub fn prune_blocks_older_than( + &self, + min_slot_to_keep: u64, + always_keep_roots: &HashSet, + ) -> Result { + let meta_entries = self.load_block_meta_index()?; + if meta_entries.is_empty() { + return Ok(0); + } + + let mut meta: HashMap = HashMap::new(); + for m in &meta_entries { + meta.insert(m.root, (m.slot, m.parent_root)); + } + + // Initial keep-set: blocks in the rolling window. + let mut keep: HashSet = HashSet::new(); + for (root, (slot, _parent)) in &meta { + if *slot >= min_slot_to_keep { + keep.insert(*root); + } + } + + // Always keep explicit roots + their ancestors. + let mut stack: Vec = always_keep_roots.iter().copied().collect(); + while let Some(root) = stack.pop() { + if keep.insert(root) { + if let Some((_slot, parent)) = meta.get(&root).copied() { + if parent != Hash256::ZERO { + stack.push(parent); + } + } + } + } + + // Also keep ancestors of all kept blocks so we don't strand parents on restart. + let mut ancestor_stack: Vec = keep.iter().copied().collect(); + while let Some(root) = ancestor_stack.pop() { + if let Some((_slot, parent)) = meta.get(&root).copied() { + if parent != Hash256::ZERO && keep.insert(parent) { + ancestor_stack.push(parent); + } + } + } + + // Delete blocks not in keep-set and rebuild meta index. + let mut deleted = 0usize; + let mut kept_meta: Vec = Vec::new(); + for entry in meta_entries { + if keep.contains(&entry.root) { + kept_meta.push(entry); + continue; + } + + let key = StorageKey::Block.key_with_hash(&entry.root); + self.db + .key_delete(DBColumn::BeaconBlock, &key) + .map_err(|e| format!("Failed to delete block: {:?}", e))?; + deleted += 1; + } + + self.save_block_meta_index(&kept_meta)?; + + // Compact the beacon block column to reclaim space. + self.db + .compact_column(DBColumn::BeaconBlock) + .map_err(|e| format!("Failed to compact BeaconBlock column: {:?}", e))?; + + Ok(deleted) + } + + /// Prune old per-root states from the DB, keeping a rolling window by slot. + /// + /// Returns number of deleted states. 
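+    ///
+    /// Unlike block pruning, no ancestor walk happens here: only states whose
+    /// slot is inside the window, or whose root is in `always_keep_roots`, survive.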
+ pub fn prune_states_older_than( + &self, + min_slot_to_keep: u64, + always_keep_roots: &HashSet, + ) -> Result { + let entries = self.load_state_meta_index()?; + if entries.is_empty() { + return Ok(0); + } + + let mut keep: HashSet = HashSet::new(); + for e in &entries { + if e.slot >= min_slot_to_keep { + keep.insert(e.block_root); + } + } + for r in always_keep_roots { + keep.insert(*r); + } + + let mut deleted = 0usize; + let mut kept: Vec = Vec::new(); + for e in entries { + if keep.contains(&e.block_root) { + kept.push(e); + continue; + } + let key = StorageKey::StateByRoot.key_with_hash(&e.block_root); + self.db + .key_delete(DBColumn::BeaconState, &key) + .map_err(|er| format!("Failed to delete state: {:?}", er))?; + deleted += 1; + } + + self.save_state_meta_index(&kept)?; + self.db + .compact_column(DBColumn::BeaconState) + .map_err(|e| format!("Failed to compact BeaconState column: {:?}", e))?; + Ok(deleted) + } + + // ======== Block meta index helpers ======== + + fn load_block_meta_index(&self) -> Result, String> { + let bytes = self + .db + .get_bytes(DBColumn::BeaconMeta, &StorageKey::BlockMetaIndex.key()) + .map_err(|e| format!("Failed to load block meta index: {:?}", e))?; + let Some(data) = bytes else { return Ok(Vec::new()) }; + if data.len() % 72 != 0 { + return Err("Invalid block meta index length".to_string()); + } + let mut out: Vec = Vec::with_capacity(data.len() / 72); + for chunk in data.chunks_exact(72) { + let slot = u64::from_le_bytes(chunk[0..8].try_into().map_err(|_| "Invalid slot bytes".to_string())?); + let root = Hash256::from_slice(&chunk[8..40]); + let parent_root = Hash256::from_slice(&chunk[40..72]); + out.push(BlockMetaEntry { slot, root, parent_root }); + } + Ok(out) + } + + fn save_block_meta_index(&self, entries: &[BlockMetaEntry]) -> Result<(), String> { + let mut bytes = Vec::with_capacity(entries.len() * 72); + for e in entries { + bytes.extend_from_slice(&e.slot.to_le_bytes()); + bytes.extend_from_slice(&e.root.0); + bytes.extend_from_slice(&e.parent_root.0); + } + self.db + .put_bytes(DBColumn::BeaconMeta, &StorageKey::BlockMetaIndex.key(), &bytes) + .map_err(|e| format!("Failed to save block meta index: {:?}", e)) + } + + fn append_block_meta( + &self, + block_root: Hash256, + slot: u64, + parent_root: Hash256, + ) -> Result<(), String> { + let key = StorageKey::BlockMetaIndex.key(); + let mut data = self + .db + .get_bytes(DBColumn::BeaconMeta, &key) + .map_err(|e| format!("Failed to load block meta index: {:?}", e))? + .unwrap_or_default(); + + data.extend_from_slice(&slot.to_le_bytes()); + data.extend_from_slice(&block_root.0); + data.extend_from_slice(&parent_root.0); + + let max_len = BLOCK_META_INDEX_MAX_ENTRIES * 72; + if data.len() > max_len { + let start = data.len() - max_len; + data.drain(0..start); + } + + self.db + .put_bytes(DBColumn::BeaconMeta, &key, &data) + .map_err(|e| format!("Failed to append block meta index: {:?}", e)) + } + + fn migrate_block_meta_index_from_roots(&self, roots: &[Hash256]) -> Result<(), String> { + if !self.load_block_meta_index()?.is_empty() { + return Ok(()); + } + let mut entries: Vec = Vec::new(); + for r in roots { + if let Some(b) = self.fetch_block(*r)? 
{ + entries.push(BlockMetaEntry { + slot: b.slot.0, + root: *r, + parent_root: b.parent_root, + }); + } + } + self.save_block_meta_index(&entries) + } + + // ======== State meta index helpers ======== + + fn load_state_meta_index(&self) -> Result, String> { + let bytes = self + .db + .get_bytes(DBColumn::BeaconMeta, &StorageKey::StateMetaIndex.key()) + .map_err(|e| format!("Failed to load state meta index: {:?}", e))?; + let Some(data) = bytes else { return Ok(Vec::new()) }; + if data.len() % 40 != 0 { + return Err("Invalid state meta index length".to_string()); + } + let mut out: Vec = Vec::with_capacity(data.len() / 40); + for chunk in data.chunks_exact(40) { + let slot = u64::from_le_bytes(chunk[0..8].try_into().map_err(|_| "Invalid slot bytes".to_string())?); + let block_root = Hash256::from_slice(&chunk[8..40]); + out.push(StateMetaEntry { slot, block_root }); + } + Ok(out) + } + + fn save_state_meta_index(&self, entries: &[StateMetaEntry]) -> Result<(), String> { + let mut bytes = Vec::with_capacity(entries.len() * 40); + for e in entries { + bytes.extend_from_slice(&e.slot.to_le_bytes()); + bytes.extend_from_slice(&e.block_root.0); + } + self.db + .put_bytes(DBColumn::BeaconMeta, &StorageKey::StateMetaIndex.key(), &bytes) + .map_err(|e| format!("Failed to save state meta index: {:?}", e)) + } + + fn append_state_meta(&self, block_root: Hash256, slot: u64) -> Result<(), String> { + let key = StorageKey::StateMetaIndex.key(); + let mut data = self + .db + .get_bytes(DBColumn::BeaconMeta, &key) + .map_err(|e| format!("Failed to load state meta index: {:?}", e))? + .unwrap_or_default(); + + data.extend_from_slice(&slot.to_le_bytes()); + data.extend_from_slice(&block_root.0); + + let max_len = STATE_META_INDEX_MAX_ENTRIES * 40; + if data.len() > max_len { + let start = data.len() - max_len; + data.drain(0..start); + } + + self.db + .put_bytes(DBColumn::BeaconMeta, &key, &data) + .map_err(|e| format!("Failed to append state meta index: {:?}", e)) + } + + // ============ Fork Choice State ============ + + /// Saves the current head root to the database + pub fn save_head_root(&self, head_root: Hash256) -> Result<(), String> { + self.save_hash256_item(StorageKey::HeadRoot, head_root) + } + + /// Fetches the current head root from the database + pub fn fetch_head_root(&self) -> Result, String> { + self.fetch_hash256_item(StorageKey::HeadRoot) + } + + /// Saves the current safe target root to the database + pub fn save_safe_target(&self, safe_target: Hash256) -> Result<(), String> { + self.save_hash256_item(StorageKey::SafeTarget, safe_target) + } + + /// Fetches the current safe target root from the database + pub fn fetch_safe_target(&self) -> Result, String> { + self.fetch_hash256_item(StorageKey::SafeTarget) + } + + // ============ Attestation Management ============ + + /// Saves a known (confirmed) attestation to the database + pub fn save_known_attestation( + &self, + validator_id: u64, + attestation: &SignedAttestation, + ) -> Result<(), String> { + self.save_indexed_item(StorageKey::Attestation, validator_id, attestation)?; + self.add_to_index(StorageKey::ValidatorIndices, &validator_id.to_le_bytes())?; + Ok(()) + } + + /// Saves a new (pending) attestation to the database + pub fn save_new_attestation( + &self, + validator_id: u64, + attestation: &SignedAttestation, + ) -> Result<(), String> { + self.save_indexed_item(StorageKey::NewAttestation, validator_id, attestation)?; + self.add_to_index(StorageKey::NewValidatorIndices, &validator_id.to_le_bytes())?; + Ok(()) + } + + /// Loads 
all known attestations from the database + pub fn load_known_attestations(&self) -> Result, String> { + self.load_indexed_items(StorageKey::Attestation, StorageKey::ValidatorIndices) + } + + /// Loads all new (pending) attestations from the database + pub fn load_new_attestations(&self) -> Result, String> { + self.load_indexed_items(StorageKey::NewAttestation, StorageKey::NewValidatorIndices) + } + + /// Loads all attestations (alias for known attestations) + pub fn load_all_attestations(&self) -> Result, String> { + self.load_known_attestations() + } + + /// Deletes a new attestation for the given validator index + pub fn delete_new_attestation(&self, validator_id: u64) -> Result<(), String> { + let key = StorageKey::NewAttestation.key_with_id(validator_id); + self.db + .key_delete(DBColumn::BeaconMeta, &key) + .map_err(|e| format!("Failed to delete new attestation: {:?}", e))?; + + self.remove_from_index(StorageKey::NewValidatorIndices, &validator_id.to_le_bytes())?; + Ok(()) + } + + /// Migrates all new attestations to known attestations + pub fn accept_new_attestations(&self) -> Result<(), String> { + let new_attestations = self.load_new_attestations()?; + + for (validator_id, attestation) in new_attestations { + self.save_known_attestation(validator_id, &attestation)?; + self.delete_new_attestation(validator_id)?; + } + + Ok(()) + } + + // ============ Private Helpers ============ + + /// Fetches a single SSZ-encoded item from the database + fn fetch_single_item( + &self, + key: StorageKey, + column: DBColumn, + ) -> Result, String> { + let bytes = self + .db + .get_bytes(column, &key.key()) + .map_err(|e| format!("Failed to fetch item: {:?}", e))?; + + match bytes { + Some(data) => { + let item = T::from_ssz_bytes(&data) + .map_err(|e| format!("Failed to decode item: {:?}", e))?; + Ok(Some(item)) + } + None => Ok(None), + } + } + + /// Saves a single SSZ-encoded item to the database + fn save_single_item( + &self, + key: StorageKey, + column: DBColumn, + item: &T, + ) -> Result<(), String> { + let bytes = item.as_ssz_bytes(); + self.db + .put_bytes(column, &key.key(), &bytes) + .map_err(|e| format!("Failed to save item: {:?}", e)) + } + + /// Fetches an item using a specific key + fn fetch_with_key(&self, column: DBColumn, key: &[u8]) -> Result, String> { + let bytes = self + .db + .get_bytes(column, key) + .map_err(|e| format!("Failed to fetch item: {:?}", e))?; + + match bytes { + Some(data) => { + let item = T::from_ssz_bytes(&data) + .map_err(|e| format!("Failed to decode item: {:?}", e))?; + Ok(Some(item)) + } + None => Ok(None), + } + } + + /// Saves a Hash256 item to the database + fn save_hash256_item(&self, key: StorageKey, hash: Hash256) -> Result<(), String> { + self.db + .put_bytes(DBColumn::BeaconMeta, &key.key(), &hash.0) + .map_err(|e| format!("Failed to save hash: {:?}", e)) + } + + /// Fetches a Hash256 item from the database + fn fetch_hash256_item(&self, key: StorageKey) -> Result, String> { + let bytes = self + .db + .get_bytes(DBColumn::BeaconMeta, &key.key()) + .map_err(|e| format!("Failed to fetch hash: {:?}", e))?; + + match bytes { + Some(data) if data.len() == 32 => Ok(Some(Hash256::from_slice(&data))), + _ => Ok(None), + } + } + + /// Saves an indexed item (with u64 key suffix) + fn save_indexed_item( + &self, + key_prefix: StorageKey, + id: u64, + item: &T, + ) -> Result<(), String> { + let key = key_prefix.key_with_id(id); + let bytes = item.as_ssz_bytes(); + self.db + .put_bytes(DBColumn::BeaconMeta, &key, &bytes) + .map_err(|e| format!("Failed to save 
indexed item: {:?}", e)) + } + + /// Loads all indexed items for a given prefix + fn load_indexed_items( + &self, + item_key: StorageKey, + index_key: StorageKey, + ) -> Result, String> { + let indices = self.load_u64_index(index_key)?; + let mut items = HashMap::new(); + + for id in indices { + let key = item_key.key_with_id(id); + if let Some(item) = self.fetch_with_key(DBColumn::BeaconMeta, &key)? { + items.insert(id, item); + } + } + + Ok(items) + } + + /// Loads a u64 index from the database + fn load_u64_index(&self, key: StorageKey) -> Result, String> { + let bytes = self + .db + .get_bytes(DBColumn::BeaconMeta, &key.key()) + .map_err(|e| format!("Failed to load index: {:?}", e))?; + + match bytes { + Some(data) => { + let count = data.len() / 8; + let mut indices = Vec::with_capacity(count); + for i in 0..count { + let start = i * 8; + let end = start + 8; + let bytes_array: [u8; 8] = data[start..end] + .try_into() + .map_err(|_| "Invalid index bytes".to_string())?; + indices.push(u64::from_le_bytes(bytes_array)); + } + Ok(indices) + } + None => Ok(Vec::new()), + } + } + + /// Saves a u64 index to the database + fn save_u64_index(&self, key: StorageKey, indices: &[u64]) -> Result<(), String> { + let mut bytes = Vec::with_capacity(indices.len() * 8); + for &index in indices { + bytes.extend_from_slice(&index.to_le_bytes()); + } + + self.db + .put_bytes(DBColumn::BeaconMeta, &key.key(), &bytes) + .map_err(|e| format!("Failed to save index: {:?}", e)) + } + + /// Loads a Hash256 index from the database + fn load_hash256_index(&self, key: StorageKey) -> Result, String> { + let bytes = self + .db + .get_bytes(DBColumn::BeaconMeta, &key.key()) + .map_err(|e| format!("Failed to load index: {:?}", e))?; + + match bytes { + Some(data) => { + let count = data.len() / 32; + let mut hashes = Vec::with_capacity(count); + for i in 0..count { + let start = i * 32; + let end = start + 32; + hashes.push(Hash256::from_slice(&data[start..end])); + } + Ok(hashes) + } + None => Ok(Vec::new()), + } + } + + /// Adds an item to an index if not already present + fn add_to_index(&self, index_key: StorageKey, item_bytes: &[u8]) -> Result<(), String> { + let item_len = match index_key { + StorageKey::ValidatorIndices | StorageKey::NewValidatorIndices => 8, + StorageKey::BlockRootsIndex => 32, + _ => return Err("Invalid index key for add_to_index".to_string()), + }; + + if item_len == 8 { + let id = u64::from_le_bytes( + item_bytes + .try_into() + .map_err(|_| "Invalid id bytes".to_string())?, + ); + let mut indices = match index_key { + StorageKey::ValidatorIndices => self.load_u64_index(index_key)?, + StorageKey::NewValidatorIndices => self.load_u64_index(index_key)?, + _ => unreachable!(), + }; + if !indices.contains(&id) { + indices.push(id); + self.save_u64_index(index_key, &indices)?; + } + } else { + let hash = Hash256::from_slice(item_bytes); + let mut hashes = self.load_hash256_index(index_key)?; + if !hashes.contains(&hash) { + hashes.push(hash); + // Save Hash256 index + let mut bytes = Vec::with_capacity(hashes.len() * 32); + for h in &hashes { + bytes.extend_from_slice(&h.0); + } + self.db + .put_bytes(DBColumn::BeaconMeta, &index_key.key(), &bytes) + .map_err(|e| format!("Failed to save index: {:?}", e))?; + } + } + + Ok(()) + } + + /// Removes an item from an index + fn remove_from_index(&self, index_key: StorageKey, item_bytes: &[u8]) -> Result<(), String> { + let id = u64::from_le_bytes( + item_bytes + .try_into() + .map_err(|_| "Invalid id bytes".to_string())?, + ); + let mut indices = 
self.load_u64_index(index_key)?; + indices.retain(|&i| i != id); + self.save_u64_index(index_key, &indices)?; + Ok(()) + } +} diff --git a/lean_client/validator_service/Cargo.toml b/lean_client/validator_service/Cargo.toml new file mode 100644 index 00000000000..ab16dc0c05c --- /dev/null +++ b/lean_client/validator_service/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "lean_validator_service" +version = "0.1.0" +edition.workspace = true + +[dependencies] +ethereum_ssz = "0.10.0" +ethereum_ssz_derive = "0.10.0" +futures = { workspace = true } +fixed_bytes = { workspace = true } +leansig = { git = "https://github.com/leanEthereum/leanSig" } +lean_consensus = { workspace = true } +lean_crypto = { workspace = true } +lean_forkchoice = { workspace = true } +lean_keystore = { path = "../keystore" } +lean_network = { workspace = true } +lean_store = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +ssz_types = { workspace = true } +milhouse = { workspace = true } +metrics = { workspace = true } +lazy_static = "1.4.0" diff --git a/lean_client/validator_service/src/chain.rs b/lean_client/validator_service/src/chain.rs new file mode 100644 index 00000000000..97e675d3bf8 --- /dev/null +++ b/lean_client/validator_service/src/chain.rs @@ -0,0 +1,532 @@ +use std::collections::HashMap; +use std::collections::HashSet; +use std::sync::Arc; + +use lean_consensus::attestation::{SignedAttestation, Slot as LeanSlot}; +use lean_consensus::lean_block::{LeanBlock, LeanBlockBody}; +use lean_consensus::lean_state::LeanState; +use lean_forkchoice::proto_array::{ProtoArray, ProtoArrayError}; +use lean_store::LeanStore; +use ssz::{Decode, Encode}; +use ssz_types::VariableList; +use store::KeyValueStore; +use tree_hash::TreeHash; +use types::{EthSpec, Hash256}; + +use fixed_bytes::FixedBytesExtended; + +/// Central coordinator for fork-choice state and database access. +/// +/// `LeanChain` maintains an in-memory proto-array alongside persistent storage. +/// The validator service delegates block/attestation integration and head updates to this struct. +/// +/// States and blocks are cached in memory per root, avoiding database reads during block processing. +/// The database is used for persistence only, with all hot-path operations using the in-memory caches. +pub struct LeanChain> { + store: LeanStore, + proto_array: ProtoArray, + latest_votes: HashMap, + /// In-memory cache of states keyed by block root + states: HashMap>>, + /// In-memory cache of blocks keyed by block root + blocks: HashMap>, +} + +/// Rolling retention policy for the lean client. +const BLOCK_DB_RETENTION_SLOTS: u64 = 1024; +const STATE_CACHE_RETENTION_SLOTS: u64 = 128; +/// How often to run DB compaction (every N slots). +const DB_COMPACTION_INTERVAL_SLOTS: u64 = 32; + +impl> LeanChain { + /// Clones a state using SSZ serialization/deserialization. + /// + /// Ensures deep copies of all state fields including complex types like milhouse::List and BitVector. + pub fn clone_state(state: &LeanState) -> Result, String> { + let bytes = state.as_ssz_bytes(); + LeanState::from_ssz_bytes(&bytes) + .map_err(|e| format!("Failed to clone state via SSZ: {:?}", e)) + } + + /// Construct a new chain coordinator from an existing key-value backend. 
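+    ///
+    /// If fork-choice state cannot be rebuilt from disk, this falls back to an
+    /// empty proto-array rather than failing. A call-site sketch (type
+    /// annotations illustrative):
+    /// ```ignore
+    /// let chain: LeanChain<E, BeaconNodeBackend<E>> = LeanChain::new(db.clone());
+    /// ```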
+    pub fn new(db: Arc<DB>) -> Self {
+        let store = LeanStore::new(db);
+        let (proto_array, latest_votes, states, blocks) = match Self::initialize_fork_choice(&store)
+        {
+            Ok(result) => result,
+            Err(e) => {
+                tracing::warn!(
+                    error = %e,
+                    "Failed to initialize proto array from store, starting empty"
+                );
+                (
+                    ProtoArray::new(Hash256::zero(), LeanSlot(0)),
+                    HashMap::new(),
+                    HashMap::new(),
+                    HashMap::new(),
+                )
+            }
+        };
+
+        Self {
+            store,
+            proto_array,
+            latest_votes,
+            states,
+            blocks,
+        }
+    }
+
+    /// Returns a reference to the underlying store.
+    pub fn store(&self) -> &LeanStore<E, DB> {
+        &self.store
+    }
+
+    /// Registers a block with the in-memory proto-array, ensuring the parent chain exists.
+    pub fn register_block(
+        &mut self,
+        block_root: Hash256,
+        slot: LeanSlot,
+        parent_root: Hash256,
+    ) -> Result<(), String> {
+        if parent_root != Hash256::zero() {
+            self.ensure_block_in_proto_array(parent_root)?;
+        }
+
+        self.proto_array
+            .on_block(block_root, slot, parent_root)
+            .map_err(Self::format_proto_error)
+    }
+
+    /// Returns true if the block root is in cache or on disk.
+    pub fn block_exists(&self, block_root: Hash256) -> Result<bool, String> {
+        if self.blocks.contains_key(&block_root) {
+            return Ok(true);
+        }
+        self.store.block_exists(block_root)
+    }
+
+    /// Gets a state from the cache by block root.
+    /// Returns None if the state is not in the cache.
+    pub fn get_state(&self, block_root: &Hash256) -> Option<Arc<LeanState<E>>> {
+        self.states.get(block_root).cloned()
+    }
+
+    /// Fetches a block by root from cache first, then DB if not cached.
+    /// Use get_block() for cache-only access during hot paths.
+    ///
+    /// Note: This method takes &self and returns a clone. For write-heavy workloads,
+    /// consider using get_block() which returns a reference.
+    pub fn fetch_block(&self, block_root: Hash256) -> Result<Option<LeanBlock<E>>, String> {
+        // Check cache first
+        if let Some(block) = self.blocks.get(&block_root) {
+            return Ok(Some(block.clone()));
+        }
+
+        // Fall back to DB. Since we only have &self, we cannot populate the cache here;
+        // the block will be cached when saved via save_block().
+        self.store.fetch_block(block_root)
+    }
+
+    /// Persists a block to cache and disk atomically.
+    pub fn save_block(&mut self, block_root: Hash256, block: &LeanBlock<E>) -> Result<(), String> {
+        // Update cache first
+        self.blocks.insert(block_root, block.clone());
+        // Then persist to disk
+        self.store.save_block(block_root, block)
+    }
+
+    /// Returns all blocks from cache.
+    pub fn load_all_blocks(&self) -> Result<HashMap<Hash256, LeanBlock<E>>, String> {
+        Ok(self.blocks.clone())
+    }
+
+    /// Returns the latest head root from disk.
+    pub fn fetch_head_root(&self) -> Result<Option<Hash256>, String> {
+        self.store.fetch_head_root()
+    }
+
+    /// Retrieves a state by block root, checking the in-memory cache first and then
+    /// falling back to the per-root state on disk (needed after restarts).
+    pub fn fetch_state(&self, block_root: &Hash256) -> Option<Arc<LeanState<E>>> {
+        if let Some(state) = self.get_state(block_root) {
+            return Some(state);
+        }
+        // DB fallback for restarts: try per-root state.
+        match self.store.fetch_state_by_root(*block_root) {
+            Ok(Some(state)) => Some(Arc::new(state)),
+            _ => None,
+        }
+    }
+
+    /// Gets the current head state from cache.
+    /// Returns None if head root is not set or state not in cache.
+    pub fn get_head_state(&self) -> Option<Arc<LeanState<E>>> {
+        let head_root = self.store.fetch_head_root().ok()??;
+        self.get_state(&head_root)
+    }
+
+    /// Persists a state to cache and disk atomically.
+    /// The state is associated with the provided block root.
+    pub fn save_state(&mut self, block_root: Hash256, state: &LeanState<E>) -> Result<(), String> {
+        // Update cache first
+        self.states
+            .insert(block_root, Arc::new(Self::clone_state(state)?));
+
+        // Persist both:
+        // - per-root (for correct restart/import of parent states)
+        // - latest snapshot (legacy / debugging)
+        self.store.save_state_by_root(block_root, state)?;
+        self.store.save_state(state)
+    }
+
+    /// Prune in-memory caches and compact the on-disk block DB.
+    ///
+    /// - Keeps last `STATE_CACHE_RETENTION_SLOTS` of states in memory
+    /// - Keeps last `BLOCK_DB_RETENTION_SLOTS` of blocks on disk (plus required ancestors)
+    /// - Always keeps head + safe_target roots
+    ///
+    /// `last_compaction_slot` is passed in by the caller and updated here to
+    /// rate-limit DB compaction.
+    pub fn prune_and_compact(
+        &mut self,
+        current_slot: u64,
+        last_compaction_slot: &mut u64,
+    ) -> Result<(), String> {
+        // Always prune the state cache (cheap).
+        self.prune_state_cache(current_slot);
+
+        // Rate-limit DB compaction.
+        if current_slot < *last_compaction_slot + DB_COMPACTION_INTERVAL_SLOTS {
+            return Ok(());
+        }
+        *last_compaction_slot = current_slot;
+
+        let cutoff = current_slot.saturating_sub(BLOCK_DB_RETENTION_SLOTS);
+        let state_cutoff = current_slot.saturating_sub(STATE_CACHE_RETENTION_SLOTS);
+
+        let mut keep_roots: HashSet<Hash256> = HashSet::new();
+        if let Ok(Some(head)) = self.fetch_head_root() {
+            keep_roots.insert(head);
+        }
+        if let Ok(Some(safe)) = self.fetch_safe_target() {
+            keep_roots.insert(safe);
+        }
+
+        // Prune the in-memory block cache first to reduce memory.
+        self.blocks
+            .retain(|root, block| keep_roots.contains(root) || block.slot.0 >= cutoff);
+
+        // Prune blocks on disk.
+        let deleted = self
+            .store
+            .prune_blocks_older_than(cutoff, &keep_roots)?;
+        if deleted > 0 {
+            tracing::info!(
+                current_slot,
+                cutoff_slot = cutoff,
+                deleted_blocks = deleted,
+                "Pruned old blocks from DB"
+            );
+        }
+
+        // Prune per-root states on disk, matching the state retention window.
+        let deleted_states = self
+            .store
+            .prune_states_older_than(state_cutoff, &keep_roots)?;
+        if deleted_states > 0 {
+            tracing::info!(
+                current_slot,
+                cutoff_slot = state_cutoff,
+                deleted_states,
+                "Pruned old states from DB"
+            );
+        }
+
+        Ok(())
+    }
+
+    fn prune_state_cache(&mut self, current_slot: u64) {
+        let cutoff = current_slot.saturating_sub(STATE_CACHE_RETENTION_SLOTS);
+        self.states.retain(|_root, state| state.slot.0 >= cutoff);
+    }
+
+    /// Returns the current safe target if stored.
+    pub fn fetch_safe_target(&self) -> Result<Option<Hash256>, String> {
+        self.store.fetch_safe_target()
+    }
+
+    /// Persists the safe target root.
+    pub fn save_safe_target(&self, safe_target: Hash256) -> Result<(), String> {
+        self.store.save_safe_target(safe_target)
+    }
+
+    /// Loads all known attestations keyed by validator index.
+    pub fn load_known_attestations(&self) -> Result<HashMap<u64, SignedAttestation>, String> {
+        self.store.load_known_attestations()
+    }
+
+    /// Loads all new attestations keyed by validator index.
+    pub fn load_new_attestations(&self) -> Result<HashMap<u64, SignedAttestation>, String> {
+        self.store.load_new_attestations()
+    }
+
+    /// Persists a new attestation.
+ pub fn save_new_attestation( + &self, + validator_id: u64, + attestation: &SignedAttestation, + ) -> Result<(), String> { + self.store.save_new_attestation(validator_id, attestation) + } + + /// Persists a known attestation. + pub fn save_known_attestation( + &self, + validator_id: u64, + attestation: &SignedAttestation, + ) -> Result<(), String> { + self.store.save_known_attestation(validator_id, attestation) + } + + /// Deletes a pending attestation for the given validator. + pub fn delete_new_attestation(&self, validator_id: u64) -> Result<(), String> { + self.store.delete_new_attestation(validator_id) + } + + /// Applies weight for a validator attestation to the fork-choice tree. + pub fn apply_attestation_weight( + &mut self, + validator_id: u64, + head_root: Hash256, + ) -> Result<(), String> { + if head_root == Hash256::zero() { + return Ok(()); + } + + self.ensure_block_in_proto_array(head_root)?; + + if let Some(previous_root) = self.latest_votes.get(&validator_id).copied() { + if previous_root == head_root { + return Ok(()); + } + + self.proto_array + .remove_weight(previous_root, 1) + .map_err(Self::format_proto_error)?; + } + + self.proto_array + .add_weight(head_root, 1) + .map_err(Self::format_proto_error)?; + self.latest_votes.insert(validator_id, head_root); + + Ok(()) + } + + /// Promotes pending attestations to known, updating fork-choice weights in the process. + pub fn promote_new_attestations(&mut self) -> Result<(), String> { + let new_attestations = self.store.load_new_attestations()?; + for (validator_id, attestation) in new_attestations.iter() { + let head_root = attestation.message.attestation_data.head.root; + if let Err(e) = self.apply_attestation_weight(*validator_id, head_root) { + tracing::warn!( + validator_id = *validator_id, + ?head_root, + error = %e, + "Failed to apply weight for promoted attestation" + ); + } + } + self.store.accept_new_attestations() + } + + /// Updates the canonical head using the proto-array and persists it to the store. + pub fn update_head(&mut self, current_state: &LeanState) -> Result { + let stored_head = self.store.fetch_head_root()?; + let current_head_root = current_state.latest_block_header.tree_hash_root(); + let candidates = [ + Some(current_state.latest_justified.root), + stored_head, + Some(current_head_root), + ]; + + for candidate in candidates.into_iter().flatten() { + if candidate == Hash256::zero() { + continue; + } + + if let Err(e) = self.ensure_block_in_proto_array(candidate) { + tracing::warn!( + ?candidate, + error = %e, + "Failed to ensure candidate root exists in proto array" + ); + continue; + } + + match self.proto_array.find_head(candidate) { + Ok(head) => { + self.store.save_head_root(head)?; + + // Update metrics + + + + return Ok(head); + } + Err(err) => { + let err_msg = Self::format_proto_error(err); + tracing::warn!( + ?candidate, + error = err_msg, + "Proto array head selection failed for candidate" + ); + } + } + } + + self.store.save_head_root(Hash256::zero())?; + Ok(Hash256::zero()) + } + + #[allow(clippy::type_complexity)] + fn initialize_fork_choice( + store: &LeanStore, + ) -> Result< + ( + ProtoArray, + HashMap, + HashMap>>, + HashMap>, + ), + String, + > { + let Some(state) = store.fetch_state()? 
else { + return Ok(( + ProtoArray::new(Hash256::zero(), LeanSlot(0)), + HashMap::new(), + HashMap::new(), + HashMap::new(), + )); + }; + + let stored_head = store.fetch_head_root()?; + let genesis_slot = state.slot; + + // For genesis (slot 0), key the state by the stored head root (populated genesis block) + // instead of the internal zeroed-header root. This ensures correct cache lookups. + let genesis_root = if genesis_slot == LeanSlot(0) { + stored_head.unwrap_or_else(|| state.latest_block_header.tree_hash_root()) + } else { + state.latest_block_header.tree_hash_root() + }; + let genesis_proposer = state.latest_block_header.proposer_index.0; + let genesis_parent_root = state.latest_block_header.parent_root; + let genesis_state_root = state.latest_block_header.state_root; + + tracing::info!( + genesis_root = ?genesis_root, + genesis_slot = genesis_slot.0, + "Loaded genesis state from database for fork choice initialization" + ); + + // Initialize state cache with genesis state + let mut states = HashMap::new(); + states.insert(genesis_root, Arc::new(state)); + + // Persist the genesis block if it is not already available. + if store.fetch_block(genesis_root)?.is_none() { + let block = LeanBlock { + slot: genesis_slot, + proposer_index: genesis_proposer, + parent_root: genesis_parent_root, + state_root: genesis_state_root, + body: LeanBlockBody { + attestations: VariableList::default(), + }, + }; + store.save_block(genesis_root, &block)?; + } + + // Ensure head and safe target are initialized. + if store.fetch_head_root()?.is_none() { + store.save_head_root(genesis_root)?; + } + if store.fetch_safe_target()?.is_none() { + store.save_safe_target(genesis_root)?; + } + + // Load all known blocks from DB into memory cache + let blocks = store.load_all_blocks()?; + let mut proto_array = ProtoArray::new(genesis_root, genesis_slot); + + let mut entries: Vec<_> = blocks.iter().collect(); + entries.sort_by_key(|(_, block)| block.slot.0); + + for (root, block) in entries { + proto_array + .on_block(*root, block.slot, block.parent_root) + .map_err(Self::format_proto_error)?; + } + + let mut latest_votes = HashMap::new(); + let known_attestations = store.load_known_attestations()?; + for (validator_id, attestation) in known_attestations { + let head_root = attestation.message.attestation_data.head.root; + if !proto_array.contains(&head_root) || head_root == Hash256::zero() { + continue; + } + proto_array + .add_weight(head_root, 1) + .map_err(Self::format_proto_error)?; + latest_votes.insert(validator_id, head_root); + } + + Ok((proto_array, latest_votes, states, blocks)) + } + + fn ensure_block_in_proto_array(&mut self, block_root: Hash256) -> Result<(), String> { + if block_root == Hash256::zero() || self.proto_array.contains(&block_root) { + return Ok(()); + } + + // Fetch block from cache or DB + let block = self + .fetch_block(block_root)? 
+ .ok_or_else(|| format!("Block {:?} not found in cache or store", block_root))?; + + if block.parent_root != Hash256::zero() { + self.ensure_block_in_proto_array(block.parent_root)?; + } + + self.proto_array + .on_block(block_root, block.slot, block.parent_root) + .map_err(Self::format_proto_error) + } + + fn format_proto_error(err: ProtoArrayError) -> String { + match err { + ProtoArrayError::UnknownParent { + block_root, + parent_root, + } => format!( + "Proto array missing parent {:?} for block {:?}", + parent_root, block_root + ), + ProtoArrayError::UnknownBlock(root) => { + format!("Proto array is missing block {:?}", root) + } + ProtoArrayError::InvalidNodeIndex(index) => { + format!("Proto array encountered invalid node index {}", index) + } + } + } +} diff --git a/lean_client/validator_service/src/lib.rs b/lean_client/validator_service/src/lib.rs new file mode 100644 index 00000000000..2def1707c44 --- /dev/null +++ b/lean_client/validator_service/src/lib.rs @@ -0,0 +1,1892 @@ +use lean_consensus::attestation::{ + Attestation, AttestationData, Checkpoint, SignedAttestation, Slot as LeanSlot, +}; +use lean_consensus::lean_block::{ + LeanBlock, LeanBlockBody, LeanBlockWithAttestation, SignedLeanBlockWithAttestation, +}; +use lean_consensus::lean_state::{ + INTERVALS_PER_SLOT, JUSTIFICATION_LOOKBACK_SLOTS, LeanState, SECONDS_PER_INTERVAL, + SECONDS_PER_SLOT, +}; +use lean_crypto::Signature; +use lean_forkchoice::helpers::get_fork_choice_head; +use lean_keystore::{KeyStore, ValidatorKeyPair}; +pub use lean_network::{BlocksByRootRequest, NetworkMessage, PeerId, RPCRequest, StatusMessage}; +use leansig::serialization::Serializable; +use leansig::signature::SignatureScheme; +use slot_clock::SlotClock; +use ssz_types::VariableList; +use std::collections::HashMap; +use std::collections::HashSet; +use std::sync::Arc; +use std::time::Instant; +use store::KeyValueStore; +use tokio::sync::mpsc; +use tokio::time::{Duration as TokioDuration, sleep}; +use tracing::{debug, error, info, warn}; +use tree_hash::TreeHash; +use types::{EthSpec, Hash256, Slot}; + +/// Maximum age (in seconds) before an orphan block is purged +const ORPHAN_BLOCK_MAX_AGE_SECS: u64 = 60; + +/// Maximum number of orphan blocks to keep in the queue +const ORPHAN_BLOCK_MAX_COUNT: usize = 256; + +/// An orphan block waiting for its parent to arrive +struct OrphanBlock { + /// The signed block waiting for its parent + block: Arc>, + /// The peer that sent this block (for requesting parent) + peer_id: Option, + /// When this block was added to the orphan queue + received_at: Instant, +} + +/// Queue for blocks whose parent state is not yet available +struct OrphanBlockQueue { + /// Blocks waiting for their parent, keyed by the parent root they need + /// Multiple blocks can be waiting for the same parent + waiting_for_parent: HashMap>>, + /// Total count of orphan blocks (for enforcing max count) + total_count: usize, +} + +mod chain; + +use chain::LeanChain; + +use leansig::signature::generalized_xmss::instantiations_poseidon_top_level::lifetime_2_to_the_32::hashing_optimized::SIGTopLevelTargetSumLifetime32Dim64Base8 as Scheme; + +mod metrics; + +impl OrphanBlockQueue { + /// Creates a new empty orphan block queue + fn new() -> Self { + Self { + waiting_for_parent: HashMap::new(), + total_count: 0, + } + } + + /// Adds a block to the orphan queue, waiting for its parent + /// + /// Returns true if the block was added, false if it was rejected (e.g., queue full) + fn insert(&mut self, parent_root: Hash256, block: Arc>, peer_id: 
Option) -> bool { + // Check if we're at capacity - if so, purge old blocks first + if self.total_count >= ORPHAN_BLOCK_MAX_COUNT { + self.purge_oldest(); + // If still at capacity after purge, reject + if self.total_count >= ORPHAN_BLOCK_MAX_COUNT { + warn!( + total_count = self.total_count, + max_count = ORPHAN_BLOCK_MAX_COUNT, + "Orphan block queue full, rejecting block" + ); + return false; + } + } + + let orphan = OrphanBlock { + block, + peer_id, + received_at: Instant::now(), + }; + + self.waiting_for_parent + .entry(parent_root) + .or_insert_with(Vec::new) + .push(orphan); + self.total_count += 1; + + debug!( + ?parent_root, + total_orphans = self.total_count, + "Block added to orphan queue" + ); + + true + } + + /// Removes and returns all blocks waiting for the given parent root + fn take_children(&mut self, parent_root: &Hash256) -> Vec> { + if let Some(children) = self.waiting_for_parent.remove(parent_root) { + self.total_count = self.total_count.saturating_sub(children.len()); + debug!( + ?parent_root, + children_count = children.len(), + remaining_orphans = self.total_count, + "Retrieved orphan blocks for processed parent" + ); + children + } else { + Vec::new() + } + } + + /// Purges blocks older than ORPHAN_BLOCK_MAX_AGE_SECS + /// + /// Returns the number of blocks purged + fn purge_stale(&mut self) -> usize { + let now = Instant::now(); + let max_age = std::time::Duration::from_secs(ORPHAN_BLOCK_MAX_AGE_SECS); + let mut purged = 0; + + self.waiting_for_parent.retain(|parent_root, blocks| { + let original_len = blocks.len(); + blocks.retain(|orphan| { + let age = now.duration_since(orphan.received_at); + if age > max_age { + debug!( + slot = orphan.block.message.block.slot.0, + ?parent_root, + age_secs = age.as_secs(), + "Purging stale orphan block" + ); + false + } else { + true + } + }); + purged += original_len - blocks.len(); + !blocks.is_empty() + }); + + self.total_count = self.total_count.saturating_sub(purged); + + if purged > 0 { + info!( + purged_count = purged, + remaining_orphans = self.total_count, + "Purged stale orphan blocks" + ); + } + + purged + } + + /// Purges the oldest blocks to make room for new ones + /// + /// Removes approximately 25% of the oldest blocks + fn purge_oldest(&mut self) { + let target_purge = ORPHAN_BLOCK_MAX_COUNT / 4; + let mut all_orphans: Vec<(Hash256, usize, Instant)> = Vec::new(); + + // Collect all orphans with their parent root, index, and timestamp + for (parent_root, blocks) in &self.waiting_for_parent { + for (idx, orphan) in blocks.iter().enumerate() { + all_orphans.push((*parent_root, idx, orphan.received_at)); + } + } + + // Sort by received_at (oldest first) + all_orphans.sort_by_key(|(_, _, received_at)| *received_at); + + // Mark oldest blocks for removal + let to_remove: Vec<(Hash256, usize)> = all_orphans + .into_iter() + .take(target_purge) + .map(|(parent_root, idx, _)| (parent_root, idx)) + .collect(); + + // Group removals by parent_root for efficient removal + let mut removals_by_parent: HashMap> = HashMap::new(); + for (parent_root, idx) in to_remove { + removals_by_parent.entry(parent_root).or_default().push(idx); + } + + let mut purged = 0; + for (parent_root, mut indices) in removals_by_parent { + if let Some(blocks) = self.waiting_for_parent.get_mut(&parent_root) { + // Sort indices in descending order to remove from end first + indices.sort_by(|a, b| b.cmp(a)); + for idx in indices { + if idx < blocks.len() { + let removed = blocks.remove(idx); + debug!( + slot = removed.block.message.block.slot.0, + 
?parent_root, + "Purging oldest orphan block to make room" + ); + purged += 1; + } + } + // Remove entry if no blocks left + if blocks.is_empty() { + self.waiting_for_parent.remove(&parent_root); + } + } + } + + self.total_count = self.total_count.saturating_sub(purged); + + if purged > 0 { + info!( + purged_count = purged, + remaining_orphans = self.total_count, + "Purged oldest orphan blocks to make room" + ); + } + } + + /// Returns the total number of orphan blocks in the queue + fn len(&self) -> usize { + self.total_count + } +} + +/// Validator service that processes validator duties including attestations +/// +/// Manages interval ticks and network message processing using tokio::select! for simple +/// async handling without complex pinning or stream implementation. +pub struct ValidatorService> { + /// Receiver for network messages from the network service + network_recv: mpsc::Receiver>, + /// Sender for publishing messages to the network service + network_send: mpsc::Sender>, + /// Slot clock for timing + slot_clock: T, + /// Chain coordinator handling fork choice and storage interactions + chain: LeanChain, + /// Validator index managed by this node + validator_index: u64, + /// Hash-sig secret key decoded from the keystore JSON + hashsig_secret_key: ::SecretKey, + /// Key store for loading validator public keys for signature verification + _keystore: KeyStore, + /// Counter for periodic status logging + status_interval_counter: u64, + /// Queue of blocks waiting for their parent to be processed + orphan_queue: OrphanBlockQueue, + /// Last slot at which we compacted the DB (rate-limit expensive work) + last_db_compaction_slot: u64, + /// Connected peers (for CHAIN STATUS display) + connected_peers: HashSet, +} + +impl> ValidatorService { + /// Creates a new validator service with the provided channels + pub fn new( + slot_clock: T, + network_recv: mpsc::Receiver>, + network_send: mpsc::Sender>, + db: Arc, + validator_index: u64, + validator_key_pair: ValidatorKeyPair, + keystore: KeyStore, + ) -> Result { + let chain = LeanChain::new(db); + let hashsig_secret_key = validator_key_pair.hashsig_secret_key()?; + // Ensure the public key JSON is well-formed even if we do not store it explicitly. + let _ = validator_key_pair.hashsig_public_key()?; + + // Ensure all validator metrics are registered in the global Prometheus + // registry so that the /metrics endpoint exposes the full Zeam metric + // set even before any events have occurred. + metrics::init(); + + Ok(Self { + network_recv, + network_send, + slot_clock, + chain, + validator_index, + hashsig_secret_key, + _keystore: keystore, + status_interval_counter: 0, + orphan_queue: OrphanBlockQueue::new(), + last_db_compaction_slot: 0, + connected_peers: HashSet::new(), + }) + } + + /// Signs an attestation message using XMSS private key + /// + /// The message to sign is the SSZ-encoded Attestation. + /// Uses the hash-sig crate to create an XMSS signature. 
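The signing path below converts the u64 epoch to the scheme's u32 signing index and enforces a fixed signature size before wrapping the bytes. A minimal, self-contained sketch of that pattern — `MockScheme`, `SIG_SIZE`, and `sign_root` are illustrative stand-ins, not the real `leansig`/`lean_crypto` APIs:

```rust
const SIG_SIZE: usize = 4; // stand-in for lean_crypto::signature::SIGNATURE_SIZE

struct MockScheme;

impl MockScheme {
    // Epoch-indexed scheme: the signing index must fit in u32.
    fn sign(_sk: &[u8], epoch: u32, _msg: &[u8; 32]) -> Result<Vec<u8>, String> {
        Ok(vec![epoch as u8; SIG_SIZE]) // placeholder signature bytes
    }
}

fn sign_root(sk: &[u8], epoch: u64, root: [u8; 32]) -> Result<[u8; SIG_SIZE], String> {
    // Reject out-of-range epochs up front, as the real code does.
    let epoch_u32: u32 = epoch
        .try_into()
        .map_err(|_| format!("Epoch {epoch} is too large for u32"))?;
    let bytes = MockScheme::sign(sk, epoch_u32, &root)?;
    // Enforce the fixed signature size while converting to an array.
    bytes
        .try_into()
        .map_err(|_| "Invalid signature size".to_string())
}

fn main() {
    let sig = sign_root(&[0u8; 32], 7, [0xab; 32]).unwrap();
    assert_eq!(sig.len(), SIG_SIZE);
}
```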
+ fn sign_attestation( + &self, + validator_id: u64, + attestation: &Attestation, + epoch: u64, + ) -> Result { + // Hash the attestation message using tree hash (32 bytes) + let message_hash = attestation.tree_hash_root(); + + if validator_id != self.validator_index { + return Err(format!( + "Validator {} is not assigned to this node (controls {})", + validator_id, self.validator_index + )); + } + + // Sign the message hash using the XMSS scheme + // The epoch parameter is used for XMSS signature generation (must be u32) + let epoch_u32 = epoch + .try_into() + .map_err(|_| format!("Epoch {} is too large for u32", epoch))?; + + let _timer = metrics::start_timer(&metrics::LEAN_PQ_SIGNATURE_ATTESTATION_SIGNING_TIME); + let xmss_signature = Scheme::sign(&self.hashsig_secret_key, epoch_u32, &message_hash.0) + .map_err(|e| format!("Failed to sign attestation with XMSS: {:?}", e))?; + drop(_timer); + + // Serialize the XMSS signature to bytes using leanSig's serialization + let signature_bytes_vec = xmss_signature.to_bytes(); + + if signature_bytes_vec.len() != lean_crypto::signature::SIGNATURE_SIZE { + return Err(format!( + "Invalid signature size: expected {}, got {}", + lean_crypto::signature::SIGNATURE_SIZE, + signature_bytes_vec.len() + )); + } + + // Convert to fixed-size array + let mut signature_bytes = [0u8; lean_crypto::signature::SIGNATURE_SIZE]; + signature_bytes.copy_from_slice(&signature_bytes_vec); + + // Create Signature from fixed-size array + Ok(Signature::from_bytes(signature_bytes)) + } + + /// Sets the lean state for consensus processing by storing it in cache and database + pub fn set_lean_state(&mut self, state: LeanState) -> Result<(), String> { + // Save state keyed by its block root (genesis or current block) + let state_root = state.latest_block_header.tree_hash_root(); + self.chain.save_state(state_root, &state) + } + + /// Waits for genesis time to arrive + async fn wait_for_genesis(&self) { + // SystemTimeSlotClock::genesis_duration() returns the duration since UNIX_EPOCH to genesis + let genesis_time = self.slot_clock.genesis_duration(); + + let Ok(now) = std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) else { + error!("System time is before UNIX EPOCH!"); + return; + }; + + if now < genesis_time { + let wait_duration = genesis_time - now; + info!( + wait_seconds = wait_duration.as_secs(), + genesis_time = genesis_time.as_secs(), + "Waiting for genesis" + ); + sleep(wait_duration).await; + info!("Genesis time reached"); + } else { + debug!("Genesis time is in the past, starting immediately"); + } + } + + /// Runs the validator service, processing messages and interval ticks + /// + /// Uses tokio::select! to handle: + /// - Interval tick timing (calculated from slot_clock) + /// - Network messages from the network service + pub async fn run(mut self) { + info!("Validator service started"); + + // Wait for genesis + self.wait_for_genesis().await; + + loop { + // Use slot_clock to determine when the next interval occurs + let Some(next_interval_duration) = self.get_time_to_next_interval() else { + // Critical failure: slot clock is unavailable or broken + // The validator service cannot function without access to the slot clock + error!( + "CRITICAL: Unable to determine next interval - slot clock is unavailable. \ + Validator service cannot proceed. This indicates a serious system failure." + ); + break; + }; + + tokio::select! 
{ + // Wait for the next interval boundary + _ = sleep(next_interval_duration) => { + // Process the interval tick + if let Err(e) = self.process_interval_tick().await { + error!(error = %e, "Failed to process interval tick"); + } + } + + // Handle network messages + Some(network_msg) = self.network_recv.recv() => { + if let Err(e) = self.handle_network_message(network_msg).await { + error!("Error handling network message: {}", e); + } + + // Drain a small burst to prevent falling behind under high message rates. + // This reduces queueing and helps keep memory stable when combined with + // bounded channels. + for _ in 0..128 { + match self.network_recv.try_recv() { + Ok(next_msg) => { + if let Err(e) = self.handle_network_message(next_msg).await { + error!("Error handling network message: {}", e); + } + } + Err(_) => break, + } + } + } + + // If both channels are closed, exit + else => { + warn!("Network message channel closed, validator service shutting down"); + break; + } + } + } + + info!("Validator service stopped"); + } + + /// Calculates the time until the next interval boundary using the slot clock with sub-second precision + fn get_time_to_next_interval(&self) -> Option { + let now = self.slot_clock.now_duration()?; + let current_slot = self.slot_clock.now()?; + let slot_start = self.slot_clock.start_of(current_slot)?; + + // Time elapsed since slot start + let time_since_slot_start = now.checked_sub(slot_start)?; + + // Calculate current interval (0-3) with sub-second precision + let interval_duration = std::time::Duration::from_secs(SECONDS_PER_INTERVAL); + let current_interval = + (time_since_slot_start.as_nanos() / interval_duration.as_nanos()) as u64; + + // If we're past all intervals in this slot, wait for the next slot + if current_interval >= INTERVALS_PER_SLOT { + return self.slot_clock.duration_to_next_slot(); + } + + // Calculate the absolute time for the next interval boundary + let next_interval_relative_start = interval_duration * (current_interval as u32 + 1); + + // Time remaining until the next interval boundary + next_interval_relative_start.checked_sub(time_since_slot_start) + } + + /// Processes an interval tick: calculates current interval and executes appropriate duties + async fn process_interval_tick(&mut self) -> Result<(), String> { + if let Some((current_slot, current_interval)) = self.get_current_interval() { + debug!( + slot = current_slot.as_u64(), + interval = current_interval, + "Processing interval tick" + ); + + self.tick_interval().await?; + + // Periodically purge stale orphan blocks (once per slot, at interval 0) + if current_interval == 0 { + self.orphan_queue.purge_stale(); + // Rolling retention: + // - compact blocks DB to ~1024-slot window + // - prune in-memory states to ~128-slot window + self.chain.prune_and_compact( + current_slot.as_u64(), + &mut self.last_db_compaction_slot, + )?; + + // Update network service with our latest status so it can reply to inbound status requests. + if let Err(e) = self + .network_send + .send(NetworkMessage::UpdateLocalStatus(self.current_status()?)) + .await + { + warn!("Failed to send local status update to network service: {}", e); + } + } + } + + Ok(()) + } + + /// Gets the current slot and interval number + /// + /// Returns `Some((slot, interval))` if we can determine the current interval, + /// or `None` if the slot clock cannot be read. 
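For reference, the interval arithmetic used by the timing helpers below can be checked in isolation. This sketch assumes one-second intervals and four intervals per slot; the real values come from `SECONDS_PER_INTERVAL` and `INTERVALS_PER_SLOT` in `lean_consensus`:

```rust
use std::time::Duration;

const SECONDS_PER_INTERVAL: u64 = 1; // assumed for the example
const INTERVALS_PER_SLOT: u64 = 4;

// Same nanosecond-precision division the helpers below use.
fn interval_of(time_since_slot_start: Duration) -> u64 {
    let interval = Duration::from_secs(SECONDS_PER_INTERVAL);
    ((time_since_slot_start.as_nanos() / interval.as_nanos()) as u64) % INTERVALS_PER_SLOT
}

fn main() {
    assert_eq!(interval_of(Duration::from_millis(0)), 0);
    assert_eq!(interval_of(Duration::from_millis(999)), 0); // still interval 0
    assert_eq!(interval_of(Duration::from_millis(1000)), 1);
    assert_eq!(interval_of(Duration::from_millis(3999)), 3); // last interval of the slot
}
```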
+ fn get_current_interval(&self) -> Option<(Slot, u64)> { + let current_slot = self.slot_clock.now()?; + let now = self.slot_clock.now_duration()?; + let slot_start = self.slot_clock.start_of(current_slot)?; + + // Calculate time since slot start + let time_since_slot_start = now.checked_sub(slot_start)?; + + // Calculate current interval (0-3) with sub-second precision + let interval_duration = std::time::Duration::from_secs(SECONDS_PER_INTERVAL); + let current_interval = + (time_since_slot_start.as_nanos() / interval_duration.as_nanos()) as u64 + % INTERVALS_PER_SLOT; + + Some((current_slot, current_interval)) + } + + /// Log the current chain status (similar to other clients' CHAIN STATUS) + fn log_chain_status(&self, current_slot: Slot) -> Result<(), String> { + // Get head root, return early if not available + let Some(head_root) = self.chain.fetch_head_root()? else { + debug!("Head root not available for chain status logging"); + return Ok(()); + }; + + // Get head block, return early if not available + let Some(head_block) = self.chain.fetch_block(head_root)? else { + debug!("Head block not found for chain status logging"); + return Ok(()); + }; + + // Get head state, return early if not available + let Some(state) = self.chain.get_head_state() else { + debug!("Head state not available for chain status logging"); + return Ok(()); + }; + + let justified = &state.latest_justified; + let finalized = &state.latest_finalized; + + // Update chain status metrics + if let Ok(gauge) = &*metrics::LEAN_HEAD_SLOT { + gauge.set(head_block.slot.0 as i64); + } + if let Ok(gauge) = &*metrics::LEAN_LATEST_JUSTIFIED_SLOT { + gauge.set(justified.slot.0 as i64); + } + if let Ok(gauge) = &*metrics::LEAN_LATEST_FINALIZED_SLOT { + gauge.set(finalized.slot.0 as i64); + } + if let Ok(safe_root) = self.chain.fetch_safe_target() + && let Some(root) = safe_root + && let Ok(Some(block)) = self.chain.fetch_block(root) + && let Ok(gauge) = &*metrics::LEAN_LATEST_SAFE_SLOT + { + gauge.set(block.slot.0 as i64); + } + if let Ok(gauge) = &*metrics::LEAN_VALIDATORS_COUNT { + gauge.set(state.validators.len() as i64); + } + + + // Compute a signed "current slot" like Lighthouse BN output. + // If we're before genesis, show negative slots until genesis. 
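The pre-genesis display logic that follows can be worked through with concrete numbers. A hedged sketch assuming 4-second slots (`SECONDS_PER_SLOT` in the real code); `signed_slot` is an illustrative helper, not part of this diff:

```rust
fn signed_slot(now_secs: f64, genesis_secs: f64, seconds_per_slot: f64, current_slot: u64) -> i64 {
    if now_secs < genesis_secs {
        // Round up so that even 0.1s before genesis still shows as slot -1.
        -(((genesis_secs - now_secs) / seconds_per_slot).ceil() as i64)
    } else {
        current_slot as i64
    }
}

fn main() {
    assert_eq!(signed_slot(92.0, 100.0, 4.0, 0), -2); // 8s before genesis => -2
    assert_eq!(signed_slot(99.9, 100.0, 4.0, 0), -1); // just before genesis => -1
    assert_eq!(signed_slot(105.0, 100.0, 4.0, 1), 1); // after genesis: plain slot
}
```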
+ let signed_current_slot: i64 = if let Some(now) = self.slot_clock.now_duration() + && let Some(genesis) = self.slot_clock.start_of(Slot::new(0)) + { + if now < genesis { + let diff = genesis - now; + let secs = diff.as_secs_f64(); + let slots_until = (secs / SECONDS_PER_SLOT as f64).ceil() as i64; + -slots_until + } else { + current_slot.as_u64() as i64 + } + } else { + current_slot.as_u64() as i64 + }; + + let head_slot = head_block.slot.0 as i64; + let behind = (signed_current_slot - head_slot).max(0); + let timely = if behind <= 1 { "YES" } else { "NO" }; + + let peers = self.connected_peers.len(); + + let status = format!( + "\n+===============================================================+\n\ + CHAIN STATUS: Current Slot: {current_slot} | Head Slot: {head_slot_u64} | Behind: {behind}\n\ ++---------------------------------------------------------------+\n\ + Connected Peers: {peers}\n\ ++---------------------------------------------------------------+\n\ + Head Block Root: {head_root}\n\ + Parent Block Root: {parent_root}\n\ + State Root: {state_root}\n\ + Timely: {timely}\n\ ++---------------------------------------------------------------+\n\ + Latest Justified: Slot{justified_slot:>7} | Root: {justified_root}\n\ + Latest Finalized: Slot{finalized_slot:>7} | Root: {finalized_root}\n\ ++===============================================================+\n", + current_slot = signed_current_slot, + head_slot_u64 = head_block.slot.0, + behind = behind, + peers = peers, + head_root = format!("{:?}", head_root), + parent_root = format!("{:?}", head_block.parent_root), + state_root = format!("{:?}", state.latest_block_header.state_root), + timely = timely, + justified_slot = justified.slot.0, + justified_root = format!("{:?}", justified.root), + finalized_slot = finalized.slot.0, + finalized_root = format!("{:?}", finalized.root), + ); + + info!("{status}"); + + Ok(()) + } +} + +impl> ValidatorService { + /// Processes a single interval tick + /// + /// Implements the `tick_interval()` logic from the spec: + /// - Interval 0: Accept new attestations if proposal exists, update fork choice + /// - Interval 1: Validators create and gossip attestations + /// - Interval 2: Update safe target with 2/3+ majority + /// - Interval 3: Accept accumulated attestations (new → known), update fork choice + pub async fn tick_interval(&mut self) -> Result<(), String> { + let current_slot = self + .slot_clock + .now() + .ok_or_else(|| "Unable to determine current slot".to_string())?; + let now = self + .slot_clock + .now_duration() + .ok_or_else(|| "Unable to determine current time".to_string())?; + let slot_u64 = current_slot.as_u64(); + let slot_start = self + .slot_clock + .start_of(current_slot) + .ok_or_else(|| format!("Unable to determine start of slot {}", slot_u64))?; + + // Calculate current interval within slot (0-3) with sub-second precision + let time_since_slot_start = now + .checked_sub(slot_start) + .ok_or_else(|| "Current time is before slot start".to_string())?; + let interval_duration = std::time::Duration::from_secs(SECONDS_PER_INTERVAL); + let interval = + (time_since_slot_start.as_nanos() / interval_duration.as_nanos()) as u64 + % INTERVALS_PER_SLOT; + + debug!(slot = slot_u64, interval, "Processing interval tick"); + + // Log chain status every slot (every 4 intervals) + if interval == 3 { + self.log_chain_status(current_slot)?; + } + self.status_interval_counter += 1; + + // Handle each interval + match interval { + 0 => { + // Interval 0: Produce block if this validator is proposer, 
then accept attestations if proposal exists + if let Err(e) = self.perform_proposal_duties(current_slot).await { + warn!(error = %e, "Failed to perform proposal duties"); + } + + if self.has_proposal_for_slot(current_slot)? { + info!(slot = slot_u64, "Interval 0: Accepting new attestations"); + self.chain.promote_new_attestations()?; + self.update_fork_choice_head().await?; + } else { + debug!( + slot = slot_u64, + "Interval 0: No proposal, skipping attestation acceptance" + ); + } + } + 1 => { + // Interval 1: Validators create and gossip attestations + debug!(slot = slot_u64, "Interval 1: Validator attesting"); + if let Err(e) = self.perform_attestation_duties(slot_u64).await { + warn!(error = %e, "Failed to perform attestation duties"); + } + + // Log chain status at interval 1 + if let Some(state) = self.chain.get_head_state() { + info!( + current_slot = slot_u64, + head_slot = state.slot.0, + justified_slot = state.latest_justified.slot.0, + justified_root = ?state.latest_justified.root, + finalized_slot = state.latest_finalized.slot.0, + finalized_root = ?state.latest_finalized.root, + "CHAIN STATUS | Slot {} | Justified: {} | Finalized: {}", + slot_u64, + state.latest_justified.slot.0, + state.latest_finalized.slot.0 + ); + } + } + 2 => { + // Interval 2: Update safe target with 2/3+ majority + debug!(slot = slot_u64, "Interval 2: Updating safe target"); + self.update_safe_target().await?; + } + 3 => { + // Interval 3: Accept accumulated attestations + info!( + slot = slot_u64, + "Interval 3: Accepting accumulated attestations" + ); + self.chain.promote_new_attestations()?; + self.update_fork_choice_head().await?; + } + _ => { + warn!(interval, "Unexpected interval value (should be 0-3)"); + } + } + + Ok(()) + } + + /// Checks if there's a proposal (block) for the given slot + fn has_proposal_for_slot(&self, slot: Slot) -> Result<bool, String> { + // Load all blocks and check if any match this slot + let blocks = self.chain.load_all_blocks()?; + let slot_u64 = slot.as_u64(); + // Compare using the u64 value since block.slot is lean_consensus::Slot(u64) + Ok(blocks.values().any(|block| block.slot.0 == slot_u64)) + } + + /// Updates the fork choice head after accepting new attestations + async fn update_fork_choice_head(&mut self) -> Result<(), String> { + // Get current head state from cache + let state = self + .chain + .get_head_state() + .ok_or_else(|| "No lean_state available for fork choice".to_string())?; + + // Update head using fork choice algorithm + let new_head = self.chain.update_head(&state)?; + debug!( + ?new_head, + "Updated fork choice head after attestation promotion" + ); + + Ok(()) + } + + /// Updates the safe target checkpoint + /// + /// Computes the target that has sufficient (2/3+ majority) attestation support. + /// The safe target represents a block with enough attestation weight to be + /// considered "safe" for validators to attest to. + /// + /// The safe target is calculated using fork choice with a minimum score threshold + /// of 2/3 of validators, then persisted to the database.
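Before the implementation, the ceiling-division threshold is easy to sanity-check in isolation; `threshold_2_3` here is a free-standing copy of the expression used below:

```rust
// 2/3-majority threshold with ceiling division, as in update_safe_target.
fn threshold_2_3(num_validators: usize) -> usize {
    (num_validators * 2).div_ceil(3)
}

fn main() {
    assert_eq!(threshold_2_3(3), 2); // exactly 2 of 3
    assert_eq!(threshold_2_3(4), 3); // ceil(8/3) = 3, strictly more than 2/3
    assert_eq!(threshold_2_3(100), 67); // ceil(200/3) = 67
}
```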
+ async fn update_safe_target(&mut self) -> Result<(), String> { + // Load all blocks and attestations + let all_blocks = self.chain.load_all_blocks()?; + let all_attestations = self.chain.load_known_attestations()?; + + // Get current head state from cache for validator count + let state = self + .chain + .get_head_state() + .ok_or_else(|| "No lean_state available for safe target calculation".to_string())?; + + let num_validators = state.validators.len(); + + // Calculate 2/3 majority threshold (ceiling division) + let threshold_2_3 = (num_validators * 2).div_ceil(3); + // Find head with minimum attestation threshold + let latest_justified = &state.latest_justified; + let safe_target = get_fork_choice_head( + &all_blocks, + latest_justified.root, + &all_attestations, + threshold_2_3, + ); + + // Persist safe target to database + self.chain.save_safe_target(safe_target)?; + + debug!( + ?safe_target, + threshold_2_3, + "Found latest justified and safe target" + ); + + Ok(()) + } + /// Handles network messages received from the wire + async fn handle_network_message( + &mut self, + network_msg: NetworkMessage, + ) -> Result<(), String> { + match network_msg { + NetworkMessage::Attestation(peer_id, signed_attestation) => { + // Network gossip attestations are processed as "new" (pending) + // Verify the signature before processing + self.verify_and_handle_attestation(peer_id, signed_attestation, false) + .await + } + NetworkMessage::Block(peer_id, block) => self.handle_block(peer_id, block).await, + NetworkMessage::PeerConnected(peer_id) => { + self.connected_peers.insert(peer_id); + self.handle_peer_connected(peer_id).await + } + NetworkMessage::PeerDisconnected(peer_id) => { + self.connected_peers.remove(&peer_id); + Ok(()) + } + NetworkMessage::Status(peer_id, status) => { + self.handle_status(peer_id, status).await + } + _ => Ok(()), // Ignore other messages (like SendRequest if it loops back) + } + } + + /// Called when a peer connects. Triggers a status request so we can backfill after downtime. + async fn handle_peer_connected(&mut self, peer_id: PeerId) -> Result<(), String> { + let status = self.current_status()?; + self.network_send + .send(NetworkMessage::SendStatusRequest { peer_id, status }) + .await + .map_err(|e| format!("Failed to send status request: {}", e))?; + Ok(()) + } + + /// Handle a status response from a peer. + /// + /// If the peer is ahead of us, request its head block by root. Our existing orphan queue + /// + parent-root requests will walk backwards as needed. + async fn handle_status(&mut self, peer_id: PeerId, status: StatusMessage) -> Result<(), String> { + let local = self.current_status()?; + if status.head_root != Hash256::ZERO && status.head_slot > local.head_slot { + debug!( + peer = ?peer_id, + peer_head_slot = status.head_slot, + local_head_slot = local.head_slot, + peer_head_root = ?status.head_root, + "Peer ahead, requesting head block by root for backfill" + ); + + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: RPCRequest::BlocksByRoot(BlocksByRootRequest { + block_roots: vec![status.head_root], + }), + }) + .await + .map_err(|e| format!("Failed to request peer head block: {}", e))?; + } + Ok(()) + } + + fn current_status(&self) -> Result { + let head_root = self + .chain + .fetch_head_root()? + .unwrap_or(Hash256::ZERO); + + let head_slot = self + .chain + .fetch_block(head_root)? 
+ .map(|b| b.slot.0) + .unwrap_or(0); + + let (finalized_slot, finalized_root) = self + .chain + .fetch_state(&head_root) + .map(|s| (s.latest_finalized.slot.0, s.latest_finalized.root)) + .unwrap_or((0, Hash256::ZERO)); + + Ok(StatusMessage { + finalized_root, + finalized_slot, + head_root, + head_slot, + }) + } + + /// Verifies the attestation signature and then handles it + /// + /// # Arguments + /// * `signed_attestation` - The signed attestation to verify and process + /// * `is_from_block` - true if the attestation came from a block body, false if from network gossip + async fn verify_and_handle_attestation( + &mut self, + peer_id: Option<PeerId>, + signed_attestation: Arc<SignedAttestation>, + is_from_block: bool, + ) -> Result<(), String> { + // Verify the attestation signature + let epoch = signed_attestation.message.attestation_data.slot.0 / 32; + let _message_hash = signed_attestation.message.tree_hash_root(); + let validator_id = signed_attestation.message.validator_id; + + // NOTE: Signature verification will be implemented when signature verification functions + // become available from the leansig library. For now, we accept all attestations. + let _timer = metrics::start_timer(&metrics::LEAN_PQ_SIGNATURE_ATTESTATION_VERIFICATION_TIME); + let _validation_timer = metrics::start_timer(&metrics::LEAN_ATTESTATION_VALIDATION_TIME); + + // [PLACEHOLDER] verification logic: all attestations are currently treated as valid. + if let Ok(counter) = &*metrics::LEAN_ATTESTATIONS_VALID_TOTAL { + counter.inc(); + } + + drop(_validation_timer); + drop(_timer); + + debug!(validator_id, epoch, "Attestation received and accepted"); + + // Process the attestation + self.handle_attestation(peer_id, signed_attestation, is_from_block) + .await + } + + /// Handles an attestation received from the network or block + /// + /// Implements attestation pipelining according to the spec: + /// - Network gossip attestations → saved as "new" (pending) + /// - Block body attestations → saved as "known" (immediately contribute to fork choice) + /// + /// # Arguments + /// * `signed_attestation` - The signed attestation to process + /// * `is_from_block` - true if the attestation came from a block body, false if from network gossip + async fn handle_attestation( + &mut self, + peer_id: Option<PeerId>, + signed_attestation: Arc<SignedAttestation>, + is_from_block: bool, + ) -> Result<(), String> { + let attestation = &signed_attestation.message; + let attestation_slot = attestation.attestation_data.slot.0; + let validator_id = attestation.validator_id; + + info!( + slot = attestation_slot, + validator_id, is_from_block, "Processing lean attestation" + ); + + // NOTE: Attestations do NOT modify state outside of block processing. + // States are immutable snapshots at each block root. Only block.body.attestations + // modify state during state_transition. Gossip attestations only affect fork choice. + // + // Attestation processing: + // 1. Block body attestations → processed during state_transition in handle_block + // 2. Gossip attestations → saved for fork choice only, don't modify state + // + // This prevents state corruption where gossip attestations would overwrite + // block post-states, causing parent root mismatches.
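Both branches below apply the same "newer slot wins" replacement rule to their respective attestation maps. A hedged, self-contained sketch of that rule — `supersede` and the plain `HashMap` are illustrative stand-ins for the chain's persisted new/known maps:

```rust
use std::collections::HashMap;

type ValidatorId = u64;
type SlotNum = u64;

/// Record `slot` for `validator`, keeping only the newest attestation per validator.
/// Returns true if the map was updated.
fn supersede(map: &mut HashMap<ValidatorId, SlotNum>, validator: ValidatorId, slot: SlotNum) -> bool {
    match map.get(&validator) {
        // Existing entry is newer or equal: keep it, skip the incoming attestation.
        Some(existing) if *existing >= slot => false,
        _ => {
            map.insert(validator, slot);
            true
        }
    }
}

fn main() {
    let mut known = HashMap::new();
    assert!(supersede(&mut known, 7, 10)); // first attestation accepted
    assert!(!supersede(&mut known, 7, 10)); // equal slot rejected
    assert!(!supersede(&mut known, 7, 9)); // older rejected
    assert!(supersede(&mut known, 7, 11)); // newer replaces
}
```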
+ + // Use the SignedAttestation received from the network (already has signature) + // No need to create a new signature - the attestation was signed by the original validator + + // Implement attestation pipelining: + // - Block body attestations → known (immediately contribute to fork choice) + // - Network gossip attestations → new (pending, wait for interval tick) + if is_from_block { + // On-chain attestation: process immediately as "known" + // Check if this supersedes existing known attestation + if let Ok(existing_known) = self.chain.load_known_attestations() + && let Some(existing) = existing_known.get(&validator_id) + { + // Only replace if new attestation is from a later slot + if existing.message.attestation_data.slot.0 >= attestation_slot { + debug!( + validator_id, + existing_slot = existing.message.attestation_data.slot.0, + new_slot = attestation_slot, + "Skipping attestation - existing known attestation is newer or equal" + ); + return Ok(()); + } + } + + // Save as known attestation (contributes to fork choice immediately) + self.chain + .save_known_attestation(validator_id, &signed_attestation)?; + + let head_root = attestation.attestation_data.head.root; + if let Err(e) = self.chain.apply_attestation_weight(validator_id, head_root) { + warn!( + validator_id, + ?head_root, + error = %e, + "Failed to update fork choice weight for known attestation" + ); + + // If we don't have the head block yet (common after downtime), request it. + if let Some(peer) = peer_id { + let _ = self.network_send.send(NetworkMessage::SendRequest { + peer_id: peer, + request: RPCRequest::BlocksByRoot(BlocksByRootRequest { + block_roots: vec![head_root], + }), + }).await; + } + } + + // Remove from new attestations if this supersedes it + // Check if there's a new attestation that should be removed + if let Ok(new_attestations) = self.chain.load_new_attestations() + && let Some(new_att) = new_attestations.get(&validator_id) + && new_att.message.attestation_data.slot.0 <= attestation_slot + { + // Remove the new attestation as it's superseded by the known one + self.chain.delete_new_attestation(validator_id)?; + } + + debug!( + validator_id, + slot = attestation_slot, + "Saved attestation as known (from block body)" + ); + } else { + // Network gossip attestation: save as "new" (pending) + // Check if this supersedes existing new attestation + if let Ok(existing_new) = self.chain.load_new_attestations() + && let Some(existing) = existing_new.get(&validator_id) + { + // Only replace if new attestation is from a later slot + if existing.message.attestation_data.slot.0 >= attestation_slot { + debug!( + validator_id, + existing_slot = existing.message.attestation_data.slot.0, + new_slot = attestation_slot, + "Skipping attestation - existing new attestation is newer or equal" + ); + return Ok(()); + } + } + + // Save as new attestation (does not contribute to fork choice yet) + self.chain + .save_new_attestation(validator_id, &signed_attestation)?; + + debug!( + validator_id, + slot = attestation_slot, + "Saved attestation as new (from network gossip)" + ); + } + + debug!( + validator_id, + slot = attestation_slot, + is_from_block, + "Attestation saved for fork choice (state unchanged)" + ); + + Ok(()) + } + + /// Handles a block received from the network (implements on_block from spec) + /// + /// This method integrates a block into the forkchoice store by: + /// 1. Validating the block's parent exists + /// 2. Computing the post-state via the state transition function + /// 3. 
Processing attestations included in the block body (on-chain) + /// 4. Updating the forkchoice head + /// 5. Processing the proposer's attestation (as if gossiped) + async fn handle_block( + &mut self, + peer_id: Option, + signed_block: Arc>, + ) -> Result<(), String> { + // Unpack block components + let block = &signed_block.message.block; + let proposer_attestation = &signed_block.message.proposer_attestation; + let block_root = block.tree_hash_root(); + + // Track total time spent in the on_block-style handler, matching the + // Zeam / beacon-node `chain_onblock_duration_seconds` metric name. + let _onblock_timer = metrics::start_timer(&metrics::CHAIN_ONBLOCK_DURATION_SECONDS); + + info!( + slot = block.slot.0, + proposer_index = block.proposer_index, + ?block_root, + "Processing lean block from network (on_block)" + ); + + // Skip duplicate blocks (idempotent operation) + if self.chain.block_exists(block_root)? { + debug!(?block_root, "Block already processed, skipping"); + return Ok(()); + } + + // Fetch parent state from cache + // + // The parent state must exist before processing this block. + // If missing, queue the block and request the parent from peer. + let Some(parent_state) = self.chain.fetch_state(&block.parent_root) else { + let parent_root = block.parent_root; + + info!( + slot = block.slot.0, + ?parent_root, + peer = ?peer_id, + orphan_queue_size = self.orphan_queue.len(), + "Parent state missing, queuing block and requesting parent" + ); + + // Queue this block for later processing + self.orphan_queue.insert(parent_root, signed_block.clone(), peer_id); + + // Request the missing parent block from peer if available + if let Some(peer) = peer_id { + let request = RPCRequest::BlocksByRoot(BlocksByRootRequest { + block_roots: vec![parent_root], + }); + + if let Err(e) = self.network_send.send(NetworkMessage::SendRequest { + peer_id: peer, + request, + }).await { + error!("Failed to send parent block request: {}", e); + } + } + + return Ok(()); + }; + + debug!( + parent_slot = parent_state.slot.0, + parent_justified = parent_state.latest_justified.slot.0, + "Fetched parent state from cache for block processing" + ); + + // Clone parent state to create post-state + let mut post_state = LeanChain::::clone_state(&parent_state)?; + + debug!("Cloned parent state for state transition"); + + // Execute state transition function with metrics + let _block_processing_timer = + metrics::start_timer(&metrics::BLOCK_PROCESSING_DURATION_SECONDS); + let _state_timer = metrics::start_timer(&metrics::LEAN_STATE_TRANSITION_TIME); + + let valid_signatures = true; + + // Verify signatures if enabled + if valid_signatures { + signed_block.verify_signatures(&post_state)?; + } + + let block = &signed_block.message.block; + + // Process slots (catch up) + if post_state.slot < block.slot { + let slots_processed = block.slot.0.saturating_sub(post_state.slot.0); + if let Ok(counter) = &*metrics::LEAN_STATE_TRANSITION_SLOTS_PROCESSED_TOTAL { + counter.inc_by(slots_processed); + } + + let _slots_timer = metrics::start_timer(&metrics::LEAN_STATE_TRANSITION_SLOTS_PROCESSING_TIME); + post_state.process_slots(block.slot)?; + drop(_slots_timer); + } + + // Process block + if let Ok(counter) = &*metrics::LEAN_STATE_TRANSITION_ATTESTATIONS_PROCESSED_TOTAL { + counter.inc_by(block.body.attestations.len() as u64); + } + let _block_timer = metrics::start_timer(&metrics::LEAN_STATE_TRANSITION_BLOCK_PROCESSING_TIME); + post_state.process_block(block)?; + drop(_block_timer); + + // Verify state root + let 
computed_state_root = post_state.tree_hash_root(); + if block.state_root != computed_state_root { + return Err(format!( + "Invalid block state root. Expected: {:?}, got: {:?}", + computed_state_root, block.state_root + )); + } + + drop(_block_processing_timer); + drop(_state_timer); + + debug!( + post_slot = post_state.slot.0, + post_justified = post_state.latest_justified.slot.0, + post_finalized = post_state.latest_finalized.slot.0, + "State transition completed" + ); + + // Save block and post-state to cache and database + // Post-state is keyed by block_root, representing the state after processing this block + // IMPORTANT: Update latest_block_header.state_root to the block's state_root + // so that when this state is loaded later, the header's tree_hash will match the block_root + post_state.latest_block_header.state_root = block.state_root; + + self.chain.save_block(block_root, block)?; + self.chain.save_state(block_root, &post_state)?; + + debug!(?block_root, "Block and state saved to cache and database"); + + // Ensure parent chain exists in proto array and register the new block. + let _fc_timer = metrics::start_timer(&metrics::LEAN_FORK_CHOICE_BLOCK_PROCESSING_TIME); + self.chain + .register_block(block_root, block.slot, block.parent_root)?; + drop(_fc_timer); + + // Process block body attestations + // + // Iterate over attestations in the block body. + // These are historical attestations from other validators included by the proposer. + // They are processed immediately as "known" attestations (is_from_block=true). + for attestation in block.body.attestations.iter() { + let signed_attestation = Arc::new(SignedAttestation { + message: attestation.clone(), + signature: lean_crypto::Signature::zero(), + }); + + if let Err(e) = self + .verify_and_handle_attestation(peer_id, signed_attestation, true) + .await + { + warn!( + validator_id = attestation.validator_id, + error = %e, + "Failed to process block body attestation" + ); + // Continue processing other attestations even if one fails + } + } + + debug!( + attestation_count = block.body.attestations.len(), + "Block body attestations processed" + ); + + // Update forkchoice head + // + // IMPORTANT: This must happen BEFORE processing proposer attestation + // to prevent the proposer from gaining circular weight advantage. + + // Run fork choice algorithm + let new_head = self.chain.update_head(&post_state)?; + + debug!(?new_head, "Fork choice updated head"); + + // Process proposer attestation as if received via gossip + // + // The proposer casts their attestation in interval 1, after block + // proposal. This attestation should: + // 1. NOT affect this block's fork choice position (processed as "new") + // 2. Be available for inclusion in future blocks + // 3. 
Influence fork choice only after interval 3 (end of slot) + let signed_proposer_attestation = Arc::new(SignedAttestation { + message: proposer_attestation.clone(), + signature: lean_crypto::Signature::zero(), + }); + + if let Err(e) = self + .verify_and_handle_attestation(peer_id, signed_proposer_attestation, false) + .await + { + warn!( + validator_id = proposer_attestation.validator_id, + error = %e, + "Failed to process proposer attestation" + ); + } + + // Log chain status + info!( + slot = block.slot.0, + head_block_root = ?block_root, + parent_root = ?block.parent_root, + state_root = ?post_state.latest_block_header.state_root, + justified_slot = post_state.latest_justified.slot.0, + justified_root = ?post_state.latest_justified.root, + finalized_slot = post_state.latest_finalized.slot.0, + finalized_root = ?post_state.latest_finalized.root, + attestation_count = block.body.attestations.len(), + "CHAIN STATUS: Block processed | Slot {} | Justified: {} | Finalized: {}", + block.slot.0, + post_state.latest_justified.slot.0, + post_state.latest_finalized.slot.0 + ); + + // Process any orphan blocks that were waiting for this block + self.process_orphan_children(block_root).await; + + Ok(()) + } + + /// Processes orphan blocks that were waiting for the given parent block + /// + /// This is called after a block is successfully imported to check if any + /// orphaned blocks were waiting for it. Those blocks are then processed + /// recursively. + async fn process_orphan_children(&mut self, parent_root: Hash256) { + let orphans = self.orphan_queue.take_children(&parent_root); + + if orphans.is_empty() { + return; + } + + info!( + ?parent_root, + orphan_count = orphans.len(), + "Processing orphan blocks that were waiting for this parent" + ); + + for orphan in orphans { + let block_slot = orphan.block.message.block.slot.0; + let block_root = orphan.block.message.block.tree_hash_root(); + + debug!( + slot = block_slot, + ?block_root, + ?parent_root, + age_ms = orphan.received_at.elapsed().as_millis(), + "Processing orphan block" + ); + + // Use Box::pin to handle the recursive async call + if let Err(e) = Box::pin(self.handle_block(orphan.peer_id, orphan.block)).await { + warn!( + slot = block_slot, + ?block_root, + error = %e, + "Failed to process orphan block" + ); + } + } + } + + /// Performs proposal duties if this validator is the proposer for the slot + /// + /// Checks if this validator is the proposer for the given slot. If so, produces + /// a block and publishes it to the network. 
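As the comments below note, proposer selection is currently a plain round-robin over the validator set rather than a full proposer schedule. A minimal sketch of that rule (`is_proposer` is an illustrative helper, not an API in this diff):

```rust
// Round-robin proposer selection: validator `slot % n` proposes at `slot`.
fn is_proposer(slot: u64, num_validators: u64, my_index: u64) -> bool {
    num_validators != 0 && slot % num_validators == my_index
}

fn main() {
    // With 4 validators, validator 1 proposes at slots 1, 5, 9, ...
    assert!(is_proposer(5, 4, 1));
    assert!(!is_proposer(6, 4, 1));
    assert!(!is_proposer(5, 0, 1)); // empty validator set never proposes
}
```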
+ async fn perform_proposal_duties(&mut self, slot: Slot) -> Result<(), String> { + let slot_u64 = slot.as_u64(); + + // Fetch current head state from cache + let state = self + .chain + .get_head_state() + .ok_or_else(|| "No lean_state available to check proposal duties".to_string())?; + + // For now, just try to produce a block if we're one of the validators + // In a full implementation, we would check the proposer schedule + let num_validators = state.validators.len() as u64; + if num_validators == 0 { + return Err("No validators in state".to_string()); + } + + let expected_proposer = slot_u64 % num_validators; + if expected_proposer != self.validator_index { + debug!( + slot = slot_u64, + expected_proposer, + validator_id = self.validator_index, + "Not proposer for this slot" + ); + return Ok(()); + } + + debug!( + slot = slot_u64, + proposer = self.validator_index, + num_validators, + "Performing proposal duties" + ); + + if let Err(e) = self.produce_block(slot).await { + debug!( + slot = slot_u64, + error = %e, + "Failed to produce block" + ); + } + + Ok(()) + } + + /// Produces a block for the given slot + /// + /// Creates a simple block with: + /// 1. Current head as parent + /// 2. State root computed from current state + /// 3. Signs and publishes the block to the network + async fn produce_block(&mut self, slot: Slot) -> Result<(), String> { + let slot_u64 = slot.as_u64(); + debug!(slot = slot_u64, "=== BLOCK PRODUCTION START ==="); + + // Fetch current head state from cache + let parent_state = self + .chain + .get_head_state() + .ok_or_else(|| "No lean_state available to produce block".to_string())?; + + let num_validators = parent_state.validators.len() as u64; + + if num_validators == 0 { + return Err("No validators available in state".to_string()); + } + + let expected_proposer = slot_u64 % num_validators; + + if self.validator_index != expected_proposer { + return Err(format!( + "Validator {} is proposer for slot {} but this node controls validator {}", + expected_proposer, slot_u64, self.validator_index + )); + } + + debug!( + slot = slot_u64, + "This validator IS the proposer, proceeding" + ); + + // Clone parent state to compute post-state + // Advances state before computing state_root + let mut post_state = LeanChain::::clone_state(&parent_state)?; + + // Create the block with LeanSlot type + let lean_slot = LeanSlot(slot_u64); + let validator_id = self.validator_index; + + // Load known attestations from store and include them in block body + let known_attestations = match self.chain.load_known_attestations() { + Ok(attestations) => attestations, + Err(e) => { + warn!("Failed to load known attestations: {}", e); + Default::default() + } + }; + + // Convert HashMap to VariableList of Attestations (extract just the message) + let mut attestation_list = VariableList::default(); + for (_, signed_att) in known_attestations.iter() { + attestation_list + .push(signed_att.message.clone()) + .map_err(|e| format!("Failed to add attestation to block body: {:?}", e))?; + } + + let body = LeanBlockBody { + attestations: attestation_list, + }; + + debug!(slot = slot_u64, "Fetching parent root from fork choice"); + // The parent root is the root of the block that this state came from. + // This is the head of the fork choice tree (the most recent block we've processed). 
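The statement that follows falls back to hashing the state's latest block header when fork choice has no head yet (e.g. fresh from genesis). A tiny sketch of that `unwrap_or_else` fallback, with `Root`, `fetch_head_root`, and `header_root` as stand-ins:

```rust
type Root = [u8; 32];

fn fetch_head_root() -> Option<Root> {
    None // pretend fork choice is empty, as at genesis
}

fn header_root() -> Root {
    [0xaa; 32] // stand-in for parent_state.latest_block_header.tree_hash_root()
}

fn main() {
    // Prefer the fork choice head; otherwise derive the parent from the header.
    let parent_root = fetch_head_root().unwrap_or_else(header_root);
    assert_eq!(parent_root, [0xaa; 32]);
}
```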
+ let parent_root = self.chain.store().fetch_head_root()?.unwrap_or_else(|| { + // Fallback: reconstruct from genesis state + debug!( + slot = slot_u64, + "No fork choice head found, using genesis block header hash" + ); + let parent_block_header = &parent_state.latest_block_header; + parent_block_header.tree_hash_root() + }); + + debug!( + slot = slot_u64, + ?parent_root, + parent_state_slot = parent_state.slot.0, + "Block parent root set from fork choice head" + ); + + let block = LeanBlock { + slot: lean_slot, + proposer_index: validator_id, + parent_root, + state_root: Hash256::ZERO, + body, + }; + + debug!( + slot = slot_u64, + "Initial block created with parent_root. Starting state transitions" + ); + + // Advance post-state to block's slot + // This must happen BEFORE computing state_root + debug!( + slot = slot_u64, + pre_slot = post_state.slot.0, + target_slot = lean_slot.0, + "Processing empty slots" + ); + post_state.process_slots(lean_slot)?; + + // Process the block header to update latest_block_header in state + // This is critical! Without this, state.latest_block_header stays pointing to parent, + // causing all subsequent blocks to reference the wrong parent root + debug!(slot = slot_u64, "Processing block header"); + post_state.process_block(&block)?; + + // Compute state root from post-state + let computed_state_root = post_state.tree_hash_root(); + + // Create final block with state root + let final_block = LeanBlock { + state_root: computed_state_root, + ..block + }; + + let block_root = final_block.tree_hash_root(); + + // Create proposer attestation using post_state (after slot processing) + let proposer_attestation = Attestation { + validator_id, + attestation_data: AttestationData { + slot: lean_slot, + head: Checkpoint { + root: block_root, + slot: lean_slot, + }, + source: post_state.latest_justified.clone(), + target: post_state.latest_justified.clone(), + }, + }; + + // For now, create an empty signature list + // In a full implementation, we would sign the block and add signatures + let signatures = { VariableList::::empty() }; + + // Create signed block + let signed_block = Arc::new(SignedLeanBlockWithAttestation { + message: LeanBlockWithAttestation { + block: final_block, + proposer_attestation, + }, + signature: signatures, + }); + + info!( + slot = slot_u64, + proposer_index = validator_id, + ?block_root, + state_root = ?computed_state_root, + parent_root = ?parent_root, + "Produced block" + ); + + // Cache post-state and save block + // This is critical - we must cache the post-state so when we receive this block + // back from the network or process it later, we have the correct state + // IMPORTANT: Update latest_block_header.state_root to the computed state_root + // so that when this state is loaded later, the header's tree_hash will match the block_root + post_state.latest_block_header.state_root = computed_state_root; + + self.chain.save_state(block_root, &post_state)?; + debug!( + slot = slot_u64, + ?block_root, + "Post-state saved, now saving block" + ); + self.chain + .save_block(block_root, &signed_block.message.block)?; + + debug!( + ?block_root, + post_state_slot = post_state.slot.0, + "Cached post-state and saved block after production" + ); + + // Register the locally-produced block in proto-array so it becomes the new head + // This is critical! 
Without this, subsequent blocks will use the wrong parent_root + self.chain.register_block( + block_root, + signed_block.message.block.slot, + signed_block.message.block.parent_root, + )?; + + debug!( + ?block_root, + "Registered locally-produced block in proto-array" + ); + + // Publish block to network + debug!(slot = slot_u64, ?block_root, "Sending block to network"); + if let Err(e) = self + .network_send + .send(NetworkMessage::Block(None, signed_block)) + .await + { + error!( + slot = slot_u64, + ?block_root, + "Failed to send block to network: {}", + e + ); + return Err(format!("Failed to send block to network: {}", e)); + } + + debug!( + slot = slot_u64, + ?block_root, + "=== BLOCK PRODUCTION COMPLETE ===" + ); + Ok(()) + } + + /// Performs attestation duties for validators assigned to this node. + /// + /// The current lean client maps `--node-id` to a contiguous range of validator + /// indices. Only those validators are expected to produce attestations via this + /// service. + async fn perform_attestation_duties(&self, slot: u64) -> Result<(), String> { + debug!( + slot, + validator_id = self.validator_index, + "Performing attestation duties for assigned validator" + ); + + if let Err(e) = self.produce_attestation(slot, self.validator_index).await { + warn!( + slot, + validator_id = self.validator_index, + error = %e, + "Failed to produce attestation for validator" + ); + } + + Ok(()) + } + + /// Produces an attestation for the given slot and validator + /// + /// This method constructs an Attestation object according to the lean protocol + /// specification. The attestation represents the validator's view of the chain + /// state and their choice for the next justified checkpoint. + /// + /// Algorithm: + /// 1. Get the current head from forkchoice (or state if forkchoice not available) + /// 2. Calculate the appropriate attestation target using forkchoice state + /// 3. Use the latest justified checkpoint as the attestation source + /// 4. Construct and publish the complete Attestation object + async fn produce_attestation(&self, slot: u64, validator_id: u64) -> Result<(), String> { + debug!(slot, validator_id, "Producing attestation"); + + // Fetch current head state from cache + let state = self + .chain + .get_head_state() + .ok_or_else(|| "No lean_state available in cache to produce attestation".to_string())?; + + // Get head root from forkchoice (stored head) or fallback to state + let head_root = self + .chain + .fetch_head_root()? + .unwrap_or_else(|| state.latest_block_header.tree_hash_root()); + + // Get head block to determine head slot + let head_block = self + .chain + .fetch_block(head_root)? 
+ .ok_or_else(|| format!("Head block not found in database: {:?}", head_root))?; + + // Create head checkpoint + let head = Checkpoint { + slot: head_block.slot, + root: head_root, + }; + + // Get source checkpoint (latest justified) + let source = state.latest_justified.clone(); + + // Calculate attestation target using forkchoice state + // This implements the get_attestation_target logic from the spec + let target = self.get_attestation_target(&state, &head_root, &head_block)?; + + // Store checkpoint slots and roots for logging before moving values + let head_slot = head.slot.0; + let target_slot = target.slot.0; + let source_slot = source.slot.0; + let head_root_dbg = head.root; + let target_root_dbg = target.root; + let source_root_dbg = source.root; + + // Create attestation data + let attestation_data = AttestationData { + slot: LeanSlot(slot), + head, + source, + target, + }; + + // Create attestation + let attestation = Attestation { + validator_id, + attestation_data, + }; + + debug!( + slot, + validator_id, + ?head_root, + head_slot, + target_slot, + source_slot, + ?head_root_dbg, + ?target_root_dbg, + ?source_root_dbg, + latest_justified_slot = state.latest_justified.slot.0, + latest_justified_root = ?state.latest_justified.root, + historical_hashes_len = state.historical_block_hashes.len(), + "Produced attestation" + ); + + // Sign attestation with XMSS key + // NOTE: Zeam uses attestation.data.slot as the "epoch" parameter for XMSS signing, + // not the actual epoch (slot / 32). We must match this for signature compatibility. + let xmss_epoch = slot; + let signature = { + self.sign_attestation(validator_id, &attestation, xmss_epoch) + .map_err(|e| format!("Failed to sign attestation: {}", e))? + }; + + // Create signed attestation + let signed_attestation = Arc::new(SignedAttestation { + message: attestation, + signature, + }); + + // Save locally produced attestation as "known" so it gets included in our blocks + // This is critical for justification - without this, our own attestations + // would never be included in blocks we produce! + self.chain + .save_known_attestation(validator_id, &signed_attestation)?; + + debug!( + validator_id, + slot, + "Saved locally produced attestation as known" + ); + + // Publish signed attestation to network + if let Err(e) = self + .network_send + .send(NetworkMessage::Attestation(None, signed_attestation)) + .await + { + error!( + slot, + validator_id, "Failed to send attestation to network: {}", e + ); + return Err(format!("Failed to send attestation to network: {}", e)); + } + + Ok(()) + } + + /// Calculate target checkpoint for validator attestations + /// + /// Determines appropriate attestation target based on head, safe target, + /// and finalization constraints. The target selection algorithm balances + /// between advancing the chain head and maintaining safety guarantees. + /// + /// Attestation Target Algorithm: + /// 1. Start at Head: Begin with the current head block + /// 2. Walk Toward Safe: Move backward (up to JUSTIFICATION_LOOKBACK_SLOTS steps) + /// if safe target is newer + /// 3. Ensure Justifiable: Continue walking back until slot is justifiable + /// 4. 
Return Checkpoint: Create checkpoint from selected block + fn get_attestation_target( + &self, + state: &LeanState, + head_root: &Hash256, + head_block: &LeanBlock, + ) -> Result { + // Start from current head + let mut target_block_root = *head_root; + let mut target_block = head_block.clone(); + + // Get safe target from database (if available) + let safe_target_root = self.chain.fetch_safe_target()?; + let justification_lookback_slots = JUSTIFICATION_LOOKBACK_SLOTS; + + // Walk back toward safe target (up to JUSTIFICATION_LOOKBACK_SLOTS steps) + // if safe target exists and current target is NEWER than safe target + if let Some(safe_target) = safe_target_root { + // Check if current target is newer than safe target (meaning we should walk back toward safe) + if let Ok(Some(safe_target_block)) = self.chain.fetch_block(safe_target) + && target_block.slot > safe_target_block.slot + { + // Walk back toward safe target (up to JUSTIFICATION_LOOKBACK_SLOTS steps) + let mut steps = 0; + while steps < justification_lookback_slots + && target_block.slot > safe_target_block.slot + && target_block.parent_root != Hash256::ZERO + { + target_block_root = target_block.parent_root; + target_block = + self.chain.fetch_block(target_block_root)?.ok_or_else(|| { + format!( + "Parent block not found while walking toward safe target: {:?}", + target_block_root + ) + })?; + steps += 1; + } + } + } + + // Ensure target is in justifiable slot range + // Walk back until we find a slot that satisfies justifiability rules + // relative to the latest finalized checkpoint + let finalized_slot = state.latest_finalized.slot; + let mut steps = 0; + const MAX_STEPS: u64 = 100; // Safety limit to prevent infinite loops + + while steps < MAX_STEPS { + // Check if current target slot is justifiable + match target_block.slot.is_justifiable_after(finalized_slot) { + Ok(()) => { + // Slot is justifiable, use this as target + if steps > 0 { + debug!( + initial_head_slot = head_block.slot.0, + selected_target_slot = target_block.slot.0, + walked_back_steps = steps, + finalized_slot = finalized_slot.0, + "Target selection: walked back to find justifiable slot" + ); + } + break; + } + Err(e) => { + // Slot is not justifiable, walk back to parent + debug!( + current_slot = target_block.slot.0, + finalized_slot = finalized_slot.0, + steps = steps, + error = ?e, + "Target selection: slot not justifiable, walking back" + ); + + if target_block.parent_root == Hash256::ZERO { + // Reached genesis, use current block + debug!("Target selection: reached genesis block"); + break; + } + + // Update root to parent before fetching + target_block_root = target_block.parent_root; + + // Fetch parent block + target_block = self.chain.fetch_block(target_block_root)?.ok_or_else(|| { + format!( + "Parent block not found while walking back: {:?}", + target_block_root + ) + })?; + steps += 1; + } + } + } + + if steps >= MAX_STEPS { + return Err("Exceeded maximum steps while finding justifiable target".to_string()); + } + + // Create checkpoint from selected target block + // Use the root we've been tracking, which corresponds to target_block + Ok(Checkpoint { + root: target_block_root, + slot: target_block.slot, + }) + } +} diff --git a/lean_client/validator_service/src/metrics.rs b/lean_client/validator_service/src/metrics.rs new file mode 100644 index 00000000000..87d3632686b --- /dev/null +++ b/lean_client/validator_service/src/metrics.rs @@ -0,0 +1,169 @@ +pub use metrics::*; +use std::sync::LazyLock; + +/* Block Processing Metrics (shared names 
with beacon node / Zeam) */ +pub static BLOCK_PROCESSING_DURATION_SECONDS: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "block_processing_duration_seconds", + "Time taken to process a block in the state transition function.", + ) +}); + +pub static CHAIN_ONBLOCK_DURATION_SECONDS: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "chain_onblock_duration_seconds", + "Time taken to process a block in the chain's onBlock function.", + ) +}); + +pub static LEAN_PQ_SIGNATURE_ATTESTATION_SIGNING_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "lean_pq_signature_attestation_signing_time_seconds", + "Time taken to sign an attestation", + ) + }); + +pub static LEAN_PQ_SIGNATURE_ATTESTATION_VERIFICATION_TIME: LazyLock> = + LazyLock::new(|| { + try_create_histogram( + "lean_pq_signature_attestation_verification_time_seconds", + "Time taken to verify an attestation signature", + ) + }); + +/* Chain Status Metrics */ +pub static LEAN_HEAD_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("lean_head_slot", "Slot of the head block") +}); + +pub static LEAN_LATEST_JUSTIFIED_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("lean_latest_justified_slot", "Slot of the latest justified checkpoint") +}); + +pub static LEAN_LATEST_FINALIZED_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("lean_latest_finalized_slot", "Slot of the latest finalized checkpoint") +}); + +pub static LEAN_LATEST_SAFE_SLOT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("lean_latest_safe_slot", "Slot of the latest safe target checkpoint") +}); + + +pub static LEAN_VALIDATORS_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_gauge("lean_validators_count", "Total number of active validators") +}); + +/* Fork Choice Metrics */ +pub static LEAN_FORK_CHOICE_BLOCK_PROCESSING_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "lean_fork_choice_block_processing_time_seconds", + "Time taken to process a block in fork choice", + ) +}); + +/* Attestation Metrics */ +pub static LEAN_ATTESTATIONS_VALID_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "lean_attestations_valid_total", + "Total number of valid attestations processed", + ) +}); + +#[allow(dead_code)] +pub static LEAN_ATTESTATIONS_INVALID_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "lean_attestations_invalid_total", + "Total number of invalid attestations processed", + ) +}); + +pub static LEAN_ATTESTATION_VALIDATION_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "lean_attestation_validation_time_seconds", + "Time taken to validate an attestation", + ) +}); + +/* State Transition Metrics */ +pub static LEAN_STATE_TRANSITION_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "lean_state_transition_time_seconds", + "Total time taken for state transition", + ) +}); + +pub static LEAN_STATE_TRANSITION_BLOCK_PROCESSING_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "lean_state_transition_block_processing_time_seconds", + "Time taken to process block in state transition", + ) +}); + +pub static LEAN_STATE_TRANSITION_SLOTS_PROCESSED_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "lean_state_transition_slots_processed_total", + "Total number of slots processed in state transition", + ) +}); + +pub static LEAN_STATE_TRANSITION_SLOTS_PROCESSING_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "lean_state_transition_slots_processing_time_seconds", + "Time taken to process slots in state 
transition", + ) +}); + +pub static LEAN_STATE_TRANSITION_ATTESTATIONS_PROCESSED_TOTAL: LazyLock> = LazyLock::new(|| { + try_create_int_counter( + "lean_state_transition_attestations_processed_total", + "Total number of attestations processed in state transition", + ) +}); + +#[allow(dead_code)] +pub static LEAN_STATE_TRANSITION_ATTESTATIONS_PROCESSING_TIME: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "lean_state_transition_attestations_processing_time_seconds", + "Time taken to process attestations in state transition", + ) +}); + +/// Initialize all validator service metrics so they are registered in the +/// Prometheus registry even before they are first used. +/// +/// This ensures metrics expected by the Zeam Grafana dashboards are always +/// present (with zero values) on the Lighthouse lean client endpoint. +pub fn init() { + // PQ signature metrics + let _ = LEAN_PQ_SIGNATURE_ATTESTATION_SIGNING_TIME.as_ref(); + let _ = LEAN_PQ_SIGNATURE_ATTESTATION_VERIFICATION_TIME.as_ref(); + + // Chain status metrics + let _ = LEAN_HEAD_SLOT.as_ref(); + let _ = LEAN_LATEST_JUSTIFIED_SLOT.as_ref(); + let _ = LEAN_LATEST_FINALIZED_SLOT.as_ref(); + let _ = LEAN_LATEST_SAFE_SLOT.as_ref(); + let _ = LEAN_VALIDATORS_COUNT.as_ref(); + + // Fork choice metrics + let _ = LEAN_FORK_CHOICE_BLOCK_PROCESSING_TIME.as_ref(); + + // Attestation metrics + let _ = LEAN_ATTESTATIONS_VALID_TOTAL.as_ref(); + let _ = LEAN_ATTESTATIONS_INVALID_TOTAL.as_ref(); + let _ = LEAN_ATTESTATION_VALIDATION_TIME.as_ref(); + + // State transition metrics + let _ = LEAN_STATE_TRANSITION_TIME.as_ref(); + let _ = LEAN_STATE_TRANSITION_BLOCK_PROCESSING_TIME.as_ref(); + let _ = LEAN_STATE_TRANSITION_SLOTS_PROCESSED_TOTAL.as_ref(); + let _ = LEAN_STATE_TRANSITION_SLOTS_PROCESSING_TIME.as_ref(); + let _ = LEAN_STATE_TRANSITION_ATTESTATIONS_PROCESSED_TOTAL.as_ref(); + let _ = LEAN_STATE_TRANSITION_ATTESTATIONS_PROCESSING_TIME.as_ref(); + + // Shared Zeam / beacon-node style block processing metrics + let _ = BLOCK_PROCESSING_DURATION_SECONDS.as_ref(); + let _ = CHAIN_ONBLOCK_DURATION_SECONDS.as_ref(); +} + diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index ebe00c9be59..b0ae410f06a 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -53,6 +53,7 @@ environment = { workspace = true } eth2_network_config = { workspace = true } ethereum_hashing = { workspace = true } futures = { workspace = true } +lean_client = { workspace = true } lighthouse_tracing = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } diff --git a/lighthouse/src/cli.rs b/lighthouse/src/cli.rs index ed665d2a479..b6c750dae95 100644 --- a/lighthouse/src/cli.rs +++ b/lighthouse/src/cli.rs @@ -1,5 +1,6 @@ use clap::Parser; use database_manager::cli::DatabaseManager; +use lean_client::cli::LeanNode; use serde::{Deserialize, Serialize}; use validator_client::cli::ValidatorClient; @@ -9,4 +10,6 @@ pub enum LighthouseSubcommands { DatabaseManager(Box), #[clap(name = "validator_client")] ValidatorClient(Box), + #[clap(name = "lean_node")] + LeanNode(Box), } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index c93016a0f54..3f1a0d36520 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -10,13 +10,17 @@ use clap_utils::{ FLAG_HEADER, flags::DISABLE_MALLOC_TUNING_FLAG, get_color_style, get_eth2_network_config, }; use cli::LighthouseSubcommands; -use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR, parse_path_or_default}; +use directory::{ + 
diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs
index c93016a0f54..3f1a0d36520 100644
--- a/lighthouse/src/main.rs
+++ b/lighthouse/src/main.rs
@@ -10,13 +10,17 @@ use clap_utils::{
     FLAG_HEADER, flags::DISABLE_MALLOC_TUNING_FLAG, get_color_style, get_eth2_network_config,
 };
 use cli::LighthouseSubcommands;
-use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR, parse_path_or_default};
+use directory::{
+    DEFAULT_BEACON_NODE_DIR, DEFAULT_LEAN_NODE_DIR, DEFAULT_VALIDATOR_DIR, parse_path_or_default,
+};
 use environment::tracing_common;
 use environment::{EnvironmentBuilder, LoggerConfig};
 use eth2_network_config::{DEFAULT_HARDCODED_NETWORK, Eth2NetworkConfig, HARDCODED_NET_NAMES};
 use ethereum_hashing::have_sha_extensions;
 use futures::TryFutureExt;
+use lean_client::ProductionLeanClient;
 use lighthouse_version::VERSION;
+
 use logging::{MetricsLayer, build_workspace_filter, crit};
 use malloc_utils::configure_memory_allocator;
 use opentelemetry::trace::TracerProvider;
@@ -568,6 +572,11 @@
                 Some(base_path.join("logs"))
             }
+            Some(("lean_node", _)) => Some(
+                parse_path_or_default(matches, "datadir")?
+                    .join(DEFAULT_LEAN_NODE_DIR)
+                    .join("logs"),
+            ),
             _ => None,
         };
     }
 
@@ -672,6 +681,7 @@
         .unwrap_or_else(|| match matches.subcommand() {
             Some(("beacon_node", _)) => "lighthouse-bn".to_string(),
             Some(("validator_client", _)) => "lighthouse-vc".to_string(),
+            Some(("lean_node", _)) => "lighthouse-ln".to_string(),
            _ => "lighthouse".to_string(),
         });
 
@@ -745,6 +755,7 @@
     // Creating a command which can run both might be useful future work.
 
     // Print an indication of which network is currently in use.
+    // Note: the lean_node subcommand doesn't use the --network or --testnet-dir flags.
     let optional_testnet = clap_utils::parse_optional::<String>(matches, "network")?;
     let optional_testnet_dir = clap_utils::parse_optional::<PathBuf>(matches, "testnet-dir")?;
 
@@ -811,6 +822,31 @@
                 "validator_client",
             );
         }
+        Ok(LighthouseSubcommands::LeanNode(lean_node_config)) => {
+            let lean_context = environment.service_context("lean_node".to_string());
+            let executor = lean_context.executor.clone();
+            let data_dir = parse_path_or_default(matches, "datadir")?.join(DEFAULT_LEAN_NODE_DIR);
+            let lean_node_cli = *lean_node_config;
+            let lean_client_config = lean_client::Config::from_cli(lean_node_cli, data_dir);
+            executor.clone().spawn(
+                async move {
+                    if let Err(e) = ProductionLeanClient::new(lean_context, lean_client_config)
+                        .and_then(|mut ln: ProductionLeanClient| async move {
+                            ln.start_service().await
+                        })
+                        .await
+                    {
+                        crit!(reason = e, "Failed to start lean node");
+                        // Ignore the error since it always occurs during normal operation when
+                        // shutting down.
+                        let _ = executor
+                            .shutdown_sender()
+                            .try_send(ShutdownReason::Failure("Failed to start lean node"));
+                    }
+                },
+                "lean_node",
+            );
+        }
         Err(_) => (),
     };
 
@@ -849,6 +885,8 @@
         // TODO(clap-derive) delete this once we've fully migrated to clap derive.
         // At the moment this needs to exist so that we don't trigger a crit.
         Some(("validator_client", _)) => (),
+        Some(("lean_node", _)) => (),
+
        _ => {
            crit!("No subcommand supplied. See --help .");
            return Err("No subcommand supplied.".into());
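The dispatch arm for `lean_node` mirrors the other subcommands: build the client config from the CLI, spawn the client on the environment's executor, and convert a fatal startup error into a shutdown signal rather than a panic. A generic sketch of that shape under tokio; `ShutdownReason`, the channel, and `run_lean_node` are illustrative stand-ins for Lighthouse's TaskExecutor machinery, not its real API.

```rust
// Generic sketch of "spawn a fallible service, signal shutdown on failure".
use tokio::sync::mpsc;

#[derive(Debug)]
enum ShutdownReason {
    Failure(&'static str),
}

async fn run_lean_node() -> Result<(), String> {
    // A real service would run until shutdown; fail at startup for the demo.
    Err("failed to bind listen address".to_string())
}

#[tokio::main]
async fn main() {
    let (shutdown_tx, mut shutdown_rx) = mpsc::channel::<ShutdownReason>(1);

    tokio::spawn(async move {
        if let Err(e) = run_lean_node().await {
            eprintln!("Failed to start lean node: {e}");
            // try_send, with the error ignored: if a shutdown is already
            // queued, losing this signal is harmless.
            let _ = shutdown_tx.try_send(ShutdownReason::Failure("Failed to start lean node"));
        }
    });

    if let Some(reason) = shutdown_rx.recv().await {
        eprintln!("Shutting down: {reason:?}");
    }
}
```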