diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b3d12b23..1eb962ad6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ At the moment this project **does not** adhere to ### Changed - Update substrate to polkadot stable2409 ([#1387](https://github.com/entropyxyz/entropy-core/pull/1387)) - Remove deadlines in OCW ([#1411](https://github.com/entropyxyz/entropy-core/pull/1411)) +- Bump synedrion to 0.3.0 and use manul for protocol session loop ([#1392](https://github.com/entropyxyz/entropy-core/pull/1392)) ## [0.4.0](https://github.com/entropyxyz/entropy-core/compare/release/v0.3.0...release/v0.4.0) - 2025-03-31 diff --git a/Cargo.lock b/Cargo.lock index 92b4f0bce..1b787ecef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1159,15 +1159,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bincode" -version = "2.0.0-rc.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f11ea1a0346b94ef188834a65c068a03aec181c94896d481d7a0a40d85b0ce95" -dependencies = [ - "serde", -] - [[package]] name = "bindgen" version = "0.65.1" @@ -2031,6 +2022,12 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +[[package]] +name = "cobs" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" + [[package]] name = "codespan-reporting" version = "0.11.1" @@ -2521,13 +2518,13 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.6.0-rc.6" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d748d1f5b807ee6d0df5a548d0130417295c3aaed1dcbbb3d6a2e7106e11fcca" +checksum = "96272c2ff28b807e09250b180ad1fb7889a3258f7455759b5c3c58b719467130" dependencies = [ "num-traits", "rand_core 0.6.4", - "serdect 0.3.0-rc.0", + "serdect 0.3.0", "subtle 2.6.1", "zeroize", ] @@ -2565,11 +2562,11 @@ dependencies = [ [[package]] name = "crypto-primes" -version = "0.6.0-pre.2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9fad3f7645c77d3e0269f3e74a8dd25746de992b16bcecbb316059836e0b366" +checksum = "2acbaf157961745008b5a80ee1cc974150691304fe9177edf69747142bfd9878" dependencies = [ - "crypto-bigint 0.6.0-rc.6", + "crypto-bigint 0.6.1", "rand_core 0.6.4", ] @@ -3063,6 +3060,7 @@ dependencies = [ "const-oid", "der_derive", "flagset", + "pem-rfc7468", "zeroize", ] @@ -3436,6 +3434,7 @@ dependencies = [ "generic-array 0.14.7", "group", "hkdf", + "pem-rfc7468", "pkcs8", "rand_core 0.6.4", "sec1", @@ -3444,6 +3443,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + [[package]] name = "encode_unicode" version = "0.3.6" @@ -3550,6 +3561,7 @@ dependencies = [ "futures", "hex", "js-sys", + "k256", "num", "parity-scale-codec", "rand 0.8.5", @@ -3588,7 +3600,7 @@ dependencies = [ name = "entropy-kvdb" version = "0.4.0-rc.1" dependencies = [ - "bincode 1.3.3", + "bincode", "chacha20poly1305 0.9.1", "entropy-protocol", "hex", @@ -3637,7 +3649,7 @@ dependencies = [ "async-trait", "axum", "base64 0.22.1", - "bincode 
1.3.3", + "bincode", "blake2 0.10.6", "entropy-shared", "futures", @@ -3648,11 +3660,14 @@ dependencies = [ "hpke-rs-crypto", "hpke-rs-rust-crypto", "js-sys", + "k256", + "manul", "num", "num_cpus", "rand_core 0.6.4", "schnorrkel", "serde", + "serde-persistent-deserializer", "serde_json", "serial_test", "snow", @@ -3664,6 +3679,7 @@ dependencies = [ "tokio", "tokio-tungstenite 0.26.2", "tracing", + "tracing-subscriber 0.3.19", "wasm-bindgen", "wasm-bindgen-derive", "wasm-bindgen-futures", @@ -3781,7 +3797,7 @@ name = "entropy-test-cli" version = "0.4.0-rc.1" dependencies = [ "anyhow", - "bincode 1.3.3", + "bincode", "clap", "colored", "entropy-client", @@ -3809,7 +3825,9 @@ dependencies = [ "entropy-tss", "hex", "hex-literal 1.0.0", + "k256", "lazy_static", + "manul", "parity-scale-codec", "project-root", "rand 0.8.5", @@ -3832,7 +3850,7 @@ dependencies = [ "axum", "backoff", "base64 0.22.1", - "bincode 1.3.3", + "bincode", "bip32", "bip39", "blake2 0.10.6", @@ -3852,7 +3870,9 @@ dependencies = [ "hex-literal 1.0.0", "hkdf", "hostname 0.4.1", + "k256", "lazy_static", + "manul", "more-asserts", "num", "parity-scale-codec", @@ -3972,6 +3992,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "erased-serde" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e004d887f51fcb9fef17317a2f3525c887d8aa3f4f50fed920816a688284a5b7" +dependencies = [ + "serde", + "typeid", +] + [[package]] name = "errno" version = "0.3.10" @@ -6555,6 +6585,7 @@ dependencies = [ "once_cell", "serdect 0.2.0", "sha2 0.10.9", + "signature", ] [[package]] @@ -7832,6 +7863,29 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "manul" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e547eb94f2f4c2a62a916a9ca9aa4a9fef5b7a19a001c31f291859bad96348a7" +dependencies = [ + "derive-where", + "digest 0.10.7", + "displaydoc", + "erased-serde", + "postcard", + "rand 0.8.5", + "rand_core 0.6.4", + "serde", + "serde-encoded-bytes", + "serde-persistent-deserializer", + "serde_json", + "signature", + "tinyvec", + "tokio", + "tracing", +] + [[package]] name = "maplit" version = "1.0.2" @@ -10187,6 +10241,7 @@ dependencies = [ "frame-election-provider-support", "frame-support 38.2.0", "frame-system", + "k256", "log", "pallet-authorship", "pallet-bags-list", @@ -11266,6 +11321,15 @@ dependencies = [ "base64 0.22.1", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -12095,6 +12159,18 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +[[package]] +name = "postcard" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "170a2601f67cc9dba8edd8c4870b15f71a6a2dc196daec8c83f72b59dff628a8" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -12184,6 +12260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ 
"elliptic-curve", + "serdect 0.2.0", ] [[package]] @@ -15936,7 +16013,6 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" dependencies = [ - "serde", "zeroize", ] @@ -16047,15 +16123,24 @@ dependencies = [ [[package]] name = "serde-encoded-bytes" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec007ca0e3db940a5409d65780b6bd0202cbea68800861ae876b80655ee8e24b" +checksum = "41015be3fac3ea5ca130f052836e50662c574547baca4d482e69e834f266e8d5" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "hex", "serde", ] +[[package]] +name = "serde-persistent-deserializer" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f52002cbca6cb233262e7ab1a04d4214c3a13f4ee93cec344286ff14b01147" +dependencies = [ + "serde", +] + [[package]] name = "serde_bytes" version = "0.11.14" @@ -16142,9 +16227,9 @@ dependencies = [ [[package]] name = "serdect" -version = "0.3.0-rc.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a504c8ee181e3e594d84052f983d60afe023f4d94d050900be18062bbbf7b58" +checksum = "f42f67da2385b51a5f9652db9c93d78aeaf7610bf5ec366080b6de810604af53" dependencies = [ "base16ct", "serde", @@ -18952,25 +19037,30 @@ dependencies = [ [[package]] name = "synedrion" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a619936bb0dd5fa8f8e79c703590c6a10be9e2160b61a2e85484f9f053a3c5b0" +checksum = "1cc3a3e679944b21f44638c366b1b273882a918343c4e3774e3fce3f756cf028" dependencies = [ - "bincode 2.0.0-rc.3", "bip32", - "crypto-bigint 0.6.0-rc.6", + "crypto-bigint 0.6.1", "crypto-primes", "digest 0.10.7", "displaydoc", + "ecdsa", + "elliptic-curve", "hashing-serializer", "k256", + "manul", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_core 0.6.4", "secrecy 0.10.3", "serde", "serde-encoded-bytes", - "sha2 0.10.9", "sha3", "signature", + "tiny-curve", + "tracing", "zeroize", ] @@ -19227,6 +19317,20 @@ dependencies = [ "time-core", ] +[[package]] +name = "tiny-curve" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3839423882e7687ba6339ed468baea296bc13599969293c560ca3f7fc816be24" +dependencies = [ + "bip32", + "ecdsa", + "elliptic-curve", + "num-traits", + "primeorder", + "sha2 0.10.9", +] + [[package]] name = "tiny-keccak" version = "2.0.2" @@ -19252,6 +19356,7 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ + "serde", "tinyvec_macros", ] @@ -19954,6 +20059,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "typeid" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" + [[package]] name = "typenum" version = "1.17.0" @@ -20565,7 +20676,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f907fdead3153cb9bfb7a93bbd5b62629472dc06dee83605358c64c52ed3dda9" dependencies = [ "anyhow", - "bincode 1.3.3", + "bincode", "cfg-if", "indexmap 1.9.3", "libc", @@ -20594,7 +20705,7 @@ checksum = "c4e87029cc5760db9a3774aff4708596fe90c20ed2baeef97212e98b812fd0fc" dependencies = [ "anyhow", "async-trait", - "bincode 1.3.3", + "bincode", "bumpalo", "cfg-if", 
"encoding_rs", @@ -20651,7 +20762,7 @@ checksum = "c86437fa68626fe896e5afc69234bb2b5894949083586535f200385adfd71213" dependencies = [ "anyhow", "base64 0.21.7", - "bincode 1.3.3", + "bincode", "directories-next", "file-per-thread-logger 0.1.6", "log", @@ -20671,7 +20782,7 @@ checksum = "31561fbbaa86d3c042696940bc9601146bf4aaec39ae725c86b5f1358d8d7023" dependencies = [ "anyhow", "base64 0.21.7", - "bincode 1.3.3", + "bincode", "directories-next", "file-per-thread-logger 0.2.0", "log", @@ -20844,7 +20955,7 @@ checksum = "0de48df552cfca1c9b750002d3e07b45772dd033b0b206d5c0968496abf31244" dependencies = [ "addr2line 0.19.0", "anyhow", - "bincode 1.3.3", + "bincode", "cfg-if", "cpp_demangle", "gimli 0.27.3", @@ -20868,7 +20979,7 @@ checksum = "cce606b392c321d7272928003543447119ef937a9c3ebfce5c4bb0bf6b0f5bac" dependencies = [ "addr2line 0.20.0", "anyhow", - "bincode 1.3.3", + "bincode", "cfg-if", "cpp_demangle", "gimli 0.27.3", diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index bdc6fb638..918006179 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -30,9 +30,10 @@ x25519-dalek ={ version="2.0.1", features=["static_secrets"], optional=true entropy-protocol ={ version="0.4.0-rc.1", path="../protocol", optional=true, default-features=false } reqwest ={ version="0.12.12", features=["json", "stream"], optional=true } base64 ={ version="0.22.0", optional=true } -synedrion ={ version="0.2.0", optional=true } +synedrion ={ version="0.3.0", optional=true, features=["k256"] } hex ={ version="0.4.3", optional=true } parity-scale-codec={ version="3.7.2", default-features=false, optional=true } +k256 ={ version="0.13", default-features=false, features=["ecdsa"], optional=true } # Only for the browser js-sys={ version="0.3.74", optional=true } @@ -65,6 +66,7 @@ full-client=[ "dep:reqwest", "dep:base64", "dep:synedrion", + "dep:k256", "dep:hex", "dep:parity-scale-codec", ] diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index ca5d2d9f9..05b14ffd8 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -57,6 +57,7 @@ pub use synedrion::KeyShare; use base64::prelude::{Engine, BASE64_STANDARD}; use entropy_protocol::RecoverableSignature; use futures::stream::StreamExt; +use k256::ecdsa::{RecoveryId, Signature as k256Signature, VerifyingKey}; use sp_core::{ sr25519::{self, Signature}, Pair, @@ -66,7 +67,6 @@ use subxt::{ utils::{AccountId32 as SubxtAccountId32, H256}, Config, OnlineClient, }; -use synedrion::k256::ecdsa::{RecoveryId, Signature as k256Signature, VerifyingKey}; pub const VERIFYING_KEY_LENGTH: usize = entropy_shared::VERIFICATION_KEY_LENGTH as usize; diff --git a/crates/client/src/errors.rs b/crates/client/src/errors.rs index 356390c41..5905a0dba 100644 --- a/crates/client/src/errors.rs +++ b/crates/client/src/errors.rs @@ -100,7 +100,7 @@ pub enum ClientError { #[error("Base64 decode: {0}")] Base64(#[from] base64::DecodeError), #[error("ECDSA: {0}")] - Ecdsa(#[from] synedrion::ecdsa::Error), + Ecdsa(#[from] k256::ecdsa::Error), #[error("Cannot get recovery ID from signature")] NoRecoveryId, #[error("Cannot parse recovery ID from signature")] diff --git a/crates/client/src/tests.rs b/crates/client/src/tests.rs index 7d93c82ae..6f74745d0 100644 --- a/crates/client/src/tests.rs +++ b/crates/client/src/tests.rs @@ -31,7 +31,7 @@ use rand::{ SeedableRng, }; use serial_test::serial; -use sp_core::{sr25519, Pair, H256}; +use sp_core::{sr25519, Pair}; use sp_keyring::AccountKeyring; use subxt::utils::AccountId32; diff --git 
a/crates/kvdb/Cargo.toml b/crates/kvdb/Cargo.toml index 60094af0b..7b5eb769c 100644 --- a/crates/kvdb/Cargo.toml +++ b/crates/kvdb/Cargo.toml @@ -23,7 +23,7 @@ zeroize ={ version="1.8", features=["zeroize_derive"], default-features= rpassword ={ version="7.4.0", default-features=false } scrypt ={ version="0.11.0", default-features=false, features=["std"] } chacha20poly1305={ version="0.9", features=["alloc"], default-features=false } -synedrion ="0.2.0" +synedrion ={ version="0.3.0", features=["k256"] } # Async tokio ={ version="1.44", features=["macros", "sync", "fs", "rt-multi-thread", "io-util"] } diff --git a/crates/kvdb/src/kv_manager/value.rs b/crates/kvdb/src/kv_manager/value.rs index 860a9c86d..e9112c55e 100644 --- a/crates/kvdb/src/kv_manager/value.rs +++ b/crates/kvdb/src/kv_manager/value.rs @@ -18,7 +18,7 @@ use std::{convert::TryFrom, path::PathBuf}; use entropy_protocol::PartyId; use serde::{Deserialize, Serialize}; -use synedrion::{KeyShare, ProductionParams}; +use synedrion::{k256::ProductionParams112, KeyShare}; use tracing::{info, span, Level, Span}; use zeroize::Zeroize; @@ -39,7 +39,7 @@ pub struct Entropy(pub Vec); pub struct PartyInfo { // TODO: in the future this will probably be a mapping {party_id: [share_id, share_id, ...]} pub party_ids: Vec, - pub share: KeyShare, + pub share: KeyShare, } impl fmt::Debug for PartyInfo { diff --git a/crates/protocol/Cargo.toml b/crates/protocol/Cargo.toml index 5ec5f63aa..46e582e6d 100644 --- a/crates/protocol/Cargo.toml +++ b/crates/protocol/Cargo.toml @@ -9,29 +9,32 @@ repository ='https://github.com/entropyxyz/entropy-core' edition ='2021' [dependencies] -async-trait ="0.1.88" -entropy-shared ={ version="0.4.0-rc.1", path="../shared", default-features=false } -synedrion ="0.2.0" -serde ={ version="1.0", features=["derive"], default-features=false } -subxt ={ version="0.38.0", default-features=false } -sp-core ={ version="34.0.0", default-features=false, features=["full_crypto", "serde", "std"] } -tokio ={ version="1.44", features=["sync", "rt", "macros"] } -x25519-dalek ={ version="2.0.1", features=["static_secrets"] } -futures ="0.3" -hex ="0.4.3" -blake2 ="0.10.4" -thiserror ="2.0.12" -snow ="0.9.6" -getrandom ={ version="0.2", features=["js"] } -rand_core ={ version="0.6.4", features=["getrandom"] } -tracing ="0.1.41" -bincode ="1.3.3" -serde_json ="1.0" -zeroize ="1.8.1" -hpke-rs ="0.2.0" -hpke-rs-crypto ="0.2.0" -hpke-rs-rust-crypto="0.2.0" -num ="0.4.3" +async-trait ="0.1.88" +entropy-shared ={ version="0.4.0-rc.1", path="../shared", default-features=false } +synedrion ={ version="0.3.0", features=["k256", "dev"] } +serde ={ version="1.0", features=["derive"], default-features=false } +subxt ={ version="0.38.0", default-features=false } +sp-core ={ version="34.0.0", default-features=false, features=["full_crypto", "serde", "std"] } +tokio ={ version="1.44", features=["sync", "rt", "macros"] } +x25519-dalek ={ version="2.0.1", features=["static_secrets"] } +futures ="0.3" +hex ="0.4.3" +blake2 ="0.10.4" +thiserror ="2.0.12" +snow ="0.9.6" +getrandom ={ version="0.2", features=["js"] } +rand_core ={ version="0.6.4", features=["getrandom"] } +tracing ="0.1.41" +bincode ="1.3.3" +serde_json ="1.0" +zeroize ="1.8.1" +hpke-rs ="0.2.0" +hpke-rs-crypto ="0.2.0" +hpke-rs-rust-crypto ="0.2.0" +num ="0.4.3" +k256 ={ version="0.13", default-features=false, features=["ecdsa"] } +serde-persistent-deserializer={ version="0.3" } +manul ={ version="0.2.1", features=["tokio"] } # Used only with the `server` feature to implement the 
WsConnection trait axum ={ version="0.8.4", features=["ws"], optional=true } @@ -45,6 +48,7 @@ wasm-bindgen-derive ={ version="0.3", optional=true } js-sys ={ version="0.3.74", optional=true } base64 ={ version="0.22.1", optional=true } schnorrkel ={ version="0.11.4", default-features=false, features=["std"], optional=true } +tracing-subscriber ={ version="0.3.19", features=["env-filter"] } [dev-dependencies] serial_test="3.2.0" diff --git a/crates/protocol/src/errors.rs b/crates/protocol/src/errors.rs index e9cc618b7..4dcfe5671 100644 --- a/crates/protocol/src/errors.rs +++ b/crates/protocol/src/errors.rs @@ -13,127 +13,20 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -use synedrion::{ - sessions, AuxGenResult, InteractiveSigningResult, KeyInitResult, KeyResharingResult, - ProtocolResult, -}; +use manul::protocol::LocalError; use thiserror::Error; -use crate::{protocol_message::ProtocolMessage, KeyParams, PartyId}; - -#[derive(Debug, Error)] -pub enum GenericProtocolError { - #[error("Synedrion session error {0}")] - Joined(Box>), - #[error("Incoming message stream error: {0}")] - IncomingStream(String), - #[error("Broadcast error: {0}")] - Broadcast(#[from] Box>), - #[error("Mpsc send error: {0}")] - Mpsc(#[from] tokio::sync::mpsc::error::SendError), - #[error("Could not get session out of Arc - session has finalized before message processing finished")] - ArcUnwrapError, - #[error("Message processing task panic or cancellation: {0}")] - JoinHandle(#[from] tokio::task::JoinError), -} - -impl From for GenericProtocolError { - fn from(err: sessions::LocalError) -> Self { - Self::Joined(Box::new(sessions::Error::Local(err))) - } -} - -impl From> for GenericProtocolError { - fn from(err: sessions::RemoteError) -> Self { - Self::Joined(Box::new(sessions::Error::Remote(err))) - } -} - -impl From> for GenericProtocolError { - fn from(err: sessions::Error) -> Self { - Self::Joined(Box::new(err)) - } -} - -impl From>> - for ProtocolExecutionErr -{ - fn from(err: GenericProtocolError>) -> Self { - tracing::error!("{:?}", err); - match err { - GenericProtocolError::Joined(err) => ProtocolExecutionErr::SigningProtocolError(err), - GenericProtocolError::IncomingStream(err) => ProtocolExecutionErr::IncomingStream(err), - GenericProtocolError::Broadcast(err) => ProtocolExecutionErr::Broadcast(err), - GenericProtocolError::Mpsc(err) => ProtocolExecutionErr::Mpsc(err), - GenericProtocolError::ArcUnwrapError => ProtocolExecutionErr::ArcUnwrapError, - GenericProtocolError::JoinHandle(err) => ProtocolExecutionErr::JoinHandle(err), - } - } -} - -impl From>> for ProtocolExecutionErr { - fn from(err: GenericProtocolError>) -> Self { - tracing::error!("{:?}", err); - match err { - GenericProtocolError::Joined(err) => ProtocolExecutionErr::KeyInitProtocolError(err), - GenericProtocolError::IncomingStream(err) => ProtocolExecutionErr::IncomingStream(err), - GenericProtocolError::Broadcast(err) => ProtocolExecutionErr::Broadcast(err), - GenericProtocolError::Mpsc(err) => ProtocolExecutionErr::Mpsc(err), - GenericProtocolError::ArcUnwrapError => ProtocolExecutionErr::ArcUnwrapError, - GenericProtocolError::JoinHandle(err) => ProtocolExecutionErr::JoinHandle(err), - } - } -} - -impl From>> for ProtocolExecutionErr { - fn from(err: GenericProtocolError>) -> Self { - tracing::error!("{:?}", err); - match err { - GenericProtocolError::Joined(err) => ProtocolExecutionErr::KeyReshareProtocolError(err), - GenericProtocolError::IncomingStream(err) 
=> ProtocolExecutionErr::IncomingStream(err),
-            GenericProtocolError::Broadcast(err) => ProtocolExecutionErr::Broadcast(err),
-            GenericProtocolError::Mpsc(err) => ProtocolExecutionErr::Mpsc(err),
-            GenericProtocolError::ArcUnwrapError => ProtocolExecutionErr::ArcUnwrapError,
-            GenericProtocolError::JoinHandle(err) => ProtocolExecutionErr::JoinHandle(err),
-        }
-    }
-}
-
-impl From>> for ProtocolExecutionErr {
-    fn from(err: GenericProtocolError>) -> Self {
-        tracing::error!("{:?}", err);
-        match err {
-            GenericProtocolError::Joined(err) => ProtocolExecutionErr::AuxGenProtocolError(err),
-            GenericProtocolError::IncomingStream(err) => ProtocolExecutionErr::IncomingStream(err),
-            GenericProtocolError::Broadcast(err) => ProtocolExecutionErr::Broadcast(err),
-            GenericProtocolError::Mpsc(err) => ProtocolExecutionErr::Mpsc(err),
-            GenericProtocolError::ArcUnwrapError => ProtocolExecutionErr::ArcUnwrapError,
-            GenericProtocolError::JoinHandle(err) => ProtocolExecutionErr::JoinHandle(err),
-        }
-    }
-}
+use crate::protocol_message::ProtocolMessage;

 /// An error during or while setting up a protocol session
 #[derive(Debug, Error)]
 pub enum ProtocolExecutionErr {
     #[error("Incoming message stream error: {0}")]
     IncomingStream(String),
-    #[error("Synedrion session creation error: {0}")]
-    SessionCreation(sessions::LocalError),
-    #[error("Synedrion signing session error")]
-    SigningProtocolError(
-        Box, PartyId>>,
-    ),
-    #[error("Synedrion key init session error")]
-    KeyInitProtocolError(Box, PartyId>>),
-    #[error("Synedrion key reshare session error")]
-    KeyReshareProtocolError(Box, PartyId>>),
-    #[error("Synedrion aux generation session error")]
-    AuxGenProtocolError(Box, PartyId>>),
     #[error("Broadcast error: {0}")]
     Broadcast(#[from] Box<tokio::sync::broadcast::error::SendError<ProtocolMessage>>),
     #[error("Mpsc send error: {0}")]
-    Mpsc(#[from] tokio::sync::mpsc::error::SendError<ProtocolMessage>),
+    Mpsc(String),
     #[error("Bad keyshare error {0}")]
     BadKeyShare(String),
     #[error("Cannot serialize session ID {0}")]
@@ -148,12 +41,30 @@ pub enum ProtocolExecutionErr {
     BadVerifyingKey(String),
     #[error("Expected verifying key but got a protocol message")]
     UnexpectedMessage,
-    #[error("Could not get session out of Arc")]
-    ArcUnwrapError,
     #[error("Message processing task panic or cancellation: {0}")]
     JoinHandle(#[from] tokio::task::JoinError),
     #[error("Could not get validating key from keyshare")]
     NoValidatingKey,
+    #[error("Manul local error {0}")]
+    Local(String),
+    #[error("The protocol session was terminated by the user")]
+    Terminated,
+    #[error("The protocol execution stalled because not enough messages were received to finalize the round")]
+    NotEnoughMessages,
+    #[error("Could not send stop signal to incoming message handler - likely the handler has already terminated")]
+    StopSignal(#[from] tokio::sync::mpsc::error::SendError<()>),
+}
+
+impl From<LocalError> for ProtocolExecutionErr {
+    fn from(err: LocalError) -> Self {
+        Self::Local(format!("{err:?}"))
+    }
+}
+
+impl From<tokio::sync::mpsc::error::SendError<ProtocolMessage>> for ProtocolExecutionErr {
+    fn from(err: tokio::sync::mpsc::error::SendError<ProtocolMessage>) -> Self {
+        Self::Mpsc(format!("{err:?}"))
+    }
 }

 #[derive(Debug, Error)]
diff --git a/crates/protocol/src/execute_protocol.rs b/crates/protocol/src/execute_protocol.rs
index 6636cc8e8..6000bfef0 100644
--- a/crates/protocol/src/execute_protocol.rs
+++ b/crates/protocol/src/execute_protocol.rs
@@ -13,41 +13,47 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
-//! A wrapper for the threshold signing library to handle sending and receiving messages.
- -use futures::future::try_join_all; +//! A wrapper for the threshold signing library to handle sending and receiving messages + +use blake2::{Blake2s256, Digest}; +use k256::{ecdsa::VerifyingKey, EncodedPoint}; +use manul::{ + protocol::Protocol, + session::{ + tokio::{par_run_session, MessageIn, MessageOut}, + Session, SessionId as ManulSessionId, SessionOutcome, + }, + signature::RandomizedDigestSigner, +}; use num::bigint::BigUint; use rand_core::{CryptoRngCore, OsRng}; use sp_core::{crypto::Ss58Codec, sr25519, Pair}; -use std::sync::Arc; use subxt::utils::AccountId32; use synedrion::{ - ecdsa::VerifyingKey, - k256::EncodedPoint, - make_aux_gen_session, make_interactive_signing_session, make_key_init_session, - make_key_resharing_session, - sessions::{FinalizeOutcome, Session, SessionId as SynedrionSessionId}, - signature::{self, hazmat::RandomizedPrehashSigner}, - AuxInfo, KeyResharingInputs, KeyShare, NewHolder, OldHolder, PrehashedMessage, - RecoverableSignature, ThresholdKeyShare, + signature::{self}, + AuxGen, AuxInfo, InteractiveSigning, KeyInit, KeyResharing, KeyShare, NewHolder, OldHolder, + PrehashedMessage, RecoverableSignature, ThresholdKeyShare, }; -use tokio::{sync::mpsc, task::spawn_blocking}; +use tokio::sync::mpsc; use crate::{ - errors::{GenericProtocolError, ProtocolExecutionErr}, + errors::ProtocolExecutionErr, protocol_message::{ProtocolMessage, ProtocolMessagePayload}, protocol_transport::Broadcaster, - KeyParams, KeyShareWithAuxInfo, PartyId, SessionId, Subsession, + EntropySessionParameters, KeyParams, KeyShareWithAuxInfo, PartyId, SessionId, Subsession, }; -use std::collections::{BTreeSet, VecDeque}; +use std::collections::BTreeSet; +/// For incoming protocol messages pub type ChannelIn = mpsc::Receiver; +/// For outgoing protocol messages pub type ChannelOut = Broadcaster; /// Thin wrapper broadcasting channel out and messages from other nodes in pub struct Channels(pub ChannelOut, pub ChannelIn); +/// Wraps [sr25519::Pair] with the needed traits to using for signing protocol messages #[derive(Clone)] pub struct PairWrapper(pub sr25519::Pair); @@ -59,142 +65,90 @@ impl signature::Keypair for PairWrapper { } } -impl RandomizedPrehashSigner for PairWrapper { - fn sign_prehash_with_rng( +impl RandomizedDigestSigner for PairWrapper { + fn try_sign_digest_with_rng( &self, _rng: &mut impl CryptoRngCore, - prehash: &[u8], + prehash: Blake2s256, ) -> Result { // TODO: doesn't seem like there's a way to randomize signing? - Ok(self.0.sign(prehash)) + let hash = prehash.finalize(); + Ok(self.0.sign(&hash)) + } +} + +impl std::fmt::Debug for PairWrapper { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0.public()) } } -pub async fn execute_protocol_generic( - chans: &mut Channels, - session: Session, - session_id_hash: [u8; 32], -) -> Result> +/// Execute any of the protocols with a given session +pub async fn execute_protocol_generic

<P>(
+    mut chans: Channels,
+    session: Session<P, EntropySessionParameters>,
+) -> Result<(P::Result, Channels), ProtocolExecutionErr>
 where
-    <Res as ProtocolResult>::ProvableError: std::marker::Send,
-    <Res as ProtocolResult>::CorrectnessProof: std::marker::Send,
+    P: Protocol<PartyId>,
+    <P as Protocol<PartyId>>::ProtocolError: std::marker::Send,
+    <P as Protocol<PartyId>>::ProtocolError: Sync,
+    <P as Protocol<PartyId>
>::Result: std::marker::Send, { - let session_id = synedrion::SessionId::from_seed(&session_id_hash); - let tx = &chans.0; - let rx = &mut chans.1; - - let my_id = session.verifier(); - - let mut session = session; - let mut cached_messages = Vec::new(); - - loop { - let mut accum = session.make_accumulator(); - let current_round = session.current_round(); - let session_arc = Arc::new(session); - - // Send outgoing messages - let destinations = session_arc.message_destinations(); - let join_handles = destinations.iter().map(|destination| { - let session_arc = session_arc.clone(); - let tx = tx.clone(); - let my_id = my_id.clone(); - let destination = destination.clone(); - spawn_blocking(move || { - session_arc - .make_message(&mut OsRng, &destination) - .map(|(message, artifact)| { - tx.send(ProtocolMessage::new(&my_id, &destination, message)) - .map(|_| artifact) - .map_err(|err| { - let err: GenericProtocolError = err.into(); - err - }) - }) - .map_err(|err| { - let err: GenericProtocolError = err.into(); - err - }) - }) - }); - - for result in try_join_all(join_handles).await? { - accum.add_artifact(result??)?; - } - - // Process cached messages - let join_handles = cached_messages.into_iter().map(|preprocessed| { - let session_arc = session_arc.clone(); - spawn_blocking(move || session_arc.process_message(&mut OsRng, preprocessed)) - }); - - for result in try_join_all(join_handles).await? { - accum.add_processed_message(result?)??; + let (tx_in, mut rx_in) = mpsc::channel::>(1024); + let (tx_out, mut rx_out) = mpsc::channel::>(1024); + + // Handle outgoing messages + let broadcast_out = chans.0.clone(); + tokio::spawn(async move { + while let Some(msg_out) = rx_out.recv().await { + if let Err(err) = broadcast_out.send(ProtocolMessage::new( + &msg_out.from, + &msg_out.to, + msg_out.message, + )) { + tracing::error!("Cannot write outgoing message to channel: {err:?}"); + break; + } } + }); - // Receive and process incoming messages - let (process_tx, mut process_rx) = mpsc::channel(1024); - let mut messages_for_next_subprotocol = VecDeque::new(); - while !session_arc.can_finalize(&accum)? { + // Handle incoming messages + let (stop_signal_tx, mut stop_signal_rx) = mpsc::channel(1); + let join_handle = tokio::spawn(async move { + loop { tokio::select! { - // Incoming message from remote peer - maybe_message = rx.recv() => { - let message = maybe_message.ok_or_else(|| { - GenericProtocolError::IncomingStream(format!("{:?}", current_round)) - })?; - - if let ProtocolMessagePayload::MessageBundle(payload) = message.payload.clone() { - if payload.session_id() == &session_id { - // Perform quick checks before proceeding with the verification. 
- let preprocessed = - session_arc.preprocess_message(&mut accum, &message.from, *payload)?; - - if let Some(preprocessed) = preprocessed { - let session_arc = session_arc.clone(); - let tx = process_tx.clone(); - tokio::spawn(async move { - let result = session_arc.process_message(&mut OsRng, preprocessed); - - if futures::executor::block_on(tx.send(result)).is_err() { - tracing::error!("Protocol finished before message processing result sent"); - } - }); + protocol_message_option = chans.1.recv() => { + if let Some(protocol_message) = protocol_message_option { + let from = protocol_message.from; + if let ProtocolMessagePayload::Message(message) = protocol_message.payload { + if let Err(err) = tx_in.send(MessageIn { from, message: *message }).await { + tracing::error!("Cannot write incoming message to channel: {err:?}"); + break; } - } else { - tracing::warn!("Got protocol message with incorrect session ID - putting back in queue"); - messages_for_next_subprotocol.push_back(message); } } else { - tracing::warn!("Got verifying key during protocol - ignoring"); + break; } } - - // Result from processing a message - maybe_result = process_rx.recv() => { - if let Some(result) = maybe_result { - accum.add_processed_message(result?)??; - } + _ = stop_signal_rx.recv() => { + break; } } } + chans + }); - for message in messages_for_next_subprotocol { - tx.incoming_sender.send(message).await?; - } + // Run protocol + let session_report = par_run_session(&mut OsRng, &tx_out, &mut rx_in, session).await?; - // Get session back out of Arc - let session_inner = - Arc::try_unwrap(session_arc).map_err(|_| GenericProtocolError::ArcUnwrapError)?; - match session_inner.finalize_round(&mut OsRng, accum)? { - FinalizeOutcome::Success(res) => break Ok(res), - FinalizeOutcome::AnotherRound { - session: new_session, - cached_messages: new_cached_messages, - } => { - session = new_session; - cached_messages = new_cached_messages; - }, - } + // Send closing signal to incoming message loop so we can get channels back + stop_signal_tx.send(()).await?; + let chans = join_handle.await?; + + match session_report.outcome { + SessionOutcome::Result(output) => Ok((output, chans)), + SessionOutcome::Terminated => Err(ProtocolExecutionErr::Terminated), + SessionOutcome::NotEnoughMessages => Err(ProtocolExecutionErr::NotEnoughMessages), } } @@ -206,35 +160,35 @@ where )] pub async fn execute_signing_protocol( session_id: SessionId, - mut chans: Channels, + chans: Channels, key_share: &KeyShare, aux_info: &AuxInfo, - prehashed_message: &PrehashedMessage, + prehashed_message: &PrehashedMessage, threshold_pair: &sr25519::Pair, threshold_accounts: Vec, -) -> Result { +) -> Result, ProtocolExecutionErr> { tracing::debug!("Executing signing protocol"); tracing::trace!("Using key share with verifying key {:?}", &key_share.verifying_key()); let party_ids: BTreeSet = threshold_accounts.iter().cloned().map(PartyId::new).collect(); + let aux_info = aux_info.clone().subset(&party_ids)?; let pair = PairWrapper(threshold_pair.clone()); let session_id_hash = session_id.blake2(None)?; - let session = make_interactive_signing_session( + let entry_point = + InteractiveSigning::new(*prehashed_message, key_share.clone(), aux_info.clone())?; + + let session = Session::<_, EntropySessionParameters>::new( &mut OsRng, - SynedrionSessionId::from_seed(session_id_hash.as_slice()), + ManulSessionId::from_seed::(session_id_hash.as_slice()), pair, - &party_ids, - key_share, - aux_info, - prehashed_message, - ) - 
.map_err(ProtocolExecutionErr::SessionCreation)?; - - Ok(execute_protocol_generic(&mut chans, session, session_id_hash).await?) + entry_point, + )?; + + Ok(execute_protocol_generic(chans, session).await?.0) } /// Execute dkg. @@ -245,7 +199,7 @@ pub async fn execute_signing_protocol( )] pub async fn execute_dkg( session_id: SessionId, - mut chans: Channels, + chans: Channels, threshold_pair: &sr25519::Pair, threshold_accounts: Vec, threshold: usize, @@ -263,23 +217,22 @@ pub async fn execute_dkg( let (key_init_parties, includes_me) = get_key_init_parties(&my_party_id, threshold, &party_ids, &session_id_hash)?; - let (verifying_key, old_holder, mut chans) = if includes_me { + let (verifying_key, old_holder, chans) = if includes_me { // First run the key init session. - let session = make_key_init_session( + let entry_point = KeyInit::new(key_init_parties.clone())?; + let session = Session::<_, EntropySessionParameters>::new( &mut OsRng, - SynedrionSessionId::from_seed(session_id_hash.as_slice()), + ManulSessionId::from_seed::(session_id_hash.as_slice()), pair.clone(), - &key_init_parties, - ) - .map_err(ProtocolExecutionErr::SessionCreation)?; + entry_point, + )?; - let init_keyshare = execute_protocol_generic(&mut chans, session, session_id_hash).await?; + let (init_keyshare, chans) = execute_protocol_generic(chans, session).await?; tracing::info!("Finished key init protocol"); // Send verifying key - let verifying_key = - init_keyshare.verifying_key().ok_or(ProtocolExecutionErr::NoValidatingKey)?; + let verifying_key = init_keyshare.verifying_key(); for party_id in party_ids.iter() { if !key_init_parties.contains(party_id) { let message = ProtocolMessage { @@ -324,42 +277,47 @@ pub async fn execute_dkg( }; // Now reshare to all n parties - let inputs = KeyResharingInputs { + let entry_point = KeyResharing::::new( old_holder, - new_holder: Some(NewHolder { + Some(NewHolder:: { verifying_key, old_threshold: threshold, - old_holders: key_init_parties.clone(), + old_holders: key_init_parties, }), - new_holders: party_ids.clone(), - new_threshold: threshold, - }; + party_ids.clone(), + threshold, + ); let session_id_hash = session_id.blake2(Some(Subsession::Reshare))?; - let session = make_key_resharing_session( + let manul_session_id = + ManulSessionId::from_seed::(session_id_hash.as_slice()); + + let session = Session::<_, EntropySessionParameters>::new( &mut OsRng, - SynedrionSessionId::from_seed(session_id_hash.as_slice()), + manul_session_id, pair.clone(), - &party_ids, - inputs, - ) - .map_err(ProtocolExecutionErr::SessionCreation)?; - let new_key_share_option = - execute_protocol_generic(&mut chans, session, session_id_hash).await?; + entry_point, + )?; + + let (new_key_share_option, chans) = execute_protocol_generic(chans, session).await?; + let new_key_share = new_key_share_option.ok_or(ProtocolExecutionErr::NoOutputFromReshareProtocol)?; tracing::info!("Finished reshare protocol"); // Now run the aux gen protocol to get AuxInfo + let entry_point = AuxGen::new(party_ids)?; + let session_id_hash = session_id.blake2(Some(Subsession::AuxGen))?; - let session = make_aux_gen_session( + + let session = Session::<_, EntropySessionParameters>::new( &mut OsRng, - SynedrionSessionId::from_seed(session_id_hash.as_slice()), - pair, - &party_ids, - ) - .map_err(ProtocolExecutionErr::SessionCreation)?; - let aux_info = execute_protocol_generic(&mut chans, session, session_id_hash).await?; + ManulSessionId::from_seed::(session_id_hash.as_slice()), + pair.clone(), + entry_point, + )?; + + let 
(aux_info, _) = execute_protocol_generic(chans, session).await?; tracing::info!("Finished aux gen protocol"); Ok((new_key_share, aux_info)) @@ -374,9 +332,9 @@ pub async fn execute_dkg( )] pub async fn execute_reshare( session_id: SessionId, - mut chans: Channels, + chans: Channels, threshold_pair: &sr25519::Pair, - inputs: KeyResharingInputs, + entry_point: KeyResharing, verifiers: &BTreeSet, aux_info_option: Option>, ) -> Result< @@ -389,18 +347,16 @@ pub async fn execute_reshare( let pair = PairWrapper(threshold_pair.clone()); - let session_id_hash = session_id.blake2(None)?; + let session_id_hash = session_id.blake2(Some(Subsession::Reshare))?; - let session = make_key_resharing_session( + let session = Session::<_, EntropySessionParameters>::new( &mut OsRng, - SynedrionSessionId::from_seed(session_id_hash.as_slice()), - pair, - verifiers, - inputs.clone(), - ) - .map_err(ProtocolExecutionErr::SessionCreation)?; + ManulSessionId::from_seed::(session_id_hash.as_slice()), + pair.clone(), + entry_point, + )?; - let new_key_share = execute_protocol_generic(&mut chans, session, session_id_hash).await?; + let (new_key_share, chans) = execute_protocol_generic(chans, session).await?; tracing::info!("Completed reshare protocol"); @@ -410,15 +366,19 @@ pub async fn execute_reshare( tracing::info!("Executing aux gen session as part of reshare"); // Now run an aux gen session let session_id_hash_aux_data = session_id.blake2(Some(Subsession::AuxGen))?; - let session = make_aux_gen_session( + + let entry_point = AuxGen::new(verifiers.clone())?; + + let session = Session::<_, EntropySessionParameters>::new( &mut OsRng, - SynedrionSessionId::from_seed(session_id_hash_aux_data.as_slice()), - PairWrapper(threshold_pair.clone()), - &inputs.new_holders, - ) - .map_err(ProtocolExecutionErr::SessionCreation)?; + ManulSessionId::from_seed::( + session_id_hash_aux_data.as_slice(), + ), + pair.clone(), + entry_point, + )?; - execute_protocol_generic(&mut chans, session, session_id_hash_aux_data).await? 
+ execute_protocol_generic(chans, session).await?.0 }; Ok((new_key_share.ok_or(ProtocolExecutionErr::NoOutputFromReshareProtocol)?, aux_info)) diff --git a/crates/protocol/src/lib.rs b/crates/protocol/src/lib.rs index abcb27e27..3e7b38b35 100644 --- a/crates/protocol/src/lib.rs +++ b/crates/protocol/src/lib.rs @@ -31,20 +31,19 @@ use std::{ hash::{Hash, Hasher}, }; +use bincode::Options; use blake2::{Blake2s256, Digest}; use errors::{ProtocolExecutionErr, VerifyingKeyError}; +use k256::{ + ecdsa::{RecoveryId, Signature, VerifyingKey}, + EncodedPoint, +}; +use manul::signature::DigestVerifier; use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; +use serde_persistent_deserializer::{AsTransientDeserializer, PersistentDeserializer}; use sp_core::{sr25519, Pair}; use subxt::utils::AccountId32; -use synedrion::{ - ecdsa::VerifyingKey, - k256::{ - ecdsa::{RecoveryId, Signature}, - EncodedPoint, - }, - signature::{self, hazmat::PrehashVerifier}, - AuxInfo, ThresholdKeyShare, -}; +use synedrion::{k256::ProductionParams112, signature, AuxInfo, ThresholdKeyShare}; /// The current version number of the protocol message format or protocols themselves pub const PROTOCOL_MESSAGE_VERSION: u32 = 1; @@ -83,13 +82,13 @@ impl From for PartyId { } } -impl PrehashVerifier for PartyId { - fn verify_prehash( +impl DigestVerifier for PartyId { + fn verify_digest( &self, - prehash: &[u8], + digest: Blake2s256, signature: &sr25519::Signature, ) -> Result<(), signature::Error> { - if sr25519::Pair::verify(signature, prehash, &self.to_public()) { + if sr25519::Pair::verify(signature, digest.finalize(), &self.to_public()) { Ok(()) } else { Err(signature::Error::new()) @@ -124,17 +123,57 @@ impl fmt::Display for PartyId { } } -#[cfg(not(test))] -use synedrion::ProductionParams; +/// Session configuration used for manul sessions +pub struct EntropySessionParameters; + +impl manul::session::SessionParameters for EntropySessionParameters { + type Signer = execute_protocol::PairWrapper; + type Verifier = PartyId; + type Signature = sr25519::Signature; + type Digest = Blake2s256; + type WireFormat = BincodeWireFormat; +} + +/// Specifies the serialization used for protocol messages +#[derive(Debug)] +pub struct BincodeWireFormat; + +impl manul::session::WireFormat for BincodeWireFormat { + fn serialize(value: T) -> Result, manul::protocol::LocalError> { + Ok(bincode::config::DefaultOptions::new() + .serialize(&value) + .map_err(|e| manul::protocol::LocalError::new(format!("Serialization error: {e:?}")))? 
+ .into()) + } + + type Deserializer<'de> = PersistentDeserializer>; + + fn deserializer(bytes: &[u8]) -> Self::Deserializer<'_> { + PersistentDeserializer::new(BincodeDeserializer(bincode::de::Deserializer::from_slice( + bytes, + bincode::config::DefaultOptions::new(), + ))) + } +} + +/// A wrapper for a bincode deserializer implementing the trait needed to use it as our WireFormat +#[allow(missing_debug_implementations)] +pub struct BincodeDeserializer<'de>( + bincode::de::Deserializer, bincode::config::DefaultOptions>, +); + +impl<'de> AsTransientDeserializer<'de> for BincodeDeserializer<'de> { + type Error = bincode::Error; + + fn as_transient_deserializer<'a>( + &'a mut self, + ) -> impl serde::Deserializer<'de, Error = Self::Error> { + &mut self.0 + } +} + /// Parameters used for the threshold signing scheme in production -#[cfg(not(test))] -pub type KeyParams = ProductionParams; - -#[cfg(test)] -use synedrion::TestParams; -/// Parameters used for the threshold signing scheme in tests (faster but less secure) -#[cfg(test)] -pub type KeyParams = TestParams; +pub type KeyParams = ProductionParams112; pub use synedrion::KeyShare; diff --git a/crates/protocol/src/protocol_message.rs b/crates/protocol/src/protocol_message.rs index c836fca49..1ee971321 100644 --- a/crates/protocol/src/protocol_message.rs +++ b/crates/protocol/src/protocol_message.rs @@ -15,9 +15,8 @@ use std::str; +use manul::session::Message; use serde::{Deserialize, Serialize}; -use sp_core::sr25519; -use synedrion::sessions::MessageBundle; use crate::{protocol_transport::errors::ProtocolMessageErr, PartyId}; @@ -39,7 +38,7 @@ pub struct ProtocolMessage { #[derive(Debug, Clone, Serialize, Deserialize)] pub enum ProtocolMessagePayload { /// The signed protocol message - MessageBundle(Box>), + Message(Box>), /// A verifying key for parties who were not present in the key init session VerifyingKey(Vec), } @@ -54,15 +53,11 @@ impl TryFrom<&[u8]> for ProtocolMessage { } impl ProtocolMessage { - pub(crate) fn new( - from: &PartyId, - to: &PartyId, - payload: MessageBundle, - ) -> Self { + pub(crate) fn new(from: &PartyId, to: &PartyId, payload: Message) -> Self { Self { from: from.clone(), to: to.clone(), - payload: ProtocolMessagePayload::MessageBundle(Box::new(payload)), + payload: ProtocolMessagePayload::Message(Box::new(payload)), } } } diff --git a/crates/protocol/tests/helpers/mod.rs b/crates/protocol/tests/helpers/mod.rs index d4558c77e..91fd0ab3e 100644 --- a/crates/protocol/tests/helpers/mod.rs +++ b/crates/protocol/tests/helpers/mod.rs @@ -35,7 +35,7 @@ use std::{ time::Duration, }; use subxt::utils::AccountId32; -use synedrion::{AuxInfo, KeyResharingInputs, KeyShare, NewHolder, OldHolder, ThresholdKeyShare}; +use synedrion::{AuxInfo, KeyResharing, KeyShare, NewHolder, OldHolder, ThresholdKeyShare}; use tokio::{ net::{TcpListener, TcpStream}, time::timeout, @@ -120,7 +120,7 @@ pub async fn server( channels, &keyshare.unwrap(), &aux_info.unwrap(), - &session_info.message_hash, + (&session_info.message_hash).into(), &pair, tss_accounts, ) @@ -133,16 +133,16 @@ pub async fn server( let old_key = threshold_keyshare.unwrap(); let party_ids: BTreeSet = tss_accounts.iter().cloned().map(PartyId::new).collect(); - let inputs = KeyResharingInputs { - old_holder: Some(OldHolder { key_share: old_key.clone() }), - new_holder: Some(NewHolder { - verifying_key: old_key.verifying_key(), + let inputs = KeyResharing::new( + Some(OldHolder { key_share: old_key.clone() }), + Some(NewHolder { + verifying_key: 
old_key.verifying_key().unwrap(), old_threshold: party_ids.len(), old_holders: party_ids.clone(), }), - new_holders: party_ids.clone(), - new_threshold: old_key.threshold(), - }; + party_ids.clone(), + old_key.threshold(), + ); let new_keyshare = execute_reshare(session_id, channels, &pair, inputs, &party_ids, None).await?; diff --git a/crates/protocol/tests/protocol.rs b/crates/protocol/tests/protocol.rs index afbf04056..728b33cb3 100644 --- a/crates/protocol/tests/protocol.rs +++ b/crates/protocol/tests/protocol.rs @@ -19,12 +19,13 @@ use entropy_protocol::{KeyParams, PartyId, SessionId, SigningSessionInfo, ValidatorInfo}; use futures::future; +use k256::ecdsa::VerifyingKey; use rand_core::OsRng; use serial_test::serial; use sp_core::{sr25519, Pair}; use std::{cmp::min, time::Instant}; use subxt::utils::AccountId32; -use synedrion::{ecdsa::VerifyingKey, AuxInfo, KeyShare, ThresholdKeyShare}; +use synedrion::{AuxInfo, KeyShare, ThresholdKeyShare}; use tokio::{net::TcpListener, runtime::Runtime, sync::oneshot}; use x25519_dalek::StaticSecret; @@ -34,7 +35,7 @@ use helpers::{server, ProtocolOutput}; use std::collections::BTreeSet; /// The maximum number of worker threads that tokio should use -const MAX_THREADS: usize = 16; +const MAX_THREADS: usize = 8; #[test] #[serial] @@ -78,7 +79,7 @@ async fn test_sign_with_parties(num_parties: usize) { let (pairs, ids) = get_keypairs_and_ids(num_parties); let keyshares = KeyShare::::new_centralized(&mut OsRng, &ids, None); let aux_infos = AuxInfo::::new_centralized(&mut OsRng, &ids); - let verifying_key = keyshares[&PartyId::from(pairs[0].public())].verifying_key().unwrap(); + let verifying_key = keyshares[&PartyId::from(pairs[0].public())].verifying_key(); let parties: Vec<_> = pairs .iter() @@ -114,7 +115,7 @@ async fn test_sign_with_parties(num_parties: usize) { async fn test_refresh_with_parties(num_parties: usize) { let (pairs, ids) = get_keypairs_and_ids(num_parties); let keyshares = KeyShare::::new_centralized(&mut OsRng, &ids, None); - let verifying_key = keyshares[&PartyId::from(pairs[0].public())].verifying_key().unwrap(); + let verifying_key = keyshares[&PartyId::from(pairs[0].public())].verifying_key(); let session_id = SessionId::Reshare { verifying_key: verifying_key.to_encoded_point(true).as_bytes().to_vec(), @@ -135,7 +136,7 @@ async fn test_refresh_with_parties(num_parties: usize) { let threshold = parties.len(); let mut outputs = test_protocol_with_parties(parties, session_id, threshold).await; if let ProtocolOutput::Reshare(keyshare) = outputs.pop().unwrap() { - assert!(keyshare.verifying_key() == verifying_key); + assert!(keyshare.verifying_key().unwrap() == verifying_key); } else { panic!("Unexpected protocol output"); } @@ -157,7 +158,7 @@ async fn test_dkg_with_parties(num_parties: usize) { async fn test_dkg_and_sign_with_parties(num_parties: usize) { let threshold = num_parties - 1; if threshold < 2 { - panic!("Not enought parties to test threshold signing"); + panic!("Not enough parties to test threshold signing"); } let (pairs, ids) = get_keypairs_and_ids(num_parties); let dkg_parties = @@ -177,7 +178,7 @@ async fn test_dkg_and_sign_with_parties(num_parties: usize) { .into_iter() .filter_map(|output| { if let ProtocolOutput::Dkg((threshold_keyshare, aux_info)) = output { - let keyshare = threshold_keyshare.to_key_share(&signing_committee); + let keyshare = threshold_keyshare.to_key_share(&signing_committee).unwrap(); if signing_committee.contains(keyshare.owner()) { let pair = pairs .iter() @@ -198,7 +199,7 @@ async fn 
test_dkg_and_sign_with_parties(num_parties: usize) { }) .collect(); - let verifying_key = parties[0].keyshare.clone().unwrap().verifying_key().unwrap(); + let verifying_key = parties[0].keyshare.clone().unwrap().verifying_key(); let message_hash = [0u8; 32]; let session_id = SessionId::Sign(SigningSessionInfo { diff --git a/crates/testing-utils/Cargo.toml b/crates/testing-utils/Cargo.toml index 416430a96..f8310a71d 100644 --- a/crates/testing-utils/Cargo.toml +++ b/crates/testing-utils/Cargo.toml @@ -24,11 +24,13 @@ entropy-tss={ version="0.4.0-rc.1", path="../threshold-signature-server", featur "test_helpers", ] } entropy-protocol={ version="0.4.0-rc.1", path="../protocol" } -synedrion="0.2.0" hex="0.4.3" rand_core="0.6.4" rand="0.8.5" tdx-quote={ version="0.0.3", features=["mock"] } +k256={ version="0.13", default-features=false, features=["ecdsa"] } +synedrion={ version="0.3.0", features=["k256", "dev"] } +manul={ version="0.2.1", features=["tokio", "dev"] } # Logging tracing ="0.1.41" diff --git a/crates/testing-utils/keyshares/production/keyshare-held-by-alice.keyshare b/crates/testing-utils/keyshares/production/keyshare-held-by-alice.keyshare index 5bfdf562c..fc5fabd66 100644 Binary files a/crates/testing-utils/keyshares/production/keyshare-held-by-alice.keyshare and b/crates/testing-utils/keyshares/production/keyshare-held-by-alice.keyshare differ diff --git a/crates/testing-utils/keyshares/production/keyshare-held-by-bob.keyshare b/crates/testing-utils/keyshares/production/keyshare-held-by-bob.keyshare index 3b99543ae..a24a0891d 100644 Binary files a/crates/testing-utils/keyshares/production/keyshare-held-by-bob.keyshare and b/crates/testing-utils/keyshares/production/keyshare-held-by-bob.keyshare differ diff --git a/crates/testing-utils/keyshares/production/keyshare-held-by-charlie.keyshare b/crates/testing-utils/keyshares/production/keyshare-held-by-charlie.keyshare index 1af50b17d..d3143274c 100644 Binary files a/crates/testing-utils/keyshares/production/keyshare-held-by-charlie.keyshare and b/crates/testing-utils/keyshares/production/keyshare-held-by-charlie.keyshare differ diff --git a/crates/testing-utils/keyshares/test/keyshare-held-by-alice.keyshare b/crates/testing-utils/keyshares/test/keyshare-held-by-alice.keyshare deleted file mode 100644 index 65e89b906..000000000 Binary files a/crates/testing-utils/keyshares/test/keyshare-held-by-alice.keyshare and /dev/null differ diff --git a/crates/testing-utils/keyshares/test/keyshare-held-by-bob.keyshare b/crates/testing-utils/keyshares/test/keyshare-held-by-bob.keyshare deleted file mode 100644 index 2877283cc..000000000 Binary files a/crates/testing-utils/keyshares/test/keyshare-held-by-bob.keyshare and /dev/null differ diff --git a/crates/testing-utils/keyshares/test/keyshare-held-by-charlie.keyshare b/crates/testing-utils/keyshares/test/keyshare-held-by-charlie.keyshare deleted file mode 100644 index d2559b0c5..000000000 Binary files a/crates/testing-utils/keyshares/test/keyshare-held-by-charlie.keyshare and /dev/null differ diff --git a/crates/testing-utils/src/create_test_keyshares.rs b/crates/testing-utils/src/create_test_keyshares.rs index ecd89853a..d17c43b9b 100644 --- a/crates/testing-utils/src/create_test_keyshares.rs +++ b/crates/testing-utils/src/create_test_keyshares.rs @@ -15,28 +15,25 @@ //! Simulates 3 TSS nodes running the reshare protocol in order to create keyshares with a //! 
pre-defined distributed keypair for testing entropy-tss -use entropy_protocol::{execute_protocol::PairWrapper, PartyId}; +use entropy_protocol::{execute_protocol::PairWrapper, EntropySessionParameters, PartyId}; +use k256::ecdsa::SigningKey; +use manul::dev::run_sync; use rand_core::OsRng; use sp_core::{sr25519, Pair}; use synedrion::{ - ecdsa::SigningKey, make_key_resharing_session, sessions::SessionId, AuxInfo, - KeyResharingInputs, KeyShare, NewHolder, OldHolder, SchemeParams, ThresholdKeyShare, + k256::ProductionParams112, AuxInfo, KeyResharing, KeyShare, NewHolder, OldHolder, + ThresholdKeyShare, }; -use synedrion_test_environment::run_nodes; use std::collections::BTreeSet; /// Given a secp256k1 secret key and 3 signing keypairs for the TSS parties, generate a set of /// threshold keyshares with auxiliary info -pub async fn create_test_keyshares( +pub async fn create_test_keyshares( distributed_secret_key_bytes: [u8; 32], signers: [sr25519::Pair; 3], -) -> Vec<(ThresholdKeyShare, AuxInfo)> -where - Params: SchemeParams, -{ +) -> Vec<(ThresholdKeyShare, AuxInfo)> { let signing_key = SigningKey::from_bytes(&(distributed_secret_key_bytes).into()).unwrap(); - let session_id = SessionId::from_seed(b"12345".as_slice()); let all_parties = signers.iter().map(|pair| PartyId::from(pair.public())).collect::>(); @@ -44,246 +41,60 @@ where // Remove one member as we initially create 2 of 2 keyshares, then reshare to 2 of 3 old_holders.remove(&PartyId::from(signers[2].public())); - let keyshares = - KeyShare::::new_centralized(&mut OsRng, &old_holders, Some(&signing_key)); - let aux_infos = AuxInfo::::new_centralized(&mut OsRng, &all_parties); + let keyshares = KeyShare::::new_centralized( + &mut OsRng, + &old_holders, + Some(&signing_key), + ); + let aux_infos = + AuxInfo::::new_centralized(&mut OsRng, &all_parties); let new_holder = NewHolder { - verifying_key: keyshares.values().next().unwrap().verifying_key().unwrap(), + verifying_key: keyshares.values().next().unwrap().verifying_key(), old_threshold: 2, old_holders, }; - let mut sessions = signers[..2] + let mut signers_and_entry_points = signers[..2] .iter() .map(|pair| { - let inputs = KeyResharingInputs { - old_holder: Some(OldHolder { + let entry_point = KeyResharing::new( + Some(OldHolder { key_share: ThresholdKeyShare::from_key_share( &keyshares[&PartyId::from(pair.public())], ), }), - new_holder: Some(new_holder.clone()), - new_holders: all_parties.clone(), - new_threshold: 2, - }; - make_key_resharing_session( - &mut OsRng, - session_id, - PairWrapper(pair.clone()), - &all_parties, - inputs, - ) - .unwrap() + Some(new_holder.clone()), + all_parties.clone(), + 2, // The threshold + ); + + (PairWrapper(pair.clone()), entry_point) }) .collect::>(); - let new_holder_session = { - let inputs = KeyResharingInputs { - old_holder: None, - new_holder: Some(new_holder.clone()), - new_holders: all_parties.clone(), - new_threshold: 2, - }; - make_key_resharing_session( - &mut OsRng, - session_id, - PairWrapper(signers[2].clone()), - &all_parties, - inputs, - ) - .unwrap() + let new_holder_signer_and_entry_point = { + let entry_point = KeyResharing::new( + None, + Some(new_holder.clone()), + all_parties.clone(), + 2, // The threshold + ); + + (PairWrapper(signers[2].clone()), entry_point) }; - sessions.push(new_holder_session); + signers_and_entry_points.push(new_holder_signer_and_entry_point); - let new_t_key_shares = run_nodes(sessions).await; + let new_t_key_shares = + run_sync::<_, EntropySessionParameters>(&mut OsRng, signers_and_entry_points) 
+ .unwrap() + .results() + .unwrap(); let mut output = Vec::new(); - for (i, party_id) in signers.iter().map(|pair| PartyId::from(pair.public())).enumerate() { - output.push((new_t_key_shares[i].clone().unwrap(), aux_infos[&party_id].clone())); + for party_id in signers.iter().map(|pair| PartyId::from(pair.public())) { + output.push((new_t_key_shares[&party_id].clone().unwrap(), aux_infos[&party_id].clone())); } output } - -/// This is used to run the synedrion protocols - it is mostly copied from the synedrion integration -/// tests -mod synedrion_test_environment { - use entropy_protocol::{execute_protocol::PairWrapper, PartyId}; - use rand::Rng; - use rand_core::OsRng; - use sp_core::sr25519; - use std::collections::BTreeMap; - use synedrion::{FinalizeOutcome, MessageBundle, ProtocolResult, Session}; - use tokio::{ - sync::mpsc, - time::{sleep, Duration}, - }; - type MessageOut = (PartyId, PartyId, MessageBundle); - type MessageIn = (PartyId, MessageBundle); - - fn key_to_str(key: &PartyId) -> String { - key.to_string() - } - - /// Run a generic synedrion session - async fn run_session( - tx: mpsc::Sender, - rx: mpsc::Receiver, - session: Session, - ) -> Res::Success { - let mut rx = rx; - - let mut session = session; - let mut cached_messages = Vec::new(); - - let key = session.verifier(); - let key_str = key_to_str(&key); - - loop { - println!("{key_str}: *** starting round {:?} ***", session.current_round()); - - // This is kept in the main task since it's mutable, - // and we don't want to bother with synchronization. - let mut accum = session.make_accumulator(); - - // Note: generating/sending messages and verifying newly received messages - // can be done in parallel, with the results being assembled into `accum` - // sequentially in the host task. - - let destinations = session.message_destinations(); - for destination in destinations.iter() { - // In production usage, this will happen in a spawned task - // (since it can take some time to create a message), - // and the artifact will be sent back to the host task - // to be added to the accumulator. - let (message, artifact) = session.make_message(&mut OsRng, destination).unwrap(); - println!("{key_str}: sending a message to {}", key_to_str(destination)); - tx.send((key.clone(), destination.clone(), message)).await.unwrap(); - - // This will happen in a host task - accum.add_artifact(artifact).unwrap(); - } - - for preprocessed in cached_messages { - // In production usage, this will happen in a spawned task. - println!("{key_str}: applying a cached message"); - let result = session.process_message(&mut OsRng, preprocessed).unwrap(); - - // This will happen in a host task. - accum.add_processed_message(result).unwrap().unwrap(); - } - - while !session.can_finalize(&accum).unwrap() { - // This can be checked if a timeout expired, to see which nodes have not responded yet. - let unresponsive_parties = session.missing_messages(&accum).unwrap(); - assert!(!unresponsive_parties.is_empty()); - - println!("{key_str}: waiting for a message"); - let (from, message) = rx.recv().await.unwrap(); - - // Perform quick checks before proceeding with the verification. - let preprocessed = session.preprocess_message(&mut accum, &from, message).unwrap(); - - if let Some(preprocessed) = preprocessed { - // In production usage, this will happen in a spawned task. - println!("{key_str}: applying a message from {}", key_to_str(&from)); - let result = session.process_message(&mut OsRng, preprocessed).unwrap(); - - // This will happen in a host task. 
- accum.add_processed_message(result).unwrap().unwrap(); - } - } - - println!("{key_str}: finalizing the round"); - - match session.finalize_round(&mut OsRng, accum).unwrap() { - FinalizeOutcome::Success(res) => break res, - FinalizeOutcome::AnotherRound { - session: new_session, - cached_messages: new_cached_messages, - } => { - session = new_session; - cached_messages = new_cached_messages; - }, - } - } - } - - async fn message_dispatcher( - txs: BTreeMap>, - rx: mpsc::Receiver, - ) { - let mut rx = rx; - let mut messages = Vec::::new(); - loop { - let msg = match rx.recv().await { - Some(msg) => msg, - None => break, - }; - messages.push(msg); - - while let Ok(msg) = rx.try_recv() { - messages.push(msg) - } - - while !messages.is_empty() { - // Pull a random message from the list, - // to increase the chances that they are delivered out of order. - let message_idx = rand::thread_rng().gen_range(0..messages.len()); - let (id_from, id_to, message) = messages.swap_remove(message_idx); - - txs[&id_to].send((id_from, message)).await.unwrap(); - - // Give up execution so that the tasks could process messages. - sleep(Duration::from_millis(0)).await; - - if let Ok(msg) = rx.try_recv() { - messages.push(msg); - }; - } - } - } - - pub async fn run_nodes( - sessions: Vec>, - ) -> Vec - where - Res: ProtocolResult + Send + 'static, - Res::Success: Send + 'static, - { - let num_parties = sessions.len(); - - let (dispatcher_tx, dispatcher_rx) = mpsc::channel::(100); - - let channels = (0..num_parties).map(|_| mpsc::channel::(100)); - let (txs, rxs): (Vec>, Vec>) = - channels.unzip(); - let tx_map = - sessions.iter().map(|session| session.verifier()).zip(txs.into_iter()).collect(); - - let dispatcher_task = message_dispatcher(tx_map, dispatcher_rx); - let dispatcher = tokio::spawn(dispatcher_task); - - let handles: Vec> = rxs - .into_iter() - .zip(sessions.into_iter()) - .map(|(rx, session)| { - let node_task = run_session(dispatcher_tx.clone(), rx, session); - tokio::spawn(node_task) - }) - .collect(); - - // Drop the last copy of the dispatcher's incoming channel so that it could finish. 
- drop(dispatcher_tx); - - let mut results = Vec::with_capacity(num_parties); - for handle in handles { - results.push(handle.await.unwrap()); - } - - dispatcher.await.unwrap(); - - results - } -} diff --git a/crates/threshold-signature-server/Cargo.toml b/crates/threshold-signature-server/Cargo.toml index 1edae38d8..fffd556a8 100644 --- a/crates/threshold-signature-server/Cargo.toml +++ b/crates/threshold-signature-server/Cargo.toml @@ -22,9 +22,11 @@ zeroize ="1.8.1" hex ="0.4.3" reqwest-eventsource="0.6" serde_derive ="1.0.147" -synedrion ="0.2.0" +synedrion ={ version="0.3.0", features=["k256", "bip32"] } +manul ={ version="0.2.1" } strum ="0.27.1" backoff ={ version="0.4.0", features=["tokio"] } +k256 ={ version="0.13", features=["ecdsa"] } # Async futures="0.3" diff --git a/crates/threshold-signature-server/src/signing_client/api.rs b/crates/threshold-signature-server/src/signing_client/api.rs index ae631e482..8c5758709 100644 --- a/crates/threshold-signature-server/src/signing_client/api.rs +++ b/crates/threshold-signature-server/src/signing_client/api.rs @@ -41,7 +41,7 @@ use subxt::{ utils::{AccountId32 as SubxtAccountId32, Static}, OnlineClient, }; -use synedrion::{AuxInfo, KeyResharingInputs, NewHolder, OldHolder, ThresholdKeyShare}; +use synedrion::{AuxInfo, KeyResharing, NewHolder, OldHolder, ThresholdKeyShare}; use tokio::time::timeout; use x25519_dalek::StaticSecret; @@ -181,16 +181,16 @@ pub async fn do_proactive_refresh( let party_ids: BTreeSet = tss_accounts.iter().cloned().map(PartyId::new).collect(); - let inputs = KeyResharingInputs { - old_holder: Some(OldHolder { key_share: old_key.clone() }), - new_holder: Some(NewHolder { - verifying_key: old_key.verifying_key(), + let inputs = KeyResharing::new( + Some(OldHolder { key_share: old_key.clone() }), + Some(NewHolder { + verifying_key: old_key.verifying_key()?, old_threshold: party_ids.len(), old_holders: party_ids.clone(), }), - new_holders: party_ids.clone(), - new_threshold: old_key.threshold(), - }; + party_ids.clone(), + old_key.threshold(), + ); let channels = get_channels( state, diff --git a/crates/threshold-signature-server/src/signing_client/errors.rs b/crates/threshold-signature-server/src/signing_client/errors.rs index a2e1c55c9..6f7e5f8f2 100644 --- a/crates/threshold-signature-server/src/signing_client/errors.rs +++ b/crates/threshold-signature-server/src/signing_client/errors.rs @@ -118,6 +118,8 @@ pub enum ProtocolErr { NotReady, #[error("Application State Error: {0}")] AppStateError(#[from] crate::helpers::app_state::AppStateError), + #[error("Manul local error: {0}")] + ManulLocal(String), } impl IntoResponse for ProtocolErr { @@ -128,6 +130,12 @@ impl IntoResponse for ProtocolErr { } } +impl From for ProtocolErr { + fn from(err: manul::session::LocalError) -> Self { + Self::ManulLocal(format!("{err:?}")) + } +} + /// Errors for the `subscribe` API #[derive(Debug, Error)] pub enum SubscribeErr { diff --git a/crates/threshold-signature-server/src/signing_client/protocol_execution/mod.rs b/crates/threshold-signature-server/src/signing_client/protocol_execution/mod.rs index 38140333b..412ddd3a8 100644 --- a/crates/threshold-signature-server/src/signing_client/protocol_execution/mod.rs +++ b/crates/threshold-signature-server/src/signing_client/protocol_execution/mod.rs @@ -122,9 +122,9 @@ impl<'a> ThresholdSigningService<'a> { let rsig = execute_signing_protocol( session_id, channels, - &key_share.to_key_share(&parties), + &key_share.to_key_share(&parties)?, aux_info, - &message_hash, + 
&message_hash.into(), threshold_signer, threshold_accounts, ) diff --git a/crates/threshold-signature-server/src/user/api.rs b/crates/threshold-signature-server/src/user/api.rs index abcf16497..100c0f529 100644 --- a/crates/threshold-signature-server/src/user/api.rs +++ b/crates/threshold-signature-server/src/user/api.rs @@ -496,7 +496,7 @@ async fn setup_dkg( ) .await?; - let verifying_key = key_share.verifying_key().to_encoded_point(true).as_bytes().to_vec(); + let verifying_key = key_share.verifying_key()?.to_encoded_point(true).as_bytes().to_vec(); let serialized_key_share = key_serialize(&(key_share, aux_info)) .map_err(|_| UserErr::KvSerialize("Kv Serialize Error".to_string()))?; diff --git a/crates/threshold-signature-server/src/user/errors.rs b/crates/threshold-signature-server/src/user/errors.rs index ce2fe9e7a..a63cf59ee 100644 --- a/crates/threshold-signature-server/src/user/errors.rs +++ b/crates/threshold-signature-server/src/user/errors.rs @@ -187,6 +187,8 @@ pub enum UserErr { TryFromSlice(#[from] std::array::TryFromSliceError), #[error("Application State Error: {0}")] AppStateError(#[from] crate::helpers::app_state::AppStateError), + #[error("Manul local error: {0}")] + ManulLocal(String), } impl From for UserErr { @@ -202,3 +204,9 @@ impl IntoResponse for UserErr { (StatusCode::INTERNAL_SERVER_ERROR, body).into_response() } } + +impl From for UserErr { + fn from(err: manul::session::LocalError) -> Self { + Self::ManulLocal(format!("{err:?}")) + } +} diff --git a/crates/threshold-signature-server/src/user/tests.rs b/crates/threshold-signature-server/src/user/tests.rs index ebfcb49ce..24ad98863 100644 --- a/crates/threshold-signature-server/src/user/tests.rs +++ b/crates/threshold-signature-server/src/user/tests.rs @@ -48,6 +48,7 @@ use entropy_testing_utils::{ test_node_process_testing_state, ChainSpecType, }; use futures::future::try_join_all; +use k256::ecdsa::{RecoveryId, Signature as k256Signature, VerifyingKey}; use more_asserts as ma; use parity_scale_codec::Encode; use rand::Rng; @@ -65,8 +66,7 @@ use subxt::{ utils::{AccountId32 as subxtAccountId32, MultiAddress, MultiSignature}, OnlineClient, }; -use synedrion::k256::ecdsa::{RecoveryId, Signature as k256Signature, VerifyingKey}; -use synedrion::{ecdsa::VerifyingKey as SynedrionVerifyingKey, DeriveChildKey}; +use synedrion::DeriveChildKey; use tokio_tungstenite::connect_async; use crate::{ @@ -483,8 +483,7 @@ async fn signature_request_with_derived_account_works() { // We expect that the signature we get back is valid let message_hash = Hasher::keccak(PREIMAGE_SHOULD_SUCCEED); let verifying_key = - SynedrionVerifyingKey::try_from(signature_request.signature_verifying_key.as_slice()) - .unwrap(); + VerifyingKey::try_from(signature_request.signature_verifying_key.as_slice()).unwrap(); let all_signers_info = get_all_signers_from_chain( &spawn_results.chain_connection.api, @@ -561,10 +560,9 @@ async fn signature_request_overload() { .map_err(|e| anyhow!("Failed to submit transaction request: {}", e))?; let message_hash = Hasher::keccak(&hex::decode(signature_request.message).unwrap()); - let verifying_key = SynedrionVerifyingKey::try_from( - signature_request.signature_verifying_key.as_slice(), - ) - .map_err(|e| anyhow!("Failed to parse verifying key: {}", e))?; + let verifying_key = + VerifyingKey::try_from(signature_request.signature_verifying_key.as_slice()) + .map_err(|e| anyhow!("Failed to parse verifying key: {}", e))?; let all_signers_info = get_all_signers_from_chain(&api, &rpc) .await @@ -1307,8 +1305,14 @@ 
async fn test_jumpstart_network() { entropy_kvdb::kv_manager::helpers::deserialize(&response_key); assert!(key_share.is_some()); - verifying_key = - key_share.unwrap().0.verifying_key().to_encoded_point(true).as_bytes().to_vec(); + verifying_key = key_share + .unwrap() + .0 + .verifying_key() + .unwrap() + .to_encoded_point(true) + .as_bytes() + .to_vec(); } let jump_start_progress_query = entropy::storage().staking_extension().jump_start_progress(); @@ -2065,8 +2069,7 @@ async fn test_registration_flow() { ); // Next, let's check that the child verifying key matches - let network_verifying_key = - SynedrionVerifyingKey::try_from(network_verifying_key.as_slice()).unwrap(); + let network_verifying_key = VerifyingKey::try_from(network_verifying_key.as_slice()).unwrap(); // We hardcode the derivation path here since we know that there's only been one registration // request (ours). diff --git a/crates/threshold-signature-server/src/validator/api.rs b/crates/threshold-signature-server/src/validator/api.rs index 45cc4a032..00ba0d9eb 100644 --- a/crates/threshold-signature-server/src/validator/api.rs +++ b/crates/threshold-signature-server/src/validator/api.rs @@ -41,7 +41,7 @@ use subxt::{ backend::legacy::LegacyRpcMethods, ext::sp_core::sr25519, tx::PairSigner, utils::AccountId32, OnlineClient, }; -use synedrion::{KeyResharingInputs, NewHolder, OldHolder}; +use synedrion::{KeyResharing, NewHolder, OldHolder}; /// HTTP POST endpoint called by the off-chain worker (propagation pallet) during network reshare. /// @@ -150,12 +150,8 @@ async fn do_reshare( .ok_or_else(|| ValidatorErr::ChainFetch("Failed to get signers info"))? .threshold; - let inputs = KeyResharingInputs { - old_holder, - new_holder: Some(new_holder), - new_holders: new_holders.clone(), - new_threshold: threshold as usize, - }; + let inputs = + KeyResharing::new(old_holder, Some(new_holder), new_holders.clone(), threshold as usize); let session_id = SessionId::Reshare { verifying_key, block_number: data.block_number }; let account_id = app_state.subxt_account_id(); diff --git a/crates/threshold-signature-server/src/validator/errors.rs b/crates/threshold-signature-server/src/validator/errors.rs index 73671472a..910e486d0 100644 --- a/crates/threshold-signature-server/src/validator/errors.rs +++ b/crates/threshold-signature-server/src/validator/errors.rs @@ -21,7 +21,6 @@ use axum::{ response::{IntoResponse, Response}, }; use entropy_protocol::{errors::ProtocolExecutionErr, sign_and_encrypt::EncryptedSignedMessageErr}; -use synedrion::sessions; use thiserror::Error; use tokio::sync::oneshot::error::RecvError; @@ -75,8 +74,6 @@ pub enum ValidatorErr { Timeout(#[from] tokio::time::error::Elapsed), #[error("Oneshot timeout error: {0}")] OneshotTimeout(#[from] RecvError), - #[error("Synedrion session creation error: {0}")] - SessionCreation(sessions::LocalError), #[error("No output from reshare protocol")] NoOutputFromReshareProtocol, #[error("Protocol Error: {0}")] diff --git a/crates/threshold-signature-server/src/validator/tests.rs b/crates/threshold-signature-server/src/validator/tests.rs index 62bd966a6..7bb513376 100644 --- a/crates/threshold-signature-server/src/validator/tests.rs +++ b/crates/threshold-signature-server/src/validator/tests.rs @@ -50,13 +50,13 @@ use entropy_testing_utils::{ substrate_context::{test_node_process_testing_state, testing_context}, test_context_stationary, ChainSpecType, }; +use k256::ecdsa::VerifyingKey; use parity_scale_codec::Encode; use serial_test::serial; use sp_core::Pair; use 
sp_keyring::AccountKeyring; use std::collections::HashSet; use subxt::{backend::legacy::LegacyRpcMethods, utils::AccountId32, OnlineClient}; -use synedrion::k256::ecdsa::VerifyingKey; #[tokio::test] #[serial] diff --git a/crates/threshold-signature-server/tests/jumpstart_register_sign.rs b/crates/threshold-signature-server/tests/jumpstart_register_sign.rs index dc5d06e5a..34799da35 100644 --- a/crates/threshold-signature-server/tests/jumpstart_register_sign.rs +++ b/crates/threshold-signature-server/tests/jumpstart_register_sign.rs @@ -29,11 +29,11 @@ use entropy_testing_utils::{ ChainSpecType, }; use entropy_tss::helpers::tests::{do_jump_start, initialize_test_logger}; +use k256::ecdsa::VerifyingKey; use serial_test::serial; use sp_core::Pair; use sp_keyring::AccountKeyring; use subxt::utils::AccountId32; -use synedrion::k256::ecdsa::VerifyingKey; // FIXME (#1119): This fails intermittently and needs to be addressed. For now we ignore it since // it's producing false negatives on our CI runs. diff --git a/crates/threshold-signature-server/tests/sign_eth_tx.rs b/crates/threshold-signature-server/tests/sign_eth_tx.rs index 205855c97..a4371a7a1 100644 --- a/crates/threshold-signature-server/tests/sign_eth_tx.rs +++ b/crates/threshold-signature-server/tests/sign_eth_tx.rs @@ -36,11 +36,11 @@ use ethers_core::{ rlp::{Decodable, Rlp}, }, }; +use k256::ecdsa::VerifyingKey; use serial_test::serial; use sp_core::Pair; use sp_keyring::AccountKeyring; use subxt::utils::AccountId32; -use synedrion::k256::ecdsa::VerifyingKey; const GOERLI_CHAIN_ID: u64 = 5; diff --git a/deny.toml b/deny.toml index 68fdcdd1a..2206162ae 100644 --- a/deny.toml +++ b/deny.toml @@ -40,6 +40,7 @@ exceptions=[ { allow=["AGPL-3.0"], name="entropy-programs-core" }, { allow=["AGPL-3.0"], name="entropy-programs-runtime" }, { allow=["AGPL-3.0"], name="synedrion" }, + { allow=["AGPL-3.0"], name="manul" }, { allow=["AGPL-3.0"], name="tdx-quote" }, { allow=["AGPL-3.0"], name="configfs-tsm" }, diff --git a/pallets/registry/Cargo.toml b/pallets/registry/Cargo.toml index d2b33cc1a..f993eed8d 100644 --- a/pallets/registry/Cargo.toml +++ b/pallets/registry/Cargo.toml @@ -16,7 +16,8 @@ bip32 ={ version="0.5.3", default-features=false, features=["alloc"] } codec ={ package="parity-scale-codec", version="3.6.3", default-features=false, features=["derive"] } log ={ version="0.4.27", default-features=false } scale-info={ version="2.11", default-features=false, features=["derive"] } -synedrion ={ version="0.2.0", default-features=false } +synedrion ={ version="0.3.0", default-features=false, features=["bip32", "k256"] } +k256 ={ version="0.13", default-features=false, features=["ecdsa"] } rand ={ version="0.8.5", default-features=false, features=["alloc"] } frame-benchmarking={ version="38.0.0", default-features=false, optional=true } diff --git a/pallets/registry/src/benchmarking.rs b/pallets/registry/src/benchmarking.rs index 6c7b466b9..513e03233 100644 --- a/pallets/registry/src/benchmarking.rs +++ b/pallets/registry/src/benchmarking.rs @@ -235,7 +235,7 @@ mod benchmarks { use synedrion::DeriveChildKey; let network_verifying_key = - synedrion::ecdsa::VerifyingKey::try_from(network_verifying_key.as_slice()).unwrap(); + k256::ecdsa::VerifyingKey::try_from(network_verifying_key.as_slice()).unwrap(); // We subtract one from the count since this gets incremented after a succesful registration, // and we're interested in the account we just registered. 
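For context on the hunks above and below: ECDSA types are now imported from `k256` directly (added as a dependency in the Cargo.toml changes) rather than through synedrion's former `ecdsa`/`k256::ecdsa` re-export paths. The sketch below is illustrative only and not part of this diff — `verifying_key_roundtrip` is a hypothetical helper — but the individual calls mirror the ones these call sites use (compressed SEC1 encoding via `to_encoded_point(true)`, parsing via `VerifyingKey::try_from`).

```rust
use k256::ecdsa::{SigningKey, VerifyingKey};

// Hypothetical helper, not part of this diff: round-trips a verifying key through
// the 33-byte compressed SEC1 encoding used for verifying keys throughout this change.
fn verifying_key_roundtrip(secret: [u8; 32]) -> Vec<u8> {
    // Build a signing key from raw secret-scalar bytes, as `create_test_keyshares` does.
    let signing_key = SigningKey::from_bytes(&secret.into()).expect("valid secret scalar");

    // Compressed SEC1 encoding of the corresponding verifying key.
    let encoded = signing_key.verifying_key().to_encoded_point(true).as_bytes().to_vec();

    // Parsing now goes through k256 directly rather than a synedrion re-export.
    let parsed = VerifyingKey::try_from(encoded.as_slice()).expect("valid SEC1 point");
    assert_eq!(&parsed, signing_key.verifying_key());

    encoded
}
```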
diff --git a/pallets/registry/src/lib.rs b/pallets/registry/src/lib.rs
index 66cd703fc..561d5f958 100644
--- a/pallets/registry/src/lib.rs
+++ b/pallets/registry/src/lib.rs
@@ -367,7 +367,8 @@ pub mod pallet {
             programs_data: BoundedVec<ProgramInstance<T>, T::MaxProgramHashes>,
         ) -> DispatchResultWithPostInfo {
             use core::str::FromStr;
-            use synedrion::{ecdsa::VerifyingKey as SynedrionVerifyingKey, DeriveChildKey};
+            use k256::ecdsa::VerifyingKey as SynedrionVerifyingKey;
+            use synedrion::DeriveChildKey;
 
             let signature_request_account = ensure_signed(origin)?;
 
diff --git a/pallets/registry/src/tests.rs b/pallets/registry/src/tests.rs
index 206f7ce4f..3307d7a86 100644
--- a/pallets/registry/src/tests.rs
+++ b/pallets/registry/src/tests.rs
@@ -84,7 +84,8 @@ fn it_tests_get_validators_info() {
 #[test]
 fn it_registers_a_user() {
     new_test_ext().execute_with(|| {
-        use synedrion::{ecdsa::VerifyingKey as SynedrionVerifyingKey, DeriveChildKey};
+        use k256::ecdsa::VerifyingKey as SynedrionVerifyingKey;
+        use synedrion::DeriveChildKey;
 
         let (alice, bob, _charlie) = (1u64, 2, 3);
 
@@ -152,7 +153,8 @@ fn it_increases_program_reference_count_on_register() {
 #[test]
 fn it_registers_different_users_with_the_same_sig_req_account() {
     new_test_ext().execute_with(|| {
-        use synedrion::{ecdsa::VerifyingKey as SynedrionVerifyingKey, DeriveChildKey};
+        use k256::ecdsa::VerifyingKey as SynedrionVerifyingKey;
+        use synedrion::DeriveChildKey;
 
         let (alice, bob, _charlie) = (1u64, 2, 3);
 
diff --git a/scripts/create-test-keyshares/Cargo.toml b/scripts/create-test-keyshares/Cargo.toml
index 5efc19e42..e33c0130a 100644
--- a/scripts/create-test-keyshares/Cargo.toml
+++ b/scripts/create-test-keyshares/Cargo.toml
@@ -15,7 +15,7 @@ tokio={ version="1.44", features=["macros", "fs", "rt-multi-thread", "io-util",
 entropy-shared={ version="0.4.0-rc.1", path="../../crates/shared" }
 entropy-kvdb={ version="0.4.0-rc.1", path="../../crates/kvdb", default-features=false }
 sp-core="34.0.0"
-synedrion="0.2.0"
+synedrion="0.3.0"
 entropy-tss={ version="0.4.0-rc.1", path="../../crates/threshold-signature-server", features=[
   "test_helpers",
 ] }
diff --git a/scripts/create-test-keyshares/src/main.rs b/scripts/create-test-keyshares/src/main.rs
index e63c70885..bc74260d5 100644
--- a/scripts/create-test-keyshares/src/main.rs
+++ b/scripts/create-test-keyshares/src/main.rs
@@ -27,7 +27,6 @@ use entropy_tss::helpers::{
 };
 use sp_core::sr25519;
 use std::{env::args, iter::zip, path::PathBuf};
-use synedrion::{ProductionParams, TestParams};
 
 #[tokio::main]
 async fn main() {
@@ -53,16 +52,8 @@ async fn main() {
     let keypairs: [sr25519::Pair; 3] =
         keypairs.try_into().map_err(|_| "Cannot convert keypair vector to array").unwrap();
 
-    // Create and write test keyshares
-    let test_keyshares = create_test_keyshares::<TestParams>(secret_key, keypairs.clone()).await;
-    let test_keyshares_serialized: Vec<_> =
-        test_keyshares.iter().map(|k| serialize(k).unwrap()).collect();
-    let keyshares_and_names = zip(test_keyshares_serialized, names.clone()).collect();
-    write_keyshares(base_path.join("test"), keyshares_and_names).await;
-
     // Create and write production keyshares
-    let production_keyshares =
-        create_test_keyshares::<ProductionParams>(secret_key, keypairs.clone()).await;
+    let production_keyshares = create_test_keyshares(secret_key, keypairs.clone()).await;
     let production_keyshres_serialized: Vec<_> =
         production_keyshares.iter().map(|k| serialize(k).unwrap()).collect();
     let keyshares_and_names = zip(production_keyshres_serialized, names).collect();
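For reference, the error-handling pattern this change repeats in `signing_client/errors.rs` and `user/errors.rs`: `manul::session::LocalError` is captured as a string in a dedicated variant so the existing `thiserror` enums and their `IntoResponse` impls keep working unchanged. A condensed sketch, with `ExampleErr` as a hypothetical stand-in for `ProtocolErr`/`UserErr`:

```rust
use thiserror::Error;

// `ExampleErr` is a hypothetical stand-in for the real error enums; the conversion
// body matches the `From` impls added in this diff.
#[derive(Debug, Error)]
pub enum ExampleErr {
    #[error("Manul local error: {0}")]
    ManulLocal(String),
}

impl From<manul::session::LocalError> for ExampleErr {
    fn from(err: manul::session::LocalError) -> Self {
        // The LocalError is Debug-formatted into the string-holding variant.
        Self::ManulLocal(format!("{err:?}"))
    }
}
```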