diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..0e465b2 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,2 @@ +[target.wasm32-unknown-unknown] +rustflags = ["--cfg", "getrandom_backend=\"wasm_js\""] diff --git a/.gitignore b/.gitignore index 4a99ee7..72be353 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ Cargo.lock # MSVC Windows builds of rustc generate these, which store debugging information *.pdb .idea/ +crates/bs-p2p/local_keypair diff --git a/Cargo.toml b/Cargo.toml index cc5aba0..31f901c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,10 @@ members = [ "crates/bs", "crates/bs-p2p", "crates/bs-traits", + "crates/comrade", + "crates/comrade-component", + "crates/comrade-reference", + "crates/bs-p2p", "crates/content-addressable", "crates/multibase", "crates/multicid", @@ -17,13 +21,19 @@ members = [ "crates/multiutil", "crates/provenance-log", "crates/rng", - "crates/wacc", + "crates/bs-peer", + "crates/interop-tests", + "crates/bs-wallets", + "crates/bs-server", ] [workspace.package] version = "1.0.0" edition = "2021" -authors = ["Dave Grantham "] +authors = [ + "Dave Grantham ", + "Doug Anderson ", +] description = "The BetterSign Provenance-Based Identity Solution" readme = "README.md" license = "FSL-1.1 OR Apache-2.0" @@ -32,13 +42,17 @@ license = "FSL-1.1 OR Apache-2.0" unexpected_cfgs = { level = "warn", check-cfg = [ 'cfg(feature, values("cargo-clippy"))', 'cfg(fuzzing)', -]} +] } [workspace.dependencies] # Crate ependencies bs = { path = "crates/bs" } bs-p2p = { path = "crates/bs-p2p" } +bs-peer = { path = "crates/bs-peer" } bs-traits = { path = "crates/bs-traits" } +bs-wallets = { path = "crates/bs-wallets" } +comrade = { path = "crates/comrade" } +comrade-reference = { path = "crates/comrade-reference" } multibase = { path = "crates/multibase" } multicid = { path = "crates/multicid" } multicodec = { path = "crates/multicodec" } @@ -49,29 +63,41 @@ multitrait = { path = "crates/multitrait" } 
multiutil = { path = "crates/multiutil" } provenance-log = { path = "crates/provenance-log" } rng = { path = "crates/rng" } -wacc = { path = "crates/wacc" } # Core dependencies best-practices = { version = "0.1.0", git = "https://github.com/cryptidtech/best-practices.git" } +blockstore = "0.7.1" +cid = "0.11.0" criterion = "0.5.1" elliptic-curve = "0.13.8" hex = "0.4.3" +libp2p = { version = "0.54.1" } +multihash-codetable = { version = "0.1.4" } rand = { version = "0.9.0", features = ["os_rng"] } rand_core = "0.9.3" rand_6 = { version = "0.6.4", package = "rand" } rand_core_6 = { version = "0.6.4", package = "rand_core" } -serde = { version = "1.0.219", default-features = false, features = ["alloc", "derive"]} -serde_cbor = { version = "0.11.2", features = ["tags"]} -serde_json = { version = "1.0.104"} -serde_test = { version = "1.0.104"} +serde = { version = "1.0.219", default-features = false, features = [ + "alloc", + "derive", +] } +serde_cbor = { version = "0.11.2", features = ["tags"] } +serde_json = { version = "1.0.104" } +serde_test = { version = "1.0.104" } sha3 = "0.10.8" test-log = { version = "0.2.17", features = ["trace", "color"] } thiserror = "2.0.12" -tokio = { version = "1.44.2", features = ["fs", "io-util", "macros", "rt", "test-util"] } +tokio = { version = "1.44.2" } tracing = "0.1.41" tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } unsigned-varint = { version = "0.8.0", features = ["std"] } +web-sys = { version = "0.3.77" } +wasm-bindgen-futures = "0.4.50" [profile.bench] opt-level = 3 debug = false + +# until lands: https://github.com/libp2p/rust-libp2p/issues/5877 +[patch.crates-io] +libp2p-webrtc-utils = { git = "https://github.com/DougAnderson444/rust-libp2p.git", branch = "core-v0.42-webrtc-utils-0.3.0" } diff --git a/README.md b/README.md index aedfc18..8995eb9 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,14 @@ [](https://www.youtube.com/watch?v=LxU4wG4ryFo) +## Tests + +Run the [just 
command](https://just.systems/man/en/) in the root of the repository to run the tests: + +``` +just test +``` + ## Introduction BetterSign (`bs`) is a new signing tool designed to use provenance based diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 8db8101..5122d9f 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -20,6 +20,7 @@ anyhow = "1.0" async-trait = "0.1" best-practices.workspace = true bs.workspace = true +bs-traits.workspace = true clap = { version = "4.5.36", features = ["cargo"] } colored = "3.0.0" csv = "1.3.1" @@ -42,15 +43,15 @@ rustyline = { version = "15.0.0", features = ["derive"] } serde = { workspace = true, optional = true } serde_cbor.workspace = true serde_json.workspace = true -ssh-key = { version = "0.6.2", features = ["crypto", "ed25519"]} +ssh-key = { version = "0.6.2", features = ["crypto", "ed25519"] } ssh-agent-client-rs = "1.0.0" structopt = "0.3.26" thiserror.workspace = true -tokio = { version = "1.44.2", features = ["full"] } +tokio = { workspace = true, features = ["full"] } toml = "0.8.20" tracing.workspace = true tracing-subscriber.workspace = true -wacc.workspace = true +comrade.workspace = true [dev-dependencies] tokio-test = "0.4.4" diff --git a/cli/src/error.rs b/cli/src/error.rs index 18783df..c8dd8d9 100644 --- a/cli/src/error.rs +++ b/cli/src/error.rs @@ -1,4 +1,6 @@ // SPDX-License-Identifier: FSL-1.1 +use provenance_log::Key; + /// Errors generated from this crate #[derive(Debug, thiserror::Error)] #[non_exhaustive] @@ -29,6 +31,15 @@ pub enum Error { /// Bs errors #[error(transparent)] Bs(#[from] bs::Error), + + /// Error opening a provenance log + #[error(transparent)] + Open(#[from] bs::error::OpenError), + + /// Error updating a provenance log + #[error(transparent)] + Update(#[from] bs::error::UpdateError), + /// Multicid error #[error(transparent)] Multicid(#[from] multicid::Error), @@ -98,6 +109,10 @@ pub enum Error { /// Invalid backend type #[error("Invalid backend type {0}")] InvalidBackendType(String), + 
+ /// From + #[error(transparent)] + FromUtf8Error(#[from] std::string::FromUtf8Error), } /// SshAgent error @@ -158,4 +173,8 @@ pub enum PlogError { /// No string value given #[error("No string value given")] NoStringValue, + + /// No key present for that KeyPath + #[error("No key present for that KeyPath {0}")] + NoKeyPresent(Key), } diff --git a/cli/src/subcmds/plog.rs b/cli/src/subcmds/plog.rs index 486618a..95744bf 100644 --- a/cli/src/subcmds/plog.rs +++ b/cli/src/subcmds/plog.rs @@ -2,6 +2,9 @@ /// Plog command pub mod command; +use bs::params::vlad::VladParams; +use bs_traits::sync::{SyncGetKey, SyncPrepareEphemeralSigning, SyncSigner}; +use bs_traits::{EphemeralKey, GetKey, Signer}; pub use command::Command; use crate::{error::PlogError, Config, Error}; @@ -11,6 +14,7 @@ use bs::{ ops::{open, update}, update::OpParams, }; +use comrade::Pairs; use multibase::Base; use multicid::{Cid, EncodedCid, EncodedVlad, Vlad}; use multicodec::Codec; @@ -20,9 +24,114 @@ use multisig::Multisig; use multiutil::{BaseEncoded, CodecInfo, DetectedEncoder, EncodingInfo}; use provenance_log::{Key, Log, Script}; use rng::StdRng; -use std::{collections::VecDeque, convert::TryFrom, path::PathBuf}; +use std::num::{NonZero, NonZeroUsize}; +use std::{ + collections::{HashMap, VecDeque}, + convert::TryFrom, +}; use tracing::debug; -use wacc::Pairs; + +/// Cli KeyManager +#[derive(Clone, Debug, Default)] +struct KeyManager(HashMap); + +impl GetKey for KeyManager { + type Key = Multikey; + type KeyPath = Key; + type Codec = Codec; + type Error = Error; +} + +impl SyncGetKey for KeyManager { + fn get_key( + &self, + key_path: &Self::KeyPath, + codec: &Self::Codec, + threshold: NonZeroUsize, + limit: NonZeroUsize, + ) -> Result { + // Your implementation using crate::error::Error + debug!("Generating {} key ({} of {})...", codec, threshold, limit); + let mut rng = StdRng::from_os_rng(); + let mk = mk::Builder::new_from_random_bytes(*codec, &mut rng)?.try_build()?; + let fingerprint = 
mk.fingerprint_view()?.fingerprint(Codec::Blake3)?; + + let ef = EncodedMultihash::new(Base::Base32Z, fingerprint); + debug!("Writing {} key fingerprint: {}", key_path, ef); + let w = writer(&Some(format!("{}.multikey", ef).into()))?; + serde_cbor::to_writer(w, &mk)?; // This now works with ? + Ok(mk) + } +} + +// EphemeralKey +impl EphemeralKey for KeyManager { + type PubKey = Multikey; +} + +// Implement the new SyncPrepareEphemeralSigning trait +impl SyncPrepareEphemeralSigning for KeyManager { + type Codec = Codec; + + fn prepare_ephemeral_signing( + &self, + codec: &Self::Codec, + threshold: NonZeroUsize, + limit: NonZeroUsize, + ) -> Result< + ( + ::PubKey, + Box Result<::Signature, ::Error>>, + ), + ::Error, + > { + debug!( + "Preparing ephemeral signing with {} key ({} of {})...", + codec, threshold, limit + ); + + // Generate a new key for signing + let mut rng = StdRng::from_os_rng(); + let secret_key = mk::Builder::new_from_random_bytes(*codec, &mut rng)? + .with_threshold(threshold) + .with_limit(limit) + .try_build()?; + + // Get the public key + let public_key = secret_key.conv_view()?.to_public_key()?; + + // Create the signing closure that owns the secret key + let sign_once = Box::new( + move |data: &[u8]| -> Result<::Signature, ::Error> { + debug!("Signing data with ephemeral key"); + let signature = secret_key.sign_view()?.sign(data, false, None)?; + Ok(signature) + }, + ); + + Ok((public_key, sign_once)) + } +} + +impl Signer for KeyManager { + type KeyPath = Key; + type Signature = Multisig; + type Error = Error; +} + +impl SyncSigner for KeyManager { + fn try_sign( + &self, + key_path: &Self::KeyPath, + data: &[u8], + ) -> Result { + let key = self + .0 + .get(key_path) + .ok_or(PlogError::NoKeyPresent(key_path.clone()))?; + Ok(key.sign_view()?.sign(data, false, None)?) 
+ } +} /// processes plog subcommands pub async fn go(cmd: Command, _config: &Config) -> Result<(), Error> { @@ -39,39 +148,55 @@ pub async fn go(cmd: Command, _config: &Config) -> Result<(), Error> { output, } => { let (vlad_key, vlad_cid) = parse_vlad_params(&vlad_params)?; - let cfg = open::Config::default() - .with_pubkey_params(parse_key_params(&pub_key_params, Some("/pubkey"))?) - .with_additional_ops(&build_key_params(&key_ops)?) - .with_additional_ops(&build_string_params(&string_ops)?) - .with_additional_ops(&build_file_params(&file_ops)?) - .with_vlad_params(vlad_key, vlad_cid) - .with_entrykey_params(parse_key_params(&entry_key_codec, Some("/entrykey"))?) - .with_entry_lock_script(&lock_script_path) - .with_entry_unlock_script(&unlock_script_path); + + let OpParams::KeyGen { + codec: vlad_key_codec, + .. + } = vlad_key + else { + return Err(PlogError::InvalidFileParams.into()); + }; + + let OpParams::CidGen { + hash: vlad_cid_hash, + .. + } = vlad_cid + else { + return Err(PlogError::InvalidFileParams.into()); + }; + + let lock_script = Script::Code( + Key::default(), + std::fs::read_to_string(&lock_script_path).map_err(|_| PlogError::NoKeyPath)?, + ); + let unlock_script = Script::Code( + Key::default(), + std::fs::read_to_string(&unlock_script_path).map_err(|_| PlogError::NoKeyPath)?, + ); + + let mut additional_ops = Vec::new(); + additional_ops.extend(build_key_params(&key_ops)?); + additional_ops.extend(build_string_params(&string_ops)?); + additional_ops.extend(build_file_params(&file_ops)?); + + let cfg = open::Config::builder() + .pubkey(parse_key_params(&pub_key_params, Some("/pubkey"))?) + .vlad( + VladParams::builder() + .key(vlad_key_codec) + .hash(vlad_cid_hash) + .build(), + ) + .entrykey(parse_key_params(&entry_key_codec, Some("/entrykey"))?) 
+ .unlock(unlock_script) + .lock(lock_script.clone()) + .additional_ops(additional_ops) // Add all operations at once + .build(); + + let key_manager = KeyManager::default(); // open the p.log - let plog = open::open_plog( - cfg, - |key: &Key, - codec: Codec, - threshold: usize, - limit: usize| - -> Result { - debug!("Generating {} key ({} of {})...", codec, threshold, limit); - let mut rng = StdRng::from_os_rng(); - let mk = mk::Builder::new_from_random_bytes(codec, &mut rng)?.try_build()?; - let fingerprint = mk.fingerprint_view()?.fingerprint(Codec::Blake3)?; - let ef = EncodedMultihash::new(Base::Base32Z, fingerprint); - debug!("Writing {} key fingerprint: {}", key, ef); - let w = writer(&Some(format!("{}.multikey", ef).into()))?; - serde_cbor::to_writer(w, &mk)?; - Ok(mk) - }, - |mk: &Multikey, data: &[u8]| -> Result { - debug!("Signing the first entry"); - Ok(mk.sign_view()?.sign(data, false, None)?) - }, - )?; + let plog = open::open_plog(&cfg, &key_manager, &key_manager)?; println!("Created p.log {}", writer_name(&output)?.to_string_lossy()); print_plog(&plog)?; @@ -83,7 +208,6 @@ pub async fn go(cmd: Command, _config: &Config) -> Result<(), Error> { let mut v = Vec::default(); reader(&input)?.read_to_end(&mut v)?; let plog: Log = serde_cbor::from_slice(&v)?; - //let plog: Log = serde_cbor::from_reader(reader(&input)?)?; println!("p.log"); print_plog(&plog)?; } @@ -93,7 +217,7 @@ pub async fn go(cmd: Command, _config: &Config) -> Result<(), Error> { key_ops, string_ops, file_ops, - lock_script_path: _, + lock_script_path, unlock_script_path, entry_signing_key, output, @@ -107,45 +231,42 @@ pub async fn go(cmd: Command, _config: &Config) -> Result<(), Error> { }; debug!("read p.log"); - let entry_signing_key = { + let lock_script = Script::Code( + Key::default(), + std::fs::read_to_string(&lock_script_path).map_err(|_| PlogError::NoKeyPath)?, + ); + + let unlock_script = { let mut v = Vec::default(); - reader(&Some(entry_signing_key))?.read_to_end(&mut v)?; 
- serde_cbor::from_slice::(&v)? + reader(&Some(unlock_script_path))?.read_to_end(&mut v)?; + Script::Code(Key::default(), String::from_utf8(v)?) }; - debug!("read p.log signing key"); - let cfg = update::Config::default() - .with_ops(&build_delete_params(&delete_ops)?) - .with_ops(&build_key_params(&key_ops)?) - .with_ops(&build_string_params(&string_ops)?) - .with_ops(&build_file_params(&file_ops)?) - .with_entry_signing_key(&entry_signing_key) - .with_entry_unlock_script(&unlock_script_path); + // Collect all operations first + let mut entry_ops = Vec::new(); + entry_ops.extend(build_delete_params(&delete_ops)?); + entry_ops.extend(build_key_params(&key_ops)?); + entry_ops.extend(build_string_params(&string_ops)?); + entry_ops.extend(build_file_params(&file_ops)?); + + // read the entry signing key from the path + // on Ok, try into Key, and fail Plog::Error::NoKeyPath + let entry_signing_key = match std::fs::read_to_string(&entry_signing_key) { + Ok(s) => Key::try_from(s.trim())?, + Err(_) => return Err(PlogError::NoKeyPath.into()), + }; + + let cfg = update::Config::builder() + .add_entry_lock_scripts(vec![lock_script.clone()]) + .unlock(unlock_script) + .entry_signing_key(entry_signing_key) + .additional_ops(entry_ops) + .build(); + + let key_manager = KeyManager::default(); // update the p.log - update::update_plog( - &mut plog, - cfg, - |key: &Key, - codec: Codec, - threshold: usize, - limit: usize| - -> Result { - debug!("Generating {} key ({} of {})...", codec, threshold, limit); - let mut rng = StdRng::from_os_rng(); - let mk = mk::Builder::new_from_random_bytes(codec, &mut rng)?.try_build()?; - let fingerprint = mk.fingerprint_view()?.fingerprint(Codec::Blake3)?; - let ef = EncodedMultihash::new(Base::Base32Z, fingerprint); - debug!("Writing {} key fingerprint: {}", key, ef); - let w = writer(&Some(format!("{}.multikey", ef).into()))?; - serde_cbor::to_writer(w, &mk)?; - Ok(mk) - }, - |mk: &Multikey, data: &[u8]| -> Result { - debug!("Signing the first 
entry"); - Ok(mk.sign_view()?.sign(data, false, None)?) - }, - )?; + update::update_plog::(&mut plog, &cfg, &key_manager, &key_manager)?; println!("Writing p.log {}", writer_name(&output)?.to_string_lossy()); print_plog(&plog)?; @@ -259,16 +380,7 @@ fn print_plog(plog: &Log) -> Result<(), Error> { } } } - /* - let kvp_lines = kvp.to_string().lines().map(|s| s.to_string()).collect::>(); - for i in 0..kvp_lines.len() { - if i < kvp_lines.len() - 1 { - println!(" ├─ {}", kvp_lines[i]); - } else { - println!(" ╰─ {}", kvp_lines[i]); - } - } - */ + Ok(()) } @@ -279,36 +391,18 @@ fn get_codec_from_plog_value(value: &provenance_log::Value) -> Option { _ => None, } } -/* -fn get_from_plog_value<'a, T>(value: &'a provenance_log::Value) -> Option -where - T: TryFrom<&'a [u8]> + EncodingInfo, - BaseEncoded: TryFrom<&'a str>, -{ - match value { - provenance_log::Value::Data(v) => T::try_from(v.as_slice()).ok(), - provenance_log::Value::Str(s) => { - match BaseEncoded::::try_from(s.as_str()) { - Ok(be) => Some(be.to_inner()), - Err(_) => None - } - } - _ => None, - } -} -*/ -fn get_from_wacc_value<'a, T>(value: &'a wacc::Value) -> Option +fn get_from_wacc_value<'a, T>(value: &'a comrade::Value) -> Option where T: TryFrom<&'a [u8]> + EncodingInfo, BaseEncoded: TryFrom<&'a str>, { match value { - wacc::Value::Bin { + comrade::Value::Bin { hint: _, data: ref v, } => T::try_from(v.as_slice()).ok(), - wacc::Value::Str { + comrade::Value::Str { hint: _, data: ref s, } => match BaseEncoded::::try_from(s.as_str()) { @@ -386,8 +480,8 @@ fn parse_key_params(s: &str, key_path: Option<&str>) -> Result Ok(OpParams::KeyGen { key, codec, - threshold, - limit, + threshold: NonZero::new(threshold).unwrap(), + limit: NonZero::new(limit).unwrap(), revoke, }) } @@ -411,7 +505,6 @@ fn parse_file_params(s: &str) -> Result { if !key.is_branch() { return Err(PlogError::InvalidKeyPath.into()); } - let path = PathBuf::from(parts.pop_front().ok_or(PlogError::NoInputFile)?); if !parts.is_empty() && 
parts.len() != 4 { return Err(PlogError::InvalidFileParams.into()); } @@ -437,14 +530,13 @@ fn parse_file_params(s: &str) -> Result { target, hash, inline, - path, + data: vec![], // TODO: Placeholder for actual data }) } /// [::[:]] fn parse_vlad_params(s: &str) -> Result<(OpParams, OpParams), Error> { let mut parts = s.split(":").collect::>(); - let path = PathBuf::from(parts.pop_front().ok_or(PlogError::NoInputFile)?); if !(parts.is_empty() || parts.len() == 2 || parts.len() == 3) { return Err(PlogError::InvalidFileParams.into()); } @@ -463,8 +555,8 @@ fn parse_vlad_params(s: &str) -> Result<(OpParams, OpParams), Error> { OpParams::KeyGen { key: Key::try_from("/vlad/key")?, codec, - threshold: 0, - limit: 0, + threshold: NonZero::new(0).unwrap(), + limit: NonZero::new(0).unwrap(), revoke: false, }, OpParams::CidGen { @@ -473,7 +565,7 @@ fn parse_vlad_params(s: &str) -> Result<(OpParams, OpParams), Error> { target: Codec::Identity, hash, inline: true, - path, + data: vec![], // TODO: Placeholder for actual data }, )) } diff --git a/crates/bs-p2p/Cargo.toml b/crates/bs-p2p/Cargo.toml index c7f7eb6..0501d83 100644 --- a/crates/bs-p2p/Cargo.toml +++ b/crates/bs-p2p/Cargo.toml @@ -8,7 +8,81 @@ readme = "README.md" license = "Apache 2.0" [dependencies] -libp2p = { version = "0.55.0", features = [ "autonat", "dcutr", "dns", "ed25519", "gossipsub", "identify", "kad", "macros", "memory-connection-limits", "noise", "ping", "quic", "relay", "request-response", "rsa", "tcp", "tls", "tokio", "yamux" ] } +provenance-log.workspace = true +multihash.workspace = true +serde.workspace = true +thiserror.workspace = true +blockstore.workspace = true +rand = "0.8" +tracing = { version = "0.1", features = ["log"] } +tracing-subscriber = { version = "0.3", features = ["fmt"] } +web-time = "1.1.0" +beetswap = "0.4.1" +tokio = { version = "1.0", features = ["sync", "macros"] } +futures = "0.3" +futures-timer = "3.0.2" +multicid.workspace = true +cid.workspace = true +directories = "6.0" 
+ +[target.'cfg(target_arch = "wasm32")'.dependencies] +libp2p = { workspace = true, features = [ + "ed25519", + "macros", + "ping", + "wasm-bindgen", + "gossipsub", + "identify", + "kad", + "request-response", + "cbor", + "relay", + "noise", + "yamux", + "websocket-websys", + "dcutr", +] } +# until 0.4.0-alpha.2 patch get released on crates.io, see: https://github.com/libp2p/rust-libp2p/pull/5569#issuecomment-2552317044 +libp2p-webrtc-websys = { git = "https://github.com/DougAnderson444/rust-libp2p", branch = "webrtc-websys-deps" } +# Needed for: https://github.com/rustwasm/wasm-pack/issues/743#issuecomment-2383907212 +instant = { version = "0.1.12", features = ["wasm-bindgen", "inaccurate"] } +web-sys = { workspace = true, features = ["Window"] } +wasm-bindgen-futures.workspace = true + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +libp2p = { workspace = true, features = [ + "ed25519", + "macros", + "ping", + "quic", + "tokio", + "gossipsub", + "identify", + "kad", + "request-response", + "cbor", + "relay", + "noise", + "yamux", + "dcutr", + "dns", + "websocket", + "tcp", + "rsa", +] } +libp2p-webrtc = { version = "0.8.0-alpha", features = ["tokio", "pem"] } +zeroize = "1.8.1" +serde_json = "1.0" + +[dev-dependencies] +sha3.workspace = true +tokio = { workspace = true, features = ["full"] } +multihash-codetable = { workspace = true, features = [ + "sha2", + "sha3", + "blake3", +] } +tempfile = "3.20.0" [lints] workspace = true diff --git a/crates/bs-p2p/justfile b/crates/bs-p2p/justfile new file mode 100644 index 0000000..0a906fe --- /dev/null +++ b/crates/bs-p2p/justfile @@ -0,0 +1,8 @@ +check: + cargo check + cargo check --all --all-targets + cargo check --all --all-targets --target wasm32-unknown-unknown + +# Install required targets +install-targets: + rustup target add wasm32-unknown-unknown aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android diff --git a/crates/bs-p2p/src/behaviour.rs 
b/crates/bs-p2p/src/behaviour.rs new file mode 100644 index 0000000..b5663e9 --- /dev/null +++ b/crates/bs-p2p/src/behaviour.rs @@ -0,0 +1,156 @@ +//! This module defines the behaviour of the network. +pub(crate) mod req_res; + +use std::time::Duration; + +use blockstore::Blockstore; +use libp2p::request_response::{self, ProtocolSupport}; +use libp2p::{dcutr, relay, StreamProtocol}; +use libp2p::{gossipsub, identify, identity::Keypair, kad, ping, swarm::NetworkBehaviour}; + +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; + +use req_res::{PeerRequest, PeerResponse}; + +/// [kad] [StreamProtocol] name +const PROTOCOL_NAME: &str = "/bettersign/0.1.0"; + +/// Extension protocol name +/// +/// Work in progress. +const EXTENSION_PROTOCOL: &str = "/bettersign/extensions/0.1.0"; + +const MAX_MULTIHASH_LENGTH: usize = 64; + +/// The [NetworkBehaviour] also creates a [BehaviourEvent] for us, which we can use to +/// handle events from the behaviour. +#[derive(NetworkBehaviour)] +pub struct Behaviour { + /// Ping remote peers + pub(crate) ping: ping::Behaviour, + /// Publish subscribe to topics + pub(crate) gossipsub: gossipsub::Behaviour, + /// Identify ourselves to other peers + pub(crate) identify: identify::Behaviour, + /// Kademlia DHT for Peer management + pub kad: kad::Behaviour, + /// Use RequestResponse to send data to a peer. Extensions can be used + /// to encode/decode the bytes, giving users a lot of flexibility that they control. + pub(crate) peer_request: request_response::cbor::Behaviour, + /// Relay client + pub(crate) relay_client: relay::client::Behaviour, + /// Dcutr + dcutr: dcutr::Behaviour, + /// Bitswap + pub(crate) bitswap: beetswap::Behaviour, +} + +/// BehaviousBuilder lets us set the bitswap [Blockstore] first, +/// then build with key and relay behaviour later. 
+pub struct BehaviourBuilder { + blockstore: B, +} + +impl BehaviourBuilder { + pub fn new(blockstore: B) -> Self { + Self { blockstore } + } + + pub fn build(self, key: &Keypair, relay_behaviour: relay::client::Behaviour) -> Behaviour { + // To content-address message, we can take the hash of message and use it as an ID. + let message_id_fn = |message: &gossipsub::Message| { + let mut s = DefaultHasher::new(); + message.data.hash(&mut s); + gossipsub::MessageId::from(s.finish().to_string()) + }; + + // Set a custom gossipsub configuration + let gossipsub_config = gossipsub::ConfigBuilder::default() + .heartbeat_interval(Duration::from_secs(15)) // This is set to aid debugging by not cluttering the log space + .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) + .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. + .support_floodsub() + .flood_publish(true) + .build() + .unwrap_or_default(); + + // build a gossipsub network behaviour + let gossipsub = gossipsub::Behaviour::new( + gossipsub::MessageAuthenticity::Signed(key.clone()), + gossipsub_config, + ) + .expect("Config should be valid"); + + let mut kad_config = kad::Config::new(StreamProtocol::new(PROTOCOL_NAME)); + // allows us to validate records before inserting them into the store + // when enabled, we will see the kad event InboundRequest(PutRecord) where we would then + // validate the record and act on the record + //kad_config.set_record_filtering(kad::StoreInserts::FilterBoth); + + let kad = kad::Behaviour::with_config( + key.public().to_peer_id(), + kad::store::MemoryStore::new(key.public().to_peer_id()), + kad_config, + ); + + let bitswap = beetswap::Behaviour::new(self.blockstore.into()); + + Behaviour { + ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(25))), + // Need to include identify until 
https://github.com/status-im/nim-libp2p/issues/924 is resolved. + identify: identify::Behaviour::new(identify::Config::new( + "/ipfs/id/1.0.0".to_owned(), + key.public(), + )), + gossipsub, + kad, + peer_request: request_response::cbor::Behaviour::new( + [( + StreamProtocol::new(EXTENSION_PROTOCOL), + ProtocolSupport::Full, + )], + request_response::Config::default().with_request_timeout(Duration::from_secs(60)), + ), + relay_client: relay_behaviour, + dcutr: dcutr::Behaviour::new(key.public().to_peer_id()), + bitswap, + } + } +} + +// test and not wasm target +#[cfg(test)] +mod tests { + // NOTE: The StandardMultihasher is incompatible with the cryptid multihash type + + // use beetswap::multihasher::{Multihasher as _, StandardMultihasher}; + // use sha3::{Digest, Sha3_512}; + // + // /// Sha3-512 length is 64 bytes + // const SHA3_512_LEN: usize = 64; + // + // /// Multicodec for Sha3-512, see [multiformats/multicodec](https://github.com/multiformats/multicodec/blob/df81972d764f30da4ad32e1e5b778d8b619de477/table.csv#L15-L16) for details + // /// The code for sha3-512 is hex 0x14, decimal 20 + // pub const SHA3_512_HASH_CODE: u64 = 0x14; + // + // #[tokio::test] + // async fn test_standard_sha3_512_multihasher() { + // use multihash_codetable::Code; + // + // let input = b"hello world"; + // + // let digest = Sha3_512::digest(input); + // + // let len = digest.len(); + // assert_eq!(len, SHA3_512_LEN); + // + // let result: multihash::Multihash = StandardMultihasher + // .hash(Code::Sha3_512.into(), input) + // .await + // .unwrap(); + // + // assert_eq!(result.code(), SHA3_512_HASH_CODE); + // assert_eq!(result.digest(), digest.as_slice()); + // } +} diff --git a/crates/bs-p2p/src/behaviour/req_res.rs b/crates/bs-p2p/src/behaviour/req_res.rs new file mode 100644 index 0000000..9cfa464 --- /dev/null +++ b/crates/bs-p2p/src/behaviour/req_res.rs @@ -0,0 +1,41 @@ +//! 
Request response types +use std::ops::Deref; + +use serde::{Deserialize, Serialize}; + +/// Simple file exchange protocol +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct PeerRequest(Vec); + +impl PeerRequest { + /// Create a new PeerRequest from bytes + pub fn new(bytes: Vec) -> Self { + Self(bytes) + } +} + +impl Deref for PeerRequest { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Jeeves Response Bytes +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct PeerResponse(Vec); + +impl PeerResponse { + pub(crate) fn new(file: Vec) -> Self { + Self(file) + } +} + +impl Deref for PeerResponse { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} diff --git a/crates/bs-p2p/src/error.rs b/crates/bs-p2p/src/error.rs new file mode 100644 index 0000000..66155eb --- /dev/null +++ b/crates/bs-p2p/src/error.rs @@ -0,0 +1,80 @@ +//! Bs Errors + +use crate::events::api::NetworkCommand; +#[derive(thiserror::Error, Debug)] +pub enum Error { + /// Error creating the Swarm + #[error("Error creating the Swarm: {0}")] + CreateSwarm(String), + + /// From oneshot canceled error + #[error("Oneshot canceled")] + OneshotCanceled(#[from] futures::channel::oneshot::Canceled), + + /// from string + #[error("Error: {0}")] + String(String), + + #[error("Multiaddr error")] + Multiaddr(#[from] libp2p::multiaddr::Error), + /// From + #[error("Dial error")] + Dial(#[from] libp2p::swarm::DialError), + + #[error("Libp2p error")] + GossipSubMessageAuthenticity, + + /// From OutboundFailure + #[error("OutboundFailure: {0}")] + OutboundFailure(#[from] libp2p::request_response::OutboundFailure), + + /// SendError + #[error("Send error")] + SendError(#[from] futures::channel::mpsc::SendError), + + #[error("Tokio mpsc Send error")] + TokioSendError(#[from] tokio::sync::mpsc::error::SendError), + + /// From TorySendError + #[error("Could not send the message")] + TrySend(#[from] 
futures::channel::mpsc::TrySendError), + + #[error("Could not send the message")] + TrySendPublicEvent(#[from] futures::channel::mpsc::TrySendError), + + /// Send failure + #[error("Send failure")] + SendFailure(String), + + /// from &'static str + #[error("{0}")] + StaticStr(&'static str), + + /// From TransportError + #[error("TransportError: {0}")] + TransportIo(#[from] libp2p::core::transport::TransportError), + + /// Failure to resolve Provenance Log from the network client + #[error("Failed to Resolve provenance log {0}")] + ResolveError(#[from] provenance_log::resolver::ResolveError), + + /// From + #[error("Multicid error {0}")] + MulticidError(#[from] multicid::Error), + + /// From + #[error("Multihash error {0}")] + MultihashError(#[from] multihash::Error), + + /// From + #[error("Provenance Log error {0}")] + PlogError(#[from] provenance_log::Error), + + /// From + #[error("Timeout error: {0}")] + TimeoutError(#[from] crate::events::TimeoutError), + + /// From + #[error("Kad GetRecord error: {0}")] + KadGetRecord(#[from] libp2p::kad::GetRecordError), +} diff --git a/crates/bs-p2p/src/events.rs b/crates/bs-p2p/src/events.rs new file mode 100644 index 0000000..cdc8371 --- /dev/null +++ b/crates/bs-p2p/src/events.rs @@ -0,0 +1,64 @@ +//! Event types, and loop handlers for the P2P network. +pub mod api; +pub use api::Client; + +mod timeout; +pub use timeout::TimeoutError; + +pub mod delay; +use api::Libp2pEvent; +pub(crate) use delay::Delay; + +use libp2p::Multiaddr; + +#[derive(Debug, Clone)] +pub enum PublicEvent { + ListenAddr { + address: Multiaddr, + }, + Error { + error: NetworkError, + }, + Pong { + peer: String, + rtt: u64, + }, + /// Data received from a pubsub peer about a topic. + Message { + peer: String, + topic: String, + data: Vec, + }, + /// A Request was made to us, that we may or may not respond to based on screening criteria. 
+ Request { + request: Vec, + peer: String, + }, + NewConnection { + peer: String, + }, + ConnectionClosed { + peer: String, + cause: String, + }, + Connected, + Swarm(Libp2pEvent), + /// A new subscriber has been seen interested in a topic. + NewSubscriber { + peer: String, + topic: String, + }, + Ack { + peer: String, + topic: String, + }, +} + +#[derive(Debug, Clone)] +pub enum NetworkError { + DialFailed, + ListenFailed, + PublishFailed, + SubscribeFailed, + UnsubscribeFailed, +} diff --git a/crates/bs-p2p/src/events/api.rs b/crates/bs-p2p/src/events/api.rs new file mode 100644 index 0000000..71840c7 --- /dev/null +++ b/crates/bs-p2p/src/events/api.rs @@ -0,0 +1,1107 @@ +//! The Events API for interacting witht he netowrk events. +use crate::events::delay; +pub use crate::behaviour::req_res::{PeerRequest, PeerResponse}; +use crate::events::timeout::with_timeout; +use libp2p::Multiaddr; +use provenance_log::resolver::{Resolver, SuperResolver}; +use crate::events::{NetworkError, PublicEvent}; +use crate::behaviour::{Behaviour, BehaviourEvent}; +use crate::Error; +use blockstore::Blockstore; +use futures::stream::StreamExt; +use futures::{ + channel::{ + mpsc::{self, Receiver}, + oneshot, + }, + SinkExt, +}; +use libp2p::core::transport::ListenerId; +use libp2p::kad::store::RecordStore; +use libp2p::kad::PeerRecord; +use libp2p::kad::{InboundRequest, Record}; +pub use libp2p::multiaddr::Protocol; +use libp2p::request_response::{self, OutboundRequestId, ResponseChannel}; +use libp2p::swarm::{Swarm, SwarmEvent}; +use libp2p::{identify, kad, ping, PeerId, }; +use std::collections::{HashMap, HashSet}; +use std::net::Ipv4Addr; +use std::pin::Pin; +use std::time::Duration; +use web_time::Instant; + +const TICK_INTERVAL: Duration = Duration::from_secs(15); + +/// Create new API to interact with the network: +/// +/// - Network Client: Interact with the netowrk by sending +/// - Network Event Loop: Start the network event loop +pub async fn new( + swarm: Swarm>, +) -> 
(Client, Receiver, EventLoop) { + // These command senders/recvr are used to pass along parsed generic commands to the network event loop + let (command_sender, command_receiver) = tokio::sync::mpsc::channel(32); + let (event_sender, event_receiver) = mpsc::channel(32); + + ( + Client { command_sender }, + event_receiver, + EventLoop::new(swarm, command_receiver, event_sender), + ) +} + +/// This client is used to send [Command]s to the network event loop +/// +/// Can be [Clone]d so that commands can be sent from various sources. +/// +/// Bring [Resolver] into scope to be able to resolve Plogs using the netowrk client. +#[derive(Clone, Debug)] +pub struct Client { + command_sender: tokio::sync::mpsc::Sender, +} + +// impl PartialEq for Client +impl PartialEq for Client { + fn eq(&self, other: &Self) -> bool { + true + } +} + +impl Client { + /// Listen for incoming connections on the given address. + pub async fn start_listening(&mut self, addr: Multiaddr) -> Result { + let (sender, receiver) = oneshot::channel(); + self.command_sender + .send(NetworkCommand::StartListening { addr, sender }) + .await?; + receiver.await? + } + /// Dial the given addresses + pub async fn dial(&self, addr: Multiaddr) -> Result<(), Error> { + let (sender, receiver) = oneshot::channel(); + self.command_sender + .send(NetworkCommand::Dial { addr, sender }) + .await?; + receiver.await? + } + + /// Request a response from a PeerId + pub async fn request_response(&self, request: Vec, peer: PeerId) -> Result, Error> { + tracing::trace!("Sending request to {peer}"); + let (sender, receiver) = oneshot::channel(); + if let Err(e) = self + .command_sender + .send(NetworkCommand::Jeeves { + request, + peer, + sender, + }) + .await + { + tracing::error!("Failed to send request response command: {:?}", e); + } + + receiver.await.map_err(Error::OneshotCanceled)? 
+ } + /// Respond with a file to a request + pub async fn respond_bytes( + &mut self, + bytes: Vec, + channel: ResponseChannel, + ) -> Result<(), Error> { + Ok(self + .command_sender + .send(NetworkCommand::RespondJeeves { bytes, channel }) + .await?) + } + + /// Request bits via Bitswap + pub async fn get_bits(&self, cid: Vec) -> Result, Error> { + tracing::debug!("Requesting bitswap for CID: {:?}", cid); + let (sender, receiver) = oneshot::channel(); + self.command_sender + .send(NetworkCommand::BitswapQuery { cid, sender }) + .await?; + tracing::info!("Awaiting bitswap query response"); + // TODO: Add timeout + Ok(receiver.await?) + } + + /// Get a record from the DHT given the key + pub(crate) async fn get_providers(&self, key: Vec) -> Result, Error> { + let (sender, receiver) = oneshot::channel(); + self.command_sender + .send(NetworkCommand::GetProviders { key, sender }) + .await?; + receiver.await.map_err(Error::OneshotCanceled) + } + + /// Put a record on the DHT + pub async fn put_record(&self, key: Vec, value: Vec) -> Result<(), Error> { + self.command_sender + .send(NetworkCommand::PutRecord { key, value }) + .await?; + Ok(()) + } + + /// Gets a record from the DHT + pub async fn get_record(&self, key: Vec) -> Result, Error> { + let (sender, receiver) = oneshot::channel(); + tracing::debug!("Requesting record for key: {:?}", key); + self.command_sender + .send(NetworkCommand::GetRecord { key, sender }) + .await?; + receiver.await.map_err(Error::OneshotCanceled)? + } + + /// Add a peer to the routing table + pub async fn add_peer(&mut self, peer_id: PeerId) -> Result<(), Error> { + Ok(self + .command_sender + .send(NetworkCommand::AddPeer { peer_id }) + .await?) + } + + /// Publish to a gossipsub topic + pub async fn publish(&self, message: impl AsRef<[u8]>, topic: String) -> Result<(), Error> { + Ok(self + .command_sender + .send(NetworkCommand::Publish { + data: message.as_ref().to_vec(), + topic, + }) + .await?) 
+ } + pub async fn subscribe(&self, topic: String) -> Result<(), Error> { + Ok(self + .command_sender + .send(NetworkCommand::Subscribe { topic }) + .await?) + } + + /// General command PeerPiperCommand parsed into Command then called + pub async fn command(&mut self, command: NetworkCommand) -> Result<(), Error> { + Ok(self.command_sender.send(command).await?) + } + /// Run the Client loop, awaiting commands and passing along network events. + // Loop awaits two separate futures using select: + // 1) Network_events.select_next_some() + // 2) Recieved Network Commands via `command_receiver`, passing along PeerPiperCommand to network_client.command(pp_cmd) + pub async fn run( + &mut self, + mut network_events: Receiver, + mut tx: mpsc::Sender, + ) { + tracing::info!("🚀 Starting network client loop"); + loop { + tokio::select! { + event = network_events.next() => { + let Some(event) = event else { + tracing::warn!("⛔ Network event channel closed, shutting down network event loop"); + break; + }; + tracing::debug!("Network event: {:?}", event); + if let Err(network_event) = tx.send(event).await { + tracing::error!("Failed to send swarm event: {:?}", network_event); + // break; + continue; + } + }, + } + } + } +} + +impl Resolver for Client { + type Error = crate::Error; + + fn resolve( + &self, + cid: &multicid::Cid, + // ) -> Pin, Self::Error>> + CondSend>> { + ) -> Pin + '_>> { + tracing::debug!("DefaultBsPeer Resolving CID over bitswap: {}", cid); + let cid_bytes: Vec = cid.clone().into(); + let client = self.clone(); + Box::pin(async move { + with_timeout(client.get_bits(cid_bytes), Duration::from_secs(10)).await? 
+ }) + } +} +/// PeerPiper Network Commands (Libp2p) +#[derive(Debug)] +pub enum NetworkCommand { + StartListening { + addr: Multiaddr, + sender: oneshot::Sender>, + }, + Dial { + addr: Multiaddr, + sender: oneshot::Sender>, + }, + Publish { + data: Vec, + topic: String, + }, + Subscribe { + topic: String, + }, + Unsubscribe { + topic: String, + }, + AddPeer { + peer_id: PeerId, + }, + ShareMultiaddr, + /// Jeeves RequestResponse. Ask a String from a PeerId. + Jeeves { + request: Vec, + peer: PeerId, + sender: oneshot::Sender, Error>>, + }, + /// Jeeves Response + RespondJeeves { + bytes: Vec, + channel: ResponseChannel, + }, + /// Puts a Record on the DHT + PutRecord { + key: Vec, + value: Vec, + }, + /// Get a record from the DHT + GetRecord { + key: Vec, + sender: oneshot::Sender, Error>>, + }, + /// Get a record from the DHT + GetProviders { + key: Vec, + sender: oneshot::Sender>, + }, + /// Start providing a key on the DHT + StartProviding { + key: Vec, + }, + /// Bitswap Query + BitswapQuery { + cid: Vec, + sender: oneshot::Sender>, + }, +} + +/// Inner Libp2p Events which cannot be serialized +#[derive(Debug, Clone)] +pub enum Libp2pEvent { + // /// The unique Event to this api file that never leaves; all other events propagate out + // InboundRequest { + // request: PeerRequest, + // channel: ResponseChannel, + // }, + // /// DHT Provider Request for when someone asks for a record + // DhtProviderRequest { + // key: Vec, + // channel: ResponseChannel>, + // }, + /// An inbound request to Put a Record into the DHT from a source PeerId + PutRecordRequest { source: PeerId }, +} + + +/// The network event loop. +/// Handles all the network logic for us. +pub struct EventLoop { + /// A future that fires at a regular interval and drives the behaviour of the network. + tick: delay::Delay, + /// The libp2p Swarm that handles all the network logic for us. + swarm: Swarm>, + /// Channel to send commands to the network event loop. 
+ command_receiver: tokio::sync::mpsc::Receiver, + /// Channel to send events from the network event loop to the user. + event_sender: mpsc::Sender, + /// Jeeeves Tracking + pending_requests: HashMap, Error>>>, + + /// GetProviders tracking + pending_get_providers: HashMap>>, + + /// pending bitswap queries + pending_queries: HashMap>>, + + /// Pending Get Records from DHT + pending_get_records: HashMap, Error>>>, +} + +impl EventLoop { + /// Creates a new network event loop. + fn new( + swarm: Swarm>, + command_receiver: tokio::sync::mpsc::Receiver, + event_sender: mpsc::Sender, + ) -> Self { + Self { + tick: delay::Delay::new(TICK_INTERVAL), + swarm, + command_receiver, + event_sender, + pending_requests: HashMap::new(), + pending_get_providers: Default::default(), + pending_queries: Default::default(), + pending_get_records: Default::default(), + } + } + + /// Runs the network event loop. + pub async fn run(mut self) -> Result<(), Error> { + loop { + tokio::select! { + event = self.swarm.next() => self.handle_event(event.expect("Swarm stream to be infinite.")).await?, + command = self.command_receiver.recv() => match command { + Some(c) => self.handle_command(c).await, + // Command channel closed, thus shutting down the network event loop. + None => return Ok(()), + }, + _ = &mut self.tick => self.handle_tick().await, + } + } + } + + /// Handles a tick of the `tick` future. 
+ async fn handle_tick(&mut self) { + tracing::info!("🕒 Tick"); + self.tick.reset(TICK_INTERVAL); + + // Also show all kad records from kad store + let records = self.swarm.behaviour_mut().kad.store_mut().records(); + + if records.clone().count() == 0 { + tracing::debug!("Kad store is empty"); + } + + records.into_iter().for_each(|record| { + tracing::debug!( + "Kad Key ({} bytes): {:?} \n\n Value: {:?}", + record.key.to_vec().len(), + record.key.to_vec(), + record.value + ); + }); + + // clone the records + let records = self + .swarm + .behaviour_mut() + .kad + .store_mut() + .records() + .map(|mut r| r.to_mut().clone()) + .collect::>(); + + for record in records { + let quorum = kad::Quorum::One; + + if let Some(expires) = record.expires { + if expires < Instant::now() + Duration::from_secs(60) { + let expires = Some(Instant::now() + Duration::from_secs(22 * 60 * 60)); + if let Err(e) = self + .swarm + .behaviour_mut() + .kad + .put_record(Record { expires, ..record }, quorum) + { + tracing::error!("Failed to put record: {e}"); + } + } + } + } + + // if let Some(Err(e)) = self + // .swarm + // .behaviour_mut() + // .kademlia + // .as_mut() + // .map(|k| k.bootstrap()) + // { + // tracing::debug!("Failed to run Kademlia bootstrap: {e:?}"); + // } + + let _message = "Hello world! Sent from the rust-peer".to_string(); + + // if let Some(Err(err)) = self + // .swarm + // .behaviour_mut() + // .gossipsub + // .as_mut() + // .map(|g| g.publish(topic::topic(), message.as_bytes())) + // { + // error!("Failed to publish periodic message: {err}") + // } + } + + /// Handles a network event according to the matched Event type + async fn handle_event(&mut self, event: SwarmEvent>) -> Result<(), Error> { + match event { + SwarmEvent::NewListenAddr { address, .. 
} => { + tracing::info!("🌐 New address: {address}"); + let mut addr_handler = || { + let p2p_addr = address + .clone() + .with(Protocol::P2p(*self.swarm.local_peer_id())); + + // info!("Listen p2p address: \n\x1b[30;1;42m{p2p_addr}\x1b[0m"); + // This address is reachable, add it + self.swarm.add_external_address(p2p_addr.clone()); + + // check off adding this address + tracing::info!("👉 Added {p2p_addr}"); + + // pass the address back to the other task, for display, etc. + self.event_sender + .try_send(PublicEvent::ListenAddr { + address: p2p_addr, + }) + }; + // Protocol::Ip is the first item in the address vector + match address.iter().next() { + Some(Protocol::Ip6(ip6)) => { + // Only add our globally available IPv6 addresses to the external addresses list. + if !ip6.is_loopback() + && !ip6.is_unspecified() + && !ip6.is_multicast() + && (ip6.segments()[0] & 0xffc0) != 0xfe80 // no fe80::/10 addresses, (!ip6.is_unicast_link_local() requires nightly) + && (ip6.segments()[0] & 0xfe00) != 0xfc00 // Unique Local Addresses (ULAs, fd00::/8) are private IPv6 addresses and should not be advertised. + { + addr_handler()?; + } + } + Some(Protocol::Ip4(ip4)) => { + if !(ip4.is_loopback() || ip4.is_unspecified() || ip4.is_private() || ip4.is_multicast() || ip4 == Ipv4Addr::LOCALHOST || ip4.octets()[0] & 240 == 240 && !ip4.is_broadcast()) + { + addr_handler()?; + } + } + _ => { + tracing::warn!("Unknown address type: {address}"); + } + } + } + SwarmEvent::ConnectionEstablished { + peer_id, + //endpoint: ConnectedPoint::Listener { send_back_addr, .. }, + established_in, + .. 
+ } => { + tracing::info!("✔️ Connection Established to {peer_id} in {established_in:?}"); + // add as explitcit peer + self.swarm + .behaviour_mut() + .gossipsub + .add_explicit_peer(&peer_id); + + if let Err(e) = self + .event_sender + .send(PublicEvent::NewConnection { + peer: peer_id.to_string(), + }) + .await + { + tracing::error!("Failed to send NewConnection event: {e}"); + return Err(Error::SendFailure("Failed to send NewConnection event".to_string())); + } + } + SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => { + tracing::warn!("Failed to dial {peer_id:?}: {error}"); + + match (peer_id, &error) { + (Some(_peer_id), libp2p::swarm::DialError::Transport(details_vector)) => { + for (addr, _error) in details_vector.iter() { + // self.swarm + // .behaviour_mut() + // .kademlia + // .as_mut() + // .map(|k| k.remove_address(&peer_id, addr)); + // + // self.swarm.remove_external_address(addr); + + tracing::debug!("Removed ADDR {addr:?} from the routing table (if it was in there)."); + } + } + _ => { + tracing::warn!("{error}"); + return Err(Error::StaticStr("Failed to dial peer")); + } + } + } + SwarmEvent::ConnectionClosed { peer_id, cause, .. } => { + tracing::info!("Connection to {peer_id} closed: {cause:?}"); + // send an event + self.event_sender + .send(PublicEvent::ConnectionClosed { + peer: peer_id.to_string(), + // unwrap cause if is Some, otherwise return "Unknown cause" + cause: cause + .map(|c| c.to_string()) + .unwrap_or_else(|| "Unknown cause".to_string()), + }) + .await?; + } + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + peer, + result: Ok(rtt), + .. 
+ })) => { + tracing::info!("🏓 Ping {peer} in {rtt:?}"); + // send msg + self.event_sender + .send(PublicEvent::Pong { + peer: peer.to_string(), + rtt: rtt.as_millis() as u64, + }) + .await?; + } + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + peer, + result: Err(err), + connection, + })) => { + tracing::warn!("⚠️ Ping {peer} failed: {err}"); + self.swarm.behaviour_mut().kad.remove_peer(&peer); + let found = self.swarm.close_connection(connection); + tracing::warn!("Connection closed: {found}"); + } + // SwarmEvent::Behaviour(BehaviourEvent::Relay(e)) => { + // tracing::debug!("{:?}", e); + // } + SwarmEvent::Behaviour(BehaviourEvent::Gossipsub( + libp2p::gossipsub::Event::Message { + message_id: _, + propagation_source: peer_id, + message, + }, + )) => { + tracing::info!("📨 Received message from {:?}", message.source); + + self.event_sender + .send(PublicEvent::Message { + peer: peer_id.to_string(), + topic: message.topic.to_string(), + data: message.data.clone(), + }) + .await?; + + // Send ACK back to the sender + let ack_topic = format!("ack/{}", message.topic.to_string()); + if let Err(e) = self.swarm.behaviour_mut().gossipsub.publish(libp2p::gossipsub::IdentTopic::new(&ack_topic), message.data) { + tracing::error!("Failed to publish ACK: {e}"); + } + } + SwarmEvent::Behaviour(BehaviourEvent::Gossipsub( + libp2p::gossipsub::Event::Subscribed { peer_id, topic }, + )) => { + tracing::debug!("{peer_id} subscribed to {topic}"); + + self.swarm + .behaviour_mut() + .gossipsub + .add_explicit_peer(&peer_id); + + self.event_sender + .send(PublicEvent::NewSubscriber { + peer: peer_id.to_string(), + topic: topic.to_string(), + }) + .await?; + + // Query the local DHT for values on the topic as a key. + // If we have key for this value, publish it to the topic. 
+ let key = topic.to_string().into_bytes(); + tracing::debug!("Querying DHT for key: {:?}", key); + if let Some(record) = self + .swarm + .behaviour_mut() + .kad + .store_mut() + .get(&libp2p::kad::RecordKey::new(&key)) + .map(|record| record.into_owned()) { + tracing::debug!("Found record for key {:?}: {:?}", key, record); + // Publish the record to the topic + if let Err(e) = self + .swarm + .behaviour_mut() + .gossipsub + .publish(topic, record.value.clone()) + { + tracing::error!("Failed to publish record to topic: {e}"); + } + } + } + SwarmEvent::Behaviour(BehaviourEvent::PeerRequest( + request_response::Event::Message { message, .. }, + )) => match message { + request_response::Message::Request { + request, channel: _, .. + } => { + tracing::debug!("Received request: {:?}", &request); + + } + request_response::Message::Response { + request_id, + response, + } => self + .pending_requests + .remove(&request_id) + .ok_or(Error::StaticStr("Remove failed"))? + .send(Ok(response.to_vec())) + .map_err(|_| Error::StaticStr("Failed to send response"))?, + }, + SwarmEvent::Behaviour(BehaviourEvent::PeerRequest( + request_response::Event::OutboundFailure { + request_id, + error, + peer, + .. + }, + )) => { + tracing::error!( + "Request failed, couldn't SEND JEEVES to peer {peer}: {error} on request_id: {request_id}" + ); + self.pending_requests + .remove(&request_id) + .ok_or(Error::StaticStr("Remove failed"))? 
+ .send(Err(Error::OutboundFailure(error))) + .map_err(|_| Error::StaticStr("Failed to send response"))?; + } + // SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Error { + // peer_id, + // error: libp2p::swarm::StreamUpgradeError::Timeout, + // })) => { + // debug!("Identify Error to {peer_id} closed due to timeout"); + // + // // When a browser tab closes, we don't get a swarm event + // // maybe there's a way to get this with TransportEvent + // // but for now remove the peer from routing table if there's an Identify timeout + // + // // Add a warning counter, kick off after 3 tries to Identify + // // if the peer is still in the routing table, remove it + // let warning_count = self.warning_counters.entry(peer_id).or_insert(0); + // *warning_count += 1; + // + // debug!("⚠️ Identify count Warning for {peer_id}: {warning_count}"); + // + // // Remove peer after 3 non responses to Identify + // if *warning_count >= 3 { + // // remove the peer from the Kad routing table + // self.swarm + // .behaviour_mut() + // .kademlia + // .as_mut() + // .map(|k| k.remove_peer(&peer_id)); + // + // // remove from Gossipsub + // if let Some(g) = self.swarm.behaviour_mut().gossipsub.as_mut() { + // g.remove_explicit_peer(&peer_id) + // }; + // + // // remove from swarm. TODO: rm unwrap + // // self.swarm.disconnect_peer_id(peer_id).unwrap(); + // + // // remove the peer from the warning_counters HashMap + // self.warning_counters.remove(&peer_id); + // debug!("Removed PEER {peer_id} from the routing table (if it was in there)."); + // } + // } + SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Received { + peer_id, + info: + identify::Info { + listen_addrs, + protocols, + observed_addr, + .. + }, + .. 
+ })) => { + tracing::debug!( + "ℹ️ identify Received peer {} observed_addr: {}", + peer_id, + observed_addr + ); + + // remove warning_counters entry for this peer if it exists + //self.warning_counters.remove(&peer_id); + + // Only add the address to the matching protocol name, + if protocols.iter().any(|p| { + self.swarm + .behaviour() + .kad + .protocol_names() + .iter() + .any(|q| p == q) + }) { + for addr in listen_addrs { + tracing::debug!("ℹ️ identify::Event::Received listen addr: {}", addr); + + let webrtc_address = addr + .clone() + .with(Protocol::WebRTCDirect) + .with(Protocol::P2p(peer_id)); + + self.swarm + .behaviour_mut() + .kad + .add_address(&peer_id, webrtc_address.clone()); + + // TODO (fixme): the below doesn't work because the address is still missing /webrtc/p2p even after https://github.com/libp2p/js-libp2p-webrtc/pull/121 + self.swarm + .behaviour_mut() + .kad + .add_address(&peer_id, addr.clone()); + + tracing::debug!("ℹ️ Added {peer_id} to the routing table."); + } + } + } + + SwarmEvent::Behaviour(BehaviourEvent::Kad(kad::Event::OutboundQueryProgressed { + id, + result, + .. + })) => { + + tracing::debug!("Got Kad QueryProgressed: {:?}", result); + match result { + kad::QueryResult::GetProviders(Ok(kad::GetProvidersOk::FoundProviders { + providers, + .. + })) => { + if let Some(sender) = self.pending_get_providers.remove(&id) { + sender.send(providers).expect("Receiver not to be dropped"); + + // Finish the query. We are only interested in the first result. + self.swarm + .behaviour_mut() + .kad + .query_mut(&id) + .unwrap() + .finish(); + } + } + kad::QueryResult::GetRecord(Ok(kad::GetRecordOk::FoundRecord( + PeerRecord { record, .. 
}, + ))) => { + tracing::debug!("Got QueryResult Record: {:?}", record); + if let Some(sender) = self.pending_get_records.remove(&id) { + sender + .send(Ok(record.value.clone())) + .expect("Receiver not to be dropped"); + + // emit a GotRecord event + // so that DHT Records can be retrieved + // by users without an async environment + + // Finish the query. We are only interested in the first result. + // TODO: Handle comparing and choosing the best record + self.swarm + .behaviour_mut() + .kad + .query_mut(&id) + .unwrap() + .finish(); + } + } + kad::QueryResult::GetRecord(Err(e)) => { + tracing::error!("Failed to get record: {e}"); + if let Some(sender) = self.pending_get_records.remove(&id) { + sender + .send(Err(Error::KadGetRecord(e))) + .expect("Receiver not to be dropped"); + } + } + _ => { + tracing::warn!("Received unknown Kad QueryResult: {:?}", result); + } + } + } + SwarmEvent::Behaviour(BehaviourEvent::Kad(kad::Event::InboundRequest { + request, + .. + })) => { + tracing::debug!("Kademlia Inbound Request: {:?}", request); + match request { + InboundRequest::PutRecord { + source, + record, + .. + } => { + tracing::info!("Received PutRecordRequest from: {:?}", source); + + // TODO: Filter Providers based on criteria? + // for now, add the provider to the DHT as is + if let Some(rec) = record { + if let Err(e) = self + .swarm + .behaviour_mut() + .kad + .store_mut() + .put(rec.clone()) + { + tracing::error!("Failed to add provider to DHT: {e}"); + } + } + + // send evt to external handler plugins to decide whether to include record or not: + if let Err(e) = self + .event_sender + .send(PublicEvent::Swarm(Libp2pEvent::PutRecordRequest { + source, + })) + .await + { + tracing::error!("Failed to send PutRecordRequest event: {e}"); + } + } + InboundRequest::AddProvider { + record: Some(provider_rec), + } => { + tracing::info!("Received AddProviderRequest: {:?}", provider_rec); + // TODO: Filter Providers based on criteria? 
+ // for now, add the provider to the DHT as is + if let Err(e) = self + .swarm + .behaviour_mut() + .kad + .store_mut() + .add_provider(provider_rec) + { + tracing::error!("Failed to add provider to DHT: {e}"); + } + } + _ => { + tracing::warn!("Received unknown InboundRequest: {:?}", request); + } + } + } + SwarmEvent::Behaviour(BehaviourEvent::Kad(evt)) => { + tracing::debug!("Kademlia event: {:?}", evt); + } + // SwarmEvent::Behaviour(BehaviourEvent::Kademlia( + // libp2p::kad::KademliaEvent::OutboundQueryProgressed { + // result: libp2p::kad::QueryResult::Bootstrap(res), + // .. + // }, + // )) => { + // debug!("Kademlia BOOTSTRAP Result: {:?}", res); + // } + // SwarmEvent::Behaviour(BehaviourEvent::Kademlia(event)) => { + // debug!("Kademlia event: {:?}", event) + // } + + // ignore NewExternalAddrOfPeer + SwarmEvent::NewExternalAddrOfPeer { .. } => { + // tracing::debug!("New external address of peer {peer_id}: {address}"); + } + SwarmEvent::NewExternalAddrCandidate { address } => { + tracing::debug!("New external address candidate: {address}"); + } + SwarmEvent::Behaviour(BehaviourEvent::Bitswap(bitswap)) => { + match bitswap { + beetswap::Event::GetQueryResponse { query_id, data } => { + tracing::debug!("Bitswap: received response for {query_id:?}: {data:?}"); + if let Some(sender) = self.pending_queries.remove(&query_id) { + sender.send(data).map_err(|_| { + tracing::error!("Failed to send response for Bitswap result"); + Error::StaticStr("Failed to send response") + })?; + } else { + tracing::info!("received response for unknown cid"); + } + } + beetswap::Event::GetQueryError { query_id, error } => { + tracing::debug!("Bitswap: received error for {query_id:?}: {error}"); + if let Some(sender) = self.pending_queries.remove(&query_id) { + tracing::info!("received error for sender: {error}"); + // Dropping the sender will cause the receiver to get an error + drop(sender); + } else { + tracing::info!("received error for unknown cid: {error}"); + } + } + } + 
}, + event => { + tracing::debug!("Other type of event: {:?}", event); + } + } + Ok(()) + } + + async fn handle_command(&mut self, command: NetworkCommand) { + match command { + NetworkCommand::StartListening { addr, sender } => { + let _ = match self.swarm.listen_on(addr) { + Ok(id) => sender.send(Ok(id)), + Err(e) => sender.send(Err(Error::TransportIo(e))), + }; + } + NetworkCommand::Dial { addr, sender } => { + let _ = match self.swarm.dial(addr) { + Ok(_) => sender.send(Ok(())), + Err(e) => sender.send(Err(Error::Dial(e))), + }; + } + NetworkCommand::Publish { + data: message, + topic, + } => { + tracing::info!("API: Handling Publish command to {topic}"); + let top = libp2p::gossipsub::IdentTopic::new(&topic); + if let Err(err) = self.swarm.behaviour_mut().gossipsub.publish(top, message) { + tracing::error!("Failed to publish message: {err}"); + + // list of all peers + let peers = self + .swarm + .behaviour() + .gossipsub + .all_peers() + .collect::>(); + // show explicit peers + tracing::info!("All peers: {:?}", peers); + + // let _ = self + // .event_sender + // .send(Event::Error { + // error: NetworkError::PublishFailed, + // }) + // .await; + } + tracing::info!("API: Successfully Published to {topic}"); + } + NetworkCommand::Subscribe { topic } => { + tracing::info!("API: Handling Subscribe command to {topic}"); + if let Err(err) = self + .swarm + .behaviour_mut() + .gossipsub + .subscribe(&libp2p::gossipsub::IdentTopic::new(&topic)) + { + tracing::error!("Failed to subscribe to topic: {err}"); + let _ = self + .event_sender + .send(PublicEvent::Error { + error: NetworkError::SubscribeFailed, + }) + .await; + } + tracing::info!("API: Successfully Subscribed to {topic}"); + } + NetworkCommand::Unsubscribe { topic } => { + if let Err(e) = self + .swarm + .behaviour_mut() + .gossipsub + .unsubscribe(&libp2p::gossipsub::IdentTopic::new(&topic)) + { + tracing::error!("Failed to unsubscribe from topic: {topic} {e}"); + let _ = self + .event_sender + 
.send(PublicEvent::Error { + error: NetworkError::UnsubscribeFailed, + }) + .await; + } + } + // Add Explicit Peer by PeerId + NetworkCommand::AddPeer { peer_id } => { + self.swarm + .behaviour_mut() + .gossipsub + .add_explicit_peer(&peer_id); + tracing::info!("API: Added Peer {peer_id} to the routing table."); + } + // Share the current Multiaddr for the server + NetworkCommand::ShareMultiaddr => { + let p2p_addr = self + .swarm + .external_addresses() + .next() + .expect("Expected at least one external address.") + .clone() + .with(Protocol::P2p(*self.swarm.local_peer_id())); + + // emit as Event + if let Err(e) = self + .event_sender + .try_send(PublicEvent::ListenAddr { + address: p2p_addr.clone(), + }) + { + tracing::error!("Failed to send share address event: {e}"); + } + } + NetworkCommand::Jeeves { + request, + peer, + sender, + } => { + tracing::info!("API: Handling RequestResponse command to {peer}"); + let response_id = self + .swarm + .behaviour_mut() + .peer_request + .send_request(&peer, PeerRequest::new(request)); + self.pending_requests.insert(response_id, sender); + } + // NetworkCommand for Bitwap: TODO here. 
+ NetworkCommand::BitswapQuery { cid, sender } => { + let Ok(cid) = cid::Cid::try_from(cid) else { + tracing::error!("Failed to parse CID"); + return; + }; + let query_id = self.swarm.behaviour_mut().bitswap.get(&cid); + tracing::info!("API Bitswap query id: {query_id:?} for CID: {cid}"); + self.pending_queries.insert(query_id, sender); + } + NetworkCommand::RespondJeeves { + bytes: file, + channel, + } => { + tracing::info!("API: Handling RespondFile command"); + self.swarm + .behaviour_mut() + .peer_request + .send_response(channel, PeerResponse::new(file)) + .expect("Connection to peer to be still open."); + } + // Put Records on the DHT + NetworkCommand::PutRecord { key, value } => { + tracing::info!("API: Handling PutRecord command"); + let record = kad::Record::new(key, value); + if let Err(e) = self + .swarm + .behaviour_mut() + .kad + .put_record(record, kad::Quorum::One) { + tracing::error!("Failed to put record: {e}"); + } + } + NetworkCommand::GetRecord { key, sender } => { + tracing::info!("API: Handling GetRecord command"); + let query_id = self.swarm.behaviour_mut().kad.get_record(key.into()); + self.pending_get_records.insert(query_id, sender); + } + NetworkCommand::GetProviders { key, sender } => { + let query_id = self.swarm.behaviour_mut().kad.get_providers(key.into()); + self.pending_get_providers.insert(query_id, sender); + } + NetworkCommand::StartProviding { key } => { + tracing::info!("API: Handling StartProviding command"); + if let Err(e) = self.swarm.behaviour_mut().kad.start_providing(key.into()) { + tracing::error!("Failed to start providing: {e}"); + } + } + } + } +} + diff --git a/crates/bs-p2p/src/events/delay.rs b/crates/bs-p2p/src/events/delay.rs new file mode 100644 index 0000000..d5a600b --- /dev/null +++ b/crates/bs-p2p/src/events/delay.rs @@ -0,0 +1,77 @@ +//! Delay implementation using futures_timer::Delay. 
+use std::{ + ops::{Deref, DerefMut}, + pin::Pin, + task::{Context, Poll}, +}; +use web_time::Duration; + +use futures::{future::FusedFuture, Future, FutureExt}; + +/// We need to wrap futures_timer::Delay in a newtype to implement futures::future::FusedFuture so +/// we can use futures::select! macro. +#[derive(Debug)] +pub struct Delay { + inner: futures_timer::Delay, + active: bool, +} + +impl Delay { + /// Creates a new Delay which will fire after the given duration. + pub fn new(duration: Duration) -> Self { + Self { + inner: futures_timer::Delay::new(duration), + active: true, + } + } + + /// Stops the Delay from firing. + pub fn stop(&mut self) { + self.active = false; + } + + /// Restarts the Delay. + pub fn start(&mut self, duration: Duration) { + match self.active { + true => { + self.inner.reset(duration); + } + false => { + self.active = true; + self.inner.reset(duration); + } + } + } +} + +impl Deref for Delay { + type Target = futures_timer::Delay; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for Delay { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl Future for Delay { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.active { + self.inner.poll_unpin(cx) + } else { + Poll::Pending + } + } +} + +impl FusedFuture for Delay { + fn is_terminated(&self) -> bool { + !self.active + } +} diff --git a/crates/bs-p2p/src/events/timeout.rs b/crates/bs-p2p/src/events/timeout.rs new file mode 100644 index 0000000..a9f987d --- /dev/null +++ b/crates/bs-p2p/src/events/timeout.rs @@ -0,0 +1,58 @@ +//! 
Time over events +use std::future::Future; + +use web_time::Duration; + +#[cfg(target_arch = "wasm32")] +pub(crate) async fn delay(duration: Duration) { + use wasm_bindgen_futures::JsFuture; + use web_sys::js_sys; + + let millis = duration.as_millis() as f64; + let promise = js_sys::Promise::new(&mut |resolve, _| { + let window = web_sys::window().unwrap(); + window + .set_timeout_with_callback_and_timeout_and_arguments_0(&resolve, millis as i32) + .unwrap(); + }); + + JsFuture::from(promise).await.unwrap(); +} + +#[cfg(not(target_arch = "wasm32"))] +pub(crate) async fn delay(duration: Duration) { + tokio::time::sleep(duration).await; +} + +// Generic timeout wrapper +pub(crate) async fn with_timeout( + future: F, + timeout_duration: Duration, +) -> Result +where + F: Future, +{ + use futures::future::{select, Either}; + use futures::pin_mut; + + let timeout_future = delay(timeout_duration); + + pin_mut!(future); + pin_mut!(timeout_future); + + match select(future, timeout_future).await { + Either::Left((result, _)) => Ok(result), + Either::Right((_, _)) => Err(TimeoutError), + } +} + +#[derive(Debug)] +pub struct TimeoutError; + +impl std::fmt::Display for TimeoutError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Operation timed out") + } +} + +impl std::error::Error for TimeoutError {} diff --git a/crates/bs-p2p/src/lib.rs b/crates/bs-p2p/src/lib.rs index b93cf3f..a71c0e1 100644 --- a/crates/bs-p2p/src/lib.rs +++ b/crates/bs-p2p/src/lib.rs @@ -1,14 +1,12 @@ -pub fn add(left: u64, right: u64) -> u64 { - left + right -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_works() { - let result = add(2, 2); - assert_eq!(result, 4); - } -} +//! BetterSign peer to peer communication + +/// Entry point for the crate. Create a libp2p swarm either natively or in wasm32. 
+pub mod swarm; + +mod error; +pub use error::Error; + +mod behaviour; +pub use behaviour::BehaviourBuilder; + +pub mod events; diff --git a/crates/bs-p2p/src/swarm.rs b/crates/bs-p2p/src/swarm.rs new file mode 100644 index 0000000..71c8ed7 --- /dev/null +++ b/crates/bs-p2p/src/swarm.rs @@ -0,0 +1,168 @@ +//! Create a swarm for your target. +#[cfg(not(target_arch = "wasm32"))] +mod config; +#[cfg(not(target_arch = "wasm32"))] +use config::Config; + +use libp2p::{identity::Keypair, relay, swarm::NetworkBehaviour}; +// web-time crate uses std::time in native targets +use std::path::PathBuf; +use web_time::Duration; + +/// Get the default project directory for storing configuration files +#[cfg(not(target_arch = "wasm32"))] +fn get_project_dir() -> Option { + directories::ProjectDirs::from("org", "bs", "bs-p2p") + .map(|proj_dirs| proj_dirs.config_dir().to_path_buf()) +} + +#[cfg(not(target_os = "android"))] +pub async fn create( + behaviour_constructor: impl FnOnce(&Keypair, relay::client::Behaviour) -> B, + base_path: Option, +) -> Result, String> { + #[cfg(target_arch = "wasm32")] + { + use libp2p::core::upgrade::Version; + use libp2p::{noise, websocket_websys, yamux, Transport as _}; + + Ok(libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|key| { + libp2p_webrtc_websys::Transport::new(libp2p_webrtc_websys::Config::new(key)) + }) + .expect("infalliable to never exist") + .with_other_transport(|local_key| { + Ok(websocket_websys::Transport::default() + .upgrade(Version::V1Lazy) + .authenticate( + noise::Config::new(local_key) + .map_err(|e| format!("failed to initialise noise: {:?}", e))?, + ) + .multiplex(yamux::Config::default())) + }) + .expect("infalliable to never exist") + .with_relay_client(noise::Config::new, yamux::Config::default) + .map_err(|e| e.to_string())? 
+ .with_behaviour(behaviour_constructor) + .expect("infalliable to never exist") + // Ping does not KeepAlive, so we set the idle connection timeout to 32_212_254u64, + // which is the largest value that works with the wasm32 target. + .with_swarm_config(|c| { + c.with_idle_connection_timeout(Duration::from_secs(32_212_254u64)) + }) + .build()) + } + #[cfg(not(target_arch = "wasm32"))] + { + use libp2p::{noise, yamux}; + use libp2p_webrtc::tokio::Certificate; + use rand::thread_rng; + + // Use the provided base_path or fall back to project directory + let config_path = base_path.or_else(get_project_dir); + + tracing::info!("Using configuration path: {:?}", config_path); + + let (keypair, cert) = Config::load(config_path.clone()).unwrap_or_else(|_| { + tracing::info!("Generating new keypair and certificate"); + let keypair = Keypair::generate_ed25519(); + let cert = Certificate::generate(&mut thread_rng()).unwrap(); + Config::save(&keypair, &cert, config_path).unwrap(); + (keypair, cert) + }); + + tracing::info!("🐝 Loaded keypair and certificate, creating swarm"); + + Ok(libp2p::SwarmBuilder::with_existing_identity(keypair) + .with_tokio() + .with_quic() + .with_other_transport(|id_keys| { + Ok(libp2p_webrtc::tokio::Transport::new(id_keys.clone(), cert)) + }) + .map_err(|e| { + tracing::error!("⭕ Error creating webrtc: {:?}", e); + + e.to_string() + })? + .with_dns() + .map_err(|e| { + tracing::error!("⭕ Error creating dns: {:?}", e); + e.to_string() + })? + .with_websocket(noise::Config::new, yamux::Config::default) + .await + .map_err(|e| { + tracing::error!("⭕ Error creating websocket: {:?}", e); + e.to_string() + })? + .with_relay_client(noise::Config::new, yamux::Config::default) + .map_err(|e| { + tracing::error!("⭕ Error creating relay client: {:?}", e); + e.to_string() + })? + .with_behaviour(behaviour_constructor) + .map_err(|e| { + tracing::error!("⭕ Error creating behaviour: {:?}", e); + e.to_string() + })? 
+ .with_swarm_config(|cfg| { + cfg.with_idle_connection_timeout(Duration::from_secs(32_212_254u64)) + }) + .build()) + } +} + +#[cfg(target_os = "android")] +pub async fn create( + behaviour_constructor: impl FnOnce(&Keypair, relay::client::Behaviour) -> B, + base_path: Option, +) -> Result, String> { + mod config; + use config::Config; + + use libp2p::{noise, yamux}; + use libp2p_webrtc::tokio::Certificate; + use rand::thread_rng; + + // Use the provided base_path or fall back to project directory + let config_path = base_path.or_else(get_project_dir); + + tracing::info!("Using configuration path: {:?}", config_path); + + let (keypair, cert) = Config::load(config_path.clone()).unwrap_or_else(|_| { + tracing::info!("Generating new keypair and certificate"); + let keypair = Keypair::generate_ed25519(); + let cert = Certificate::generate(&mut thread_rng()).unwrap(); + Config::save(&keypair, &cert, config_path).unwrap(); + (keypair, cert) + }); + + tracing::info!("🐝 Loaded keypair and certificate, creating swarm"); + + Ok(libp2p::SwarmBuilder::with_existing_identity(keypair) + .with_tokio() + .with_quic() + .with_other_transport(|id_keys| { + Ok(libp2p_webrtc::tokio::Transport::new(id_keys.clone(), cert)) + }) + .map_err(|e| { + tracing::error!("⭕ Error creating webrtc: {:?}", e); + + e.to_string() + })? + .with_relay_client(noise::Config::new, yamux::Config::default) + .map_err(|e| { + tracing::error!("⭕ Error creating relay client: {:?}", e); + e.to_string() + })? + .with_behaviour(behaviour_constructor) + .map_err(|e| { + tracing::error!("⭕ Error creating behaviour: {:?}", e); + e.to_string() + })? + .with_swarm_config(|cfg| { + cfg.with_idle_connection_timeout(Duration::from_secs(32_212_254u64)) + }) + .build()) +} diff --git a/crates/bs-p2p/src/swarm/config.rs b/crates/bs-p2p/src/swarm/config.rs new file mode 100644 index 0000000..497cad2 --- /dev/null +++ b/crates/bs-p2p/src/swarm/config.rs @@ -0,0 +1,190 @@ +//! 
Configuration for the libp2p node for native target swarms + +#![cfg(not(target_arch = "wasm32"))] +use libp2p::identity; +use libp2p::identity::Keypair; +use libp2p::identity::PeerId; +use libp2p_webrtc::tokio::Certificate; +use serde::{Deserialize, Serialize}; +use std::error::Error; +use std::fs; +use std::path::Path; +use std::path::PathBuf; +use std::str::FromStr; + +pub const DEFAULT_CONFIG_FILENAME: &str = "bs_p2p_config.json"; + +/// The configuration of the libp2p node. +#[derive(Clone, Deserialize, Serialize)] +#[serde(rename_all = "PascalCase")] +pub struct Config { + /// The path where the config was loaded from or will be saved to + pub path: PathBuf, + pub identity: Identity, +} + +impl Default for Config { + fn default() -> Self { + Self { + path: PathBuf::from(DEFAULT_CONFIG_FILENAME), + identity: Identity::default(), + } + } +} + +impl zeroize::Zeroize for Config { + fn zeroize(&mut self) { + self.identity.peer_id.zeroize(); + self.identity.priv_key.zeroize(); + self.identity.cert_pem.zeroize(); + } +} + +/// The identity of this node, the PeerId, priv key, and cert pem. +#[derive(Clone, Deserialize, Serialize, Default)] +#[serde(rename_all = "PascalCase")] +pub struct Identity { + pub peer_id: String, + priv_key: Vec, + cert_pem: String, +} + +impl Config { + /// Loads a Config from a file + pub fn from_file(path: &Path) -> Result> { + let config: Config = serde_json::from_str(&std::fs::read_to_string(path)?)?; + // Ensure the loaded config knows where it came from + let mut config = config; + config.path = path.to_path_buf(); + Ok(config) + } + + /// Loads keypair and certificate from config file, or returns an error if not found. + /// The base_path parameter specifies where to look for the config file. 
+ pub fn load(base_path: Option) -> Result<(Keypair, Certificate), Box> { + let config_path = determine_config_path(base_path); + tracing::info!("Loading configuration from: {:?}", config_path); + + let config = Config::from_file(&config_path)?; + tracing::info!("Found existing configuration"); + + let config = zeroize::Zeroizing::new(config); + let keypair = identity::Keypair::from_protobuf_encoding(&zeroize::Zeroizing::new( + config.identity.priv_key.clone(), + ))?; + + let cert = Certificate::from_pem(&config.identity.cert_pem)?; + + // Verify the peer ID matches what we expect + let peer_id = keypair.public().into(); + assert_eq!( + PeerId::from_str(&config.identity.peer_id)?, + peer_id, + "Peer ID derived from private key doesn't match the stored peer ID." + ); + + Ok((keypair, cert)) + } + + /// Saves the keypair and certificate to the filesystem. + /// The base_path parameter specifies where to save the config file. + pub fn save( + keypair: &Keypair, + cert: &Certificate, + base_path: Option, + ) -> Result<(), Box> { + let config_path = determine_config_path(base_path); + + let config = Config { + path: config_path.clone(), + identity: Identity { + peer_id: keypair.public().to_peer_id().to_string(), + priv_key: keypair.to_protobuf_encoding().expect("valid keypair"), + cert_pem: cert.serialize_pem(), + }, + }; + + // Ensure the directory exists + if let Some(parent) = config_path.parent() { + tracing::info!("💾 Creating directory: {:?}", parent); + fs::create_dir_all(parent)?; + } + + tracing::info!("💾 Saving configuration to: {:?}", config_path); + fs::write(&config_path, serde_json::to_string_pretty(&config)?)?; + + Ok(()) + } +} + +/// Helper function to determine the final config file path +fn determine_config_path(base_path: Option) -> PathBuf { + match base_path { + Some(path) => { + if path.is_dir() { + path.join(DEFAULT_CONFIG_FILENAME) + } else { + path + } + } + None => PathBuf::from(DEFAULT_CONFIG_FILENAME), + } +} + +#[cfg(test)] +mod tests { + 
use super::*; + use libp2p::identity::Keypair; + use libp2p_webrtc::tokio::Certificate; + use tempfile::TempDir; + + #[test] + fn test_roundtrip() { + let temp_dir = TempDir::new().unwrap(); + + let keypair = Keypair::generate_ed25519(); + let cert = Certificate::generate(&mut rand::thread_rng()).unwrap(); + + Config::save(&keypair, &cert, Some(temp_dir.path().to_path_buf())).unwrap(); + + let (keypair2, cert2) = Config::load(Some(temp_dir.path().to_path_buf())).unwrap(); + + assert_eq!( + keypair.to_protobuf_encoding().unwrap(), + keypair2.to_protobuf_encoding().unwrap() + ); + assert_eq!(cert, cert2); + } + + #[test] + fn test_custom_path() { + let temp_dir = TempDir::new().unwrap(); + + let keypair = Keypair::generate_ed25519(); + let cert = Certificate::generate(&mut rand::thread_rng()).unwrap(); + + Config::save(&keypair, &cert, Some(temp_dir.path().to_path_buf())).unwrap(); + + let (loaded_keypair, loaded_cert) = + Config::load(Some(temp_dir.path().to_path_buf())).unwrap(); + + assert_eq!( + keypair.to_protobuf_encoding().unwrap(), + loaded_keypair.to_protobuf_encoding().unwrap() + ); + assert_eq!(cert, loaded_cert); + } + + #[test] + fn test_path_handling() { + let temp_dir = TempDir::new().unwrap(); + + let expected_path = temp_dir.path().join(DEFAULT_CONFIG_FILENAME); + let actual_path = determine_config_path(Some(temp_dir.path().to_path_buf())); + assert_eq!(expected_path, actual_path); + + let file_path = temp_dir.path().join("bs_path_test.json"); + let actual_path = determine_config_path(Some(file_path.clone())); + assert_eq!(file_path, actual_path); + } +} diff --git a/crates/bs-peer/.cargo/config.toml b/crates/bs-peer/.cargo/config.toml new file mode 100644 index 0000000..208a516 --- /dev/null +++ b/crates/bs-peer/.cargo/config.toml @@ -0,0 +1,6 @@ +[env] +# RUST_LOG = "debug" + +# add RUSTFLAGS='--cfg getrandom_backend="wasm_js"' for browser wasm32 targets +[target.wasm32-unknown-unknown] +rustflags = ["--cfg", "getrandom_backend=\"wasm_js\""] diff 
--git a/crates/bs-peer/Cargo.toml b/crates/bs-peer/Cargo.toml new file mode 100644 index 0000000..59e9a0a --- /dev/null +++ b/crates/bs-peer/Cargo.toml @@ -0,0 +1,74 @@ +[package] +name = "bs-peer" +version.workspace = true +edition.workspace = true +authors.workspace = true +description.workspace = true +readme.workspace = true +license.workspace = true + +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +thiserror.workspace = true +bs.workspace = true +bs-p2p.workspace = true +bs-traits.workspace = true +bs-wallets.workspace = true +libp2p.workspace = true +tracing.workspace = true +provenance-log.workspace = true +multicodec.workspace = true +multikey.workspace = true +multisig.workspace = true +multicid.workspace = true +multihash.workspace = true +blockstore.workspace = true +serde = { workspace = true, optional = true } +anyhow = "1.0" +cid.workspace = true +futures = "0.3" +multihash-codetable = { workspace = true, features = [ + "sha2", + "sha3", + "blake3", +] } +tokio = { workspace = true, features = ["sync"] } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +directories = "6.0.0" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +send_wrapper = { version = "0.6.0", features = ["futures"] } +wasm-bindgen = "0.2.93" +wasm-bindgen-futures.workspace = true +futures = "0.3.29" +js-sys = "0.3.66" +getrandom = { version = "0.3", features = ["wasm_js"] } +web-sys = { workspace = true, features = [ + "Window", + "Navigator", + "Storage", + "StorageManager", + "Blob", + "File", + "FileSystem", + "FileSystemFileHandle", + "FileSystemGetFileOptions", + "FileSystemDirectoryHandle", + "FileSystemWritableFileStream", + "FileSystemCreateWritableOptions", +] } + +[dev-dependencies] +wasm-bindgen-test = "0.3" +wasm-bindgen = "0.2.93" +tempfile = "3.20.0" +tracing-subscriber.workspace = true + +[lints] +workspace = true + +[features] +serde = ["dep:serde"] diff --git a/crates/bs-peer/README.md b/crates/bs-peer/README.md new file mode 100644 index 
0000000..5f76421 --- /dev/null +++ b/crates/bs-peer/README.md @@ -0,0 +1,11 @@ +# BetterSign Peer + +Create Vlads and update BetterSign Plogs in the browser and on the desktop. + +## Tests + +Run the following command to run the all tests (native and web): + +```sh +just test +``` diff --git a/crates/bs-peer/justfile b/crates/bs-peer/justfile new file mode 100644 index 0000000..63761b2 --- /dev/null +++ b/crates/bs-peer/justfile @@ -0,0 +1,97 @@ +check32: + RUSTFLAGS='--cfg getrandom_backend="wasm_js"' cargo check --target wasm32-unknown-unknown + +test: test-web + cargo test + +test-web: + RUSTFLAGS='--cfg getrandom_backend="wasm_js"' wasm-pack build --target web + RUSTFLAGS='--cfg getrandom_backend="wasm_js"' wasm-pack test --headless --chrome --all-features + +# Update ChromeDriver to match the installed Chrome/Chromium version +update-chromedriver: + #!/usr/bin/env bash + set -euo pipefail + + # Function to get the major version + get_major_version() { + echo "$1" | cut -d '.' -f1 + } + + # Find the installed Chrome/Chromium version + if command -v google-chrome &> /dev/null; then + CHROME_VERSION=$(google-chrome --version | awk '{print $3}') + echo "Using Google Chrome" + elif command -v google-chrome-stable &> /dev/null; then + CHROME_VERSION=$(google-chrome-stable --version | awk '{print $3}') + echo "Using Google Chrome" + elif command -v chromium &> /dev/null; then + CHROME_VERSION=$(chromium --version | awk '{print $2}') + echo "Using Chromium" + elif command -v chromium-browser &> /dev/null; then + CHROME_VERSION=$(chromium-browser --version | awk '{print $2}') + echo "Using Chromium" + else + echo "Neither Chrome nor Chromium found. Please install one and try again." 
+ exit 1 + fi + + echo "Installed Chrome/Chromium version: $CHROME_VERSION" + + # Get the major version + MAJOR_VERSION=$(get_major_version "$CHROME_VERSION") + + # Determine the operating system + OS=$(uname -s) + case $OS in + Linux) + OS_PATH="linux64" + CHROMEDRIVER_FILE="chromedriver-linux64.zip" + ;; + Darwin) + OS_PATH="mac-x64" + CHROMEDRIVER_FILE="chromedriver-mac-x64.zip" + ;; + *) + echo "Unsupported operating system: $OS" + exit 1 + ;; + esac + + # Construct the download URL + DOWNLOAD_URL="https://storage.googleapis.com/chrome-for-testing-public/${CHROME_VERSION}/${OS_PATH}/${CHROMEDRIVER_FILE}" + + echo "Attempting to download ChromeDriver from: $DOWNLOAD_URL" + + # Try to download ChromeDriver + if ! wget -O chromedriver.zip "$DOWNLOAD_URL"; then + echo "Failed to download ChromeDriver for version $CHROME_VERSION" + echo "Trying with major version only..." + + # Construct the download URL with major version only + DOWNLOAD_URL="https://storage.googleapis.com/chrome-for-testing-public/${MAJOR_VERSION}.0.0.0/${OS_PATH}/${CHROMEDRIVER_FILE}" + + echo "Attempting to download ChromeDriver from: $DOWNLOAD_URL" + + if ! wget -O chromedriver.zip "$DOWNLOAD_URL"; then + echo "Failed to download ChromeDriver. Please check your Chrome/Chromium version and try again." 
+ exit 1 + fi + fi + + # Extract ChromeDriver + unzip -o chromedriver.zip + + # Make ChromeDriver executable + chmod +x chromedriver-*/chromedriver + + # Move ChromeDriver to /usr/local/bin (may require sudo) + sudo mv chromedriver-*/chromedriver /usr/local/bin/ + + # Clean up + rm -rf chromedriver.zip chromedriver-*/ + + echo "ChromeDriver for Chrome/Chromium version $CHROME_VERSION has been installed to /usr/local/bin/chromedriver" + + # Verify installation + chromedriver --version diff --git a/crates/bs-peer/local_keypair b/crates/bs-peer/local_keypair new file mode 100644 index 0000000..fb082aa --- /dev/null +++ b/crates/bs-peer/local_keypair @@ -0,0 +1,76 @@ +{ + "Identity": { + "PeerId": "12D3KooWPzrq2EPGefGhMu3YWEv4xPWR2muGbs849AihVLGJ2vJs", + "PrivKey": [ + 8, + 1, + 18, + 64, + 184, + 133, + 130, + 194, + 119, + 205, + 175, + 207, + 28, + 254, + 20, + 16, + 17, + 218, + 37, + 245, + 255, + 40, + 73, + 132, + 94, + 241, + 150, + 233, + 252, + 95, + 152, + 23, + 153, + 28, + 5, + 172, + 210, + 179, + 212, + 186, + 248, + 164, + 3, + 155, + 9, + 154, + 253, + 118, + 136, + 119, + 74, + 176, + 216, + 192, + 135, + 198, + 193, + 62, + 127, + 3, + 177, + 59, + 151, + 200, + 217, + 96, + 155, + 216 + ], + "CertPem": "-----BEGIN EXPIRES-----\r\nAPfhng8AAAA=\r\n-----END EXPIRES-----\r\n\n-----BEGIN PRIVATE_KEY-----\r\nMIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgp5aW9UVRWz46fZ6M\r\nrGDvW6oRBW3PJEby60uPvSPxonKhRANCAAQSSpw4BXGFfDZRTxpVxpobEITbrAhB\r\nGXZyXmEgiQiPZCy7pXLhRsZoF2KntCU6mgrzpEkZyk9ZJXXe3aPH9bnu\r\n-----END PRIVATE_KEY-----\r\n\r\n-----BEGIN 
CERTIFICATE-----\r\nMIIBZDCCAQugAwIBAgIUZIaUZMnFm9GMo7frbMSDdoiEfCkwCgYIKoZIzj0EAwIw\r\nITEfMB0GA1UEAwwWcmNnZW4gc2VsZiBzaWduZWQgY2VydDAgFw03NTAxMDEwMDAw\r\nMDBaGA80MDk2MDEwMTAwMDAwMFowITEfMB0GA1UEAwwWcmNnZW4gc2VsZiBzaWdu\r\nZWQgY2VydDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABBJKnDgFcYV8NlFPGlXG\r\nmhsQhNusCEEZdnJeYSCJCI9kLLulcuFGxmgXYqe0JTqaCvOkSRnKT1kldd7do8f1\r\nue6jHzAdMBsGA1UdEQQUMBKCEEZrVEhpRDNwQWs0a3lxSDgwCgYIKoZIzj0EAwID\r\nRwAwRAIgNaDscdYErrfx/AgdcFA6Pwbx26t2YZXEMP8Lg6ECbgYCIAcUdUK9fyuI\r\n4W7aQcazPOTd063ZQq8skNoxVsVvcP0u\r\n-----END CERTIFICATE-----\r\n" + } +} \ No newline at end of file diff --git a/crates/bs-peer/src/config.rs b/crates/bs-peer/src/config.rs new file mode 100644 index 0000000..9626979 --- /dev/null +++ b/crates/bs-peer/src/config.rs @@ -0,0 +1,3 @@ +//! Configuration modules for open and update. + +mod open; diff --git a/crates/bs-peer/src/config/open.rs b/crates/bs-peer/src/config/open.rs new file mode 100644 index 0000000..26878d9 --- /dev/null +++ b/crates/bs-peer/src/config/open.rs @@ -0,0 +1 @@ +//! Open Config diff --git a/crates/bs-peer/src/error.rs b/crates/bs-peer/src/error.rs new file mode 100644 index 0000000..943a4e4 --- /dev/null +++ b/crates/bs-peer/src/error.rs @@ -0,0 +1,106 @@ +//! BsPeer Errors +use thiserror::Error; + +/// Errors that can occur in the BsPeer library. +#[derive(Error, Debug)] +pub enum Error { + /// Error from the P2P library. + #[error("P2P error: {0}")] + P2p(#[from] bs_p2p::Error), + + /// Error from the multiaddr library. + #[error("Multiaddr error: {0}")] + Multiaddr(#[from] libp2p::multiaddr::Error), + + /// Error from the identity library. + #[error("Identity error: {0}")] + Identity(#[from] libp2p::identity::ParseError), + + /// No data directory specified. + #[error("No data directory specified")] + NoDataDir, + + /// Input/output error. 
+ #[error("I/O error: {0}")] + Io(#[from] std::io::Error), + + /// Platform-specific errors + #[error("Platform error: {0}")] + Platform(#[from] crate::platform::Error), + + /// Plog already exists + #[error("Plog already exists")] + PlogAlreadyExists, + + /// From + #[error("Open error: {0}")] + Open(#[from] bs::error::OpenError), + + /// From + #[error("Update error: {0}")] + Update(#[from] bs::error::UpdateError), + + /// From + #[error("Bs error: {0}")] + Bs(#[from] bs::Error), + + /// From + #[error("Provenance log error: {0}")] + Plog(#[from] provenance_log::Error), + + /// From + #[error("Multikey error: {0}")] + Multikey(#[from] multikey::Error), + + /// From + #[error("Multicid error: {0}")] + Multicid(#[from] multicid::Error), + + /// cid::Cid error + #[error("CID error: {0}")] + Cid(#[from] cid::Error), + + /// From + #[error("Multihash error: {0}")] + Multihash(#[from] multihash::Error), + + /// Error during verification of the provenance log. + #[error("Plog verification failed: {0}")] + PlogVerificationFailed(provenance_log::Error), + + /// Blockstore error + #[error("Blockstore error: {0}")] + Blockstore(#[from] blockstore::Error), + + /// Generic string error + #[error("{0}")] + StringError(String), + + /// Plog not initialized + #[error("Plog not initialized")] + PlogNotInitialized, + + /// Wallets error + #[error(transparent)] + Wallets(#[from] bs_wallets::Error), + + /// No network connection for this peer + #[error("Peer is not connected to a network")] + NotConnected, + + /// Mutex Lock Poisoned + #[error("Mutex lock poisoned")] + LockPosioned, +} + +impl From for Error { + fn from(s: String) -> Self { + Error::StringError(s) + } +} + +impl From<&str> for Error { + fn from(s: &str) -> Self { + Error::StringError(s.to_string()) + } +} diff --git a/crates/bs-peer/src/lib.rs b/crates/bs-peer/src/lib.rs new file mode 100644 index 0000000..8ca86fc --- /dev/null +++ b/crates/bs-peer/src/lib.rs @@ -0,0 +1,21 @@ +//! 
BetterSign Peer + +// include readme +#![doc = include_str!("../README.md")] + +// Test the README.md code snippets +#[cfg(doctest)] +pub struct ReadmeDoctests; + +pub mod peer; +pub use peer::{BsPeer, DefaultBsPeer}; + +pub mod platform; + +pub mod error; +pub use error::Error; + +mod config; + +pub mod utils; +pub use utils::create_default_scripts; diff --git a/crates/bs-peer/src/peer.rs b/crates/bs-peer/src/peer.rs new file mode 100644 index 0000000..c43f18c --- /dev/null +++ b/crates/bs-peer/src/peer.rs @@ -0,0 +1,502 @@ +//! BetterSign Peer: BetterSign core + libp2p networking + Blockstore +use std::sync::{Arc, Mutex}; + +use crate::{platform, Error}; +use ::cid::Cid; +use blockstore::Blockstore as BlockstoreTrait; +pub use bs::resolver_ext::ResolverExt; +pub use bs::update::Config as UpdateConfig; +use bs::{ + config::sync::{KeyManager, MultiSigner}, + params::{ + anykey::PubkeyParams, + vlad::{FirstEntryKeyParams, VladParams}, + }, + update::OpParams, +}; +pub use bs_p2p::events::api::{Client, Libp2pEvent}; +pub use bs_p2p::events::PublicEvent; +use bs_traits::CondSync; +use futures::channel::mpsc::{self}; +pub use libp2p::PeerId; +use multicid::cid; +use multicodec::Codec; +use multihash::mh; +use provenance_log::key::key_paths::ValidatedKeyParams; +pub use provenance_log::resolver::{ResolvedPlog, Resolver}; +pub use provenance_log::{self as p, Key, Script}; + +/// A peer that is generic over the blockstore type. 
+/// +/// Can operate offline with just a local blockstore, or connect to a network +#[derive(Debug)] +pub struct BsPeer +where + KP: KeyManager + MultiSigner, + BS: BlockstoreTrait + CondSync, +{ + /// The Provenance Log of the peer, which contains the history of operations + plog: Arc>>, + /// Key provider for the peer, used for signing and key management + key_provider: KP, + /// [Blockstore] to save data + blockstore: BS, + /// Client handle to send commands to the network + pub network_client: Option, + /// Events emitted from the network + pub events: Option>, + /// The peer ID of this peer in the network + pub peer_id: Option, +} + +impl PartialEq for BsPeer +where + KP: KeyManager + MultiSigner + CondSync, + BS: BlockstoreTrait + CondSync, +{ + fn eq(&self, other: &Self) -> bool { + // Compare peer IDs and blockstore references + // Equal is the plogs match + self.peer_id == other.peer_id && Arc::ptr_eq(&self.plog, &other.plog) + } +} + +/// Impl Clone for BsPeer - You get everything except the events because you can't clone a +/// Receiver. 
+/// +/// Plog is wraped in Arc to allow shared access of a single [provenance_log::Log] across threads, +impl Clone for BsPeer +where + KP: KeyManager + MultiSigner + CondSync + Clone, + BS: BlockstoreTrait + CondSync + Clone, +{ + fn clone(&self) -> Self { + Self { + plog: self.plog.clone(), + key_provider: self.key_provider.clone(), + blockstore: self.blockstore.clone(), + network_client: self.network_client.clone(), + events: None, + peer_id: self.peer_id, + } + } +} + +// Default platform-specific version of BsPeer +pub type DefaultBsPeer = BsPeer; + +#[cfg(target_arch = "wasm32")] +fn directories() -> String { + // For wasm, we use a default directory + "bs-peer".into() +} + +#[cfg(not(target_arch = "wasm32"))] +fn directories() -> std::path::PathBuf { + // For non-wasm, we use the platform-specific directories + directories::ProjectDirs::from("tech", "cryptid", "BetterSignPeer") + .map(|dirs| dirs.data_dir().to_path_buf()) + .unwrap_or_else(|| "bs-peer".into()) +} + +impl DefaultBsPeer +where + KP: KeyManager + MultiSigner + CondSync, +{ + /// Create a new [BsPeer] with the given key provider [KeyManager] and [MultiSigner], + /// open a new platform-specific Blockstore, + /// start a [bs_p2p] network node, + /// set a network access [Client] to send commands, + /// link an event receiver for network [bs_p2p::events::PublicEvent]s. 
+ pub async fn new(key_provider: KP, config: platform::StartConfig) -> Result { + let blockstore = platform::Blockstore::new(directories()).await.unwrap(); + + let (tx_evts, rx_evts) = mpsc::channel(16); + let blockstore_clone = blockstore.clone(); + + let (network_client, peer_id) = platform::start(tx_evts, blockstore_clone, config).await?; + + Ok(Self { + network_client: Some(network_client), + plog: Default::default(), + key_provider, + blockstore, + events: Some(rx_evts), + peer_id: Some(peer_id), + }) + } +} + +impl BsPeer +where + KP: KeyManager + MultiSigner + CondSync, + BS: BlockstoreTrait + CondSync, +{ + /// Returns a clone of the p[p::Log] of the peer, if it exists. + pub fn plog(&self) -> Option { + self.plog.lock().unwrap().as_ref().cloned() + // { + // Ok(plog) => plog.clone(), + // Err(_) => { + // tracing::error!("Failed to acquire lock on Plog"); + // None + // } + // } + } + + /// use lock to replace current plog with given plog + fn set_plog(&mut self, plog: p::Log) -> Result<(), Error> { + let mut plog_lock = self.plog.lock().map_err(|_| Error::LockPosioned)?; + *plog_lock = Some(plog); + Ok(()) + } + + /// Create an offline (no network) [BsPeer] with a custom blockstore implementation + pub fn with_blockstore(key_provider: KP, blockstore: BS) -> Self { + Self { + key_provider, + plog: Default::default(), + blockstore, + network_client: Default::default(), + events: None, + peer_id: None, + } + } + + // Get a reference to the blockstore + pub fn blockstore(&self) -> &BS { + &self.blockstore + } + + /// Store CIDs from config to the blockstore + async fn store_ops(&self, ops: Vec) -> Result<(), Error> { + tracing::debug!("Storing CIDs in blockstore... {:?}", ops); + for params in ops { + if let OpParams::CidGen { + version, + target, + hash, + data, + .. 
+ } = params + { + // Create CID using same approach as in open.rs + let multi_cid = cid::Builder::new(version) + .with_target_codec(target) + .with_hash(&mh::Builder::new_from_bytes(hash, &data)?.try_build()?) + .try_build()?; + + // we need to convert multicid::Cid to cid:Cid first before putting it in the blockstore, + // as the two are different types. + let multi_cid_bytes: Vec = multi_cid.into(); + let cid = Cid::try_from(multi_cid_bytes)?; + + // Store the CID and data in blockstore + self.blockstore.put_keyed(&cid, &data).await?; + + tracing::debug!("Stored CID in blockstore: {:?}", cid); + + // get bytes back to verify + let stored_data = self.blockstore.get(&cid).await?; + if let Some(ref stored_data) = stored_data { + tracing::debug!("Stored data: {:?}", stored_data); + } else { + tracing::error!("No data found for CID: {:?}", cid); + } + + debug_assert!(stored_data.is_some(), "Data should be stored in blockstore"); + debug_assert_eq!(stored_data.unwrap(), data); + } + } + Ok(()) + } + + /// Store all the plog [provenance_log::Entry]s in the [blockstore::Blockstore] + async fn store_entries(&self) -> Result<(), Error> { + let (first_lock_cid, first_lock_bytes, entries) = { + let plog = self.plog.lock().map_err(|_| Error::LockPosioned)?; + + plog.as_ref() + .map(|p| { + let first_lock_cid_bytes: Vec = p.vlad.cid().clone().into(); + let first_lock_cid = Cid::try_from(first_lock_cid_bytes).unwrap(); + let first_lock_bytes: Vec = p.first_lock.clone().into(); + + (first_lock_cid, first_lock_bytes, p.entries.clone()) + }) + .ok_or(Error::PlogNotInitialized)? + }; + + self.blockstore + .put_keyed(&first_lock_cid, &first_lock_bytes) + .await?; + + // Put all the entries in the blockstore + for (multi_cid, entry) in entries.clone() { + let entry_bytes: Vec = entry.into(); + + // we need to convert multicid::Cid to cid:Cid first before putting it in the blockstore, + // as the two are different types. 
+ let multi_cid_bytes: Vec = multi_cid.into(); + let cid = Cid::try_from(multi_cid_bytes)?; + + self.blockstore.put_keyed(&cid, &entry_bytes).await?; + + tracing::debug!("Stored entry CID in blockstore: {:?}", cid); + + // Get the bytes we just put there to confirm it was stored correctly + let stored_data = self.blockstore.get(&cid).await?; + if let Some(ref stored_data) = stored_data { + tracing::debug!("Stored entry data: {:?}", stored_data); + } else { + tracing::error!("No data found for CID: {:?}", cid); + } + } + + tracing::debug!("Stored all Plog entries in blockstore"); + + Ok(()) + } + + /// Generate a new Plog with the given configuration. + pub async fn generate_with_config(&mut self, config: bs::open::Config) -> Result<(), Error> { + { + match self.plog.lock() { + Ok(plog) => { + if plog.is_some() { + tracing::error!("[generate_with_config]: Plog already exists, cannot generate a new one"); + return Err(Error::PlogAlreadyExists); + } else { + tracing::debug!("[generate_with_config]: Acquired lock on Plog"); + } + } + Err(_) => { + tracing::error!("[generate_with_config]: Failed to acquire lock on Plog"); + return Err(Error::LockPosioned); + } + } + } + + // Pass the key_provider directly as both key_manager and signer + let plog = bs::ops::open_plog(&config, &self.key_provider, &self.key_provider)?; + { + let verify_iter = &mut plog.verify(); + + for result in verify_iter { + if let Err(e) = result { + tracing::error!("Plog verification failed: {}", e); + return Err(Error::PlogVerificationFailed(e)); + } + } + } + + self.store_ops(config.into()).await?; + self.set_plog(plog)?; + self.store_entries().await?; + self.record_plog_to_dht().await?; + self.publish_to_pubsub().await?; + Ok(()) + } + + /// Generate a new Plog with the given lock and unlock scripts. 
+ pub async fn generate( + &mut self, + lock: impl AsRef, + unlock: impl AsRef, + ) -> Result<(), Error> { + { + let plog = self.plog.lock().map_err(|_| Error::LockPosioned)?; + if plog.is_some() { + tracing::error!("[generate]: Plog already exists, cannot generate a new one"); + return Err(Error::PlogAlreadyExists); + } + } + + let config = bs::open::Config::builder() + .vlad(VladParams::::default()) + .pubkey( + PubkeyParams::builder() + .codec(Codec::Ed25519Priv) + .build() + .into(), + ) + .entrykey( + FirstEntryKeyParams::builder() + .codec(Codec::Ed25519Priv) + .build() + .into(), + ) + .lock(Script::Code(Key::default(), lock.as_ref().into())) + .unlock(Script::Code(Key::default(), unlock.as_ref().into())) + .additional_ops(vec![]) + .build(); + + self.generate_with_config(config).await + } + + /// Update the BsPeer's Plog with new data. + pub async fn update(&mut self, config: UpdateConfig) -> Result<(), Error> { + { + let mut plog = self.plog.lock().map_err(|_| Error::LockPosioned)?; + let Some(ref mut plog) = *plog else { + return Err(Error::PlogNotInitialized); + }; + // Apply the update to the plog + bs::ops::update_plog(plog, &config, &self.key_provider, &self.key_provider)?; + + // Verify the updated plog + let verify_iter = &mut plog.verify(); + for result in verify_iter { + if let Err(e) = result { + tracing::error!("Plog verification failed after update: {}", e); + return Err(Error::PlogVerificationFailed(e)); + } + } + } + + // After successful update, store CIDs and publish DHT record + self.store_ops(config.into()).await?; + self.store_entries().await?; + self.record_plog_to_dht().await?; + self.publish_to_pubsub().await?; + Ok(()) + } + + /// Load a Plog into ths BsPeer. 
+ pub async fn load(&mut self, plog: p::Log) -> Result<(), Error> { + { + let plog = self.plog.lock().map_err(|_| Error::LockPosioned)?; + if plog.is_some() { + return Err(Error::PlogAlreadyExists); + } + } + + // Verify the plog + { + let verify_iter = &mut plog.verify(); + for result in verify_iter { + if let Err(e) = result { + tracing::error!("Plog verification failed: {}", e); + return Err(Error::PlogVerificationFailed(e)); + } + } + } + + // Store the plog, entries, and record to DHT + self.set_plog(plog)?; + self.store_entries().await?; + self.record_plog_to_dht().await?; + + Ok(()) + } + + // Publish to pubsub + pub async fn publish_to_pubsub(&self) -> Result<(), Error> { + // publish Vlad as topic with head cid bytes to pubsub + let (vlad_bytes, head) = { + let plog = self.plog.lock().map_err(|_| Error::LockPosioned)?; + let Some(ref plog) = *plog else { + return Err(Error::PlogNotInitialized); + }; + + let vlad_bytes: Vec = plog.vlad.clone().into(); + + (vlad_bytes, plog.head.clone()) + }; + if let Some(client) = &self.network_client { + client.publish(vlad_bytes, head.to_string()).await?; + tracing::debug!("Published Vlad to pubsub"); + } else { + tracing::warn!("Network client not available, skipping pubsub publication"); + } + Ok(()) + } + + /// Records the current Plog to the DHT. + /// + /// This method takes the current Plog (if available), extracts its `vlad` and `head` CIDs, + /// and attempts to put them into the DHT via the `network_client`. + /// + /// The `vlad` CID is used as the key, and the `head` CID's bytes are used as the value. + /// This allows other peers to discover and retrieve the latest Plog for a given `vlad`. + /// + /// # Errors + /// + /// Returns an `Error` if: + /// - The Plog cannot be locked (e.g., due to a poisoned lock). + /// - The Plog is not initialized (i.e., `self.plog` is `None`). + /// - The `network_client` is not available. + /// - There is an error putting the record into the DHT. 
+ pub async fn record_plog_to_dht(&mut self) -> Result<(), Error> { + let (vlad_bytes, head_bytes) = { + let plog = self.plog.lock().map_err(|_| Error::LockPosioned)?; + let Some(ref plog) = *plog else { + return Err(Error::PlogNotInitialized); + }; + + let vlad_bytes: Vec = plog.vlad.clone().into(); + let head_bytes: Vec = plog.head.clone().into(); + (vlad_bytes, head_bytes) + }; + + if let Some(client) = &self.network_client { + client.put_record(vlad_bytes, head_bytes).await?; + tracing::debug!("Recorded Plog to DHT"); + self.publish_to_pubsub().await?; + } else { + tracing::warn!("Network client not available, skipping DHT record"); + } + Ok(()) + } +} + +#[cfg(not(target_arch = "wasm32"))] +#[cfg(test)] +mod tests { + use crate::utils; + use tracing_subscriber::fmt; + + #[allow(dead_code)] + fn init_logger() { + let subscriber = fmt().with_env_filter("bs_peer=trace").finish(); + if let Err(e) = tracing::subscriber::set_global_default(subscriber) { + tracing::warn!("failed to set subscriber: {}", e); + } + } + + #[tokio::test] + async fn basic_test() { + // init_logger(); + utils::run_basic_test().await; + } + + #[tokio::test] + async fn in_memory_blockstore_test() { + // init_logger(); + utils::run_in_memory_blockstore_test().await; + } + + #[tokio::test] + async fn test_store_entries() { + // init_logger(); + utils::run_store_entries_test().await; + } + + #[tokio::test] + async fn run_update_test() { + // init_logger(); + utils::run_update_test().await; + } + + #[tokio::test] + async fn run_load_test() { + // init_logger(); + utils::run_load_test().await; + } + + #[tokio::test] + async fn test_peer_initialization() { + // init_logger(); + utils::run_peer_initialization_test().await; + } +} diff --git a/crates/bs-peer/src/platform.rs b/crates/bs-peer/src/platform.rs new file mode 100644 index 0000000..247b8cf --- /dev/null +++ b/crates/bs-peer/src/platform.rs @@ -0,0 +1,43 @@ +//! 
Platform specific code (Browser and Native) +use bs_traits::CondSend; +pub(super) mod common; +pub use common::RawBlakeBlock; + +/// Wasm32 platform code for browsers +#[cfg(target_arch = "wasm32")] +mod browser; + +/// Native platform code for non-WASM targets +#[cfg(not(target_arch = "wasm32"))] +mod native; + +use std::future::Future; + +#[cfg(target_arch = "wasm32")] +pub use browser::{start, Error, OPFSWrapped as Blockstore, StartConfig}; + +#[cfg(not(target_arch = "wasm32"))] +pub use native::{start, NativeBlockstore as Blockstore, NativeError as Error, StartConfig}; + +// #[cfg(target_arch = "wasm32")] +// pub use peerpiper_browser::{start, StartConfig}; +// +// #[cfg(not(target_arch = "wasm32"))] +// pub use native::{start, }; + +/// Spawn for tokio +// allow dead +#[allow(unused)] +#[cfg(not(target_arch = "wasm32"))] +pub fn spawn(f: impl Future + CondSend + 'static) { + tokio::spawn(f); +} + +/// Spawn for browser wasm32 +// allow dead +#[allow(dead_code)] +#[cfg(target_arch = "wasm32")] +pub fn spawn(f: impl Future + 'static) { + tracing::debug!("Spawning wasm_bingen future"); + wasm_bindgen_futures::spawn_local(f); +} diff --git a/crates/bs-peer/src/platform/browser.rs b/crates/bs-peer/src/platform/browser.rs new file mode 100644 index 0000000..e75b5b9 --- /dev/null +++ b/crates/bs-peer/src/platform/browser.rs @@ -0,0 +1,87 @@ +//! browser specific bindings +mod error; +pub use error::Error; + +use wasm_bindgen_futures::spawn_local; +mod opfs; +use blockstore::Blockstore; +use bs_p2p::{ + events::{ + api::{self, Client}, + PublicEvent, + }, + swarm, BehaviourBuilder, +}; +use futures::channel::{mpsc, oneshot}; +use libp2p::multiaddr::{Multiaddr, Protocol}; +use libp2p::PeerId; +pub use opfs::OPFSWrapped; + +/// Config for starting +/// - libp2p_endpoints: List of libp2p endpoints to connect to. +/// - base_path: Path to the base directory for the blockstore and other data. 
+#[derive(Clone, Default)] +pub struct StartConfig { + // TODO: This native node can dial other native nodes, like BOOTNODES + pub libp2p_endpoints: Vec, + pub base_path: Option, +} + +pub async fn start( + tx: mpsc::Sender, + blockstore: B, + config: StartConfig, +) -> Result<(Client, PeerId), Error> { + let StartConfig { + libp2p_endpoints, + base_path, + } = config; + + tracing::info!("Spawning swarm. Using multiaddr {:?}", libp2p_endpoints); + + let behaviour_builder = BehaviourBuilder::new(blockstore); + + let swarm = swarm::create( + |key, relay_behaviour| behaviour_builder.build(key, relay_behaviour), + base_path, + ) + .await?; + + let peer_id = *swarm.local_peer_id(); + + let (mut network_client, network_events, network_event_loop) = api::new(swarm).await; + + spawn_local(async move { + if let Err(e) = network_event_loop.run().await { + tracing::error!("Network event loop failed: {}", e); + } + }); + + for endpoint in libp2p_endpoints.iter() { + let mut remote_address = endpoint.parse::()?; + + match network_client.dial(remote_address.clone()).await { + Ok(_) => { + tracing::info!("☎️ 🎉 Dialed remote peer at {}", remote_address); + } + Err(err) => { + tracing::warn!("Failed to dial remote peer at {}: {}", remote_address, err); + } + } + + // add remote peer_id as explicit peer so we can gossipsub to it with minimal peers available + if let Some(Protocol::P2p(rpid)) = remote_address.pop() { + network_client.add_peer(rpid).await?; + tracing::info!("Added remote peer_id as explicit peer: {:?}", rpid); + } + } + + tracing::info!("Running network client loop:"); + + let mut client_clone = network_client.clone(); + spawn_local(async move { + client_clone.run(network_events, tx).await; + }); + + Ok((network_client, peer_id)) +} diff --git a/crates/bs-peer/src/platform/browser/error.rs b/crates/bs-peer/src/platform/browser/error.rs new file mode 100644 index 0000000..cf66dac --- /dev/null +++ b/crates/bs-peer/src/platform/browser/error.rs @@ -0,0 +1,46 @@ 
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    /// From
+    #[error("Infallible")]
+    Infallible(#[from] std::convert::Infallible),
+    /// From
+    #[error("Multiaddr error")]
+    Multiaddr(#[from] libp2p::multiaddr::Error),
+    /// From
+    #[error("Dial error")]
+    Dial(#[from] libp2p::swarm::DialError),
+    /// From core::Error
+    #[error("Core error {0}")]
+    Core(#[from] bs_p2p::Error),
+    /// From futures channel mpsc send Error
+    #[error("Send error {0}")]
+    Send(#[from] futures::channel::mpsc::SendError),
+
+    /// anyhow
+    #[error("Anyhow error {0}")]
+    Anyhow(#[from] anyhow::Error),
+
+    /// From String
+    #[error("{0}")]
+    String(String),
+
+    /// From
+    #[error("IO error {0}")]
+    Io(#[from] std::io::Error),
+
+    /// Error creating OPFS Blockstore
+    #[error("OPFS Blockstore Error: {0}")]
+    OPFSBlockstore(String),
+}
+
+impl From<String> for Error {
+    fn from(s: String) -> Self {
+        Error::String(s)
+    }
+}
+
+impl From<&str> for Error {
+    fn from(s: &str) -> Self {
+        Error::String(s.to_string())
+    }
+}
diff --git a/crates/bs-peer/src/platform/browser/opfs.rs b/crates/bs-peer/src/platform/browser/opfs.rs
new file mode 100644
index 0000000..c3decbc
--- /dev/null
+++ b/crates/bs-peer/src/platform/browser/opfs.rs
@@ -0,0 +1,199 @@
+//! Origin Private File System (OPFS) blockstore implementation.
+//!
+//! There are two types: [`OPFSBlockstore`] which is the actual blockstore implementation
+//! and [`OPFSWrapped`] which wraps it in a `SendWrapper` to make it [std::marker::Send].
+use super::Error;
+use wasm_bindgen::JsValue;
+use wasm_bindgen_futures::JsFuture;
+use web_sys::{
+    window, Blob, FileSystemDirectoryHandle, FileSystemFileHandle, FileSystemGetFileOptions,
+    FileSystemWritableFileStream,
+};
+
+use blockstore::Blockstore;
+use send_wrapper::SendWrapper;
+use std::ops::Deref;
+
+/// Uses the Origin Private File System (OPFS) to store blocks
+#[derive(Debug, Clone)]
+pub struct OPFSBlockstore {
+    inner: FileSystemDirectoryHandle,
+}
+
+impl OPFSBlockstore {
+    /// Create a new OPFSBlockstore
+    pub async fn new() -> Result<Self, JsValue> {
+        let window = window().ok_or_else(|| JsValue::from_str("No window available"))?;
+        let navigator = window.navigator();
+        let storage = navigator.storage();
+
+        let directory_handle: FileSystemDirectoryHandle =
+            JsFuture::from(storage.get_directory()).await?.into();
+
+        Ok(Self {
+            inner: directory_handle,
+        })
+    }
+
+    /// Put block bytes into the OPFS under the given name
+    pub async fn put_opfs(&self, name: &str, data: Vec<u8>) -> Result<(), JsValue> {
+        let options = FileSystemGetFileOptions::new();
+        options.set_create(true);
+
+        let file_handle: FileSystemFileHandle =
+            JsFuture::from(self.inner.get_file_handle_with_options(name, &options))
+                .await?
+ .into(); + + let writable: FileSystemWritableFileStream = + JsFuture::from(file_handle.create_writable()).await?.into(); + + JsFuture::from(writable.write_with_u8_array(&data)?).await?; + + JsFuture::from(writable.close()).await?; + + Ok(()) + } + + /// Get block bytes from the OPFS by name + pub async fn get_opfs(&self, name: &str) -> Result, JsValue> { + let file_handle_result = JsFuture::from(self.inner.get_file_handle(name)).await; + + match file_handle_result { + Ok(handle) => { + let file_handle: FileSystemFileHandle = handle.into(); + + let file: Blob = JsFuture::from(file_handle.get_file()).await?.into(); + let array_bufer = JsFuture::from(file.array_buffer()).await?; + let u8_array = js_sys::Uint8Array::new(&array_bufer); + + Ok(u8_array.to_vec()) + } + Err(_) => Err(JsValue::from_str("File not found")), + } + } +} + +impl Blockstore for OPFSBlockstore { + async fn get( + &self, + cid: &cid::CidGeneric, + ) -> blockstore::Result>> { + match self.get_opfs(&cid.to_string()).await { + Ok(data) => Ok(Some(data)), + Err(_) => Ok(None), + } + } + + async fn put_keyed( + &self, + cid: &cid::CidGeneric, + data: &[u8], + ) -> blockstore::Result<()> { + self.put_opfs(&cid.to_string(), data.to_vec()) + .await + .map_err(|_| blockstore::Error::StoredDataError("Failed to put data".to_string()))?; + + Ok(()) + } + + async fn remove(&self, _cid: &cid::CidGeneric) -> blockstore::Result<()> { + Ok(()) + } + + async fn close(self) -> blockstore::Result<()> { + Ok(()) + } +} + +/// A Wrapper sturct around OPFSBlockstore so that we can make it [Send] +/// +/// # Example +/// ```no_run +/// use wasm_bindgen_futures::spawn_local; +/// use peerpiper_browser::opfs::OPFSWrapped; +/// use peerpiper_core::Blockstore; +/// +/// spawn_local(async move { +/// let Ok(blockstore) = OPFSWrapped::new().await else { +/// panic!("Failed to create OPFSWrapped"); +/// }; +/// +/// // Use blockstore when starting peerpiper +/// +/// // 16 is arbitrary, but should be enough for now +/// let 
(tx_evts, mut rx_evts) = mpsc::channel(16); +/// +/// // client sync oneshot +/// let (tx_client, rx_client) = oneshot::channel(); +/// +/// // command_sender will be used by other wasm_bindgen functions to send commands to the network +/// // so we will need to wrap it in a Mutex or something to make it thread safe. +/// let (network_command_sender, network_command_receiver) = tokio::sync::mpsc::channel(8); +/// +/// let bstore = blockstore.clone(); +/// +/// spawn_local(async move { +/// peerpiper::start( +/// tx_evts, +/// network_command_receiver, +/// tx_client, +/// libp2p_endpoints, +/// bstore, +/// ) +/// .await +/// .expect("never end") +/// }); +/// +/// // wait on rx_client to get the client handle +/// let client_handle = rx_client.await?; +/// +/// commander +/// .with_network(network_command_sender) +/// .with_client(client_handle); +/// }); +/// ``` +#[derive(Debug, Clone)] +pub struct OPFSWrapped { + inner: SendWrapper, +} + +impl OPFSWrapped { + /// Create a new OPFSWrapped blockstore + // Takes a noop string parameter to match the native blockstore interface + pub async fn new(_noop: String) -> Result { + let handler = OPFSBlockstore::new() + .await + .map_err(|e| Error::OPFSBlockstore(e.as_string().unwrap_or_default()))?; + Ok(Self { + inner: SendWrapper::new(handler), + }) + } +} + +impl Blockstore for OPFSWrapped { + async fn get( + &self, + cid: &cid::CidGeneric, + ) -> blockstore::Result>> { + tracing::debug!("Getting block from OPFS for CID: {:?}", cid); + self.inner.deref().get(cid).await + } + + async fn put_keyed( + &self, + cid: &cid::CidGeneric, + data: &[u8], + ) -> blockstore::Result<()> { + self.inner.deref().put_keyed(cid, data).await + } + + async fn remove(&self, _cid: &cid::CidGeneric) -> blockstore::Result<()> { + //todo!(); + Ok(()) + } + + async fn close(self) -> blockstore::Result<()> { + Ok(()) + } +} diff --git a/crates/bs-peer/src/platform/common.rs b/crates/bs-peer/src/platform/common.rs new file mode 100644 index 
0000000..55e04fc --- /dev/null +++ b/crates/bs-peer/src/platform/common.rs @@ -0,0 +1,22 @@ +//! Common types and constants used across the platform. + +use blockstore::block::{Block, CidError}; +use cid::Cid; +use multihash_codetable::{Code, MultihashDigest}; + +const RAW_CODEC: u64 = 0x55; + +/// A block that is just raw bytes encoded into a block +/// using the `RAW_CODEC` and `Blake3_256` hash function. +pub struct RawBlakeBlock(pub Vec); + +impl Block<64> for RawBlakeBlock { + fn cid(&self) -> Result { + let hash = Code::Blake3_256.digest(&self.0); + Ok(Cid::new_v1(RAW_CODEC, hash)) + } + + fn data(&self) -> &[u8] { + self.0.as_ref() + } +} diff --git a/crates/bs-peer/src/platform/native.rs b/crates/bs-peer/src/platform/native.rs new file mode 100644 index 0000000..bcfa7f0 --- /dev/null +++ b/crates/bs-peer/src/platform/native.rs @@ -0,0 +1,105 @@ +//! Native specific code +mod native_blockstore; +use bs_p2p::{ + events::{ + api::{self, Client}, + PublicEvent, + }, + swarm, BehaviourBuilder, +}; +pub use native_blockstore::NativeBlockstore; + +mod error; +pub use error::NativeError; + +use blockstore::Blockstore; +use futures::channel::{mpsc, oneshot}; +use libp2p::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; +use std::net::{Ipv4Addr, Ipv6Addr}; +use tokio::spawn; + +use crate::Error; + +/// Config for starting the network. +/// - libp2p_endpoints: List of libp2p endpoints to connect to. +/// - base_path: Path to the base directory for the blockstore and other data. +#[derive(Clone, Default)] +pub struct StartConfig { + // TODO: This native node can dial other native nodes, like BOOTNODES + pub libp2p_endpoints: Vec, + pub base_path: Option, +} + +/// Create the swarm, and get handles to control it. +/// Any protocols that are passed will be updated with the incoming streams. 
+pub async fn start( + tx: mpsc::Sender, + blockstore: B, + config: StartConfig, +) -> Result<(Client, PeerId), NativeError> { + let StartConfig { + libp2p_endpoints: _, + base_path, + } = config; + + let behaviour_builder = BehaviourBuilder::new(blockstore); + + let mut swarm = swarm::create( + |key, relay_behaviour| behaviour_builder.build(key, relay_behaviour), + base_path, + ) + .await?; + + let peer_id = *swarm.local_peer_id(); + + swarm + .behaviour_mut() + .kad + .set_mode(Some(libp2p::kad::Mode::Server)); + + let peer_id = *swarm.local_peer_id(); + tracing::info!("Local peer id: {:?}", peer_id); + + let (mut network_client, network_events, network_event_loop) = api::new(swarm).await; + + // We need to start the network event loop first in order to listen for our address + tokio::spawn(async move { + if let Err(e) = network_event_loop.run().await { + tracing::error!("Network event loop failed: {}", e); + } + }); + + let address_webrtc = Multiaddr::from(Ipv6Addr::UNSPECIFIED) + .with(Protocol::Udp(0)) + .with(Protocol::WebRTCDirect); + + let addr_webrtc_ipv4 = Multiaddr::from(Ipv4Addr::UNSPECIFIED) + .with(Protocol::Udp(0)) + .with(Protocol::WebRTCDirect); + + for addr in [ + address_webrtc, + addr_webrtc_ipv4, + // address_quic, address_tcp + ] { + tracing::info!("Listening on {:?}", addr.clone()); + network_client.start_listening(addr).await?; + } + + // for peer in &BOOTNODES { + // let addr = Multiaddr::from_str("/dnsaddr/bootstrap.libp2p.io")? 
+ // .with(Protocol::P2p(libp2p::PeerId::from_str(peer)?)); + // network_client.dial(addr).await?; + // } + + let mut client_clone = network_client.clone(); + + tokio::spawn(async move { + client_clone.run(network_events, tx).await; + }); + + Ok((network_client, peer_id)) +} diff --git a/crates/bs-peer/src/platform/native/error.rs b/crates/bs-peer/src/platform/native/error.rs new file mode 100644 index 0000000..60ab227 --- /dev/null +++ b/crates/bs-peer/src/platform/native/error.rs @@ -0,0 +1,41 @@ +#[derive(thiserror::Error, Debug)] +pub enum NativeError { + #[error("Error: {0}")] + P2p(#[from] bs_p2p::Error), + + /// From + #[error("Multiaddr error")] + Multiaddr(#[from] libp2p::multiaddr::Error), + + /// From + #[error("Identity error")] + Identity(#[from] libp2p::identity::ParseError), + + /// No data directory + #[error("No data directory")] + NoDataDir, + + /// Input output error + #[error("IO error")] + Io(#[from] std::io::Error), + + /// from anyhow + #[error("error")] + Anyhow(#[from] anyhow::Error), + + // From + #[error("Error: {0}")] + String(String), +} + +impl From for NativeError { + fn from(s: String) -> Self { + NativeError::String(s) + } +} + +impl From<&str> for NativeError { + fn from(s: &str) -> Self { + NativeError::String(s.to_string()) + } +} diff --git a/crates/bs-peer/src/platform/native/native_blockstore.rs b/crates/bs-peer/src/platform/native/native_blockstore.rs new file mode 100644 index 0000000..1eaf7dc --- /dev/null +++ b/crates/bs-peer/src/platform/native/native_blockstore.rs @@ -0,0 +1,154 @@ +//! 
The native platform impl of [blockstore::Blockstore] +use std::path::PathBuf; + +//use bytes::Bytes; +//use tokio::io::AsyncReadExt as _; +//use wnfs_unixfs_file::builder::FileBuilder; +//use wnfs_unixfs_file::unixfs::UnixFsFile; + +use blockstore::Blockstore; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + /// From core::Error + #[error("Core error {0}")] + P2p(#[from] bs_p2p::Error), + + /// From + #[error("Multiaddr error")] + Multiaddr(#[from] libp2p::multiaddr::Error), + + /// From + #[error("Identity error")] + Identity(#[from] libp2p::identity::ParseError), + + /// No data directory + #[error("No data directory")] + NoDataDir, + + /// Input output error + #[error("IO error")] + Io(#[from] std::io::Error), + // /// from anyhow + // #[error("error")] + // Anyhow(#[from] anyhow::Error), +} + +#[derive(Clone, Debug)] +pub struct NativeBlockstore { + directory: PathBuf, +} + +impl NativeBlockstore { + /// Creates a new [NativeBlockstore] + /// with the given directory path. + pub async fn new(directory: PathBuf) -> Result { + // us tokio to create the directory if it does not exist + if !directory.exists() { + tokio::fs::create_dir_all(&directory).await?; + } + Ok(Self { directory }) + } +} + +impl Blockstore for NativeBlockstore { + async fn get( + &self, + cid: &cid::CidGeneric, + ) -> blockstore::Result>> { + let path = self.directory.join(cid.to_string()); + + if !path.exists() { + return Ok(None); + } + + let bytes = + std::fs::read(&path).map_err(|e| blockstore::Error::StoredDataError(e.to_string()))?; + + Ok(Some(bytes)) + } + + async fn put_keyed( + &self, + cid: &cid::CidGeneric, + data: &[u8], + ) -> blockstore::Result<()> { + let path = self.directory.join(cid.to_string()); + + std::fs::write(&path, data) + .map_err(|e| blockstore::Error::StoredDataError(e.to_string()))?; + + Ok(()) + } + + async fn remove(&self, cid: &cid::CidGeneric) -> blockstore::Result<()> { + let path = self.directory.join(cid.to_string()); + + 
std::fs::remove_file(&path) + .map_err(|e| blockstore::Error::StoredDataError(e.to_string()))?; + + Ok(()) + } + + async fn close(self) -> blockstore::Result<()> { + Ok(()) + } +} + +///// A Chunker that takes bytes and chunks them +//pub async fn put_chunks( +// blockstore: B, +// data: Vec, +//) -> Result { +// let root_cid = FileBuilder::new() +// .content_bytes(data.clone()) +// .fixed_chunker(256 * 1024) +// .build()? +// .store(&blockstore) +// .await?; +// +// Ok(root_cid) +//} + +#[cfg(test)] +mod tests { + use crate::platform::common::RawBlakeBlock; + + use super::*; + use blockstore::block::Block; + use tempfile::tempdir; + + #[tokio::test] + async fn test_native_blockstore() { + let tempdir = tempdir().unwrap().path().to_path_buf(); + let blockstore = NativeBlockstore::new(tempdir).await.unwrap(); + + let data = b"hello world".to_vec(); + + let block = RawBlakeBlock(data.clone()); + let cid = block.cid().unwrap(); + + blockstore.put(block).await.unwrap(); + let retrieved_data = blockstore.get(&cid).await.unwrap(); + + assert_eq!(data, retrieved_data.unwrap()); + } + + #[tokio::test] + async fn test_put_large_bytes() { + let tempdir = tempdir().unwrap().path().to_path_buf(); + let blockstore = NativeBlockstore::new(tempdir).await.unwrap(); + + let len = 1 << 19; // 512KB, 2^19 bytes + let data = vec![42; len]; + + let block = RawBlakeBlock(data.clone()); + let root_cid = block.cid().unwrap(); + + blockstore.put(block).await.unwrap(); + + let retrieved_data = blockstore.get(&root_cid).await.unwrap(); + + assert_eq!(data, retrieved_data.unwrap()); + } +} diff --git a/crates/bs-peer/src/platform/native/network.rs b/crates/bs-peer/src/platform/native/network.rs new file mode 100644 index 0000000..e69de29 diff --git a/crates/bs-peer/src/utils.rs b/crates/bs-peer/src/utils.rs new file mode 100644 index 0000000..9d26c8f --- /dev/null +++ b/crates/bs-peer/src/utils.rs @@ -0,0 +1,928 @@ +//! 
Common tests between web and native + +use crate::peer::DefaultBsPeer; +use crate::platform::StartConfig; +use crate::{peer::BsPeer, Error}; +use ::cid::Cid; +use blockstore::Blockstore as BlockstoreTrait; +use blockstore::InMemoryBlockstore; +use bs::params::vlad::FirstEntryKeyParams; +use bs::params::vlad::VladParams; +use bs::update::OpParams; +use bs::{ + config::sync::{KeyManager, MultiSigner}, + params::anykey::PubkeyParams, +}; +use bs_traits::CondSync; +use bs_wallets::memory::InMemoryKeyManager; +use multicid::cid; +use multicodec::Codec; +use multihash::mh; +use multikey::mk; +use provenance_log::Key; +use provenance_log::Script; +use provenance_log::{entry::Field, key::key_paths::ValidatedKeyParams as _}; + +/// Basic test fixture without network capabilities +pub struct TestFixture { + pub peer: BsPeer, InMemoryBlockstore<64>>, + pub lock_script: String, + pub unlock_script: String, +} + +/// Test fixture with network capabilities +pub struct NetworkTestFixture { + pub peer: DefaultBsPeer>, + pub lock_script: String, + pub unlock_script: String, +} + +impl BsPeer +where + KP: KeyManager + MultiSigner + CondSync, + BS: BlockstoreTrait + CondSync, +{ + /// Helper to create CID from the same parameters for testing + pub fn verify_cid_stored( + &self, + version: Codec, + target: Codec, + hash: Codec, + data: &[u8], + ) -> Result { + // Create CID + let multi_cid = cid::Builder::new(version) + .with_target_codec(target) + .with_hash(&mh::Builder::new_from_bytes(hash, data)?.try_build()?) 
+ .try_build()?; + + // Convert to cid::Cid + let multi_cid_bytes: Vec = multi_cid.into(); + let cid = Cid::try_from(multi_cid_bytes)?; + + // Check if stored in blockstore + // self.blockstore().has(&cid).await?; + Ok(cid) + } +} + +/// Creates test scripts to be used in test fixtures +pub fn create_default_scripts() -> (String, String) { + let entry_key = Field::ENTRY; + let proof_key = Field::PROOF; + let pubkey = PubkeyParams::KEY_PATH; + + let unlock_script = format!( + r#" + // push the serialized Entry as the message + push("{entry_key}"); + + // push the proof data + push("{proof_key}"); + "# + ); + + let lock_script = format!( + r#" + // then check a possible threshold sig... + check_signature("/recoverykey", "{entry_key}") || + + // then check a possible pubkey sig... + check_signature("{pubkey}", "{entry_key}") || + + // then the pre-image proof... + check_preimage("/hash") + "# + ); + + (lock_script, unlock_script) +} + +pub async fn setup_test_peer() -> TestFixture { + // Set up key manager + let key_manager = InMemoryKeyManager::::default(); + + // Create an in-memory blockstore + let blockstore = InMemoryBlockstore::<64>::new(); + + // Create peer with the in-memory blockstore + let peer = BsPeer::with_blockstore(key_manager, blockstore); + + let (lock_script, unlock_script) = create_default_scripts(); + + TestFixture { + peer, + lock_script, + unlock_script, + } +} + +/// Setup a peer with network capabilities +pub async fn setup_network_test_peer() -> Result { + // Set up key manager + let key_manager = InMemoryKeyManager::::default(); + + // Create a network-enabled peer + let peer = DefaultBsPeer::new(key_manager, StartConfig::default()).await?; + + let (lock_script, unlock_script) = create_default_scripts(); + + Ok(NetworkTestFixture { + peer, + lock_script, + unlock_script, + }) +} + +pub async fn setup_initialized_peer() -> TestFixture { + let mut fixture = setup_test_peer().await; + + // Initialize the peer + let res = fixture + .peer + 
.generate(&fixture.lock_script, &fixture.unlock_script) + .await; + debug_assert!(res.is_ok(), "Expected successful creation of peer"); + fixture +} + +/// Setup an initialized peer with network capabilities +pub async fn setup_initialized_network_peer() -> Result { + let mut fixture = setup_network_test_peer().await?; + + // Initialize the peer + let res = fixture + .peer + .generate(&fixture.lock_script, &fixture.unlock_script) + .await; + + if let Err(e) = res { + tracing::error!("Failed to initialize network peer: {:?}", e); + return Err(e); + } + + Ok(fixture) +} + +pub async fn run_basic_test() { + tracing::info!("Starting basic_test"); + tracing::debug!("Initializing key manager and peer"); + + let seed: [u8; 32] = [42; 32]; + let codec = Codec::Ed25519Priv; + let _mk = mk::Builder::new_from_seed(codec, &seed) + .unwrap() + .try_build() + .unwrap(); + + let mut fixture = setup_test_peer().await; + + // Now we create the peer with valid scripts + let res = fixture + .peer + .generate(&fixture.lock_script, &fixture.unlock_script) + .await; + + // Check if the creation was successful + assert!(res.is_ok(), "Expected successful creation of peer"); + + // Check if the plog is initialized + assert!( + fixture.peer.plog().is_some(), + "Expected plog to be initialized" + ); + + // Check if the plog can be verified + let plog = fixture.peer.plog().unwrap(); + let verify_iter = &mut plog.verify(); + for result in verify_iter { + if let Err(e) = result { + panic!("Plog verification failed: {}", e); + } + } +} + +/// Test basic peer initialization with network capabilities +pub async fn run_basic_network_test() { + tracing::info!("Starting basic network test"); + + // Create a networked peer + let mut fixture = setup_network_test_peer() + .await + .expect("Should create network peer"); + + // Generate with valid scripts + let res = fixture + .peer + .generate(&fixture.lock_script, &fixture.unlock_script) + .await; + + // Check if creation was successful + 
assert!(res.is_ok(), "Expected successful creation of network peer"); + + // Verify network client exists + assert!( + fixture.peer.network_client.is_some(), + "Network client should be present" + ); + + // Verify events channel exists + assert!( + fixture.peer.events.is_some(), + "Events channel should be present" + ); + + // Verify PeerId exists + assert!(fixture.peer.peer_id.is_some(), "PeerId should be present"); + + // Check if the plog is initialized + assert!( + fixture.peer.plog().is_some(), + "Expected plog to be initialized in network peer" + ); + + // Check if the plog can be verified + let plog = fixture.peer.plog().unwrap(); + let verify_iter = &mut plog.verify(); + for result in verify_iter { + if let Err(e) = result { + panic!("Plog verification failed in network peer: {}", e); + } + } +} + +// Add more shared test functions here +pub async fn run_in_memory_blockstore_test() { + tracing::info!("Starting in_memory_blockstore_test"); + + let mut fixture = setup_test_peer().await; + + // Create test data + let test_data = b"test data".to_vec(); + + let hash = Codec::Sha2256; + let target = Codec::Raw; + let version = Codec::Cidv1; + + // Create the peer with scripts and with CIDs to store + // Add some OpParams::CidGen entries to test blockstore storage + // This is a bit awkward: + // We're stating the PubkeyParams here, yet + // the actual key is in the wallet. Would be better if one came from the other, yeah? 
+ let config = bs::open::Config::builder() + .vlad(VladParams::::default()) + .pubkey( + PubkeyParams::builder() + .codec(Codec::Ed25519Priv) + .build() + .into(), + ) + .entrykey( + FirstEntryKeyParams::builder() + .codec(Codec::Ed25519Priv) + .build() + .into(), + ) + .lock(Script::Code(Key::default(), fixture.lock_script.clone())) + .unlock(Script::Code(Key::default(), fixture.unlock_script.clone())) + .additional_ops(vec![OpParams::CidGen { + key: Key::try_from("/test/image/").unwrap(), + version, + target, + hash, + inline: true, + data: test_data.clone(), + }]) + .build(); + + // Create peer with this config + let res = fixture.peer.generate_with_config(config).await; + + match &res { + Ok(_) => tracing::info!("create_with_config succeeded"), + Err(e) => tracing::error!("create_with_config failed: {:?}", e), + } + + assert!(res.is_ok(), "Expected successful creation of peer"); + + // verify the plog + let plog = fixture.peer.plog(); + + // Verify the CID was stored + let cid = { + let binding = plog.unwrap(); + let verify_iter = &mut binding.verify(); + for result in verify_iter { + if let Err(e) = result { + tracing::error!("Plog verification failed: {}", e); + panic!("Plog verification failed: {}", e); + } + } + fixture + .peer + .verify_cid_stored(version, target, hash, &test_data) + .unwrap() + }; + + let stored = fixture.peer.blockstore().has(&cid).await.unwrap(); + + assert!(stored, "CID should be stored in blockstore"); + + // If you want to verify the actual data: + let multi_cid = cid::Builder::new(version) + .with_target_codec(target) + .with_hash( + &mh::Builder::new_from_bytes(hash, &test_data) + .unwrap() + .try_build() + .unwrap(), + ) + .try_build() + .unwrap(); + + let multi_cid_bytes: Vec = multi_cid.into(); + let cid = Cid::try_from(multi_cid_bytes).unwrap(); + + let stored_data = fixture.peer.blockstore().get(&cid).await.unwrap(); + assert!(stored_data.is_some(), "Data should be in blockstore"); + assert_eq!( + stored_data.unwrap(), + 
test_data, + "Stored data should match original" + ); +} + +/// Test data storage in a network-enabled peer's blockstore +pub async fn run_network_blockstore_test() { + tracing::info!("Starting network blockstore test"); + + let mut fixture = setup_network_test_peer() + .await + .expect("Should create network peer"); + + // Create test data + let test_data = b"network test data".to_vec(); + + let hash = Codec::Sha2256; + let target = Codec::Raw; + let version = Codec::Cidv1; + + // Create a config with CID to store + let config = bs::open::Config::builder() + .vlad(VladParams::::default()) + .pubkey( + PubkeyParams::builder() + .codec(Codec::Ed25519Priv) + .build() + .into(), + ) + .entrykey( + FirstEntryKeyParams::builder() + .codec(Codec::Ed25519Priv) + .build() + .into(), + ) + .lock(Script::Code(Key::default(), fixture.lock_script.clone())) + .unlock(Script::Code(Key::default(), fixture.unlock_script.clone())) + .additional_ops(vec![OpParams::CidGen { + key: Key::try_from("/network/test/data/").unwrap(), + version, + target, + hash, + inline: true, + data: test_data.clone(), + }]) + .build(); + + // Create peer with this config + let res = fixture.peer.generate_with_config(config).await; + assert!( + res.is_ok(), + "Expected successful creation of network peer with CID" + ); + + // Verify the CID was stored + let cid = fixture + .peer + .verify_cid_stored(version, target, hash, &test_data) + .unwrap(); + + let stored = fixture.peer.blockstore().has(&cid).await.unwrap(); + + assert!(stored, "CID should be stored in network peer blockstore"); + + // Create multicid and verify data + let multi_cid = cid::Builder::new(version) + .with_target_codec(target) + .with_hash( + &mh::Builder::new_from_bytes(hash, &test_data) + .unwrap() + .try_build() + .unwrap(), + ) + .try_build() + .unwrap(); + + let multi_cid_bytes: Vec = multi_cid.into(); + let cid = Cid::try_from(multi_cid_bytes).unwrap(); + + let stored_data = fixture.peer.blockstore().get(&cid).await.unwrap(); + 
assert!( + stored_data.is_some(), + "Data should be in network peer blockstore" + ); + assert_eq!( + stored_data.unwrap(), + test_data, + "Retrieved data from network peer should match original" + ); +} + +pub async fn run_store_entries_test() { + // init_logger(); + tracing::info!("Starting test_store_entries"); + + let fixture = setup_initialized_peer().await; + + // The peer is initialized, so store_entries has already been called + // Let's verify the stored entries + + // Get the first lock CID from the plog for verification + let plog = fixture.peer.plog(); + let cid = { + let binding = plog.as_ref().unwrap(); + let first_lock_cid = binding.vlad.cid(); + let first_lock_cid_bytes: Vec = first_lock_cid.clone().into(); + + Cid::try_from(first_lock_cid_bytes).unwrap() + }; + + // Verify first lock is in blockstore + let stored_first_lock = fixture.peer.blockstore().has(&cid).await.unwrap(); + assert!( + stored_first_lock, + "First lock should be stored in blockstore" + ); + + // Verify we can retrieve the first lock data + let first_lock_data = fixture.peer.blockstore().get(&cid).await.unwrap(); + assert!( + first_lock_data.is_some(), + "First lock data should be retrievable" + ); + + let entries = { + let binding = plog.as_ref().unwrap(); + binding.entries.clone() + }; + + // Verify each entry is stored in the blockstore + for (multi_cid, _) in entries.iter() { + let multi_cid_bytes: Vec = multi_cid.clone().into(); + let entry_cid = Cid::try_from(multi_cid_bytes).unwrap(); + + let stored_entry = fixture.peer.blockstore().has(&entry_cid).await.unwrap(); + assert!(stored_entry, "Entry should be stored in blockstore"); + + let entry_data = fixture.peer.blockstore().get(&entry_cid).await.unwrap(); + assert!(entry_data.is_some(), "Entry data should be retrievable"); + } +} + +/// Test storing entries in a network-enabled peer +pub async fn run_network_store_entries_test() { + tracing::info!("Starting network_store_entries_test"); + + let fixture = 
setup_initialized_network_peer() + .await + .expect("Should create initialized network peer"); + + let plog = fixture.peer.plog(); + + // Get the first lock CID from the plog for verification + let cid = { + let binding = plog.as_ref().unwrap(); + let first_lock_cid = binding.vlad.cid(); + let first_lock_cid_bytes: Vec = first_lock_cid.clone().into(); + Cid::try_from(first_lock_cid_bytes).unwrap() + }; + + // Verify first lock is in blockstore + let stored_first_lock = fixture.peer.blockstore().has(&cid).await.unwrap(); + assert!( + stored_first_lock, + "First lock should be stored in network peer blockstore" + ); + + // Verify we can retrieve the first lock data + let first_lock_data = fixture.peer.blockstore().get(&cid).await.unwrap(); + assert!( + first_lock_data.is_some(), + "First lock data should be retrievable from network peer" + ); + + let entries = { + let binding = plog.unwrap(); + binding.entries.clone() + }; + + // Verify each entry is stored in the blockstore + for (multi_cid, _) in entries.iter() { + let multi_cid_bytes: Vec = multi_cid.clone().into(); + let entry_cid = Cid::try_from(multi_cid_bytes).unwrap(); + + let stored_entry = fixture.peer.blockstore().has(&entry_cid).await.unwrap(); + assert!( + stored_entry, + "Entry should be stored in network peer blockstore" + ); + + let entry_data = fixture.peer.blockstore().get(&entry_cid).await.unwrap(); + assert!( + entry_data.is_some(), + "Entry data should be retrievable from network peer" + ); + } +} + +pub async fn run_update_test() { + tracing::info!("Starting update_test"); + + // Setup peer with initial configuration + let mut fixture = setup_initialized_peer().await; + + // Create some new data to update with + let new_data = b"updated data".to_vec(); + let hash = Codec::Sha2256; + let target = Codec::Raw; + let version = Codec::Cidv1; + + // Create an update configuration + let update_config = bs::update::Config::builder() + .entry_signing_key(PubkeyParams::KEY_PATH.into()) + 
.unlock(Script::Code(Key::default(), fixture.unlock_script.clone())) + .additional_ops(vec![OpParams::CidGen { + key: Key::try_from("/test/updated/").unwrap(), + version, + target, + hash, + inline: true, + data: new_data.clone(), + }]) + .build(); + + // Apply the update + let res = fixture.peer.update(update_config).await; + assert!(res.is_ok(), "Expected successful update"); + + // Verify the update was stored + let cid = fixture + .peer + .verify_cid_stored(version, target, hash, &new_data) + .unwrap(); + + let stored = fixture.peer.blockstore().has(&cid).await.unwrap(); + + assert!(stored, "Updated CID should be stored in blockstore"); + + // Verify plog is still valid after update + let plog = fixture.peer.plog(); + let binding = plog.as_ref().unwrap(); + let verify_iter = &mut binding.verify(); + for result in verify_iter { + if let Err(e) = result { + panic!("Plog verification failed after update: {}", e); + } + } +} + +/// Test updating a network-enabled peer +pub async fn run_network_update_test() { + tracing::info!("Starting network_update_test"); + + // Setup initialized network peer + let mut fixture = setup_initialized_network_peer() + .await + .expect("Should create initialized network peer"); + + // Create update data + let new_data = b"network updated data".to_vec(); + let hash = Codec::Sha2256; + let target = Codec::Raw; + let version = Codec::Cidv1; + + // Create update config + let update_config = bs::update::Config::builder() + .entry_signing_key(PubkeyParams::KEY_PATH.into()) + .unlock(Script::Code(Key::default(), fixture.unlock_script.clone())) + .additional_ops(vec![OpParams::CidGen { + key: Key::try_from("/network/test/updated/").unwrap(), + version, + target, + hash, + inline: true, + data: new_data.clone(), + }]) + .build(); + + // Apply the update + let res = fixture.peer.update(update_config).await; + assert!(res.is_ok(), "Expected successful update of network peer"); + + // Verify the update was stored + let cid = fixture + .peer + 
.verify_cid_stored(version, target, hash, &new_data) + .unwrap(); + + let stored = fixture.peer.blockstore().has(&cid).await.unwrap(); + + assert!( + stored, + "Updated CID should be stored in network peer blockstore" + ); + + // Verify plog is still valid + let plog = fixture.peer.plog(); + let binding = plog.as_ref().unwrap(); + let verify_iter = &mut binding.verify(); + for result in verify_iter { + if let Err(e) = result { + panic!("Network peer plog verification failed after update: {}", e); + } + } +} + +pub async fn run_load_test() { + tracing::info!("Starting load_test"); + + // Setup an initialized peer to get a valid plog + let fixture = setup_initialized_peer().await; + + // Get the plog from the initialized peer + let original_plog = fixture.peer.plog().as_ref().unwrap().clone(); + + // Create a new peer with empty state + let mut new_fixture = setup_test_peer().await; + + // Ensure the new peer has no plog yet + assert!( + new_fixture.peer.plog().is_none(), + "New peer should have no plog initially" + ); + + // Load the plog into the new peer + let res = new_fixture.peer.load(original_plog.clone()).await; + assert!(res.is_ok(), "Expected successful loading of plog"); + + // Verify the plog was loaded + assert!( + new_fixture.peer.plog().is_some(), + "Plog should now be loaded" + ); + + // Verify the loaded plog has the correct data + let loaded_plog = new_fixture.peer.plog().unwrap(); + assert_eq!( + loaded_plog.vlad.cid(), + original_plog.vlad.cid(), + "Loaded plog should have same first lock CID" + ); + assert_eq!( + loaded_plog.entries.len(), + original_plog.entries.len(), + "Loaded plog should have same number of entries" + ); + + // Verify the loaded plog can be verified + let verify_iter = &mut loaded_plog.verify(); + for result in verify_iter { + if let Err(e) = result { + panic!("Loaded plog verification failed: {}", e); + } + } + + // Verify that entries were stored in the blockstore during load + // Check first lock CID + let first_lock_cid = 
loaded_plog.vlad.cid(); + let first_lock_cid_bytes: Vec = first_lock_cid.clone().into(); + let cid = Cid::try_from(first_lock_cid_bytes).unwrap(); + let has_first_lock = new_fixture.peer.blockstore().has(&cid).await.unwrap(); + assert!( + has_first_lock, + "First lock should be in blockstore after load" + ); + + // Check entries + for (multi_cid, _) in loaded_plog.entries.iter() { + let multi_cid_bytes: Vec = multi_cid.clone().into(); + let entry_cid = Cid::try_from(multi_cid_bytes).unwrap(); + let has_entry = new_fixture.peer.blockstore().has(&entry_cid).await.unwrap(); + assert!(has_entry, "Entry should be in blockstore after load"); + } +} + +/// Test loading a plog into a network-enabled peer +pub async fn run_network_load_test() { + tracing::info!("Starting network_load_test"); + + // Setup an initialized peer to get a valid plog + let fixture = setup_initialized_peer().await; + + // Get the plog from the initialized peer + let original_plog = fixture.peer.plog().clone().unwrap().clone(); + + // Create a new network peer with empty state + let mut new_fixture = setup_network_test_peer() + .await + .expect("Should create new network peer"); + + // Ensure the new network peer has no plog yet + assert!( + new_fixture.peer.plog().is_none(), + "New network peer should have no plog initially" + ); + + // Load the plog into the new network peer + let res = new_fixture.peer.load(original_plog.clone()).await; + assert!( + res.is_ok(), + "Expected successful loading of plog into network peer" + ); + + // Verify the plog was loaded + assert!( + new_fixture.peer.plog().is_some(), + "Plog should now be loaded in network peer" + ); + + // Verify the loaded plog has the correct data + let loaded_plog = new_fixture.peer.plog().as_ref().unwrap().clone(); + assert_eq!( + loaded_plog.vlad.cid(), + original_plog.vlad.cid(), + "Loaded plog in network peer should have same first lock CID" + ); + + // Verify entries were stored in blockstore + let first_lock_cid = 
loaded_plog.vlad.cid(); + let first_lock_cid_bytes: Vec = first_lock_cid.clone().into(); + let cid = Cid::try_from(first_lock_cid_bytes).unwrap(); + let has_first_lock = new_fixture.peer.blockstore().has(&cid).await.unwrap(); + assert!( + has_first_lock, + "First lock should be in network peer blockstore after load" + ); +} + +pub async fn run_peer_initialization_test() { + tracing::info!("Starting peer initialization test"); + + // Initialize key manager for the peer + let key_manager = InMemoryKeyManager::::default(); + + // Create a new peer with the default platform blockstore + let peer_result = DefaultBsPeer::new(key_manager, StartConfig::default()).await; + + match &peer_result { + Ok(_) => tracing::info!("Peer initialization succeeded"), + Err(e) => tracing::error!("Peer initialization failed: {:?}", e), + } + + // Check that peer creation succeeded + assert!(peer_result.is_ok(), "Peer should initialize successfully"); + + let peer = peer_result.unwrap(); + + // Check if network client was established + assert!( + peer.network_client.as_ref().is_some(), + "Network client should be initialized" + ); + assert!(peer.events.is_some(), "Event channel should be initialized"); + + // Verify we have a working blockstore + let blockstore = peer.blockstore(); + + // Try to store and retrieve some data to verify blockstore works + let test_cid = + Cid::try_from("bafkreihwsnuregceqh263vgdathcprnbvatyat6h6mu7ipjhhodcdbyhg4").unwrap(); + let test_data = b"test network initialization".to_vec(); + + let put_result = blockstore.put_keyed(&test_cid, &test_data).await; + assert!( + put_result.is_ok(), + "Should be able to store data in blockstore" + ); + + let get_result = blockstore.get(&test_cid).await; + assert!( + get_result.is_ok(), + "Should be able to retrieve data from blockstore" + ); + assert_eq!( + get_result.unwrap().unwrap(), + test_data, + "Retrieved data should match stored data" + ); +} + +/// Test network functionality of the peer +pub async fn 
run_network_functionality_test() { + tracing::info!("Starting network functionality test"); + + // Create a network-enabled peer + let peer_result = setup_network_test_peer().await; + assert!(peer_result.is_ok(), "Should create network-enabled peer"); + + let fixture = peer_result.unwrap(); + + // Verify network components are initialized + assert!( + fixture.peer.network_client.is_some(), + "Network client should be initialized" + ); + assert!( + fixture.peer.events.is_some(), + "Event channel should be initialized" + ); + assert!( + fixture.peer.peer_id.is_some(), + "Peer ID should be initialized" + ); + + // Verify peer ID is valid + let peer_id = fixture.peer.peer_id.unwrap(); + tracing::info!("Network peer has PeerId: {}", peer_id); + + // Store some data in blockstore and verify network client can access it + let test_data = b"network test functionality".to_vec(); + let test_cid = + Cid::try_from("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku").unwrap(); + + // Store data in blockstore + let put_result = fixture + .peer + .blockstore() + .put_keyed(&test_cid, &test_data) + .await; + assert!(put_result.is_ok(), "Should store data successfully"); + + // Verify data is retrievable + let has_result = fixture.peer.blockstore().has(&test_cid).await; + assert!( + has_result.is_ok() && has_result.unwrap(), + "Data should be in blockstore" + ); + + // Retrieve and verify data + let get_result = fixture.peer.blockstore().get(&test_cid).await; + assert!(get_result.is_ok(), "Should retrieve data successfully"); + assert_eq!( + get_result.unwrap().unwrap(), + test_data, + "Retrieved data should match stored data" + ); +} + +/// Test that resolver works with network client +pub async fn run_resolver_test() { + tracing::info!("Starting resolver test"); + + // Create network peer + let peer_result = setup_network_test_peer().await; + assert!( + peer_result.is_ok(), + "Should create network peer for resolver test" + ); + + let fixture = peer_result.unwrap(); + + 
// Create a multicid for testing + let test_data = b"resolver test data".to_vec(); + let hash = Codec::Sha2256; + let target = Codec::Raw; + let version = Codec::Cidv1; + + let multi_cid = cid::Builder::new(version) + .with_target_codec(target) + .with_hash( + &mh::Builder::new_from_bytes(hash, &test_data) + .unwrap() + .try_build() + .unwrap(), + ) + .try_build() + .unwrap(); + + // Convert to cid::Cid and store in blockstore + let multi_cid_bytes: Vec = multi_cid.clone().into(); + let cid = Cid::try_from(multi_cid_bytes).unwrap(); + + let put_result = fixture.peer.blockstore().put_keyed(&cid, &test_data).await; + assert!(put_result.is_ok(), "Should store data for resolver test"); + + // The resolver functionality is tested in integration/e2e tests since it requires + // actual network interaction between peers to fully verify + tracing::info!("Resolver test setup complete - actual resolver functionality would be tested in integration tests"); +} diff --git a/crates/bs-peer/tests/web.rs b/crates/bs-peer/tests/web.rs new file mode 100644 index 0000000..f5ab916 --- /dev/null +++ b/crates/bs-peer/tests/web.rs @@ -0,0 +1,38 @@ +#![cfg(target_arch = "wasm32")] +#![cfg(test)] +use bs_peer::utils; +use wasm_bindgen_test::wasm_bindgen_test_configure; +use wasm_bindgen_test::*; + +wasm_bindgen_test_configure!(run_in_browser); + +#[wasm_bindgen_test] +async fn basic_test() { + utils::run_basic_test().await; +} + +#[wasm_bindgen_test] +async fn in_memory_blockstore_test() { + utils::run_in_memory_blockstore_test().await; +} + +#[wasm_bindgen_test] +async fn test_store_entries() { + utils::run_store_entries_test().await; +} + +#[wasm_bindgen_test] +async fn run_update_test() { + utils::run_update_test().await; +} + +#[wasm_bindgen_test] +async fn run_load_test() { + utils::run_load_test().await; +} + +#[wasm_bindgen_test] +async fn test_peer_initialization() { + // init_logger(); + utils::run_peer_initialization_test().await; +} diff --git 
a/crates/bs-server/.cargo/config.toml b/crates/bs-server/.cargo/config.toml new file mode 100644 index 0000000..0890d5c --- /dev/null +++ b/crates/bs-server/.cargo/config.toml @@ -0,0 +1,3 @@ +# set rust_log for showing logs debug and above +[env] +RUST_LOG = "debug" diff --git a/crates/bs-server/Cargo.toml b/crates/bs-server/Cargo.toml new file mode 100644 index 0000000..15da641 --- /dev/null +++ b/crates/bs-server/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "bs-server" +version.workspace = true +edition.workspace = true +authors.workspace = true +description.workspace = true +readme.workspace = true +license.workspace = true + +[dependencies] +bs-peer.workspace = true +bs-p2p.workspace = true +anyhow = "1.0" +directories = "6.0" +futures = "0.3" +libp2p.workspace = true +serde = { workspace = true, features = ["derive"] } +serde_json = "1.0" +tokio = { workspace = true, features = ["full", "signal"] } +tracing.workspace = true +tracing-subscriber = { workspace = true, features = ["env-filter"] } +axum = "0.8" + +[lints] +workspace = true diff --git a/crates/bs-server/README.md b/crates/bs-server/README.md new file mode 100644 index 0000000..25655cc --- /dev/null +++ b/crates/bs-server/README.md @@ -0,0 +1,3 @@ +# BetterSign Server + +A headless peer designed to run on a server as a superpeer which pins for offline peers and acts as a well-known bootstrap peer for other peers to connect to the mesh network. 
diff --git a/crates/bs-server/justfile b/crates/bs-server/justfile new file mode 100644 index 0000000..28ee5b3 --- /dev/null +++ b/crates/bs-server/justfile @@ -0,0 +1,31 @@ +# Automatically loads environment variables from a `.env` file when you run any `just` command +# - Makes those environment variables available to your recipes +# - Allows you to reference environment variables like `$IP_ADDRESS` in your recipes +set dotenv-load + +serve: + cargo run + +build: + cargo build --release --bin bestsign-superpeer + +add-target: + rustup target add x86_64-unknown-linux-gnu + +release: + cargo build --release --bin bestsign-superpeer --target x86_64-unknown-linux-gnu + +scp: + scp -i ~/.ssh/oracle_cloud ../../target/x86_64-unknown-linux-gnu/release/bestsign-superpeer ubuntu@$IP_ADDRESS:~ + +run-on-server: + ssh -i ~/.ssh/oracle_cloud ubuntu@$IP_ADDRESS './bettersign-superpeer' + +login: + ssh -i ~/.ssh/oracle_cloud ubuntu@$IP_ADDRESS + + +# scp the bestsign_superpeer.service file to the remote host +scp-bestsign-superpeer-service: + scp -i ~/.ssh/oracle_cloud crates/superpeer/bestsign_superpeer.service ubuntu@$IP_ADDRESS:/etc/systemd/system/bestsign_superpeer.service + diff --git a/crates/bs-server/src/main.rs b/crates/bs-server/src/main.rs new file mode 100644 index 0000000..82b33a6 --- /dev/null +++ b/crates/bs-server/src/main.rs @@ -0,0 +1,151 @@ +//! Headless peer designed to run without a UI, suitable for server environments. 
+use anyhow::Result; +use axum::{extract::State, routing::get, Json, Router}; +use bs_p2p::events::{api::Libp2pEvent, Client, PublicEvent}; +use bs_peer::platform::{start, Blockstore, StartConfig}; +use futures::{channel::mpsc, StreamExt as _}; +use libp2p::PeerId; +use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use tokio::signal; +use tracing::{error, info}; +use tracing_subscriber::EnvFilter; + +#[tokio::main] +async fn main() -> Result<()> { + // Initialize logging + // tracing_subscriber::fmt() + // .with_env_filter(EnvFilter::from_default_env()) + // .init(); + // set bs_server=info + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::new("bs_server=info")) + .with_file(true) + .with_line_number(true) + .init(); + + info!("Starting bs-server headless peer..."); + + // Configuration (could come from command line args or config file) + let config = parse_config()?; + let base_path = config.data_dir.unwrap_or_else(|| { + directories::ProjectDirs::from("org", "bettersign", "superpeer") + .map(|proj_dirs| proj_dirs.data_dir().to_path_buf()) + .unwrap_or_else(|| PathBuf::from("./.bs-server")) + }); + + // Create blockstore + let blockstore = Blockstore::new(base_path.clone()).await?; + + // Create channel for p2p events + let (tx, rx) = mpsc::channel::(32); + + // Start the peer + let start_config = StartConfig { + libp2p_endpoints: config.bootstrap_peers, + base_path: Some(base_path), + }; + + let (client, peer_id) = match start(tx, blockstore, start_config).await { + Ok((client, peer_id)) => { + info!("Peer started with ID: {peer_id}"); + (client, peer_id) + } + Err(e) => { + error!("Failed to start peer: {e}"); + return Err(e.into()); + } + }; + + // Start API server if configured + if let Some(api_port) = config.api_port { + spawn_api_server(api_port, client.clone(), peer_id).await?; + } + + // Process events from the network + tokio::spawn(handle_network_events(rx)); + + // Wait for termination signal + match signal::ctrl_c().await { + Ok(()) => { 
+ info!("Received shutdown signal. Shutting down gracefully..."); + } + Err(err) => { + error!("Unable to listen for shutdown signal: {}", err); + } + } + + info!("Server stopped."); + Ok(()) +} + +/// Server configuration +struct ServerConfig { + bootstrap_peers: Vec, + data_dir: Option, + api_port: Option, +} + +fn parse_config() -> Result { + // Could use clap or config crate for more sophisticated config + // This is a simple example + Ok(ServerConfig { + bootstrap_peers: vec![], + data_dir: None, + api_port: Some(8000), + }) +} + +// Wrap the client in Arc for sharing between handlers +struct ApiState { + client: Client, + peer_id: PeerId, +} + +async fn spawn_api_server(port: u16, client: Client, peer_id: PeerId) -> Result<()> { + let state = Arc::new(ApiState { client, peer_id }); + + // Build API routes + let app = Router::new() + .route("/health", get(|| async { "OK" })) + .route("/info", get(get_peer_info)) + // Add more routes as needed + .with_state(state); + + // Start the server + let addr = SocketAddr::from(([0, 0, 0, 0], port)); + info!("API server listening on {addr}"); + + tokio::spawn(async move { + // Create a TCP listener + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + + // Use the new axum::serve function to serve the application + if let Err(e) = axum::serve(listener, app).await { + error!("API server error: {e}"); + } + }); + + Ok(()) +} + +async fn get_peer_info(State(state): State>) -> Json { + Json(serde_json::json!({ + "peer_id": state.peer_id.to_string(), + // TODO: Add more peer info + })) +} + +async fn handle_network_events(mut rx: mpsc::Receiver) { + while let Some(event) = rx.next().await { + match event { + PublicEvent::Connected => { + info!("Peer connected"); + } + PublicEvent::ConnectionClosed { peer, cause } => { + info!("Peer disconnected: {peer}, cause: {cause}"); + } + PublicEvent::Swarm(Libp2pEvent::PutRecordRequest { source }) => {} + _ => {} + } + } +} diff --git a/crates/bs-traits/Cargo.toml 
b/crates/bs-traits/Cargo.toml index b13b8aa..69a254e 100644 --- a/crates/bs-traits/Cargo.toml +++ b/crates/bs-traits/Cargo.toml @@ -8,12 +8,15 @@ readme = "README.md" license = "Functional Source License 1.1" [dependencies] -async-trait = "0.1" thiserror.workspace = true [dev-dependencies] futures = "0.3" -tokio = { version = "1", features = ["full"] } +tokio = { workspace = true, features = ["full"] } [lints] workspace = true + +[features] +default = ["dyn-compatible"] +dyn-compatible = [] diff --git a/crates/bs-traits/src/async.rs b/crates/bs-traits/src/async.rs deleted file mode 100644 index 9dc7211..0000000 --- a/crates/bs-traits/src/async.rs +++ /dev/null @@ -1,107 +0,0 @@ -// SPDX-License-Identifier: FSL-1.1 - -use crate::*; -use async_trait::*; -use std::num::NonZeroUsize; - -/// Trait for types that can sign data asynchronously -#[async_trait] -pub trait AsyncSigner: Signer { - /// Attempt to sign the data asynchronously - async fn try_sign_async(&self, key: &Self::Key, data: &[u8]) -> Result; - - /// Sign the data asynchronously - async fn sign_async(&self, key: &Self::Key, data: &[u8]) -> Self::Signature { - self.try_sign_async(key, data) - .await - .expect("signing operation failed") - } -} - -/// Trait for types that can verify data asynchronously -#[async_trait] -pub trait AsyncVerifier: Verifier { - /// Verify the data asynchronously - async fn verify_async( - &self, - key: &Self::Key, - data: &[u8], - signature: &Self::Signature, - ) -> Result<(), Error>; -} - -/// Trait for types that can encrypt data asynchronously -#[async_trait] -pub trait AsyncEncryptor: Encryptor { - /// Attempt to encrypt the data asynchronously - async fn try_encrypt_async( - &self, - key: &Self::Key, - plaintext: &Self::Plaintext, - ) -> Result; - - /// Encrypt the data asynchronously - async fn encrypt_async( - &self, - key: &Self::Key, - plaintext: &Self::Plaintext, - ) -> Self::Ciphertext { - self.try_encrypt_async(key, plaintext) - .await - .expect("encryption operation 
failed") - } -} - -/// Trait for types that can decrypt data asynchronously -#[async_trait] -pub trait AsyncDecryptor: Decryptor { - /// Decrypt the data asynchronously - async fn decrypt_async( - &self, - key: &Self::Key, - ciphertext: &Self::Ciphertext, - ) -> Result; -} - -/// Trait for types that can split a secret into shares asynchronously -#[async_trait] -pub trait AsyncSecretSplitter: SecretSplitter { - /// Split the secret into shares asynchronously - async fn split_async( - &self, - secret: &Self::Secret, - threshold: NonZeroUsize, - limit: NonZeroUsize, - ) -> Result; - - /// Split the secret into shares with the given identifiers asynchronously - async fn split_with_identifiers_async( - &self, - secret: &Self::Secret, - threshold: NonZeroUsize, - identifiers: &[Self::Identifier], - ) -> Result; -} - -/// Trait for types that can combine shares into a secret asynchronously -#[async_trait] -pub trait AsyncSecretCombiner: SecretCombiner { - /// Combine the shares into a secret asynchronously - async fn combine_async( - &self, - shares: &[(Self::Identifier, Self::Shares)], - ) -> Result; -} - -/// Trait for types that can get a key asynchronously -#[async_trait] -pub trait AsyncGetKey: GetKey { - /// Get the key asynchronously - async fn get_key_async( - &self, - key_path: &Self::KeyPath, - codec: &Self::Codec, - threshold: usize, - limit: usize, - ) -> Result; -} diff --git a/crates/bs-traits/src/asyncro.rs b/crates/bs-traits/src/asyncro.rs new file mode 100644 index 0000000..e28dbae --- /dev/null +++ b/crates/bs-traits/src/asyncro.rs @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: FSL-1.1 +//! 
This module provides traits for asynchronous operations +use crate::cond_send::CondSend; +use crate::*; +use std::future::Future; +use std::num::NonZeroUsize; +use std::pin::Pin; + +// Helper trait that combines Future with CondSend +pub trait CondSendFuture: Future + crate::cond_send::CondSend {} + +// Blanket implementation for all types that implement both traits +impl CondSendFuture for F where F: Future + crate::cond_send::CondSend {} + +// Type aliases for common return types +pub type BoxFuture<'a, T> = Pin + 'a>>; + +// Specific aliases for different trait return types +pub type SignerFuture<'a, S, E> = BoxFuture<'a, Result>; +pub type GetKeyFuture<'a, K, E> = BoxFuture<'a, Result>; +pub type VerifierFuture<'a, E> = BoxFuture<'a, Result<(), E>>; +pub type EncryptorFuture<'a, C, E> = BoxFuture<'a, Result>; +pub type DecryptorFuture<'a, P, E> = BoxFuture<'a, Result>; +pub type SecretSplitterFuture<'a, O, E> = BoxFuture<'a, Result>; +pub type SecretCombinerFuture<'a, S, E> = BoxFuture<'a, Result>; + +/// Trait for types that can sign data asynchronously +pub trait AsyncSigner: Signer { + /// Attempt to sign the data asynchronously + fn try_sign<'a>( + &'a self, + key: &'a Self::KeyPath, + data: &'a [u8], + ) -> SignerFuture<'a, Self::Signature, Self::Error>; + + /// Sign the data asynchronously, unchedked. + /// + /// # Dyn Compatibility + /// + /// This function is not compatible with `dyn` trait objects + /// + /// # Panics + /// + /// This function will panic if the signing operation fails. 
+ #[cfg(not(feature = "dyn-compatible"))] + fn sign<'a>( + &'a self, + key: &'a Self::KeyPath, + data: &'a [u8], + ) -> Pin + 'a>> + where + Self: CondSync, + Self::KeyPath: CondSync, + { + Box::pin(async move { + self.try_sign(key, data) + .await + .expect("signing operation failed") + }) + } +} + +/// Trait for types that can verify data asynchronously +pub trait AsyncVerifier: Verifier { + fn verify<'a>( + &'a self, + key: &'a Self::Key, + data: &'a [u8], + signature: &'a Self::Signature, + ) -> Pin> + 'a>>; +} + +pub trait AsyncEncryptor: Encryptor { + fn try_encrypt<'a>( + &'a self, + key: &'a Self::Key, + plaintext: &'a Self::Plaintext, + ) -> EncryptorFuture<'a, Self::Ciphertext, Self::Error>; + + /// Encrypt the data asynchronously, unchecked. + /// + /// # Dyn Compatibility + /// This function is not compatible with `dyn` trait objects + /// + /// # Panics + /// This function will panic if the encryption operation fails. + #[cfg(not(feature = "dyn-compatible"))] + fn encrypt<'a>( + &'a self, + key: &'a Self::Key, + plaintext: &'a Self::Plaintext, + ) -> Pin + 'a>> + where + Self: CondSync, + Self::Key: CondSync, + Self::Plaintext: CondSync, + { + Box::pin(async move { + self.try_encrypt(key, plaintext) + .await + .expect("encryption operation failed") + }) + } +} +/// Trait for types that can decrypt data asynchronously +pub trait AsyncDecryptor: Decryptor { + /// Decrypt the data asynchronously + fn decrypt( + &self, + key: &Self::Key, + ciphertext: &Self::Ciphertext, + ) -> impl Future> + CondSend + '_; +} + +/// Trait for types that can split a secret into shares asynchronously +pub trait AsyncSecretSplitter: SecretSplitter { + /// Split the secret into shares asynchronously + /// + /// Conditions for `split` to succeed: + /// - Threshold must be less than or equal to limit. + /// - Threshold must be greater than or equal to 2. 
+ fn split( + &self, + secret: &Self::Secret, + threshold: NonZeroUsize, + limit: NonZeroUsize, + ) -> impl Future> + CondSend + '_; + + /// Split the secret into shares with the given identifiers asynchronously + /// The number of shares will be equal to the number of identifiers i.e. the `limit`. + /// + /// Conditions for `split_with_identifiers` to succeed: + /// - Threshold must be less than or equal to the number of identifiers. + /// - Threshold must be greater than or equal to 2. + /// - Identifiers must be unique. + /// - Identifiers must not be empty. + fn split_with_identifiers( + &self, + secret: &Self::Secret, + threshold: NonZeroUsize, + identifiers: &[Self::Identifier], + ) -> impl Future> + CondSend + '_; +} + +/// Trait for types that can combine shares into a secret asynchronously +pub trait AsyncSecretCombiner: SecretCombiner { + /// Combine the shares into a secret asynchronously + fn combine( + &self, + shares: &[(Self::Identifier, Self::Shares)], + ) -> impl Future> + CondSend + '_; +} + +/// Trait for types that can get a key asynchronously +pub trait AsyncGetKey: GetKey { + /// Get the key asynchronously + fn get_key<'a>( + &'a self, + key_path: &'a Self::KeyPath, + codec: &'a Self::Codec, + threshold: usize, + limit: usize, + ) -> Result, Self::Error>; +} diff --git a/crates/bs-traits/src/cond_send.rs b/crates/bs-traits/src/cond_send.rs new file mode 100644 index 0000000..df30005 --- /dev/null +++ b/crates/bs-traits/src/cond_send.rs @@ -0,0 +1,33 @@ +//! Utilities for conditionally adding `Send` and `Sync` constraints. + +/// A conditionally compiled trait indirection for `Send` bounds. +/// This target makes it require `Send`. +#[cfg(not(target_arch = "wasm32"))] +pub trait CondSend: Send {} + +/// A conditionally compiled trait indirection for `Send` bounds. +/// This target makes it not require any marker traits. 
+#[cfg(target_arch = "wasm32")] +pub trait CondSend {} + +#[cfg(not(target_arch = "wasm32"))] +impl CondSend for S where S: Send {} + +#[cfg(target_arch = "wasm32")] +impl CondSend for S {} + +/// A conditionally compiled trait indirection for `Send + Sync` bounds. +/// This target makes it require `Send + Sync`. +#[cfg(not(target_arch = "wasm32"))] +pub trait CondSync: Send + Sync {} + +/// A conditionally compiled trait indirection for `Send + Sync` bounds. +/// This target makes it not require any marker traits. +#[cfg(target_arch = "wasm32")] +pub trait CondSync {} + +#[cfg(not(target_arch = "wasm32"))] +impl CondSync for S where S: Send + Sync {} + +#[cfg(target_arch = "wasm32")] +impl CondSync for S {} diff --git a/crates/bs-traits/src/lib.rs b/crates/bs-traits/src/lib.rs index d74ab8b..d6df7a3 100644 --- a/crates/bs-traits/src/lib.rs +++ b/crates/bs-traits/src/lib.rs @@ -4,12 +4,121 @@ /// /// It also provides a `WaitQueue` type that can be used to implement synchronous and asynchronous operations /// without having to use tokio::block_in_place or similar. -mod r#async; +pub mod asyncro; +mod cond_send; +pub use cond_send::{CondSend, CondSync}; mod error; -mod sync; +pub mod sync; mod wait_queue; pub use error::Error; -pub use r#async::*; -pub use sync::*; pub use wait_queue::*; + +use std::{fmt::Debug, num::NonZeroUsize}; + +/// Trait for types that sets the type of public key used for ephemeral signing operations, +/// can sign data using [SyncPrepareEphemeralSigning] or [AsyncPrepareEphemeralSigning], +pub trait EphemeralKey { + /// The type of public key used to sign + type PubKey; +} + +/// Each key needs to have a Codec, KeyPath, threshold, and limit. 
+pub trait KeyDetails { + /// The key type + type Key; + /// The codec used for the key + type Codec; + /// The key path used to identify the key + type KeyPath; + + fn key(&self) -> &Self::Key; + fn codec(&self) -> Self::Codec; + fn key_path(&self) -> &Self::KeyPath; + fn threshold(&self) -> NonZeroUsize; + fn limit(&self) -> NonZeroUsize; +} + +/// Trait for types that can sign data using [AsyncSigner] or [SyncSigner] +pub trait Signer { + /// The type of key used to sign + type KeyPath; + /// The type of signature + type Signature; + /// Any Signing Error + type Error: Debug; +} + +/// Trait for types that can verify signatures using [AsyncVerifier] or [SyncVerifier] +pub trait Verifier { + /// The type of key used to verify + type Key; + /// The type of signature + type Signature; + /// Error type for verification operations + type Error; +} + +/// Trait for types that can encrypt data using [AsyncEncryptor] or [SyncEncryptor] +pub trait Encryptor { + /// The type of key used to encrypt + type Key: Send + Sync; + /// The type of ciphertext + type Ciphertext: Send + Sync; + /// The type of plaintext, might include the nonce, and additional authenticated data + type Plaintext: Send + Sync; + /// Error type for encryption operations + type Error: Debug; +} + +/// Trait for types that can decrypt data using [AsyncDecryptor] or [SyncDecryptor] +pub trait Decryptor { + /// The type of key used to decrypt + type Key: Send + Sync; + /// The type of ciphertext + type Ciphertext: Send + Sync; + /// The type of plaintext + type Plaintext: Send + Sync; + /// Error type for decryption operations + type Error; +} + +/// Trait for types that can split a secret into shares, using [AsyncSecretSplitter] or [SyncSecretSplitter] +pub trait SecretSplitter { + /// The type of secret to split + type Secret: Send + Sync; + /// The type of identifier for the shares + type Identifier: Send + Sync; + /// The output from splitting the secret. 
+ /// Might include the threshold and limit used to split the secret, + /// the shares, and the verifiers, identifiers, + /// or any other information needed to reconstruct the secret + /// and verify the shares. + type Output: Send + Sync; + /// Error type for secret splitting operations + type Error; +} + +/// Trait for types that can combine shares into a secret, using [AsyncSecretCombiner] or [SyncSecretCombiner] +pub trait SecretCombiner { + /// The type of secret to combine + type Secret: Send + Sync; + /// The type of identifier for the shares + type Identifier: Send + Sync; + /// The type of shares to combine + type Shares: Send + Sync; + /// Error type for secret combining operations + type Error; +} + +/// Trait for types that can retrieve a key, using [AsyncGetKey] or [SyncGetKey] +pub trait GetKey { + /// The type of key + type Key; + /// The type of key path + type KeyPath; + /// The type of codec + type Codec; + /// The Error returned + type Error; +} diff --git a/crates/bs-traits/src/sync.rs b/crates/bs-traits/src/sync.rs index d0916f8..dde4572 100644 --- a/crates/bs-traits/src/sync.rs +++ b/crates/bs-traits/src/sync.rs @@ -1,54 +1,64 @@ +//! This module contains traits for synchronous operations. use core::num::NonZeroUsize; -use crate::Error; +use crate::*; /// Trait for types that can sign data -pub trait Signer { - /// The type of key used to sign - type Key: Send + Sync; - /// The type of signature - type Signature: Send + Sync; - +pub trait SyncSigner: Signer { /// Attempt to sign the data - fn try_sign(&self, key: &Self::Key, data: &[u8]) -> Result; + fn try_sign(&self, key: &Self::KeyPath, data: &[u8]) -> Result; /// Sign the data and return the signature - fn sign(&self, key: &Self::Key, data: &[u8]) -> Self::Signature { + /// + /// # Panics + /// + /// This function will panic if the signing operation fails. 
+ fn sign(&self, key: &Self::KeyPath, data: &[u8]) -> Self::Signature { self.try_sign(key, data).expect("signing operation failed") } } -/// Trait for types that can verify signatures -pub trait Verifier { - /// The type of key used to verify - type Key: Send + Sync; - /// The type of signature - type Signature: Send + Sync; +pub type OneTimeSignFn = Box Result>; + +pub type EphemeralSigningTuple = Result<(PK, OneTimeSignFn), E>; + +/// Trait for types that can prepare an ephemeral key for signing +pub trait SyncPrepareEphemeralSigning: Signer + EphemeralKey { + /// The codec used for encoding/decoding keys + type Codec; + /// Prepares an ephemeral keypair, returning the public key and a one-time signing function + fn prepare_ephemeral_signing( + &self, + codec: &Self::Codec, + threshold: NonZeroUsize, + limit: NonZeroUsize, + ) -> EphemeralSigningTuple< + ::PubKey, + ::Signature, + ::Error, + >; +} + +/// Trait for types that can verify signatures +pub trait SyncVerifier: Verifier { /// Verify that the provided signature for the given data is authentic fn verify( &self, key: &Self::Key, data: &[u8], signature: &Self::Signature, - ) -> Result<(), Error>; + ) -> Result<(), Self::Error>; } /// Trait for types that can encrypt data -pub trait Encryptor { - /// The type of key used to encrypt - type Key: Send + Sync; - /// The type of ciphertext - type Ciphertext: Send + Sync; - /// The type of plaintext, might include the nonce, and additional authenticated data - type Plaintext: Send + Sync; - +pub trait SyncEncryptor: Encryptor { /// Attempt to encrypt the plaintext fn try_encrypt( &self, key: &Self::Key, plaintext: &Self::Plaintext, - ) -> Result; + ) -> Result; /// Encrypt the plaintext fn encrypt(&self, key: &Self::Key, plaintext: &Self::Plaintext) -> Self::Ciphertext { @@ -58,35 +68,17 @@ pub trait Encryptor { } /// Trait for types that can decrypt data -pub trait Decryptor { - /// The type of key used to decrypt - type Key: Send + Sync; - /// The type of 
ciphertext - type Ciphertext: Send + Sync; - /// The type of plaintext - type Plaintext: Send + Sync; - +pub trait SyncDecryptor: Decryptor { /// Attempt to decrypt the ciphertext fn decrypt( &self, key: &Self::Key, ciphertext: &Self::Ciphertext, - ) -> Result; + ) -> Result; } /// Trait for types that can split a secret into shares -pub trait SecretSplitter { - /// The type of secret to split - type Secret: Send + Sync; - /// The type of identifier for the shares - type Identifier: Send + Sync; - /// The output from splitting the secret. - /// Might include the threshold and limit used to split the secret, - /// the shares, and the verifiers, identifiers, - /// or any other information needed to reconstruct the secret - /// and verify the shares. - type Output: Send + Sync; - +pub trait SyncSecretSplitter: SecretSplitter { /// Split the secret into shares. /// /// Conditions for `split` to succeed: @@ -97,7 +89,7 @@ pub trait SecretSplitter { secret: &Self::Secret, threshold: NonZeroUsize, limit: NonZeroUsize, - ) -> Result; + ) -> Result; /// Split the secret into shares with the given identifiers. /// The number of shares will be equal to the number of identifiers i.e. the `limit`. 
@@ -112,37 +104,26 @@ pub trait SecretSplitter { secret: &Self::Secret, threshold: NonZeroUsize, identifiers: &[Self::Identifier], - ) -> Result; + ) -> Result; } /// Trait for types that can combine shares into a secret -pub trait SecretCombiner { - /// The type of secret to combine - type Secret: Send + Sync; - /// The type of identifier for the shares - type Identifier: Send + Sync; - /// The type of shares to combine - type Shares: Send + Sync; - +pub trait SyncSecretCombiner: SecretCombiner { /// Combine the shares into a secret - fn combine(&self, shares: &[(Self::Identifier, Self::Shares)]) -> Result; + fn combine( + &self, + shares: &[(Self::Identifier, Self::Shares)], + ) -> Result; } /// Trait for types that can retrieve a key -pub trait GetKey { - /// The type of key - type Key: Send + Sync; - /// The type of key path - type KeyPath: Send + Sync; - /// The type of codec - type Codec: Send + Sync; - +pub trait SyncGetKey: GetKey { /// Get the key fn get_key( &self, key_path: &Self::KeyPath, codec: &Self::Codec, - threshold: usize, - limit: usize, - ) -> Result; + threshold: NonZeroUsize, + limit: NonZeroUsize, + ) -> Result; } diff --git a/crates/bs-wallets/Cargo.toml b/crates/bs-wallets/Cargo.toml new file mode 100644 index 0000000..ea6b653 --- /dev/null +++ b/crates/bs-wallets/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "bs-wallets" +version.workspace = true +edition.workspace = true +authors.workspace = true +description.workspace = true +readme.workspace = true +license.workspace = true + +[dependencies] +bs-traits.workspace = true +multibase.workspace = true +multicodec.workspace = true +multikey.workspace = true +multihash.workspace = true +multicid.workspace = true +multiutil.workspace = true +multisig.workspace = true +thiserror.workspace = true +tracing.workspace = true +rand.workspace = true +rand_core_6.workspace = true +provenance-log.workspace = true + +[dev-dependencies] +tokio.workspace = true +bs.workspace = true +bs-peer.workspace = 
true +tracing-subscriber.workspace = true + +[lints] +workspace = true diff --git a/crates/bs-wallets/README.md b/crates/bs-wallets/README.md new file mode 100644 index 0000000..519f922 --- /dev/null +++ b/crates/bs-wallets/README.md @@ -0,0 +1,11 @@ +# BetterSign Wallets + +Some reference implementations of wallets for the BetterSign protocol. + +### In-Memory Wallet + +This is a simple in-memory wallet implementation that can be used for testing purposes. It allows you to create, sign, and verify transactions without the need for a persistent storage. + +### Argon2 Wallet + +This wallet implementation uses Argon2 for key derivation and storage. It is designed to be secure and resistant to brute-force attacks. The wallet can store multiple keys and allows you to derive keys from a master key using a passphrase. diff --git a/crates/bs-wallets/src/error.rs b/crates/bs-wallets/src/error.rs new file mode 100644 index 0000000..b8e05ad --- /dev/null +++ b/crates/bs-wallets/src/error.rs @@ -0,0 +1,23 @@ +//! Crate errors + +use provenance_log::Key; + +/// Errors that can occur in the BS Wallet library. 
+#[derive(Debug, thiserror::Error)] +pub enum Error { + /// From + #[error("Multikey error: {0}")] + Multikey(#[from] multikey::Error), + + /// No key present for that KeyPath + #[error("No key present for that KeyPath {0}")] + NoKeyPresent(Key), + + /// From + #[error("Multihash error: {0}")] + Multihash(#[from] multihash::Error), + + /// From + #[error("Multicid error: {0}")] + Multicid(#[from] multicid::Error), +} diff --git a/crates/bs-wallets/src/lib.rs b/crates/bs-wallets/src/lib.rs new file mode 100644 index 0000000..347577c --- /dev/null +++ b/crates/bs-wallets/src/lib.rs @@ -0,0 +1,5 @@ +mod error; +pub use error::Error; + +/// In memory Key manager and signer +pub mod memory; diff --git a/crates/bs-wallets/src/memory.rs b/crates/bs-wallets/src/memory.rs new file mode 100644 index 0000000..76f4cca --- /dev/null +++ b/crates/bs-wallets/src/memory.rs @@ -0,0 +1,462 @@ +//! Basic in-memory wallet implementation. +//! In memory Key manager and [Signer] +pub use bs_traits::sync::{ + EphemeralSigningTuple, SyncGetKey, SyncPrepareEphemeralSigning, SyncSigner, +}; +use bs_traits::{self, EphemeralKey, GetKey, Signer}; +use multicodec::Codec; +use multikey::{mk, Multikey, Views as _}; +use multisig::Multisig; +use provenance_log::Key; +use std::collections::HashMap; +use std::fmt::Debug; +use std::marker::PhantomData; +use std::num::NonZeroUsize; +use std::sync::{Arc, Mutex}; + +/// In-memory key manager that provides key management and signing capabilities. +/// +/// You can specify an Error type that implements From<[multikey::Error]> and From<[multihash::Error]> +/// by using the turbo-fish operator `::`. 
+/// +/// # Example +/// ``` +/// use bs_wallets::memory::InMemoryKeyManager; +/// use bs::config::sync::MultiSigner; +/// use bs::config::sync::KeyManager; +/// +/// let key_manager = InMemoryKeyManager::default(); // same as InMemoryKeyManager::::new(); +/// test_default_error(key_manager); +/// +/// // specify a custom error type, as long as it meets the bounds: +/// let key_manager = InMemoryKeyManager::::new(); +/// +/// // fixture +/// fn test_default_error + MultiSigner>( +/// _kp: KP, +/// ) { +/// } +#[derive(Debug)] +pub struct InMemoryKeyManager { + // Map of key fingerprints to their corresponding secret keys + keys: Arc, Multikey>>>, + // Map of key paths to their corresponding key fingerprints + paths: Arc>>>, + /// The [Key] used to sign [provenance_log::Entry]s + entry_signing_key: Option, + // PhantomData to hold the error type + _phantom: PhantomData, +} + +impl Clone for InMemoryKeyManager { + fn clone(&self) -> Self { + Self { + keys: self.keys.clone(), + paths: self.paths.clone(), + entry_signing_key: None, + _phantom: PhantomData, + } + } +} + +impl Default for InMemoryKeyManager +where + E: From + From + Debug, +{ + fn default() -> Self { + Self::new() + } +} + +impl InMemoryKeyManager +where + E: From + From + Debug, +{ + /// Create a new key manager with auto-generated keys + pub fn new() -> Self { + Self { + keys: Arc::new(Mutex::new(HashMap::new())), + paths: Arc::new(Mutex::new(HashMap::new())), + entry_signing_key: None, + _phantom: PhantomData, + } + } + + /// Get public key by path - enhanced to support custom path lookups too + fn get_public_key_by_path(&self, path: &Key) -> Result, E> { + let paths = self.paths.lock().unwrap(); + if let Some(fingerprint) = paths.get(path) { + let keys = self.keys.lock().unwrap(); + if let Some(secret_key) = keys.get(fingerprint) { + let public_key = secret_key.conv_view()?.to_public_key()?; + return Ok(Some(public_key)); + } + } + Ok(None) + } + + /// Get secret key by path + fn 
get_secret_key(&self, path: &Key) -> Result, E> { + let paths = self.paths.lock().unwrap(); + if let Some(fingerprint) = paths.get(path) { + let keys = self.keys.lock().unwrap(); + return Ok(keys.get(fingerprint).cloned()); + } + Ok(None) + } + + /// Store secret key by path + pub fn store_secret_key(&self, path: Key, secret_key: Multikey) -> Result<(), E> { + let fingerprint = secret_key.fingerprint_view()?.fingerprint(Codec::Sha2256)?; + let mut keys = self.keys.lock().unwrap(); + keys.insert(fingerprint.clone().into(), secret_key); + let mut paths = self.paths.lock().unwrap(); + paths.insert(path, fingerprint.into()); + Ok(()) + } + + /// Explicitly set the entry signing key + pub fn set_entry_signing_key(&mut self, key: Key) { + self.entry_signing_key = Some(key); + } + + /// Convenience method to get the entry signing [Key] if it exists + pub fn get_entry_signing_key(&self) -> &Option { + &self.entry_signing_key + } + + /// Remove secret key by path + pub fn remove_secret_key(&self, path: &Key) -> Result<(), E> { + let mut paths = self.paths.lock().unwrap(); + if let Some(fingerprint) = paths.remove(path) { + let mut keys = self.keys.lock().unwrap(); + keys.remove(&fingerprint); + } + Ok(()) + } + + /// Generate a new key for the given codec + pub fn generate_key(codec: &Codec) -> Result { + let mut rng = rand_core_6::OsRng; + Ok(mk::Builder::new_from_random_bytes(*codec, &mut rng)?.try_build()?) 
+ } + + /// Generates from seed + pub fn generate_from_seed(codec: &Codec, seed: &[u8]) -> Result { + let mk = mk::Builder::new_from_seed(*codec, seed)?.try_build()?; + Ok(mk) + } + + /// Update the path mapping for a key + pub fn update_path_mapping(&self, path: Key, fingerprint: Vec) -> Result<(), E> { + let mut paths = self.paths.lock().unwrap(); + paths.insert(path, fingerprint); + Ok(()) + } +} + +impl GetKey for InMemoryKeyManager +where + E: From + From + Debug, +{ + type KeyPath = Key; + type Codec = Codec; + type Key = Multikey; + type Error = E; +} + +impl SyncGetKey for InMemoryKeyManager +where + E: From + From + Debug, +{ + /// Gets a key for the given path and codec, generating it if necessary under the specified threshold and limit. + /// Saves the secret key for future use under the KeyPath provided. + fn get_key<'a>( + &'a self, + key_path: &'a Self::KeyPath, + codec: &'a Self::Codec, + _threshold: NonZeroUsize, + _limit: NonZeroUsize, + ) -> Result { + tracing::trace!("Key request for {}", key_path); + + // Return the existing public key if we have it already + if let Some(key) = self.get_public_key_by_path(key_path)? { + tracing::debug!( + "Returning existing key for path {}: {:?}", + key_path, + key.fingerprint_view()?.fingerprint(Codec::Sha2256)? 
+ ); + // log whether it's priv key or not using AttrView is_secret_key + if key.attr_view()?.is_secret_key() { + tracing::debug!("Key is a secret key"); + } else { + tracing::debug!("Key is a public key"); + } + + return Ok(key); + } + + // Generate a new key since we don't have it yet + let secret_key = Self::generate_key(codec)?; + let fingerprint = secret_key.fingerprint_view()?.fingerprint(Codec::Sha2256)?; + tracing::debug!( + "Generated new key for path {}: {:?}", + key_path, + fingerprint + ); + let public_key = secret_key.conv_view()?.to_public_key()?; + + // Store the secret key for future use + self.store_secret_key(key_path.clone(), secret_key)?; + + Ok(public_key) + } +} + +impl Signer for InMemoryKeyManager +where + E: From + From + Debug, +{ + type KeyPath = Key; + type Signature = Multisig; + type Error = E; +} + +impl EphemeralKey for InMemoryKeyManager +where + E: From + From + Debug, +{ + type PubKey = Multikey; +} + +impl SyncSigner for InMemoryKeyManager +where + E: From + + From + + From + + From + + Debug, +{ + fn try_sign( + &self, + key_path: &Self::KeyPath, + data: &[u8], + ) -> Result { + // Get the secret key corresponding to the provided path + let secret_key = self + .get_secret_key(key_path)? 
+ .ok_or(crate::Error::NoKeyPresent(key_path.clone()))?; + + let msg = data; + let combined = false; + let scheme = None; + + let signmk = secret_key.sign_view()?; + let signature = signmk.sign(msg, combined, scheme)?; + + let sig_bytes_raw: Vec = signature.clone().into(); + tracing::debug!( + "try_sign Signature created with {} bytes, first 4 bytes: {:02x?} ({:?} dec)", + sig_bytes_raw.len(), + &sig_bytes_raw[..4], + &sig_bytes_raw[..4] + ); + + Ok(signature) + } +} + +impl SyncPrepareEphemeralSigning for InMemoryKeyManager +where + E: From + + From + + From + + From + + Debug + + 'static, +{ + type Codec = Codec; + + fn prepare_ephemeral_signing( + &self, + codec: &Self::Codec, + threshold: NonZeroUsize, + limit: NonZeroUsize, + ) -> EphemeralSigningTuple< + ::PubKey, + ::Signature, + ::Error, + > { + let mut rng = rand_core_6::OsRng; + + // Generate the secret key + let secret_key = multikey::Builder::new_from_random_bytes(*codec, &mut rng)? + .with_threshold(threshold) + .with_limit(limit) + .try_build()?; + + // Get the public key + let public_key = secret_key.conv_view()?.to_public_key()?; + + // Create a FnOnce closure that owns the secret key and will be destroyed after use (on drop) + let sign_once = Box::new( + move |data: &[u8]| -> Result<::Signature, ::Error> { + let signature = secret_key.sign_view()?.sign(data, false, None)?; + Ok(signature) + }, + ); + + Ok((public_key, sign_once)) + } +} + +#[cfg(test)] +mod tests { + use std::num::NonZero; + + use super::*; + use bs::config::sync::{KeyManager, MultiSigner}; + use bs_traits::sync::SyncSigner; + use tracing_subscriber::fmt; + + fn init_logger() { + let subscriber = fmt().with_env_filter("trace").finish(); + if let Err(e) = tracing::subscriber::set_global_default(subscriber) { + tracing::warn!("failed to set subscriber: {}", e); + } + } + + // test fixture that fixes the Error type to bs::Error + fn test_default_error + MultiSigner>(_kp: KP) {} + + // can use default, Error is bs::Error + #[test] + fn
test_default_key_manager() { + // Create key manager with default error type + let key_manager = InMemoryKeyManager::default(); + test_default_error(key_manager); + } + + #[tokio::test] + async fn test_in_memory_key_manager() { + // Create key manager with auto-generated keys + let key_manager = InMemoryKeyManager::::new(); + + // Test a regular non-ephemeral key + let key_path = Key::try_from("/non/ephermal/key/path").unwrap(); + let test_mk = + multikey::Builder::new_from_random_bytes(Codec::Ed25519Priv, &mut rand_core_6::OsRng) + .unwrap() + .try_build() + .unwrap(); + + // add to Wallet + key_manager + .store_secret_key(key_path.clone(), test_mk.clone()) + .unwrap(); + + // can sign with stored key + let data = b"test data"; + let signature = key_manager.try_sign(&key_path, data).unwrap(); + let verify_result = test_mk + .verify_view() + .unwrap() + .verify(&signature, Some(data)); + assert!(verify_result.is_ok()); + } + + #[tokio::test] + async fn test_dynamic_key_generation() { + // Create key manager + let key_manager = InMemoryKeyManager::::new(); + + // Request a key with a custom path + let custom_path = Key::try_from("/custom/key/path").unwrap(); + + // First call to get_key generates a key pair and stores the secret key, + // but returns the public key + let public_key = key_manager + .get_key( + &custom_path, + &Codec::Ed25519Priv, + NonZero::new(1).unwrap(), + NonZero::new(1).unwrap(), + ) + .unwrap(); + + // Verify we got a public key + assert!(public_key.attr_view().unwrap().is_public_key()); + + // Get the key again - should be the same public key + let public_key2 = key_manager + .get_key( + &custom_path, + &Codec::Ed25519Priv, + NonZero::new(1).unwrap(), + NonZero::new(1).unwrap(), + ) + .unwrap(); + + assert!( + public_key.eq(&public_key2), + "The two retrieved public keys should be equal" + ); + + // Try signing with the key at the custom path + // This works because the secret key is stored internally in the key manager + let data = b"test 
custom key"; + let signature = key_manager.try_sign(&custom_path, data).unwrap(); + + // Verify signature with the public key we have + let verify_result = public_key + .verify_view() + .unwrap() + .verify(&signature, Some(data)); + assert!( + verify_result.is_ok(), + "Signature verification should succeed" + ); + } + + #[tokio::test] + async fn test_prepare_ephemeral_signing() { + init_logger(); + + tracing::info!("Starting test_prepare_ephemeral_signing"); + + let key_manager = InMemoryKeyManager::::new(); + let data = b"test ephemeral signing"; + + // Use get an ephemeral public key and a one-time signing function + let (public_key, sign_once) = key_manager + .prepare_ephemeral_signing( + &Codec::Ed25519Priv, + NonZero::new(1).unwrap(), + NonZero::new(1).unwrap(), + ) + .expect("Failed to prepare ephemeral signing"); + + // Verify that we got a public key + assert!(public_key.attr_view().unwrap().is_public_key()); + + // Sign the data with the one-time function + let signature = sign_once(data).expect("Failed to sign with ephemeral key"); + + // Create a new multikey for verification since we only have the public key + let verify_key = public_key.clone(); + + // Verify the signature + let verify_result = verify_key + .verify_view() + .unwrap() + .verify(&signature, Some(data)); + + assert!( + verify_result.is_ok(), + "Signature verification should succeed" + ); + + tracing::info!("Ephemeral signing test completed successfully"); + } +} diff --git a/crates/bs/Cargo.toml b/crates/bs/Cargo.toml index a575852..cb0cdf9 100644 --- a/crates/bs/Cargo.toml +++ b/crates/bs/Cargo.toml @@ -9,18 +9,29 @@ license = "Functional Source License 1.1" [dependencies] bs-traits.workspace = true +bs-wallets.workspace = true best-practices.workspace = true +comrade.workspace = true multicid.workspace = true multicodec.workspace = true multihash.workspace = true multikey.workspace = true multisig.workspace = true +multiutil.workspace = true provenance-log.workspace = true 
rand.workspace = true -serde_cbor.workspace = true thiserror.workspace = true tracing.workspace = true -wacc.workspace = true +serde = { workspace = true, optional = true } +bon = "3.6.3" + +[dev-dependencies] +tracing-subscriber.workspace = true +bs-wallets.workspace = true +multibase.workspace = true [lints] workspace = true + +[features] +serde = ["dep:serde"] diff --git a/crates/bs/src/config.rs b/crates/bs/src/config.rs new file mode 100644 index 0000000..9eedbb2 --- /dev/null +++ b/crates/bs/src/config.rs @@ -0,0 +1,18 @@ +//! Holds opinionated configuration about what concrete types should be used for the traits. +//! +//! Users can pick any concrete types that implement the traits, but this module provides +//! default implementations that can be used directly. + +/// Opinionated configuration for the async traits types +pub mod asynchronous; +/// Opinionated configuration for the sync traits types +pub mod sync; + +use crate::Error; +use bs_traits::{GetKey, Signer}; + +/// Re-export the types used in the traits +pub use multicodec::Codec; +pub use multikey::Multikey; +pub use multisig::Multisig; +pub use provenance_log::Key; diff --git a/crates/bs/src/config/asynchronous.rs b/crates/bs/src/config/asynchronous.rs new file mode 100644 index 0000000..f2f074e --- /dev/null +++ b/crates/bs/src/config/asynchronous.rs @@ -0,0 +1,36 @@ +use super::*; +use bs_traits::asyncro::{AsyncGetKey, AsyncSigner}; + +/// Supertrait for key management operations +pub trait KeyManager: + GetKey + + AsyncGetKey + + Send + + Sync + + 'static +{ +} + +/// Supertrait for signing operations +pub trait MultiSigner: + Signer + AsyncSigner + Send + Sync + 'static +{ +} + +impl KeyManager for T where + T: GetKey + + AsyncGetKey + + Send + + Sync + + 'static +{ +} + +impl MultiSigner for T where + T: Signer + + AsyncSigner + + Send + + Sync + + 'static +{ +} diff --git a/crates/bs/src/config/sync.rs b/crates/bs/src/config/sync.rs new file mode 100644 index 0000000..b68ef03 --- /dev/null 
+++ b/crates/bs/src/config/sync.rs @@ -0,0 +1,35 @@ +//! Sync alternatives to the asynchronous traits. +use bs_traits::sync::{SyncGetKey, SyncPrepareEphemeralSigning, SyncSigner}; +use bs_traits::EphemeralKey; + +use super::*; + +/// Supertrait for key management operations +pub trait KeyManager: + GetKey + SyncGetKey +{ +} + +impl KeyManager for T where + T: GetKey + SyncGetKey +{ +} + +/// Supertrait for signing operations +pub trait MultiSigner: + Signer + + SyncSigner + + EphemeralKey + + GetKey + + SyncPrepareEphemeralSigning +{ +} + +impl MultiSigner for T where + T: Signer + + SyncSigner + + EphemeralKey + + GetKey + + SyncPrepareEphemeralSigning +{ +} diff --git a/crates/bs/src/error.rs b/crates/bs/src/error.rs index fe72900..418418f 100644 --- a/crates/bs/src/error.rs +++ b/crates/bs/src/error.rs @@ -1,4 +1,10 @@ // SPDX-License-Identifier: FSL-1.1 +use multicid::Error as MulticidError; +use multihash::Error as MultihashError; +use multikey::Error as MultikeyError; +use provenance_log::Error as PlogError; +use std::fmt::Debug; + /// Errors generated from this crate #[derive(Debug, thiserror::Error)] #[non_exhaustive] @@ -39,9 +45,10 @@ pub enum Error { /// I/O error #[error(transparent)] Io(#[from] std::io::Error), - /// Serde CBOR error + + /// Wallets error #[error(transparent)] - SerdeCbor(#[from] serde_cbor::Error), + Wallets(#[from] bs_wallets::Error), } /// Open op errors @@ -103,3 +110,33 @@ pub enum UpdateError { #[error("No entry signing key")] NoSigningKey, } + +/// Trait alias for errors that can be used with BS operations +pub trait BsCompatibleError: + From + + From + + From + + From + + From + + From + + From + + From + + ToString + + Debug +{ +} + +// Blanket implementation for any type that satisfies the bounds +impl BsCompatibleError for T where + T: From + + From + + From + + From + + From + + From + + From + + From + + ToString + + Debug +{ +} diff --git a/crates/bs/src/lib.rs b/crates/bs/src/lib.rs index 4ccb6ea..818d3d2 100644 ---
a/crates/bs/src/lib.rs +++ b/crates/bs/src/lib.rs @@ -22,3 +22,9 @@ pub use ops::prelude::*; pub mod prelude { pub use super::*; } + +/// Opinionated configuration for the BetterSign library +pub mod config; + +/// Resolver extension for bettersign +pub mod resolver_ext; diff --git a/crates/bs/src/ops.rs b/crates/bs/src/ops.rs index 5db94e3..acd9a51 100644 --- a/crates/bs/src/ops.rs +++ b/crates/bs/src/ops.rs @@ -12,3 +12,6 @@ pub use update::{op, script, update_plog}; pub mod prelude { pub use super::*; } + +/// Parameters for building operations +pub mod params; diff --git a/crates/bs/src/ops/open.rs b/crates/bs/src/ops/open.rs index 3b0bc5b..ac6e904 100644 --- a/crates/bs/src/ops/open.rs +++ b/crates/bs/src/ops/open.rs @@ -2,188 +2,193 @@ /// Config for the open operation pub mod config; -pub use config::Config; +use std::num::NonZeroUsize; use crate::{ - error::OpenError, - update::{op, script, OpParams}, - Error, + error::{BsCompatibleError, OpenError}, + params::vlad::{FirstEntryKeyParams, VladParams}, + update::{op, OpParams}, }; -use bs_traits::{GetKey, Signer}; -use multicid::{cid, vlad, Cid}; +pub use config::Config; +use multicid::{cid, Cid, Vlad}; use multicodec::Codec; use multihash::mh; use multikey::{Multikey, Views}; -use multisig::Multisig; -use provenance_log::{entry, error::EntryError, Error as PlogError, Key, Log, OpId, Script}; -use std::{fs::read, path::Path}; +use provenance_log::{entry, error::EntryError, Error as PlogError, Key, Log, OpId}; use tracing::debug; -/// open a new provenance log based on the config -pub fn open_plog(config: Config, get_key: &G, sign_entry: &S) -> Result -where - G: GetKey, - S: Signer, -{ - // 0. Set up the list of ops we're going to add
+// +// To Open a Plog, the critical steps are: +// - First get the public key of the ephemeral first entry key +// - Add the public key of the ephemeral first entry key operation to `op_params` +// - Add ALL operations to the entry builder +// - Sign that operated entry using the ephemeral first entry key's one-time signing function +// - Finalize the Entry with the signature +// +// When the script runtime checks the first entry data (the Entry without the proof), against the +// first lock script, it will use the first entry key's public key to verify the signature. +pub fn open_plog( + config: &Config, + key_manager: &dyn crate::config::sync::KeyManager, + signer: &dyn crate::config::sync::MultiSigner, +) -> Result { + // 0. Set up the list of ops let mut op_params = Vec::default(); - // go through the additional ops and generate CIDs and keys and adding the resulting op params - // to the vec of op params + // Process initial operations config - .additional_ops + .additional_ops() .iter() - .try_for_each(|params| -> Result<(), Error> { + .try_for_each(|params| -> Result<(), E> { match params { p @ OpParams::KeyGen { .. } => { - let _ = load_key(&mut op_params, p, get_key)?; + let _ = load_key::(&mut op_params, p, key_manager)?; } p @ OpParams::CidGen { .. } => { - let _ = load_cid(&mut op_params, p, |path| -> Result, Error> { - Ok(read(path)?) - })?; + let _ = load_cid::(&mut op_params, p)?; } p => op_params.push(p.clone()), } Ok(()) })?; - // 1. Construct the VLAD from provided parameters - - // get the codec for the vlad signing key and cid - let (vlad_key_params, vlad_cid_params) = config - .vlad_params - .ok_or::(OpenError::InvalidVladParams.into())?; - // get the vlad signing key - let vlad_mk = load_key(&mut op_params, &vlad_key_params, get_key)?; - // get the cid for the first lock script - let mut first_lock_script: Option