diff --git a/Cargo.toml b/Cargo.toml index d505c1a0a..966a66ac0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "crates/esplora", "crates/bitcoind_rpc", "crates/testenv", + "crates/common", "examples/example_cli", "examples/example_electrum", "examples/example_esplora", diff --git a/ci/pin-msrv.sh b/ci/pin-msrv.sh index 17253eed1..b076b75da 100755 --- a/ci/pin-msrv.sh +++ b/ci/pin-msrv.sh @@ -29,6 +29,7 @@ cargo update -p base64ct --precise "1.6.0" cargo update -p minreq --precise "2.13.2" cargo update -p tracing-core --precise "0.1.33" cargo update -p webpki-roots@1.0.2 --precise "1.0.1" +cargo update -p tracing-attributes --precise "0.1.28" cargo update -p rayon --precise "1.10.0" cargo update -p rayon-core --precise "1.12.1" cargo update -p socket2@0.6.0 --precise "0.5.10" diff --git a/crates/bitcoind_rpc/Cargo.toml b/crates/bitcoind_rpc/Cargo.toml index bdece9800..3e6fd18be 100644 --- a/crates/bitcoind_rpc/Cargo.toml +++ b/crates/bitcoind_rpc/Cargo.toml @@ -19,6 +19,7 @@ workspace = true bitcoin = { version = "0.32.0", default-features = false } bitcoincore-rpc = { version = "0.19.0" } bdk_core = { path = "../core", version = "0.6.1", default-features = false } +bdk_common = { path = "../common" } [dev-dependencies] bdk_bitcoind_rpc = { path = "." } @@ -29,6 +30,7 @@ bdk_chain = { path = "../chain" } default = ["std"] std = ["bitcoin/std", "bdk_core/std"] serde = ["bitcoin/serde", "bdk_core/serde"] +log = ["bdk_common/log"] [[example]] name = "filter_iter" diff --git a/crates/bitcoind_rpc/src/lib.rs b/crates/bitcoind_rpc/src/lib.rs index eb58018e9..a6e84c88d 100644 --- a/crates/bitcoind_rpc/src/lib.rs +++ b/crates/bitcoind_rpc/src/lib.rs @@ -14,6 +14,10 @@ #[macro_use] extern crate alloc; +#[allow(unused_imports)] +#[macro_use] +extern crate bdk_common; + use alloc::sync::Arc; use bdk_core::collections::{HashMap, HashSet}; use bdk_core::{BlockId, CheckPoint}; @@ -124,6 +128,12 @@ where pub fn mempool_at(&mut self, sync_time: u64) -> Result { let client = &*self.client; + log_trace!( + start_height = self.start_height, + sync_time = sync_time, + "enter mempool_at" + ); + let mut rpc_tip_height; let mut rpc_tip_hash; let mut rpc_mempool; @@ -164,6 +174,13 @@ where ..Default::default() }; + log_trace!( + rpc_mempool_count = rpc_mempool_txids.len(), + rpc_height = rpc_tip_height, + rpc_block_hash = %rpc_tip_hash, + "fetched raw mempool" + ); + let at_tip = rpc_tip_height == self.last_cp.height() as u64 && rpc_tip_hash == self.last_cp.hash(); @@ -200,11 +217,21 @@ where /// Emit the next block height and block (if any). pub fn next_block(&mut self) -> Result>, bitcoincore_rpc::Error> { + log_trace!( + last_block_height = self.last_block.as_ref().map(|r| r.height), + "enter next_block" + ); + if let Some((checkpoint, block)) = poll(self, move |hash, client| client.get_block(hash))? { // Stop tracking unconfirmed transactions that have been confirmed in this block. 
for tx in &block.txdata { self.mempool_snapshot.remove(&tx.compute_txid()); } + log_trace!( + block_height = checkpoint.height(), + tx_count = block.txdata.len(), + "emit block" + ); return Ok(Some(BlockEvent { block, checkpoint })); } Ok(None) @@ -279,6 +306,12 @@ where C: Deref, C::Target: RpcApi, { + log_trace!( + last_block_height = emitter.last_block.as_ref().map(|r| r.height), + start_height = emitter.start_height, + "enter poll_once" + ); + let client = &*emitter.client; if let Some(last_res) = &emitter.last_block { @@ -287,21 +320,31 @@ where let next_hash = client.get_block_hash(emitter.start_height as _)?; // make sure last emission is still in best chain if client.get_block_hash(last_res.height as _)? != last_res.hash { + log_trace!("block not in best chain"); return Ok(PollResponse::BlockNotInBestChain); } next_hash } else { match last_res.nextblockhash { - None => return Ok(PollResponse::NoMoreBlocks), + None => { + log_trace!("no more blocks"); + return Ok(PollResponse::NoMoreBlocks); + } Some(next_hash) => next_hash, } }; let res = client.get_block_info(&next_hash)?; if res.confirmations < 0 { + log_trace!("block not in best chain"); return Ok(PollResponse::BlockNotInBestChain); } + log_trace!( + height = res.height, + hash = %res.hash, + "agreement found" + ); return Ok(PollResponse::Block(res)); } @@ -321,6 +364,11 @@ where }; // agreement point found + log_trace!( + "poll(): PollResponse::AgreementFound, height={}, hash={}", + res.height, + res.hash + ); return Ok(PollResponse::AgreementFound(res, cp)); } diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml new file mode 100644 index 000000000..9cced771e --- /dev/null +++ b/crates/common/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "bdk_common" +version = "0.1.0" +edition = "2021" +rust-version = "1.63" +homepage = "https://bitcoindevkit.org" +repository = "https://github.com/bitcoindevkit/bdk" +description = "Shared utilities and macros for BDK chain sources." +license = "MIT OR Apache-2.0" +readme = "README.md" +publish = false + +[dependencies] +tracing = { version = "0.1", optional = true } + +[features] +log = ["tracing"] diff --git a/crates/common/README.md b/crates/common/README.md new file mode 100644 index 000000000..4741dd769 --- /dev/null +++ b/crates/common/README.md @@ -0,0 +1,11 @@ +# BDK Common + +`bdk_common` is an **internal-only** crate providing zero-overhead, feature-gated logging macros for +all BDK chain-sync crates. Enabling the single `log` feature pulls in `tracing = "0.1"`. When it’s +off, the macros compile to no-ops and there’s no runtime or dependency impact. + +## Features + +- **`log`** (off by default) + – Re-exports `tracing` and enables logging. + – When disabled, `log_trace!`, `log_debug!`, `log_info!`, `log_warn!`, `log_error!`, and `log_span!` expand to nothing. diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs new file mode 100644 index 000000000..3ffd25cb0 --- /dev/null +++ b/crates/common/src/lib.rs @@ -0,0 +1,5 @@ +#[cfg(feature = "log")] +pub use tracing; + +#[macro_use] +mod log; diff --git a/crates/common/src/log.rs b/crates/common/src/log.rs new file mode 100644 index 000000000..433ce95ac --- /dev/null +++ b/crates/common/src/log.rs @@ -0,0 +1,99 @@ +/// Trace-level logging. Expands to `tracing::trace!()` when the `log` feature is enabled, +/// otherwise into a no-op. +#[cfg(feature = "log")] +#[macro_export] +macro_rules! 
log_trace { + ($($tt:tt)*) => { + $crate::tracing::trace!($($tt)*); + }; +} +#[cfg(not(feature = "log"))] +#[macro_export] +macro_rules! log_trace { + ($($tt:tt)*) => {}; +} + +/// Trace-level span. Expands to `tracing::trace_span!()` when the `log` feature is enabled, +/// otherwise to a dummy no-op span whose `.entered()` does nothing. +#[cfg(feature = "log")] +#[macro_export] +macro_rules! log_span { + ( $($tts:tt)* ) => { + $crate::tracing::trace_span!($($tts)*) + }; +} +#[cfg(not(feature = "log"))] +#[macro_export] +macro_rules! log_span { + ( $($tts:tt)* ) => {{ + struct NoopSpan; + impl NoopSpan { + #[allow(dead_code)] + pub fn entered(self) -> NoopEntered { + NoopEntered + } + } + struct NoopEntered; + NoopSpan + }}; +} + +/// Debug-level logging. Expands to `tracing::debug!()` when the `log` feature is enabled, +/// otherwise into a no-op. +#[cfg(feature = "log")] +#[macro_export] +macro_rules! log_debug { + ($($tt:tt)*) => { + $crate::tracing::debug!($($tt)*); + }; +} +#[cfg(not(feature = "log"))] +#[macro_export] +macro_rules! log_debug { + ($($tt:tt)*) => {}; +} + +/// Info-level logging. Expands to `tracing::info!()` when the `log` feature is enabled, +/// otherwise into a no-op. +#[cfg(feature = "log")] +#[macro_export] +macro_rules! log_info { + ($($tt:tt)*) => { + $crate::tracing::info!($($tt)*); + }; +} +#[cfg(not(feature = "log"))] +#[macro_export] +macro_rules! log_info { + ($($tt:tt)*) => {}; +} + +/// Warn-level logging. Expands to `tracing::warn!()` when the `log` feature is enabled, +/// otherwise into a no-op. +#[cfg(feature = "log")] +#[macro_export] +macro_rules! log_warn { + ($($tt:tt)*) => { + $crate::tracing::warn!($($tt)*); + }; +} +#[cfg(not(feature = "log"))] +#[macro_export] +macro_rules! log_warn { + ($($tt:tt)*) => {}; +} + +/// Error-level logging. Expands to `tracing::error!()` when the `log` feature is enabled, +/// otherwise into a no-op. +#[cfg(feature = "log")] +#[macro_export] +macro_rules! log_error { + ($($tt:tt)*) => { + $crate::tracing::error!($($tt)*); + }; +} +#[cfg(not(feature = "log"))] +#[macro_export] +macro_rules! 
log_error { + ($($tt:tt)*) => {}; +} diff --git a/crates/electrum/Cargo.toml b/crates/electrum/Cargo.toml index 7ad0d6c3d..43c446c4f 100644 --- a/crates/electrum/Cargo.toml +++ b/crates/electrum/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] bdk_core = { path = "../core", version = "0.6.1" } +bdk_common = { path = "../common" } electrum-client = { version = "0.24.0", features = [ "proxy" ], default-features = false } [dev-dependencies] @@ -26,6 +27,7 @@ default = ["use-rustls"] use-rustls = ["electrum-client/use-rustls"] use-rustls-ring = ["electrum-client/use-rustls-ring"] use-openssl = ["electrum-client/use-openssl"] +log = ["bdk_common/log"] [[test]] name = "test_electrum" diff --git a/crates/electrum/src/bdk_electrum_client.rs b/crates/electrum/src/bdk_electrum_client.rs index f5eee7a80..92e0ad059 100644 --- a/crates/electrum/src/bdk_electrum_client.rs +++ b/crates/electrum/src/bdk_electrum_client.rs @@ -72,11 +72,20 @@ impl BdkElectrumClient { let tx_cache = self.tx_cache.lock().unwrap(); if let Some(tx) = tx_cache.get(&txid) { + log_trace!( + event = "tx_cache_hit", + txid = %txid, + ); return Ok(Arc::clone(tx)); } drop(tx_cache); + log_trace!( + event = "tx_cache_miss", + txid = %txid, + ); + let tx = Arc::new(self.inner.transaction_get(&txid)?); self.tx_cache.lock().unwrap().insert(txid, Arc::clone(&tx)); @@ -121,6 +130,15 @@ impl BdkElectrumClient { let mut request: FullScanRequest = request.into(); let start_time = request.start_time(); + let _span = log_span!( + "bdk_electrum::full_scan", + event = "enter_full_scan", + stop_gap = stop_gap, + batch_size = batch_size, + fetch_prev = fetch_prev_txouts, + ) + .entered(); + let tip_and_latest_blocks = match request.chain_tip() { Some(chain_tip) => Some(fetch_tip_and_latest_blocks(&self.inner, chain_tip)?), None => None, @@ -204,6 +222,14 @@ impl BdkElectrumClient { let mut request: SyncRequest = request.into(); let start_time = request.start_time(); + let _span = log_span!( + "bdk_electrum::sync", + event = "enter_sync", + batch_size = batch_size, + fetch_prev = fetch_prev_txouts, + ) + .entered(); + let tip_and_latest_blocks = match request.chain_tip() { Some(chain_tip) => Some(fetch_tip_and_latest_blocks(&self.inner, chain_tip)?), None => None, @@ -283,10 +309,15 @@ impl BdkElectrumClient { let spks = (0..batch_size) .map_while(|_| spks_with_expected_txids.next()) .collect::>(); + log_trace!(event = "script_history_batch", batch_size = spks.len(),); + if spks.is_empty() { + log_trace!(event = "script_history_empty",); return Ok(last_active_index); } + log_trace!(event = "spk_has_history",); + let spk_histories = self .inner .batch_script_get_history(spks.iter().map(|(_, s)| s.spk.as_script()))?; @@ -295,7 +326,14 @@ impl BdkElectrumClient { if spk_history.is_empty() { match unused_spk_count.checked_add(1) { Some(i) if i < stop_gap => unused_spk_count = i, - _ => return Ok(last_active_index), + _ => { + log_trace!( + event = "gap_limit_reached", + unused_spk_count = unused_spk_count, + stop_gap = stop_gap, + ); + return Ok(last_active_index); + } }; } else { last_active_index = Some(spk_index); @@ -377,10 +415,19 @@ impl BdkElectrumClient { if !has_residing && res.tx_hash == outpoint.txid { has_residing = true; + log_trace!( + event = "outpoint_reside", + outpoint = %outpoint, + ); tx_update.txs.push(Arc::clone(&tx)); match res.height.try_into() { // Returned heights 0 & -1 are reserved for unconfirmed txs. 
Ok(height) if height > 0 => { + log_trace!( + event = "anchor_added_outpoint", + txid = %res.tx_hash, + height = height, + ); pending_anchors.push((res.tx_hash, height)); } _ => { @@ -396,13 +443,24 @@ impl BdkElectrumClient { .input .iter() .any(|txin| txin.previous_output == outpoint); - if !has_spending { + if has_spending { + log_trace!( + event = "outpoint_spent", + outpoint = %outpoint, + spending_txid = %res.tx_hash, + ); + } else { continue; } tx_update.txs.push(Arc::clone(&res_tx)); match res.height.try_into() { // Returned heights 0 & -1 are reserved for unconfirmed txs. Ok(height) if height > 0 => { + log_trace!( + event = "anchor_added_from_outpoint_resolution", + txid = %res.tx_hash, + height = height, + ); pending_anchors.push((res.tx_hash, height)); } _ => { @@ -437,9 +495,17 @@ impl BdkElectrumClient { .expect("tx must have an output") .clone(); txs.push((txid, tx)); + log_trace!( + event = "fetched_tx_for_confirmation", + txid = %txid, + ); scripts.push(spk); } Err(electrum_client::Error::Protocol(_)) => { + log_debug!( + event = "protocol_error", + txid = %txid, + ); continue; } Err(e) => return Err(e), @@ -457,6 +523,11 @@ impl BdkElectrumClient { match res.height.try_into() { // Returned heights 0 & -1 are reserved for unconfirmed txs. Ok(height) if height > 0 => { + log_trace!( + event = "anchor_candidate_txid_history", + txid = %res.tx_hash, + height, + ); pending_anchors.push((tx.0, height)); } _ => { @@ -516,8 +587,18 @@ impl BdkElectrumClient { let h = height as u32; let hash = height_to_hash[&h]; if let Some(anchor) = anchor_cache.get(&(txid, hash)) { + log_trace!( + event = "anchor_cache_hit", + txid = %txid, + height, + ); results.push((txid, *anchor)); } else { + log_trace!( + event = "anchor_cache_miss", + txid = %txid, + height, + ); to_fetch.push((txid, height)); } } @@ -538,6 +619,11 @@ impl BdkElectrumClient { let mut valid = electrum_client::utils::validate_merkle_proof(&txid, &header.merkle_root, &proof); if !valid { + log_debug!( + event = "merkle_validation_failed", + txid = %txid, + height, + ); header = self.inner.block_header(height)?; self.block_header_cache .lock() @@ -548,6 +634,13 @@ impl BdkElectrumClient { &header.merkle_root, &proof, ); + if valid { + log_trace!( + event = "merkle_validated_retry", + txid = %txid, + height, + ); + } } // Build and cache the anchor if merkle proof is valid. 
@@ -564,6 +657,12 @@ impl BdkElectrumClient { .lock() .unwrap() .insert((txid, hash), anchor); + log_trace!( + event = "anchor_inserted", + txid = %txid, + height , + hash = %hash, + ); results.push((txid, anchor)); } } diff --git a/crates/electrum/src/lib.rs b/crates/electrum/src/lib.rs index 9c1d9f452..96c6453a3 100644 --- a/crates/electrum/src/lib.rs +++ b/crates/electrum/src/lib.rs @@ -19,6 +19,10 @@ #![cfg_attr(coverage_nightly, feature(coverage_attribute))] #![warn(missing_docs)] +#[allow(unused_imports)] +#[macro_use] +extern crate bdk_common; + mod bdk_electrum_client; pub use bdk_electrum_client::*; diff --git a/crates/esplora/Cargo.toml b/crates/esplora/Cargo.toml index e4c553f77..d5e4bf259 100644 --- a/crates/esplora/Cargo.toml +++ b/crates/esplora/Cargo.toml @@ -16,6 +16,7 @@ workspace = true [dependencies] bdk_core = { path = "../core", version = "0.6.1", default-features = false } +bdk_common = { path = "../common" } esplora-client = { version = "0.12.1", default-features = false } async-trait = { version = "0.1.66", optional = true } futures = { version = "0.3.26", optional = true } @@ -38,6 +39,7 @@ blocking = ["esplora-client/blocking"] blocking-https = ["blocking", "esplora-client/blocking-https"] blocking-https-rustls = ["blocking", "esplora-client/blocking-https-rustls"] blocking-https-native = ["blocking", "esplora-client/blocking-https-native"] +log = ["bdk_common/log"] [[test]] name = "blocking_ext" diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs index c0e55ab50..4a99064d7 100644 --- a/crates/esplora/src/async_ext.rs +++ b/crates/esplora/src/async_ext.rs @@ -68,6 +68,8 @@ where let start_time = request.start_time(); let keychains = request.keychains(); + log_trace!(stop_gap, parallel_requests, "enter full_scan"); + let chain_tip = request.chain_tip(); let latest_blocks = if chain_tip.is_some() { Some(fetch_latest_blocks(self).await?) @@ -119,6 +121,8 @@ where let mut request: SyncRequest = request.into(); let start_time = request.start_time(); + log_trace!(parallel_requests, "enter sync"); + let chain_tip = request.chain_tip(); let latest_blocks = if chain_tip.is_some() { Some(fetch_latest_blocks(self).await?) @@ -183,6 +187,8 @@ where async fn fetch_latest_blocks( client: &esplora_client::AsyncClient, ) -> Result, Error> { + log_trace!("fetch_latest_blocks()"); + Ok(client .get_blocks(None) .await? 
@@ -199,6 +205,8 @@ async fn fetch_block( latest_blocks: &BTreeMap, height: u32, ) -> Result, Error> { + log_trace!(height, "fetch_block()"); + if let Some(&hash) = latest_blocks.get(&height) { return Ok(Some(hash)); } @@ -230,6 +238,8 @@ async fn chain_update( local_tip: &CheckPoint, anchors: &BTreeSet<(ConfirmationBlockTime, Txid)>, ) -> Result { + log_trace!("chain_update()"); + let mut point_of_agreement = None; let mut local_cp_hash = local_tip.hash(); let mut conflicts = vec![]; @@ -311,6 +321,13 @@ where I: Iterator> + Send, S: Sleeper + Clone + Send + Sync, { + log_trace!( + start_time, + stop_gap, + parallel_requests, + "fetch_txs_with_keychain_spks" + ); + type TxsOfSpkIndex = (u32, Vec, HashSet); let mut update = TxUpdate::::default(); @@ -402,6 +419,8 @@ where I::IntoIter: Send, S: Sleeper + Clone + Send + Sync, { + log_trace!(start_time, parallel_requests, "fetch_txs_with_spks"); + fetch_txs_with_keychain_spks( client, start_time, @@ -432,6 +451,8 @@ where I::IntoIter: Send, S: Sleeper + Clone + Send + Sync, { + log_trace!(start_time, parallel_requests, "fetch_txs_with_txids"); + let mut update = TxUpdate::::default(); // Only fetch for non-inserted txs. let mut txids = txids @@ -484,6 +505,8 @@ where I::IntoIter: Send, S: Sleeper + Clone + Send + Sync, { + log_trace!(start_time, parallel_requests, "fetch_txs_with_outpoints"); + let outpoints = outpoints.into_iter().collect::>(); let mut update = TxUpdate::::default(); diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs index 5a52b7a09..aac39434a 100644 --- a/crates/esplora/src/blocking_ext.rs +++ b/crates/esplora/src/blocking_ext.rs @@ -58,6 +58,8 @@ impl EsploraExt for esplora_client::BlockingClient { let mut request: FullScanRequest = request.into(); let start_time = request.start_time(); + log_trace!(stop_gap, parallel_requests, "enter full_scan"); + let chain_tip = request.chain_tip(); let latest_blocks = if chain_tip.is_some() { Some(fetch_latest_blocks(self)?) @@ -111,6 +113,8 @@ impl EsploraExt for esplora_client::BlockingClient { let mut request: SyncRequest = request.into(); let start_time = request.start_time(); + log_trace!(parallel_requests, "enter sync"); + let chain_tip = request.chain_tip(); let latest_blocks = if chain_tip.is_some() { Some(fetch_latest_blocks(self)?) @@ -169,6 +173,8 @@ impl EsploraExt for esplora_client::BlockingClient { fn fetch_latest_blocks( client: &esplora_client::BlockingClient, ) -> Result, Error> { + log_trace!("fetch_latest_blocks()"); + Ok(client .get_blocks(None)? 
.into_iter() @@ -184,6 +190,8 @@ fn fetch_block( latest_blocks: &BTreeMap, height: u32, ) -> Result, Error> { + log_trace!(height, "fetch_block()"); + if let Some(&hash) = latest_blocks.get(&height) { return Ok(Some(hash)); } @@ -215,6 +223,8 @@ fn chain_update( local_tip: &CheckPoint, anchors: &BTreeSet<(ConfirmationBlockTime, Txid)>, ) -> Result { + log_trace!("chain_update()"); + let mut point_of_agreement = None; let mut local_cp_hash = local_tip.hash(); let mut conflicts = vec![]; @@ -279,6 +289,13 @@ fn fetch_txs_with_keychain_spks stop_gap: usize, parallel_requests: usize, ) -> Result<(TxUpdate, Option), Error> { + log_trace!( + start_time, + stop_gap, + parallel_requests, + "fetch_txs_with_keychain_spks" + ); + type TxsOfSpkIndex = (u32, Vec, HashSet); let mut update = TxUpdate::::default(); @@ -366,6 +383,8 @@ fn fetch_txs_with_spks>( spks: I, parallel_requests: usize, ) -> Result, Error> { + log_trace!(start_time, parallel_requests, "fetch_txs_with_spks"); + fetch_txs_with_keychain_spks( client, start_time, @@ -390,6 +409,8 @@ fn fetch_txs_with_txids>( txids: I, parallel_requests: usize, ) -> Result, Error> { + log_trace!(start_time, parallel_requests, "fetch_txs_with_txids"); + let mut update = TxUpdate::::default(); // Only fetch for non-inserted txs. let mut txids = txids @@ -443,6 +464,8 @@ fn fetch_txs_with_outpoints>( outpoints: I, parallel_requests: usize, ) -> Result, Error> { + log_trace!(start_time, parallel_requests, "fetch_txs_with_outpoints"); + let outpoints = outpoints.into_iter().collect::>(); let mut update = TxUpdate::::default(); diff --git a/crates/esplora/src/lib.rs b/crates/esplora/src/lib.rs index 60b4f1eb3..1ccb14e49 100644 --- a/crates/esplora/src/lib.rs +++ b/crates/esplora/src/lib.rs @@ -27,6 +27,10 @@ use esplora_client::TxStatus; pub use esplora_client; +#[allow(unused_imports)] +#[macro_use] +extern crate bdk_common; + #[cfg(feature = "blocking")] mod blocking_ext; #[cfg(feature = "blocking")] diff --git a/justfile b/justfile index b5ca94d16..76a31e57f 100644 --- a/justfile +++ b/justfile @@ -33,6 +33,7 @@ test: @just _test-esplora @just _test-file_store @just _test-testenv + @just _test-common _test-bitcoind_rpc: cargo test -p bdk_bitcoind_rpc --all-features @@ -55,5 +56,8 @@ _test-file_store: _test-testenv: cargo test -p bdk_testenv --all-features +_test-common: + cargo test -p bdk_common --all-features + # Run pre-push suite: format, check, and test pre-push: fmt check test
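
Usage note (not part of the diff above): the new logging is strictly opt-in. Below is a minimal sketch of how a downstream binary might surface the new output, assuming the chain-source crate is built with its `log` feature (e.g. `bdk_electrum = { path = "...", features = ["log"] }`) and that the `tracing` and `tracing-subscriber` crates are added as dependencies of that binary; none of this is added by the PR itself.

    fn main() {
        // Install a global `tracing` subscriber at TRACE level so the events and spans
        // emitted via `log_trace!` / `log_span!` in the chain-source crates are printed.
        tracing_subscriber::fmt()
            .with_max_level(tracing::Level::TRACE)
            .init();

        // ...construct a BdkElectrumClient / Emitter and run full_scan / sync as usual.
        // Spans such as `bdk_electrum::full_scan` and events like `tx_cache_hit`
        // will then show up in the subscriber's output.
    }

When the `log` feature is left off (the default), the `log_*` macros expand to nothing, so downstream builds pick up no `tracing` dependency and no runtime cost.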