//! This crate is used for emitting blockchain data from the `bitcoind` RPC interface. It does not
//! use the wallet RPC API, so this crate can be used with wallet-disabled Bitcoin Core nodes.
//!
-//! [`Emitter`] is the main structure which sources blockchain data from [`bitcoincore_rpc::Client`].
+//! [`Emitter`] is the main structure which sources blockchain data from
+//! [`bitcoincore_rpc::Client`].
//!
//! To only get block updates (exclude mempool transactions), the caller can use
//! [`Emitter::next_block`] until it returns `Ok(None)` (which means the chain tip is reached). A
/// A set of txids currently assumed to still be in the mempool.
///
/// This is used to detect mempool evictions by comparing the set against the latest mempool
- /// snapshot from bitcoind. Any txid in this set that is missing from the snapshot is considered
- /// evicted.
+ /// snapshot from bitcoind. Any txid in this set that is missing from the snapshot is
+ /// considered evicted.
///
/// When the emitter emits a block, confirmed txids are removed from this set. This prevents
/// confirmed transactions from being mistakenly marked with an `evicted_at` timestamp.
// Loop to make sure that the fetched mempool content and the fetched tip are consistent
// with one another.
let (raw_mempool, raw_mempool_txids, rpc_height, rpc_block_hash) = loop {
- // Determine if height and hash matches the best block from the RPC. Evictions are deferred
- // if we are not at the best block.
+ // Determine if height and hash matches the best block from the RPC. Evictions are
+ // deferred if we are not at the best block.
let height = client.get_block_count()?;
let hash = client.get_block_hash(height)?;
///
/// To understand the second condition, consider a receiver which filters transactions based on
/// whether it alters the UTXO set of tracked script pubkeys. If an emitted mempool transaction
- /// spends a tracked UTXO which is confirmed at height `h`, but the receiver has only seen up to
- /// block of height `h-1`, we want to re-emit this transaction until the receiver has seen the
- /// block at height `h`.
+ /// spends a tracked UTXO which is confirmed at height `h`, but the receiver has only seen up
+ /// to the block at height `h-1`, we want to re-emit this transaction until the receiver has
+ /// seen the block at height `h`.
pub new_txs: Vec<(Transaction, u64)>,
/// [`Txid`]s of all transactions that have been evicted from mempool.
/// The checkpoint of the new block.
///
/// A [`CheckPoint`] is a node of a linked list of [`BlockId`]s. This checkpoint is linked to
- /// all [`BlockId`]s originally passed in [`Emitter::new`] as well as emitted blocks since then.
- /// These blocks are guaranteed to be of the same chain.
+ /// all [`BlockId`]s originally passed in [`Emitter::new`] as well as emitted blocks since
+ /// then. These blocks are guaranteed to be of the same chain.
///
/// This is important as BDK structures require block-to-apply to be connected with another
/// block in the original chain.
/// Apply an `update` directly.
///
- /// `update` is a [`tx_graph::TxUpdate<A>`] and the resultant changes is returned as [`ChangeSet`].
+ /// `update` is a [`tx_graph::TxUpdate<A>`] and the resultant changes is returned as
+ /// [`ChangeSet`].
pub fn apply_update(&mut self, update: tx_graph::TxUpdate<A>) -> ChangeSet<A, I::ChangeSet> {
let tx_graph = self.graph.apply_update(update);
let indexer = self.index_tx_graph_changeset(&tx_graph);
{
/// List txids that are expected to exist under the given spks.
///
- /// This is used to fill [`SyncRequestBuilder::expected_spk_txids`](bdk_core::spk_client::SyncRequestBuilder::expected_spk_txids).
+ /// This is used to fill
+ /// [`SyncRequestBuilder::expected_spk_txids`](bdk_core::spk_client::SyncRequestBuilder::expected_spk_txids).
///
/// The spk index range can be constrained with `range`.
///
///
/// # Change sets
///
-/// Methods that can update the last revealed index or add keychains will return [`ChangeSet`] to report
-/// these changes. This should be persisted for future recovery.
+/// Methods that can update the last revealed index or add keychains will return [`ChangeSet`] to
+/// report these changes. This should be persisted for future recovery.
///
/// ## Synopsis
///
.map(|((_, i), spk)| (*i, spk))
}
- /// Get the next derivation index for `keychain`. The next index is the index after the last revealed
- /// derivation index.
+ /// Get the next derivation index for `keychain`. The next index is the index after the last
+ /// revealed derivation index.
///
/// The second field in the returned tuple represents whether the next derivation index is new.
/// There are two scenarios where the next derivation index is reused (not new):
/// This will derive and reveal a new script pubkey if no more unused script pubkeys exist.
///
/// If the descriptor has no wildcard and already has a used script pubkey or if a descriptor
- /// has used all scripts up to the derivation bounds, then the last derived script pubkey will be
- /// returned.
+ /// has used all scripts up to the derivation bounds, then the last derived script pubkey will
+ /// be returned.
///
/// Returns `None` if there are no script pubkeys that have been used and no new script pubkey
/// could be revealed (see [`reveal_next_spk`] for when this happens).
-//! [`SpkTxOutIndex`] is an index storing [`TxOut`]s that have a script pubkey that matches those in a list.
+//! [`SpkTxOutIndex`] is an index storing [`TxOut`]s that have a script pubkey that matches those in
+//! a list.
use core::ops::RangeBounds;
/// index will look at any txouts you pass in and store and index any txouts matching one of its
/// script pubkeys.
///
-/// Each script pubkey is associated with an application-defined index script index `I`, which must be
-/// [`Ord`]. Usually, this is used to associate the derivation index of the script pubkey or even a
-/// combination of `(keychain, derivation_index)`.
+/// Each script pubkey is associated with an application-defined script index `I`, which must
+/// be [`Ord`]. Usually, this is used to associate the derivation index of the script pubkey or even
+/// a combination of `(keychain, derivation_index)`.
///
/// Note there is no harm in scanning transactions that disappear from the blockchain or were never
/// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or
-/// modify txouts that have been indexed. To find out which txouts from the index are actually in the
-/// chain or unspent, you must use other sources of information like a [`TxGraph`].
+/// modify txouts that have been indexed. To find out which txouts from the index are actually in
+/// the chain or unspent, you must use other sources of information like a [`TxGraph`].
///
/// [`TxOut`]: bitcoin::TxOut
/// [`insert_spk`]: Self::insert_spk
///
/// Typically, this is used in two situations:
///
- /// 1. After loading transaction data from the disk, you may scan over all the txouts to restore all
- /// your txouts.
- /// 2. When getting new data from the chain, you usually scan it before incorporating it into your chain state.
+ /// 1. After loading transaction data from the disk, you may scan over all the txouts to restore
+ /// all your txouts.
+ /// 2. When getting new data from the chain, you usually scan it before incorporating it into
+ /// your chain state.
pub fn scan(&mut self, tx: &Transaction) -> BTreeSet<I> {
let mut scanned_indices = BTreeSet::new();
let txid = tx.compute_txid();
&self.spks
}
- /// Adds a script pubkey to scan for. Returns `false` and does nothing if spk already exists in the map
+ /// Adds a script pubkey to scan for. Returns `false` and does nothing if spk already exists in
+ /// the map
///
/// the index will look for outputs spending to this spk whenever it scans new data.
pub fn insert_spk(&mut self, index: I, spk: ScriptBuf) -> bool {
!self.unused.contains(index)
}
- /// Marks the script pubkey at `index` as used even though it hasn't seen an output spending to it.
- /// This only affects when the `index` had already been added to `self` and was unused.
+ /// Marks the script pubkey at `index` as used even though it hasn't seen an output spending to
+ /// it. This only affects when the `index` had already been added to `self` and was unused.
///
/// Returns whether the `index` was initially present as `unused`.
///
/// This is useful when you want to reserve a script pubkey for something but don't want to add
- /// the transaction output using it to the index yet. Other callers will consider the `index` used
- /// until you call [`unmark_used`].
+ /// the transaction output using it to the index yet. Other callers will consider the `index`
+ /// used until you call [`unmark_used`].
///
/// [`unmark_used`]: Self::unmark_used
pub fn mark_used(&mut self, index: &I) -> bool {
/// matches one of our script pubkeys.
///
/// It is easily possible to misuse this method and get false negatives by calling it before you
- /// have scanned the `TxOut`s the transaction is spending. For example, if you want to filter out
- /// all the transactions in a block that are irrelevant, you **must first scan all the
+ /// have scanned the `TxOut`s the transaction is spending. For example, if you want to filter
+ /// out all the transactions in a block that are irrelevant, you **must first scan all the
/// transactions in the block** and only then use this method.
pub fn is_relevant(&self, tx: &Transaction) -> bool {
let input_matches = tx
//! Our design goals for these mechanisms are:
//!
//! 1. Data source agnostic -- nothing in `bdk_chain` cares about where you get data from or whether
-//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just
-//! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done
-//! consistently.
-//! 2. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
-//! cache or how you retrieve it from persistent storage.
+//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can
+//! just tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be
+//! done consistently.
+//! 2. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what
+//! you cache or how you retrieve it from persistent storage.
//!
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/
Ok(changeset)
}
- /// Update the chain with a given [`Header`] at `height` which you claim is connected to a existing block in the chain.
+ /// Update the chain with a given [`Header`] at `height` which you claim is connected to an
+ /// existing block in the chain.
///
/// This is useful when you have a block header that you want to record as part of the chain but
/// don't necessarily know that the `prev_blockhash` is in the chain.
pub struct ChangeSet {
/// Changes to the [`LocalChain`] blocks.
///
- /// The key represents the block height, and the value either represents added a new [`CheckPoint`]
- /// (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]).
+ /// The key represents the block height, and the value either represents adding a new
+ /// [`CheckPoint`] (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]).
pub blocks: BTreeMap<u32, Option<BlockHash>>,
}
let mut is_update_height_superset_of_original = true;
// To find the difference between the new chain and the original we iterate over both of them
- // from the tip backwards in tandem. We are always dealing with the highest one from either chain
- // first and move to the next highest. The crucial logic is applied when they have blocks at the
- // same height.
+ // from the tip backwards in tandem. We are always dealing with the highest one from either
+ // chain first and move to the next highest. The crucial logic is applied when they have
+ // blocks at the same height.
loop {
if curr_orig.is_none() {
curr_orig = orig.next();
if o.hash() == u.hash() {
// We have found our point of agreement 🎉 -- we require that the previous (i.e.
// higher because we are iterating backwards) block in the original chain was
- // invalidated (if it exists). This ensures that there is an unambiguous point of
- // connection to the original chain from the update chain (i.e. we know the
- // precisely which original blocks are invalid).
+ // invalidated (if it exists). This ensures that there is an unambiguous point
+ // of connection to the original chain from the update chain (i.e. we know
+ // precisely which original blocks are invalid).
if !prev_orig_was_invalidated && !point_of_agreement_found {
if let (Some(prev_orig), Some(_prev_update)) = (&prev_orig, &prev_update) {
return Err(CannotConnectError {
/// An iterator for derived script pubkeys.
///
/// [`SpkIterator`] is an implementation of the [`Iterator`] trait which possesses its own `next()`
-/// and `nth()` functions, both of which circumvent the unnecessary intermediate derivations required
-/// when using their default implementations.
+/// and `nth()` functions, both of which circumvent the unnecessary intermediate derivations
+/// required when using their default implementations.
///
/// ## Examples
///
type Item = Indexed<ScriptBuf>;
fn next(&mut self) -> Option<Self::Item> {
- // For non-wildcard descriptors, we expect the first element to be Some((0, spk)), then None after.
- // For wildcard descriptors, we expect it to keep iterating until exhausted.
+ // For non-wildcard descriptors, we expect the first element to be Some((0, spk)), then None
+ // after. For wildcard descriptors, we expect it to keep iterating until exhausted.
if self.next_index >= self.end {
return None;
}
//! anchored in chain of `chain_tip`, then the transaction must be canonical.
//! * `last_seen` - This is the timestamp of when a transaction is last-seen in the mempool. This
//! value is updated by [`insert_seen_at`](TxGraph::insert_seen_at) and
-//! [`apply_update`](TxGraph::apply_update). Transactions that are seen later have higher
-//! priority than those that are seen earlier. `last_seen` values are transitive. This means
-//! that the actual `last_seen` value of a transaction is the max of all the `last_seen` values
-//! from it's descendants.
+//! [`apply_update`](TxGraph::apply_update). Transactions that are seen later have higher priority
+//! than those that are seen earlier. `last_seen` values are transitive. This means that the
+//! actual `last_seen` value of a transaction is the max of all the `last_seen` values from its
+//! descendants.
//! * `last_evicted` - This is the timestamp of when a transaction last went missing from the
-//! mempool. If this value is equal to or higher than the transaction's `last_seen` value, then
-//! it will not be considered canonical.
+//! mempool. If this value is equal to or higher than the transaction's `last_seen` value, then it
+//! will not be considered canonical.
//!
//! # Graph traversal
//!
})
}
- /// Calculates the fee of a given transaction. Returns [`Amount::ZERO`] if `tx` is a coinbase transaction.
- /// Returns `OK(_)` if we have all the [`TxOut`]s being spent by `tx` in the graph (either as
- /// the full transactions or individual txouts).
+ /// Calculates the fee of a given transaction. Returns [`Amount::ZERO`] if `tx` is a coinbase
+ /// transaction. Returns `Ok(_)` if we have all the [`TxOut`]s being spent by `tx` in the
+ /// graph (either as the full transactions or individual txouts).
///
/// To calculate the fee for a [`Transaction`] that depends on foreign [`TxOut`] values you must
- /// first manually insert the foreign TxOuts into the tx graph using the [`insert_txout`] function.
- /// Only insert TxOuts you trust the values for!
+ /// first manually insert the foreign TxOuts into the tx graph using the [`insert_txout`]
+ /// function. Only insert TxOuts you trust the values for!
///
/// Note `tx` does not have to be in the graph for this to work.
///
///
/// The supplied closure takes in two inputs `(depth, descendant_txid)`:
///
- /// * `depth` is the distance between the starting `txid` and the `descendant_txid`. I.e., if the
- /// descendant is spending an output of the starting `txid` then `depth` will be 1.
+ /// * `depth` is the distance between the starting `txid` and the `descendant_txid`. I.e., if
+ /// the descendant is spending an output of the starting `txid` then `depth` will be 1.
/// * `descendant_txid` is the descendant's txid which we are considering to walk.
///
/// The supplied closure returns an `Option<T>`, allowing the caller to map each node it visits
/// Updates the first-seen and last-seen timestamps for a given `txid` in the [`TxGraph`].
///
/// This method records the time a transaction was observed by updating both:
- /// - the **first-seen** timestamp, which only changes if `seen_at` is earlier than the current value, and
- /// - the **last-seen** timestamp, which only changes if `seen_at` is later than the current value.
+ /// - the **first-seen** timestamp, which only changes if `seen_at` is earlier than the current
+ /// value, and
+ /// - the **last-seen** timestamp, which only changes if `seen_at` is later than the current
+ /// value.
///
/// `seen_at` is a UNIX timestamp in seconds.
///
/// Extends this graph with the given `update`.
///
- /// The returned [`ChangeSet`] is the set difference between `update` and `self` (transactions that
- /// exist in `update` but not in `self`).
+ /// The returned [`ChangeSet`] is the set difference between `update` and `self` (transactions
+ /// that exist in `update` but not in `self`).
pub fn apply_update(&mut self, update: TxUpdate<A>) -> ChangeSet<A> {
let mut changeset = ChangeSet::<A>::default();
for tx in update.txs {
/// List txids that are expected to exist under the given spks.
///
- /// This is used to fill [`SyncRequestBuilder::expected_spk_txids`](bdk_core::spk_client::SyncRequestBuilder::expected_spk_txids).
+ /// This is used to fill
+ /// [`SyncRequestBuilder::expected_spk_txids`](bdk_core::spk_client::SyncRequestBuilder::expected_spk_txids).
///
/// The spk index range can be constrained with `range`.
///
{
let canonical_positions = build_canonical_positions(&local_chain, &graph);
- // Because this tx conflicts with an already confirmed transaction, chain position should return none.
+ // Because this tx conflicts with an already confirmed transaction, chain position should
+ // return none.
assert!(canonical_positions
.get(&tx_1_conflict.compute_txid())
.is_none());
}
#[test]
-/// The `map_anchors` allow a caller to pass a function to reconstruct the [`TxGraph`] with any [`Anchor`],
-/// even though the function is non-deterministic.
+/// The `map_anchors` allows a caller to pass a function to reconstruct the [`TxGraph`] with any
+/// [`Anchor`], even though the function is non-deterministic.
fn call_map_anchors_with_non_deterministic_anchor() {
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
/// A non-deterministic anchor
/// Puts another checkpoint onto the linked list representing the blockchain.
///
- /// Returns an `Err(self)` if the block you are pushing on is not at a greater height that the one you
- /// are pushing on to.
+ /// Returns an `Err(self)` if the block you are pushing on is not at a greater height than the
+ /// one you are pushing on to.
pub fn push(self, block: BlockId) -> Result<Self, Self> {
if self.height() < block.height {
Ok(Self(Arc::new(CPInner {
/// # Example
///
/// Sync revealed script pubkeys obtained from a
- /// [`KeychainTxOutIndex`](../../bdk_chain/indexer/keychain_txout/struct.KeychainTxOutIndex.html).
+ /// [`KeychainTxOutIndex`](../../bdk_chain/indexer/keychain_txout/struct.KeychainTxOutIndex.html).
///
/// ```rust
/// # use bdk_chain::spk_client::SyncRequest;
pub txs: Vec<Arc<Transaction>>,
/// Floating txouts. These are `TxOut`s that exist but the whole transaction wasn't included in
- /// `txs` since only knowing about the output is important. These are often used to help determine
- /// the fee of a wallet transaction.
+ /// `txs` since only knowing about the output is important. These are often used to help
+ /// determine the fee of a wallet transaction.
pub txouts: BTreeMap<OutPoint, TxOut>,
/// Transaction anchors. Anchors tells us a position in the chain where a transaction was
/// associated transactions.
/// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch
/// request.
- /// - `fetch_prev_txouts`: specifies whether we want previous `TxOut`s for fee calculation.
- /// Note that this requires additional calls to the Electrum server, but is necessary for
+ /// - `fetch_prev_txouts`: specifies whether we want previous `TxOut`s for fee calculation. Note
+ /// that this requires additional calls to the Electrum server, but is necessary for
/// calculating the fee on a transaction if your wallet does not own the inputs. Methods like
/// [`Wallet.calculate_fee`] and [`Wallet.calculate_fee_rate`] will return a
/// [`CalculateFeeError::MissingTxOut`] error if those `TxOut`s are not present in the
/// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified
/// and returns updates for [`bdk_chain`] data structures.
///
- /// - `request`: struct with data required to perform a spk-based blockchain client sync,
- /// see [`SyncRequest`]
+ /// - `request`: struct with data required to perform a spk-based blockchain client sync, see
+ /// [`SyncRequest`]
/// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch
/// request
- /// - `fetch_prev_txouts`: specifies whether we want previous `TxOut`s for fee calculation.
- /// Note that this requires additional calls to the Electrum server, but is necessary for
+ /// - `fetch_prev_txouts`: specifies whether we want previous `TxOut`s for fee calculation. Note
+ /// that this requires additional calls to the Electrum server, but is necessary for
/// calculating the fee on a transaction if your wallet does not own the inputs. Methods like
/// [`Wallet.calculate_fee`] and [`Wallet.calculate_fee_rate`] will return a
/// [`CalculateFeeError::MissingTxOut`] error if those `TxOut`s are not present in the
/// Populate the `tx_update` with associated transactions/anchors of `outpoints`.
///
- /// Transactions in which the outpoint resides, and transactions that spend from the outpoint are
- /// included. Anchors of the aforementioned transactions are included.
+ /// Transactions in which the outpoint resides, and transactions that spend from the outpoint
+ /// are included. Anchors of the aforementioned transactions are included.
fn populate_with_outpoints(
&self,
start_time: u64,
Ok(())
}
- // Helper function which fetches the `TxOut`s of our relevant transactions' previous transactions,
- // which we do not have by default. This data is needed to calculate the transaction fee.
+ // Helper function which fetches the `TxOut`s of our relevant transactions' previous
+ // transactions, which we do not have by default. This data is needed to calculate the
+ // transaction fee.
fn fetch_prev_txout(
&self,
tx_update: &mut TxUpdate<ConfirmationBlockTime>,
}};
}
- /// Ensure that update does not remove heights (from original), and all anchor heights are included.
+ /// Ensure that update does not remove heights (from original), and all anchor heights are
+ /// included.
#[tokio::test]
pub async fn test_finalize_chain_update() -> anyhow::Result<()> {
struct TestCase<'a> {
initial_cps: &'a [u32],
/// The final blockchain height of the env.
final_env_height: u32,
- /// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
- /// the blockhash from the env.
+ /// The anchors to test with: `(height, txid)`. Only the height is provided as we can
+ /// fetch the blockhash from the env.
anchors: &'a [(u32, Txid)],
}
}};
}
- /// Ensure that update does not remove heights (from original), and all anchor heights are included.
+ /// Ensure that update does not remove heights (from original), and all anchor heights are
+ /// included.
#[test]
pub fn test_finalize_chain_update() -> anyhow::Result<()> {
struct TestCase<'a> {
initial_cps: &'a [u32],
/// The final blockchain height of the env.
final_env_height: u32,
- /// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
- /// the blockhash from the env.
+ /// The anchors to test with: `(height, txid)`. Only the height is provided as we can
+ /// fetch the blockhash from the env.
anchors: &'a [(u32, Txid)],
}
/// # Errors
///
/// If the prefixed bytes of the loaded file do not match the provided `magic`, a
- /// [`StoreErrorWithDump`] will be returned with the [`StoreError::InvalidMagicBytes`] error variant in
- /// its error field and changeset field set to [`Option::None`]
+ /// [`StoreErrorWithDump`] will be returned with the [`StoreError::InvalidMagicBytes`] error
+ /// variant in its error field and changeset field set to [`Option::None`]
///
/// If there exist changesets in the file, [`load`] will try to aggregate them in
/// a single changeset to verify their integrity. If aggregation fails
// get the selected plan utxos
let selected: Vec<_> = selector.apply_selection(&plan_utxos).collect();
- // if the selection tells us to use change and the change value is sufficient, we add it as an output
+ // if the selection tells us to use change and the change value is sufficient, we add it as an
+ // output
let mut change_info = Option::<ChangeInfo>::None;
let drain = selector.drain(target, change_policy);
if drain.value > min_drain_value {
let changeset = graph.insert_tx(tx);
- // We know the tx is at least unconfirmed now. Note if persisting here fails,
- // it's not a big deal since we can always find it again from the
- // blockchain.
+ // We know the tx is at least unconfirmed now. Note if persisting here fails,
+ // it's not a big deal since we can always find it again from the blockchain.
db.lock().unwrap().append(&ChangeSet {
tx_graph: changeset.tx_graph,
indexer: changeset.indexer,
})?;
}
Err(e) => {
- // We failed to broadcast, so allow our change address to be used in the future
+ // We failed to broadcast, so allow our change address to be used in the
+ // future
let (change_keychain, _) = graph
.index
.keychains()
#[derive(clap::Args, Debug, Clone)]
pub struct ElectrumArgs {
- /// The electrum url to use to connect to. If not provided it will use a default electrum server
- /// for your chosen network.
+ /// The electrum URL to connect to. If not provided it will use a default electrum
+ /// server for your chosen network.
electrum_url: Option<String>,
}
let client = BdkElectrumClient::new(electrum_cmd.electrum_args().client(network)?);
- // Tell the electrum client about the txs we've already got locally so it doesn't re-download them
+ // Tell the electrum client about the txs we've already got locally so it doesn't re-download
+ // them
client.populate_tx_cache(
graph
.lock()