All associated examples are also updated.
///
/// [`EsploraExt`]: crate::EsploraExt
/// [crate-level documentation]: crate
-#[cfg(feature = "async")]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
}
}
-#[cfg(feature = "async")]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
) -> Result<KeychainScan<K, ConfirmationTime>, Error> {
let txids = txids.into_iter();
let outpoints = outpoints.into_iter();
- let parallel_requests = parallel_requests.max(1);
+ let parallel_requests = Ord::max(parallel_requests, 1);
let mut scan = KeychainScan::default();
let update = &mut scan.update;
let last_active_indices = &mut scan.last_active_indices;
}
let reorg_occurred = {
- if let Some(checkpoint) = update.chain().latest_checkpoint() {
+ if let Some(checkpoint) = ChainGraph::chain(update).latest_checkpoint() {
self.get_block_hash(checkpoint.height).await? != checkpoint.hash
} else {
false
if reorg_occurred {
// A reorg occurred, so let's find out where all the txids we found are in the chain now.
// XXX: collect required because of weird type naming issues
- let txids_found = update
- .chain()
+ let txids_found = ChainGraph::chain(update)
.txids()
.map(|(_, txid)| *txid)
.collect::<Vec<_>>();
stop_gap: usize,
parallel_requests: usize,
) -> Result<KeychainScan<K, ConfirmationTime>, Error> {
- let parallel_requests = parallel_requests.max(1);
+ let parallel_requests = Ord::max(parallel_requests, 1);
let mut scan = KeychainScan::default();
let update = &mut scan.update;
let last_active_indices = &mut scan.last_active_indices;
}
let reorg_occurred = {
- if let Some(checkpoint) = update.chain().latest_checkpoint() {
+ if let Some(checkpoint) = ChainGraph::chain(update).latest_checkpoint() {
self.get_block_hash(checkpoint.height)? != checkpoint.hash
} else {
false
if reorg_occurred {
// A reorg occurred, so let's find out where all the txids we found are now in the chain.
// XXX: collect required because of weird type naming issues
- let txids_found = update
- .chain()
+ let txids_found = ChainGraph::chain(update)
.txids()
.map(|(_, txid)| *txid)
.collect::<Vec<_>>();
#![doc = include_str!("../README.md")]
-use bdk_chain::ConfirmationTime;
+use bdk_chain::{BlockId, ConfirmationTime, ConfirmationTimeAnchor};
use esplora_client::TxStatus;
pub use esplora_client;
+pub mod v2;
#[cfg(feature = "blocking")]
mod blocking_ext;
_ => ConfirmationTime::Unconfirmed { last_seen: 0 },
}
}
+
+/// Convert an esplora [`TxStatus`] into a [`ConfirmationTimeAnchor`].
+///
+/// Returns `Some` only when the status carries both a block time and a block
+/// height (i.e. the transaction is confirmed). `tip_at_start` is used as the
+/// anchor block for every anchor produced during one scan.
+pub(crate) fn map_confirmation_time_anchor(
+    tx_status: &TxStatus,
+    tip_at_start: BlockId,
+) -> Option<ConfirmationTimeAnchor> {
+    match (tx_status.block_time, tx_status.block_height) {
+        (Some(confirmation_time), Some(confirmation_height)) => Some(ConfirmationTimeAnchor {
+            anchor_block: tip_at_start,
+            confirmation_height,
+            confirmation_time,
+        }),
+        _ => None,
+    }
+}
--- /dev/null
+use async_trait::async_trait;
+use bdk_chain::{
+ bitcoin::{BlockHash, OutPoint, Script, Txid},
+ collections::BTreeMap,
+ keychain::LocalUpdate,
+ BlockId, ConfirmationTimeAnchor,
+};
+use esplora_client::{Error, OutputStatus};
+use futures::{stream::FuturesOrdered, TryStreamExt};
+
+use crate::map_confirmation_time_anchor;
+
+/// Trait to extend [`esplora_client::AsyncClient`] functionality.
+///
+/// This is the async version of [`EsploraExt`]. Refer to
+/// [crate-level documentation] for more.
+///
+/// [`EsploraExt`]: crate::EsploraExt
+/// [crate-level documentation]: crate
+#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
+#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
+pub trait EsploraAsyncExt {
+    /// Scan the blockchain (via esplora) for the data specified and return a
+    /// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
+    ///
+    /// - `local_chain`: the most recent block hashes present locally
+    /// - `keychain_spks`: keychains that we want to scan transactions for
+    /// - `txids`: transactions for which we want updated [`ChainPosition`]s
+    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
+    ///     want to include in the update
+    ///
+    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
+    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
+    /// parallel.
+    ///
+    /// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
+    #[allow(clippy::result_large_err)] // FIXME
+    async fn scan<K: Ord + Clone + Send>(
+        &self,
+        local_chain: &BTreeMap<u32, BlockHash>,
+        keychain_spks: BTreeMap<
+            K,
+            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
+        >,
+        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
+        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
+        stop_gap: usize,
+        parallel_requests: usize,
+    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;
+
+    /// Convenience method to call [`scan`] without requiring a keychain.
+    ///
+    /// [`scan`]: EsploraAsyncExt::scan
+    #[allow(clippy::result_large_err)] // FIXME
+    async fn scan_without_keychain(
+        &self,
+        local_chain: &BTreeMap<u32, BlockHash>,
+        misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = Script> + Send> + Send,
+        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
+        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
+        parallel_requests: usize,
+    ) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
+        self.scan(
+            local_chain,
+            // wrap all spks under a single unit `()` keychain
+            [(
+                (),
+                misc_spks
+                    .into_iter()
+                    .enumerate()
+                    .map(|(i, spk)| (i as u32, spk)),
+            )]
+            .into(),
+            txids,
+            outpoints,
+            // no stop-gap limit: every provided spk is scanned
+            usize::MAX,
+            parallel_requests,
+        )
+        .await
+    }
+}
+
+#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
+#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
+impl EsploraAsyncExt for esplora_client::AsyncClient {
+    #[allow(clippy::result_large_err)] // FIXME
+    async fn scan<K: Ord + Clone + Send>(
+        &self,
+        local_chain: &BTreeMap<u32, BlockHash>,
+        keychain_spks: BTreeMap<
+            K,
+            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
+        >,
+        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
+        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
+        stop_gap: usize,
+        parallel_requests: usize,
+    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
+        // Always issue at least one request per batch.
+        let parallel_requests = Ord::max(parallel_requests, 1);
+
+        // Build the update chain: walk local checkpoints newest-first,
+        // re-fetching each hash from esplora, and stop at the first block that
+        // still matches. Then record the current remote tip; if the tip cannot
+        // be inserted (it conflicts with a block fetched above), retry the
+        // whole procedure.
+        let (mut update, tip_at_start) = loop {
+            let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();
+
+            for (&height, &original_hash) in local_chain.iter().rev() {
+                let update_block_id = BlockId {
+                    height,
+                    hash: self.get_block_hash(height).await?,
+                };
+                let _ = update
+                    .chain
+                    .insert_block(update_block_id)
+                    .expect("cannot repeat height here");
+                if update_block_id.hash == original_hash {
+                    break;
+                }
+            }
+
+            let tip_at_start = BlockId {
+                height: self.get_height().await?,
+                hash: self.get_tip_hash().await?,
+            };
+
+            if update.chain.insert_block(tip_at_start).is_ok() {
+                break (update, tip_at_start);
+            }
+        };
+
+        // Scan each keychain's spks in batches of `parallel_requests`,
+        // stopping once `stop_gap` consecutive unused scripts are seen.
+        for (keychain, spks) in keychain_spks {
+            let mut spks = spks.into_iter();
+            let mut last_active_index = None;
+            let mut empty_scripts = 0;
+            type IndexWithTxs = (u32, Vec<esplora_client::Tx>);
+
+            loop {
+                let futures = (0..parallel_requests)
+                    .filter_map(|_| {
+                        let (index, script) = spks.next()?;
+                        let client = self.clone();
+                        Some(async move {
+                            let mut related_txs = client.scripthash_txs(&script, None).await?;
+
+                            let n_confirmed =
+                                related_txs.iter().filter(|tx| tx.status.confirmed).count();
+                            // esplora pages on 25 confirmed transactions. If there are 25 or more we
+                            // keep requesting to see if there's more.
+                            if n_confirmed >= 25 {
+                                loop {
+                                    let new_related_txs = client
+                                        .scripthash_txs(
+                                            &script,
+                                            Some(related_txs.last().unwrap().txid),
+                                        )
+                                        .await?;
+                                    let n = new_related_txs.len();
+                                    related_txs.extend(new_related_txs);
+                                    // we've reached the end
+                                    if n < 25 {
+                                        break;
+                                    }
+                                }
+                            }
+
+                            Result::<_, esplora_client::Error>::Ok((index, related_txs))
+                        })
+                    })
+                    .collect::<FuturesOrdered<_>>();
+
+                let n_futures = futures.len();
+
+                for (index, related_txs) in futures.try_collect::<Vec<IndexWithTxs>>().await? {
+                    if related_txs.is_empty() {
+                        empty_scripts += 1;
+                    } else {
+                        last_active_index = Some(index);
+                        empty_scripts = 0;
+                    }
+                    for tx in related_txs {
+                        let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);
+
+                        let _ = update.graph.insert_tx(tx.to_tx());
+                        if let Some(anchor) = anchor {
+                            let _ = update.graph.insert_anchor(tx.txid, anchor);
+                        }
+                    }
+                }
+
+                // Stop when we ran out of spks or the unused-script gap is hit.
+                if n_futures == 0 || empty_scripts >= stop_gap {
+                    break;
+                }
+            }
+
+            if let Some(last_active_index) = last_active_index {
+                update.keychain.insert(keychain, last_active_index);
+            }
+        }
+
+        // Make sure every explicitly requested txid is in the graph and has an
+        // up-to-date anchor.
+        for txid in txids.into_iter() {
+            if update.graph.get_tx(txid).is_none() {
+                match self.get_tx(&txid).await? {
+                    Some(tx) => {
+                        let _ = update.graph.insert_tx(tx);
+                    }
+                    None => continue,
+                }
+            }
+            match self.get_tx_status(&txid).await? {
+                Some(tx_status) => {
+                    if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
+                        let _ = update.graph.insert_anchor(txid, anchor);
+                    }
+                }
+                None => continue,
+            }
+        }
+
+        // For each outpoint, include the tx it resides in and, if the output
+        // is spent, the spending tx as well.
+        for op in outpoints.into_iter() {
+            let mut op_txs = Vec::with_capacity(2);
+            if let (Some(tx), Some(tx_status)) = (
+                self.get_tx(&op.txid).await?,
+                self.get_tx_status(&op.txid).await?,
+            ) {
+                op_txs.push((tx, tx_status));
+                if let Some(OutputStatus {
+                    txid: Some(txid),
+                    status: Some(spend_status),
+                    ..
+                }) = self.get_output_status(&op.txid, op.vout as _).await?
+                {
+                    if let Some(spend_tx) = self.get_tx(&txid).await? {
+                        op_txs.push((spend_tx, spend_status));
+                    }
+                }
+            }
+
+            for (tx, status) in op_txs {
+                let txid = tx.txid();
+                let anchor = map_confirmation_time_anchor(&status, tip_at_start);
+
+                let _ = update.graph.insert_tx(tx);
+                if let Some(anchor) = anchor {
+                    let _ = update.graph.insert_anchor(txid, anchor);
+                }
+            }
+        }
+
+        if tip_at_start.hash != self.get_block_hash(tip_at_start.height).await? {
+            // A reorg occurred, so let's find out where all the txids we found are now in the chain
+            let txids_found = update
+                .graph
+                .full_txs()
+                .map(|tx_node| tx_node.txid)
+                .collect::<Vec<_>>();
+            // Re-run a keychain-less scan so the final update's chain is
+            // consistent with the transactions we collected above.
+            update.chain = EsploraAsyncExt::scan_without_keychain(
+                self,
+                local_chain,
+                [],
+                txids_found,
+                [],
+                parallel_requests,
+            )
+            .await?
+            .chain;
+        }
+
+        Ok(update)
+    }
+}
--- /dev/null
+use bdk_chain::bitcoin::{BlockHash, OutPoint, Script, Txid};
+use bdk_chain::collections::BTreeMap;
+use bdk_chain::BlockId;
+use bdk_chain::{keychain::LocalUpdate, ConfirmationTimeAnchor};
+use esplora_client::{Error, OutputStatus};
+
+use crate::map_confirmation_time_anchor;
+
+/// Trait to extend [`esplora_client::BlockingClient`] functionality.
+///
+/// Refer to [crate-level documentation] for more.
+///
+/// [crate-level documentation]: crate
+pub trait EsploraExt {
+    /// Scan the blockchain (via esplora) for the data specified and return a
+    /// [`LocalUpdate<K, ConfirmationTimeAnchor>`].
+    ///
+    /// - `local_chain`: the most recent block hashes present locally
+    /// - `keychain_spks`: keychains that we want to scan transactions for
+    /// - `txids`: transactions for which we want updated [`ChainPosition`]s
+    /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
+    ///     want to include in the update
+    ///
+    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
+    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
+    /// parallel.
+    ///
+    /// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
+    #[allow(clippy::result_large_err)] // FIXME
+    fn scan<K: Ord + Clone>(
+        &self,
+        local_chain: &BTreeMap<u32, BlockHash>,
+        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
+        txids: impl IntoIterator<Item = Txid>,
+        outpoints: impl IntoIterator<Item = OutPoint>,
+        stop_gap: usize,
+        parallel_requests: usize,
+    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error>;
+
+    /// Convenience method to call [`scan`] without requiring a keychain.
+    ///
+    /// [`scan`]: EsploraExt::scan
+    #[allow(clippy::result_large_err)] // FIXME
+    fn scan_without_keychain(
+        &self,
+        local_chain: &BTreeMap<u32, BlockHash>,
+        misc_spks: impl IntoIterator<Item = Script>,
+        txids: impl IntoIterator<Item = Txid>,
+        outpoints: impl IntoIterator<Item = OutPoint>,
+        parallel_requests: usize,
+    ) -> Result<LocalUpdate<(), ConfirmationTimeAnchor>, Error> {
+        self.scan(
+            local_chain,
+            // wrap all spks under a single unit `()` keychain
+            [(
+                (),
+                misc_spks
+                    .into_iter()
+                    .enumerate()
+                    .map(|(i, spk)| (i as u32, spk)),
+            )]
+            .into(),
+            txids,
+            outpoints,
+            // no stop-gap limit: every provided spk is scanned
+            usize::MAX,
+            parallel_requests,
+        )
+    }
+}
+
+impl EsploraExt for esplora_client::BlockingClient {
+    fn scan<K: Ord + Clone>(
+        &self,
+        local_chain: &BTreeMap<u32, BlockHash>,
+        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
+        txids: impl IntoIterator<Item = Txid>,
+        outpoints: impl IntoIterator<Item = OutPoint>,
+        stop_gap: usize,
+        parallel_requests: usize,
+    ) -> Result<LocalUpdate<K, ConfirmationTimeAnchor>, Error> {
+        // Always issue at least one request per batch.
+        let parallel_requests = Ord::max(parallel_requests, 1);
+
+        // Build the update chain: walk local checkpoints newest-first,
+        // re-fetching each hash from esplora, and stop at the first block that
+        // still matches. Then record the current remote tip; if the tip cannot
+        // be inserted (it conflicts with a block fetched above), retry the
+        // whole procedure.
+        let (mut update, tip_at_start) = loop {
+            let mut update = LocalUpdate::<K, ConfirmationTimeAnchor>::default();
+
+            for (&height, &original_hash) in local_chain.iter().rev() {
+                let update_block_id = BlockId {
+                    height,
+                    hash: self.get_block_hash(height)?,
+                };
+                let _ = update
+                    .chain
+                    .insert_block(update_block_id)
+                    .expect("cannot repeat height here");
+                if update_block_id.hash == original_hash {
+                    break;
+                }
+            }
+
+            let tip_at_start = BlockId {
+                height: self.get_height()?,
+                hash: self.get_tip_hash()?,
+            };
+
+            if update.chain.insert_block(tip_at_start).is_ok() {
+                break (update, tip_at_start);
+            }
+        };
+
+        // Scan each keychain's spks in batches of `parallel_requests` (one
+        // thread per request), stopping once `stop_gap` consecutive unused
+        // scripts are seen.
+        for (keychain, spks) in keychain_spks {
+            let mut spks = spks.into_iter();
+            let mut last_active_index = None;
+            let mut empty_scripts = 0;
+            type IndexWithTxs = (u32, Vec<esplora_client::Tx>);
+
+            loop {
+                let handles = (0..parallel_requests)
+                    .filter_map(
+                        |_| -> Option<std::thread::JoinHandle<Result<IndexWithTxs, _>>> {
+                            let (index, script) = spks.next()?;
+                            let client = self.clone();
+                            Some(std::thread::spawn(move || {
+                                let mut related_txs = client.scripthash_txs(&script, None)?;
+
+                                let n_confirmed =
+                                    related_txs.iter().filter(|tx| tx.status.confirmed).count();
+                                // esplora pages on 25 confirmed transactions. If there are 25 or more we
+                                // keep requesting to see if there's more.
+                                if n_confirmed >= 25 {
+                                    loop {
+                                        let new_related_txs = client.scripthash_txs(
+                                            &script,
+                                            Some(related_txs.last().unwrap().txid),
+                                        )?;
+                                        let n = new_related_txs.len();
+                                        related_txs.extend(new_related_txs);
+                                        // we've reached the end
+                                        if n < 25 {
+                                            break;
+                                        }
+                                    }
+                                }
+
+                                Result::<_, esplora_client::Error>::Ok((index, related_txs))
+                            }))
+                        },
+                    )
+                    .collect::<Vec<_>>();
+
+                let n_handles = handles.len();
+
+                for handle in handles {
+                    let (index, related_txs) = handle.join().unwrap()?; // TODO: don't unwrap
+                    if related_txs.is_empty() {
+                        empty_scripts += 1;
+                    } else {
+                        last_active_index = Some(index);
+                        empty_scripts = 0;
+                    }
+                    for tx in related_txs {
+                        let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start);
+
+                        let _ = update.graph.insert_tx(tx.to_tx());
+                        if let Some(anchor) = anchor {
+                            let _ = update.graph.insert_anchor(tx.txid, anchor);
+                        }
+                    }
+                }
+
+                // Stop when we ran out of spks or the unused-script gap is hit.
+                if n_handles == 0 || empty_scripts >= stop_gap {
+                    break;
+                }
+            }
+
+            if let Some(last_active_index) = last_active_index {
+                update.keychain.insert(keychain, last_active_index);
+            }
+        }
+
+        // Make sure every explicitly requested txid is in the graph and has an
+        // up-to-date anchor.
+        for txid in txids.into_iter() {
+            if update.graph.get_tx(txid).is_none() {
+                match self.get_tx(&txid)? {
+                    Some(tx) => {
+                        let _ = update.graph.insert_tx(tx);
+                    }
+                    None => continue,
+                }
+            }
+            match self.get_tx_status(&txid)? {
+                Some(tx_status) => {
+                    if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) {
+                        let _ = update.graph.insert_anchor(txid, anchor);
+                    }
+                }
+                None => continue,
+            }
+        }
+
+        // For each outpoint, include the tx it resides in and, if the output
+        // is spent, the spending tx as well.
+        for op in outpoints.into_iter() {
+            let mut op_txs = Vec::with_capacity(2);
+            if let (Some(tx), Some(tx_status)) =
+                (self.get_tx(&op.txid)?, self.get_tx_status(&op.txid)?)
+            {
+                op_txs.push((tx, tx_status));
+                if let Some(OutputStatus {
+                    txid: Some(txid),
+                    status: Some(spend_status),
+                    ..
+                }) = self.get_output_status(&op.txid, op.vout as _)?
+                {
+                    if let Some(spend_tx) = self.get_tx(&txid)? {
+                        op_txs.push((spend_tx, spend_status));
+                    }
+                }
+            }
+
+            for (tx, status) in op_txs {
+                let txid = tx.txid();
+                let anchor = map_confirmation_time_anchor(&status, tip_at_start);
+
+                let _ = update.graph.insert_tx(tx);
+                if let Some(anchor) = anchor {
+                    let _ = update.graph.insert_anchor(txid, anchor);
+                }
+            }
+        }
+
+        if tip_at_start.hash != self.get_block_hash(tip_at_start.height)? {
+            // A reorg occurred, so let's find out where all the txids we found are now in the chain
+            let txids_found = update
+                .graph
+                .full_txs()
+                .map(|tx_node| tx_node.txid)
+                .collect::<Vec<_>>();
+            // Re-run a keychain-less scan so the final update's chain is
+            // consistent with the transactions we collected above.
+            update.chain = EsploraExt::scan_without_keychain(
+                self,
+                local_chain,
+                [],
+                txids_found,
+                [],
+                parallel_requests,
+            )?
+            .chain;
+        }
+
+        Ok(update)
+    }
+}
--- /dev/null
+//! Feature-gated extension traits: `blocking_ext` provides [`EsploraExt`]
+//! (feature `blocking`) and `async_ext` provides [`EsploraAsyncExt`]
+//! (feature `async`); both are re-exported here.
+#[cfg(feature = "blocking")]
+mod blocking_ext;
+#[cfg(feature = "blocking")]
+pub use blocking_ext::*;
+
+#[cfg(feature = "async")]
+mod async_ext;
+#[cfg(feature = "async")]
+pub use async_ext::*;
[package]
-name = "bdk-esplora-wallet-example"
+name = "wallet_esplora"
version = "0.1.0"
edition = "2021"
publish = false
-// use bdk::{
-// bitcoin::{Address, Network},
-// wallet::AddressIndex,
-// SignOptions, Wallet,
-// };
-// use bdk_esplora::esplora_client;
-// use bdk_esplora::EsploraExt;
-// use bdk_file_store::KeychainStore;
-// use std::{io::Write, str::FromStr};
+const DB_MAGIC: &str = "bdk_wallet_esplora_example";
+const SEND_AMOUNT: u64 = 5000;
+const STOP_GAP: usize = 50;
+const PARALLEL_REQUESTS: usize = 5;
-// const SEND_AMOUNT: u64 = 5000;
-// const STOP_GAP: usize = 50;
-// const PARALLEL_REQUESTS: usize = 5;
+use std::{io::Write, str::FromStr};
+
+use bdk::{
+ bitcoin::{Address, Network},
+ wallet::AddressIndex,
+ SignOptions, Wallet,
+};
+use bdk_esplora::{esplora_client, v2::EsploraExt};
+use bdk_file_store::Store;
fn main() -> Result<(), Box<dyn std::error::Error>> {
-    todo!("update this exampe!");
-    // let db_path = std::env::temp_dir().join("bdk-esplora-example");
-    // let db = KeychainStore::new_from_path(db_path)?;
-    // let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
-    // let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/1/*)";
+    let db_path = std::env::temp_dir().join("bdk-esplora-example");
+    // persistence: flat-file changeset store
+    let db = Store::<bdk::wallet::ChangeSet>::new_from_path(DB_MAGIC.as_bytes(), db_path)?;
+    let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
+    let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/1/*)";
+
+    let mut wallet = Wallet::new(
+        external_descriptor,
+        Some(internal_descriptor),
+        db,
+        Network::Testnet,
+    )?;
-    // let mut wallet = Wallet::new(
-    //     external_descriptor,
-    //     Some(internal_descriptor),
-    //     db,
-    //     Network::Testnet,
-    // )?;
+    let address = wallet.get_address(AddressIndex::New);
+    println!("Generated Address: {}", address);
-    // let address = wallet.get_address(AddressIndex::New);
-    // println!("Generated Address: {}", address);
+    let balance = wallet.get_balance();
+    println!("Wallet balance before syncing: {} sats", balance.total());
-    // let balance = wallet.get_balance();
-    // println!("Wallet balance before syncing: {} sats", balance.total());
+    print!("Syncing...");
+    let client =
+        esplora_client::Builder::new("https://blockstream.info/testnet/api").build_blocking()?;
-    // print!("Syncing...");
-    // // Scanning the chain...
-    // let esplora_url = "https://mempool.space/testnet/api";
-    // let client = esplora_client::Builder::new(esplora_url).build_blocking()?;
-    // let checkpoints = wallet.checkpoints();
-    // let spks = wallet
-    //     .spks_of_all_keychains()
-    //     .into_iter()
-    //     .map(|(k, spks)| {
-    //         let mut first = true;
-    //         (
-    //             k,
-    //             spks.inspect(move |(spk_i, _)| {
-    //                 if first {
-    //                     first = false;
-    //                     print!("\nScanning keychain [{:?}]:", k);
-    //                 }
-    //                 print!(" {}", spk_i);
-    //                 let _ = std::io::stdout().flush();
-    //             }),
-    //         )
-    //     })
-    //     .collect();
-    // let update = client.scan(
-    //     checkpoints,
-    //     spks,
-    //     core::iter::empty(),
-    //     core::iter::empty(),
-    //     STOP_GAP,
-    //     PARALLEL_REQUESTS,
-    // )?;
-    // println!();
-    // wallet.apply_update(update)?;
-    // wallet.commit()?;
+    let local_chain = wallet.checkpoints();
+    let keychain_spks = wallet
+        .spks_of_all_keychains()
+        .into_iter()
+        .map(|(k, k_spks)| {
+            // print the keychain header once, then each spk index as scanned
+            let mut once = Some(());
+            let mut stdout = std::io::stdout();
+            let k_spks = k_spks
+                .inspect(move |(spk_i, _)| match once.take() {
+                    Some(_) => print!("\nScanning keychain [{:?}]", k),
+                    None => print!(" {:<3}", spk_i),
+                })
+                .inspect(move |_| stdout.flush().expect("must flush"));
+            (k, k_spks)
+        })
+        .collect();
+    // `None` txids/outpoints: only the keychain spks are scanned
+    let update = client.scan(
+        local_chain,
+        keychain_spks,
+        None,
+        None,
+        STOP_GAP,
+        PARALLEL_REQUESTS,
+    )?;
+    println!();
+    wallet.apply_update(update)?;
+    wallet.commit()?;
-    // let balance = wallet.get_balance();
-    // println!("Wallet balance after syncing: {} sats", balance.total());
+    let balance = wallet.get_balance();
+    println!("Wallet balance after syncing: {} sats", balance.total());
-    // if balance.total() < SEND_AMOUNT {
-    //     println!(
-    //         "Please send at least {} sats to the receiving address",
-    //         SEND_AMOUNT
-    //     );
-    //     std::process::exit(0);
-    // }
+    if balance.total() < SEND_AMOUNT {
+        println!(
+            "Please send at least {} sats to the receiving address",
+            SEND_AMOUNT
+        );
+        std::process::exit(0);
+    }
-    // let faucet_address = Address::from_str("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt")?;
+    let faucet_address = Address::from_str("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt")?;
-    // let mut tx_builder = wallet.build_tx();
-    // tx_builder
-    //     .add_recipient(faucet_address.script_pubkey(), SEND_AMOUNT)
-    //     .enable_rbf();
+    let mut tx_builder = wallet.build_tx();
+    tx_builder
+        .add_recipient(faucet_address.script_pubkey(), SEND_AMOUNT)
+        .enable_rbf();
-    // let (mut psbt, _) = tx_builder.finish()?;
-    // let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
-    // assert!(finalized);
+    let (mut psbt, _) = tx_builder.finish()?;
+    let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
+    assert!(finalized);
-    // let tx = psbt.extract_tx();
-    // client.broadcast(&tx)?;
-    // println!("Tx broadcasted! Txid: {}", tx.txid());
+    let tx = psbt.extract_tx();
+    client.broadcast(&tx)?;
+    println!("Tx broadcasted! Txid: {}", tx.txid());
-    // Ok(())
+    Ok(())
}
-// use std::{io::Write, str::FromStr};
+use std::{io::Write, str::FromStr};
-// use bdk::{
-// bitcoin::{Address, Network},
-// wallet::AddressIndex,
-// SignOptions, Wallet,
-// };
-// use bdk_esplora::{esplora_client, EsploraAsyncExt};
-// use bdk_file_store::KeychainStore;
+use bdk::{
+ bitcoin::{Address, Network},
+ wallet::AddressIndex,
+ SignOptions, Wallet,
+};
+use bdk_esplora::{esplora_client, v2::EsploraAsyncExt};
+use bdk_file_store::Store;
-// const SEND_AMOUNT: u64 = 5000;
-// const STOP_GAP: usize = 50;
-// const PARALLEL_REQUESTS: usize = 5;
+const DB_MAGIC: &str = "bdk_wallet_esplora_async_example";
+const SEND_AMOUNT: u64 = 5000;
+const STOP_GAP: usize = 50;
+const PARALLEL_REQUESTS: usize = 5;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    todo!("update this example!");
-    // let db_path = std::env::temp_dir().join("bdk-esplora-example");
-    // let db = KeychainStore::new_from_path(db_path)?;
-    // let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
-    // let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/1/*)";
+    let db_path = std::env::temp_dir().join("bdk-esplora-async-example");
+    // persistence: flat-file changeset store
+    let db = Store::<bdk::wallet::ChangeSet>::new_from_path(DB_MAGIC.as_bytes(), db_path)?;
+    let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
+    let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/1/*)";
-    // let mut wallet = Wallet::new(
-    //     external_descriptor,
-    //     Some(internal_descriptor),
-    //     db,
-    //     Network::Testnet,
-    // )?;
+    let mut wallet = Wallet::new(
+        external_descriptor,
+        Some(internal_descriptor),
+        db,
+        Network::Testnet,
+    )?;
-    // let address = wallet.get_address(AddressIndex::New);
-    // println!("Generated Address: {}", address);
+    let address = wallet.get_address(AddressIndex::New);
+    println!("Generated Address: {}", address);
-    // let balance = wallet.get_balance();
-    // println!("Wallet balance before syncing: {} sats", balance.total());
+    let balance = wallet.get_balance();
+    println!("Wallet balance before syncing: {} sats", balance.total());
-    // print!("Syncing...");
-    // // Scanning the blockchain
-    // let esplora_url = "https://mempool.space/testnet/api";
-    // let client = esplora_client::Builder::new(esplora_url).build_async()?;
-    // let checkpoints = wallet.checkpoints();
-    // let spks = wallet
-    //     .spks_of_all_keychains()
-    //     .into_iter()
-    //     .map(|(k, spks)| {
-    //         let mut first = true;
-    //         (
-    //             k,
-    //             spks.inspect(move |(spk_i, _)| {
-    //                 if first {
-    //                     first = false;
-    //                     print!("\nScanning keychain [{:?}]:", k);
-    //                 }
-    //                 print!(" {}", spk_i);
-    //                 let _ = std::io::stdout().flush();
-    //             }),
-    //         )
-    //     })
-    //     .collect();
-    // let update = client
-    //     .scan(
-    //         checkpoints,
-    //         spks,
-    //         std::iter::empty(),
-    //         std::iter::empty(),
-    //         STOP_GAP,
-    //         PARALLEL_REQUESTS,
-    //     )
-    //     .await?;
-    // println!();
-    // wallet.apply_update(update)?;
-    // wallet.commit()?;
+    print!("Syncing...");
+    let client =
+        esplora_client::Builder::new("https://blockstream.info/testnet/api").build_async()?;
-    // let balance = wallet.get_balance();
-    // println!("Wallet balance after syncing: {} sats", balance.total());
+    let local_chain = wallet.checkpoints();
+    let keychain_spks = wallet
+        .spks_of_all_keychains()
+        .into_iter()
+        .map(|(k, k_spks)| {
+            // print the keychain header once, then each spk index as scanned
+            let mut once = Some(());
+            let mut stdout = std::io::stdout();
+            let k_spks = k_spks
+                .inspect(move |(spk_i, _)| match once.take() {
+                    Some(_) => print!("\nScanning keychain [{:?}]", k),
+                    None => print!(" {:<3}", spk_i),
+                })
+                .inspect(move |_| stdout.flush().expect("must flush"));
+            (k, k_spks)
+        })
+        .collect();
+    // `[]` txids/outpoints: only the keychain spks are scanned
+    let update = client
+        .scan(
+            local_chain,
+            keychain_spks,
+            [],
+            [],
+            STOP_GAP,
+            PARALLEL_REQUESTS,
+        )
+        .await?;
+    println!();
+    wallet.apply_update(update)?;
+    wallet.commit()?;
-    // if balance.total() < SEND_AMOUNT {
-    //     println!(
-    //         "Please send at least {} sats to the receiving address",
-    //         SEND_AMOUNT
-    //     );
-    //     std::process::exit(0);
-    // }
+    let balance = wallet.get_balance();
+    println!("Wallet balance after syncing: {} sats", balance.total());
-    // let faucet_address = Address::from_str("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt")?;
+    if balance.total() < SEND_AMOUNT {
+        println!(
+            "Please send at least {} sats to the receiving address",
+            SEND_AMOUNT
+        );
+        std::process::exit(0);
+    }
-    // let mut tx_builder = wallet.build_tx();
-    // tx_builder
-    //     .add_recipient(faucet_address.script_pubkey(), SEND_AMOUNT)
-    //     .enable_rbf();
+    let faucet_address = Address::from_str("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt")?;
-    // let (mut psbt, _) = tx_builder.finish()?;
-    // let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
-    // assert!(finalized);
+    let mut tx_builder = wallet.build_tx();
+    tx_builder
+        .add_recipient(faucet_address.script_pubkey(), SEND_AMOUNT)
+        .enable_rbf();
-    // let tx = psbt.extract_tx();
-    // client.broadcast(&tx).await?;
-    // println!("Tx broadcasted! Txid: {}", tx.txid());
+    let (mut psbt, _) = tx_builder.finish()?;
+    let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
+    assert!(finalized);
-    // Ok(())
+    let tx = psbt.extract_tx();
+    client.broadcast(&tx).await?;
+    println!("Tx broadcasted! Txid: {}", tx.txid());
+
+    Ok(())
}