readme = "README.md"
license = "MIT OR Apache-2.0"
authors = ["Bitcoin Dev Kit Developers"]
-edition = "2018"
-
+edition = "2021"
+rust-version = "1.57"
[dependencies]
log = "^0.4"
pub use bdk_chain::keychain::Balance;
use bdk_chain::{
chain_graph,
- keychain::{KeychainChangeSet, KeychainScan, KeychainTracker},
+ keychain::{persist, KeychainChangeSet, KeychainScan, KeychainTracker},
sparse_chain, BlockId, ConfirmationTime, IntoOwned,
};
use bitcoin::consensus::encode::serialize;
#[cfg(feature = "hardware-signer")]
#[cfg_attr(docsrs, doc(cfg(feature = "hardware-signer")))]
pub mod hardwaresigner;
-pub mod persist;
pub use utils::IsDust;
signers: Arc<SignersContainer>,
change_signers: Arc<SignersContainer>,
keychain_tracker: KeychainTracker<KeychainKind, ConfirmationTime>,
- persist: persist::Persist<D>,
+ persist: persist::Persist<KeychainKind, ConfirmationTime, D>,
network: Network,
secp: SecpCtx,
}
network: Network,
) -> Result<Self, NewError<D::LoadError>>
where
- D: persist::Backend,
+ D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
{
let secp = Secp256k1::new();
/// (i.e. does not end with /*) then the same address will always be returned for any [`AddressIndex`].
pub fn get_address(&mut self, address_index: AddressIndex) -> AddressInfo
where
- D: persist::Backend,
+ D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
{
self._get_address(address_index, KeychainKind::External)
}
/// be returned for any [`AddressIndex`].
pub fn get_internal_address(&mut self, address_index: AddressIndex) -> AddressInfo
where
- D: persist::Backend,
+ D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
{
self._get_address(address_index, KeychainKind::Internal)
}
fn _get_address(&mut self, address_index: AddressIndex, keychain: KeychainKind) -> AddressInfo
where
- D: persist::Backend,
+ D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
{
let keychain = self.map_keychain(keychain);
let txout_index = &mut self.keychain_tracker.txout_index;
params: TxParams,
) -> Result<(psbt::PartiallySignedTransaction, TransactionDetails), Error>
where
- D: persist::Backend,
+ D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
{
let external_descriptor = self
.keychain_tracker
/// [`commit`]: Self::commit
pub fn apply_update<Tx>(&mut self, update: Update<Tx>) -> Result<(), UpdateError>
where
- D: persist::Backend,
+ D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
Tx: IntoOwned<Transaction> + Clone,
{
let changeset = self.keychain_tracker.apply_update(update)?;
/// [`staged`]: Self::staged
pub fn commit(&mut self) -> Result<(), D::WriteError>
where
- D: persist::Backend,
+ D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
{
self.persist.commit()
}
+++ /dev/null
-//! Persistence for changes made to a [`Wallet`].
-//!
-//! BDK's [`Wallet`] needs somewhere to persist changes it makes during operation.
-//! Operations like giving out a new address are crucial to persist so that next time the
-//! application is loaded it can find transactions related to that address.
-//!
-//! Note that `Wallet` does not read this persisted data during operation since it always has a copy
-//! in memory
-//!
-//! [`Wallet`]: crate::Wallet
-use crate::KeychainKind;
-use bdk_chain::{keychain::KeychainTracker, ConfirmationTime};
-
-/// `Persist` wraps a [`Backend`] to create a convienient staging area for changes before they are
-/// persisted. Not all changes made to the [`Wallet`] need to be written to disk right away so you
-/// can use [`Persist::stage`] to *stage* it first and then [`Persist::commit`] to finally write it
-/// to disk.
-///
-/// [`Wallet`]: crate::Wallet
-#[derive(Debug)]
-pub struct Persist<P> {
- backend: P,
- stage: ChangeSet,
-}
-
-impl<P> Persist<P> {
- /// Create a new `Persist` from a [`Backend`]
- pub fn new(backend: P) -> Self {
- Self {
- backend,
- stage: Default::default(),
- }
- }
-
- /// Stage a `changeset` to later persistence with [`commit`].
- ///
- /// [`commit`]: Self::commit
- pub fn stage(&mut self, changeset: ChangeSet) {
- self.stage.append(changeset)
- }
-
- /// Get the changes that haven't been commited yet
- pub fn staged(&self) -> &ChangeSet {
- &self.stage
- }
-
- /// Commit the staged changes to the underlying persistence backend.
- ///
- /// Retuns a backend defined error if this fails
- pub fn commit(&mut self) -> Result<(), P::WriteError>
- where
- P: Backend,
- {
- self.backend.append_changeset(&self.stage)?;
- self.stage = Default::default();
- Ok(())
- }
-}
-
-/// A persistence backend for [`Wallet`]
-///
-/// [`Wallet`]: crate::Wallet
-pub trait Backend {
- /// The error the backend returns when it fails to write
- type WriteError: core::fmt::Debug;
- /// The error the backend returns when it fails to load
- type LoadError: core::fmt::Debug;
- /// Appends a new changeset to the persistance backend.
- ///
- /// It is up to the backend what it does with this. It could store every changeset in a list or
- /// it insert the actual changes to a more structured database. All it needs to guarantee is
- /// that [`load_into_keychain_tracker`] restores a keychain tracker to what it should be if all
- /// changesets had been applied sequentially.
- ///
- /// [`load_into_keychain_tracker`]: Self::load_into_keychain_tracker
- fn append_changeset(&mut self, changeset: &ChangeSet) -> Result<(), Self::WriteError>;
-
- /// Applies all the changesets the backend has received to `tracker`.
- fn load_into_keychain_tracker(
- &mut self,
- tracker: &mut KeychainTracker<KeychainKind, ConfirmationTime>,
- ) -> Result<(), Self::LoadError>;
-}
-
-#[cfg(feature = "file-store")]
-mod file_store {
- use super::*;
- use bdk_chain::file_store::{IterError, KeychainStore};
-
- type FileStore = KeychainStore<KeychainKind, ConfirmationTime>;
-
- impl Backend for FileStore {
- type WriteError = std::io::Error;
- type LoadError = IterError;
- fn append_changeset(&mut self, changeset: &ChangeSet) -> Result<(), Self::WriteError> {
- self.append_changeset(changeset)
- }
- fn load_into_keychain_tracker(
- &mut self,
- tracker: &mut KeychainTracker<KeychainKind, ConfirmationTime>,
- ) -> Result<(), Self::LoadError> {
- self.load_into_keychain_tracker(tracker)
- }
- }
-}
-
-impl Backend for () {
- type WriteError = ();
- type LoadError = ();
- fn append_changeset(&mut self, _changeset: &ChangeSet) -> Result<(), Self::WriteError> {
- Ok(())
- }
- fn load_into_keychain_tracker(
- &mut self,
- _tracker: &mut KeychainTracker<KeychainKind, ConfirmationTime>,
- ) -> Result<(), Self::LoadError> {
- Ok(())
- }
-}
-
-#[cfg(feature = "file-store")]
-pub use file_store::*;
-
-use super::ChangeSet;
use crate::collections::BTreeMap;
use crate::collections::HashSet;
use alloc::{boxed::Box, rc::Rc, string::String, vec::Vec};
+use bdk_chain::ConfirmationTime;
use core::cell::RefCell;
use core::marker::PhantomData;
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
pub fn finish(self) -> Result<(Psbt, TransactionDetails), Error>
where
- D: persist::Backend,
+ D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
{
self.wallet
.borrow_mut()
name = "bdk_chain"
version = "0.3.1"
edition = "2021"
+rust-version = "1.57"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_chain"
[dependencies]
bitcoin = { version = "0.29" }
serde_crate = { package = "serde", version = "1", optional = true, features = ["derive"] }
+
# Use hashbrown as a feature flag to have HashSet and HashMap from it.
-hashbrown = { version = "0.13.2", optional = true }
+# note: version 0.13 breaks our MSRV.
+hashbrown = { version = "0.12", optional = true, features = ["serde"] }
miniscript = { version = "9.0.0", optional = true }
[dev-dependencies]
[features]
default = ["std", "miniscript"]
std = []
-serde = ["serde_crate", "bitcoin/serde"]
+serde = ["serde_crate", "bitcoin/serde"]
&'a self,
tx: &'a Transaction,
) -> impl Iterator<Item = (&'a P, Txid)> + 'a {
- self.graph.walk_conflicts(tx, |_, conflict_txid| {
+ self.graph.walk_conflicts(tx, move |_, conflict_txid| {
self.chain
.tx_position(conflict_txid)
.map(|conflict_pos| (conflict_pos, conflict_txid))
&self,
changeset: &mut ChangeSet<P, T>,
) -> Result<(), UnresolvableConflict<P>> {
- let chain_conflicts = changeset
- .chain
- .txids
- .iter()
- // we want to find new txid additions by the changeset (all txid entries in the
- // changeset with Some(position_change))
- .filter_map(|(&txid, pos_change)| pos_change.as_ref().map(|pos| (txid, pos)))
- // we don't care about txids that move, only newly added txids
- .filter(|&(txid, _)| self.chain.tx_position(txid).is_none())
- // full tx should exist (either in graph, or additions)
- .filter_map(|(txid, pos)| {
- let full_tx = self
+ let mut chain_conflicts = vec![];
+
+ for (&txid, pos_change) in &changeset.chain.txids {
+ let pos = match pos_change {
+ Some(pos) => {
+ // Ignore txs that are still in the chain -- we only care about new ones
+ if self.chain.tx_position(txid).is_some() {
+ continue;
+ }
+ pos
+ }
+            // Ignore txids that are being deleted by the change (they can't conflict)
+ None => continue,
+ };
+
+ let mut full_tx = self.graph.get_tx(txid);
+
+ if full_tx.is_none() {
+ full_tx = changeset
.graph
- .get_tx(txid)
- .or_else(|| {
- changeset
- .graph
- .tx
- .iter()
- .find(|tx| tx.as_tx().txid() == txid)
- })
- .map(|tx| (txid, tx, pos));
- debug_assert!(full_tx.is_some(), "should have full tx at this point");
- full_tx
- })
- .flat_map(|(new_txid, new_tx, new_pos)| {
- self.tx_conflicts_in_chain(new_tx.as_tx()).map(
- move |(conflict_pos, conflict_txid)| {
- (new_pos.clone(), new_txid, conflict_pos, conflict_txid)
- },
- )
- })
- .collect::<Vec<_>>();
+ .tx
+ .iter()
+ .find(|tx| tx.as_tx().txid() == txid)
+ }
+
+ debug_assert!(full_tx.is_some(), "should have full tx at this point");
+
+ let full_tx = match full_tx {
+ Some(full_tx) => full_tx,
+ None => continue,
+ };
+
+ for (conflict_pos, conflict_txid) in self.tx_conflicts_in_chain(full_tx.as_tx()) {
+ chain_conflicts.push((pos.clone(), txid, conflict_pos, conflict_txid))
+ }
+ }
for (update_pos, update_txid, conflicting_pos, conflicting_txid) in chain_conflicts {
// We have found a tx that conflicts with our update txid. Only allow this when the
pub fn transactions_in_chain(&self) -> impl DoubleEndedIterator<Item = (&P, &T)> {
self.chain
.txids()
- .map(|(pos, txid)| (pos, self.graph.get_tx(*txid).expect("must exist")))
+ .map(move |(pos, txid)| (pos, self.graph.get_tx(*txid).expect("must exist")))
}
/// Finds the transaction in the chain that spends `outpoint` given the input/output
stage: keychain::KeychainChangeSet<K, P>,
}
-impl<K, P, B: PersistBackend<K, P>> Persist<K, P, B> {
+impl<K, P, B> Persist<K, P, B> {
/// Create a new `Persist` from a [`PersistBackend`].
pub fn new(backend: B) -> Self {
Self {
/// Commit the staged changes to the underlying persistence backend.
///
/// Retuns a backend defined error if this fails
- pub fn commit(&mut self) -> Result<(), B::WriteError> {
+ pub fn commit(&mut self) -> Result<(), B::WriteError>
+ where
+ B: PersistBackend<K, P>,
+ {
self.backend.append_changeset(&self.stage)?;
self.stage = Default::default();
Ok(())
pub fn full_txouts(&self) -> impl Iterator<Item = (&(K, u32), FullTxOut<P>)> + '_ {
self.txout_index
.txouts()
- .filter_map(|(spk_i, op, _)| Some((spk_i, self.chain_graph.full_txout(op)?)))
+ .filter_map(move |(spk_i, op, _)| Some((spk_i, self.chain_graph.full_txout(op)?)))
}
/// Iterates through [`FullTxOut`]s that are unspent outputs.
Cow::Owned(descriptor.clone()),
next_reveal_index..index + 1,
),
- DerivationAdditions([(keychain.clone(), index)].into()),
+ DerivationAdditions(core::iter::once((keychain.clone(), index)).collect()),
)
}
None => (
.take_while(move |&index| has_wildcard || index == 0)
// we can only iterate over non-hardened indices
.take_while(|&index| index <= BIP32_MAX_INDEX)
- // take until failure
- .map_while(move |index| {
- descriptor
- .derived_descriptor(&secp, index)
- .map(|desc| (index, desc.script_pubkey()))
- .ok()
- })
+ .map(
+ move |index| -> Result<_, miniscript::descriptor::ConversionError> {
+ Ok((
+ index,
+ descriptor
+ .at_derivation_index(index)
+ .derived_descriptor(&secp)?
+ .script_pubkey(),
+ ))
+ },
+ )
+ .take_while(Result::is_ok)
+ .map(Result::unwrap)
}
changeset
.txids
.iter()
- .filter(|(&txid, pos)| {
+ .filter(move |(&txid, pos)| {
pos.is_some() /*it was not a deletion*/ &&
self.tx_position(txid).is_none() /* we don't have the txid already */
})
/// See [`ForEachTxout`] for the types that support this.
///
/// [`ForEachTxout`]: crate::ForEachTxOut
- pub fn scan(&mut self, txouts: &impl ForEachTxOut) -> BTreeSet<&I> {
- // let scanner = &mut SpkTxOutScanner::new(self);
+ pub fn scan(&mut self, txouts: &impl ForEachTxOut) -> BTreeSet<I> {
let mut scanned_indices = BTreeSet::new();
txouts.for_each_txout(|(op, txout)| {
if let Some(spk_i) = scan_txout!(self, op, txout) {
- scanned_indices.insert(spk_i);
+ scanned_indices.insert(spk_i.clone());
}
});
{
self.unused
.range(range)
- .map(|index| (index, self.spk_at_index(index).expect("must exist")))
+ .map(move |index| (index, self.spk_at_index(index).expect("must exist")))
}
/// Returns whether the script pubkey at `index` has been used or not.
tx.input
.iter()
.enumerate()
- .filter_map(|(vin, txin)| self.spends.get(&txin.previous_output).zip(Some(vin)))
+ .filter_map(move |(vin, txin)| self.spends.get(&txin.previous_output).zip(Some(vin)))
.flat_map(|(spends, vin)| core::iter::repeat(vin).zip(spends.iter().cloned()))
.filter(move |(_, conflicting_txid)| *conflicting_txid != txid)
}
.output
.iter()
.enumerate()
- .map(|(vout, txout)| (OutPoint::new(tx.as_tx().txid(), vout as _), txout))
+ .map(move |(vout, txout)| (OutPoint::new(tx.as_tx().txid(), vout as _), txout))
})
.chain(self.txout.iter().map(|(op, txout)| (*op, txout)))
}
(0..=15)
.into_iter()
- .chain([17, 20, 23].into_iter())
+ .chain(vec![17, 20, 23].into_iter())
.for_each(|index| assert!(txout_index.mark_used(&TestKeychain::External, index)));
assert_eq!(txout_index.next_index(&TestKeychain::External), (26, true));
let client = electrum_client::Client::from_config(electrum_url, config)?;
- let electrum_cmd = match args.command {
+ let electrum_cmd = match args.command.clone() {
cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
general_command => {
return cli::handle_commands(
bdk_tmp_plan = { path = "../../nursery/tmp_plan" }
bdk_coin_select = { path = "../../nursery/coin_select" }
-clap = { version = "4", features = ["derive", "env"] }
+clap = { version = "3.2.23", features = ["derive", "env"] }
anyhow = "1"
serde = { version = "1", features = ["derive"] }
serde_json = { version = "^1.0" }
) -> impl Iterator<Item = (bdk_tmp_plan::Plan<AK>, FullTxOut<P>)> + 'a {
tracker
.full_utxos()
- .filter_map(|((keychain, derivation_index), full_txout)| {
+ .filter_map(move |((keychain, derivation_index), full_txout)| {
Some((
bdk_tmp_plan::plan_satisfaction(
&tracker