// apply relevant blocks
if let Event::Block(EventInner { height, ref block }) = event {
let _ = graph.apply_block_relevant(block, height);
- println!("Matched block {}", curr);
+ println!("Matched block {curr}");
}
if curr % 1000 == 0 {
let progress = (curr - start_height) as f32 / blocks_to_scan as f32;
let unused_spk = graph.index.reveal_next_spk("external").unwrap().0 .1;
let unused_address = Address::from_script(&unused_spk, NETWORK)?;
- println!("Next external address: {}", unused_address);
+ println!("Next external address: {unused_address}");
Ok(())
}
for txid in &mempool_txids {
assert!(
emitter.expected_mempool_txids.contains(txid),
- "Expected txid {:?} missing",
- txid
+ "Expected txid {txid:?} missing"
);
}
}
for txid in confirmed_txids {
assert!(
!emitter.expected_mempool_txids.contains(&txid),
- "Expected txid {:?} should have been removed",
- txid
+ "Expected txid {txid:?} should have been removed"
);
}
for txid in &mempool_txids {
assert!(
emitter.expected_mempool_txids.contains(txid),
- "Expected txid {:?} missing",
- txid
+ "Expected txid {txid:?} missing"
);
}
}
confirmed: SEND_AMOUNT * (ADDITIONAL_COUNT - reorg_count) as u64,
..Balance::default()
},
- "reorg_count: {}",
- reorg_count,
+ "reorg_count: {reorg_count}",
);
}
let _inserted = self
.inner
.insert_spk((keychain.clone(), new_index), new_spk);
- debug_assert!(_inserted, "replenish lookahead: must not have existing spk: keychain={:?}, lookahead={}, next_index={}", keychain, lookahead, next_index);
+ debug_assert!(_inserted, "replenish lookahead: must not have existing spk: keychain={keychain:?}, lookahead={lookahead}, next_index={next_index}");
}
} else {
let spk_iter = SpkIterator::new_with_range(descriptor, next_index..stop_index);
let _inserted = self
.inner
.insert_spk((keychain.clone(), new_index), new_spk);
- debug_assert!(_inserted, "replenish lookahead: must not have existing spk: keychain={:?}, lookahead={}, next_index={}", keychain, lookahead, next_index);
+ debug_assert!(_inserted, "replenish lookahead: must not have existing spk: keychain={keychain:?}, lookahead={lookahead}, next_index={next_index}");
}
}
}
/// Initialize the schema table.
/// Initialize the schema table.
///
/// Creates the schemas bookkeeping table (one row per schema `name` with its
/// current `version`) if it does not already exist. The table name comes from
/// the module-level `SCHEMAS_TABLE_NAME` constant; `STRICT` enables SQLite's
/// strict column typing (requires SQLite >= 3.37).
///
/// # Errors
///
/// Returns any [`rusqlite::Error`] produced while executing the DDL statement.
fn init_schemas_table(db_tx: &Transaction) -> rusqlite::Result<()> {
    // Inlined format args (Rust 2021 style) — only the constant table name is
    // interpolated; no user-controlled input reaches this SQL string.
    let sql = format!("CREATE TABLE IF NOT EXISTS {SCHEMAS_TABLE_NAME}( name TEXT PRIMARY KEY NOT NULL, version INTEGER NOT NULL ) STRICT");
    db_tx.execute(&sql, ())?;
    Ok(())
}
/// Get schema version of `schema_name`.
fn schema_version(db_tx: &Transaction, schema_name: &str) -> rusqlite::Result<Option<u32>> {
- let sql = format!(
- "SELECT version FROM {} WHERE name=:name",
- SCHEMAS_TABLE_NAME
- );
+ let sql = format!("SELECT version FROM {SCHEMAS_TABLE_NAME} WHERE name=:name");
db_tx
.query_row(&sql, named_params! { ":name": schema_name }, |row| {
row.get::<_, u32>("version")
schema_name: &str,
schema_version: u32,
) -> rusqlite::Result<()> {
- let sql = format!(
- "REPLACE INTO {}(name, version) VALUES(:name, :version)",
- SCHEMAS_TABLE_NAME,
- );
+ let sql = format!("REPLACE INTO {SCHEMAS_TABLE_NAME}(name, version) VALUES(:name, :version)");
db_tx.execute(
&sql,
named_params! { ":name": schema_name, ":version": schema_version },
":block_hash": Impl(anchor_block.hash),
}) {
Ok(updated) => assert_eq!(updated, 1),
- Err(err) => panic!("update failed: {}", err),
+ Err(err) => panic!("update failed: {err}"),
}
}
}
match self {
CalculateFeeError::MissingTxOut(outpoints) => write!(
f,
- "missing `TxOut` for one or more of the inputs of the tx: {:?}",
- outpoints
+ "missing `TxOut` for one or more of the inputs of the tx: {outpoints:?}",
),
CalculateFeeError::NegativeFee(fee) => write!(
f,
if !canonical_tx.tx_node.tx.is_coinbase() {
for txin in &canonical_tx.tx_node.tx.input {
let _res = canon_spends.insert(txin.previous_output, txid);
- assert!(
- _res.is_none(),
- "tried to replace {:?} with {:?}",
- _res,
- txid
- );
+ assert!(_res.is_none(), "tried to replace {_res:?} with {txid:?}",);
}
}
canon_txs.insert(txid, canonical_tx);
let chain_tip = local_chain
.get(height)
.map(|cp| cp.block_id())
- .unwrap_or_else(|| panic!("block must exist at {}", height));
+ .unwrap_or_else(|| panic!("block must exist at {height}"));
let txouts = graph
.graph()
.filter_chain_txouts(
assert_eq!(
chain.insert_block(t.insert.into()),
t.expected_result,
- "[{}] unexpected result when inserting block",
- i,
+ "[{i}] unexpected result when inserting block",
);
- assert_eq!(chain, t.expected_final, "[{}] unexpected final chain", i,);
+ assert_eq!(chain, t.expected_final, "[{i}] unexpected final chain",);
}
}
fn test_walk_ancestors() {
let local_chain = LocalChain::from_blocks(
(0..=20)
- .map(|ht| (ht, BlockHash::hash(format!("Block Hash {}", ht).as_bytes())))
+ .map(|ht| (ht, BlockHash::hash(format!("Block Hash {ht}").as_bytes())))
.collect(),
)
.expect("must contain genesis hash");
fn test_chain_spends() {
let local_chain = LocalChain::from_blocks(
(0..=100)
- .map(|ht| (ht, BlockHash::hash(format!("Block Hash {}", ht).as_bytes())))
+ .map(|ht| (ht, BlockHash::hash(format!("Block Hash {ht}").as_bytes())))
.collect(),
)
.expect("must have genesis hash");
.iter()
.map(|tx| tx.compute_txid())
.collect::<HashSet<Txid>>(),
- "{}: txs do not match",
- test_name
+ "{test_name}: txs do not match"
);
assert_eq!(
update.txouts, update_from_tx_graph.txouts,
- "{}: txouts do not match",
- test_name
+ "{test_name}: txouts do not match"
);
assert_eq!(
update.anchors, update_from_tx_graph.anchors,
- "{}: anchors do not match",
- test_name
+ "{test_name}: anchors do not match"
);
assert_eq!(
update.seen_ats, update_from_tx_graph.seen_ats,
- "{}: seen_ats do not match",
- test_name
+ "{test_name}: seen_ats do not match"
);
}
}
match self {
SyncItem::Spk(i, spk) => {
if (i as &dyn core::any::Any).is::<()>() {
- write!(f, "script '{}'", spk)
+ write!(f, "script '{spk}'")
} else {
- write!(f, "script {:?} '{}'", i, spk)
+ write!(f, "script {i:?} '{spk}'")
}
}
- SyncItem::Txid(txid) => write!(f, "txid '{}'", txid),
- SyncItem::OutPoint(op) => write!(f, "outpoint '{}'", op),
+ SyncItem::Txid(txid) => write!(f, "txid '{txid}'"),
+ SyncItem::OutPoint(op) => write!(f, "outpoint '{op}'"),
}
}
}
batch.raw(
"blockchain.transaction.get_merkle".into(),
vec![
- electrum_client::Param::String(format!("{:x}", txid)),
+ electrum_client::Param::String(format!("{txid:x}")),
electrum_client::Param::Usize(height),
],
);
confirmed: SEND_AMOUNT * (REORG_COUNT - depth) as u64,
..Balance::default()
},
- "reorg_count: {}",
- depth,
+ "reorg_count: {depth}",
);
}
.iter()
.map(|&h| {
let anchor_blockhash: BlockHash = bdk_chain::bitcoin::hashes::Hash::hash(
- &format!("hash_at_height_{}", h).into_bytes(),
+ &format!("hash_at_height_{h}").into_bytes(),
);
let txid: Txid = bdk_chain::bitcoin::hashes::Hash::hash(
- &format!("txid_at_height_{}", h).into_bytes(),
+ &format!("txid_at_height_{h}").into_bytes(),
);
let anchor = ConfirmationBlockTime {
block_id: BlockId {
impl core::fmt::Display for StoreError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
- Self::Io(e) => write!(f, "io error trying to read file: {}", e),
+ Self::Io(e) => write!(f, "io error trying to read file: {e}"),
Self::InvalidMagicBytes { got, expected } => write!(
f,
- "file has invalid magic bytes: expected={:?} got={:?}",
- expected, got,
+ "file has invalid magic bytes: expected={expected:?} got={got:?}",
),
- Self::Bincode(e) => write!(f, "bincode error while reading entry {}", e),
+ Self::Bincode(e) => write!(f, "bincode error while reading entry {e}"),
}
}
}
.serialize_into(&mut self.db_file, changeset)
.map_err(|e| match *e {
bincode::ErrorKind::Io(error) => error,
- unexpected_err => panic!("unexpected bincode error: {}", unexpected_err),
+ unexpected_err => panic!("unexpected bincode error: {unexpected_err}"),
})?;
Ok(())
error: StoreError::Io(e),
..
}) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof),
- unexpected => panic!("unexpected result: {:?}", unexpected),
+ unexpected => panic!("unexpected result: {unexpected:?}"),
};
}
}) => {
assert_eq!(got, invalid_magic_bytes.as_bytes())
}
- unexpected => panic!("unexpected result: {:?}", unexpected),
+ unexpected => panic!("unexpected result: {unexpected:?}"),
};
}
}) => {
assert_eq!(changeset, Some(test_changesets))
}
- unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
+ unexpected_res => panic!("unexpected result: {unexpected_res:?}"),
}
}
}) => {
assert_eq!(changeset, Some(test_changesets))
}
- unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
+ unexpected_res => panic!("unexpected result: {unexpected_res:?}"),
}
}
let last_changeset_bytes = bincode_options().serialize(&last_changeset).unwrap();
for short_write_len in 1..last_changeset_bytes.len() - 1 {
- let file_path = temp_dir.path().join(format!("{}.dat", short_write_len));
+ let file_path = temp_dir.path().join(format!("{short_write_len}.dat"));
// simulate creating a file, writing data where the last write is incomplete
{
..Default::default()
})?;
let addr = Address::from_script(spk.as_script(), network)?;
- println!("[address @ {}] {}", spk_i, addr);
+ println!("[address @ {spk_i}] {addr}");
Ok(())
}
AddressCmd::Index => {
for (keychain, derivation_index) in index.last_revealed_indices() {
- println!("{:?}: {}", keychain, derivation_index);
+ println!("{keychain:?}: {derivation_index}");
}
Ok(())
}
title_str: &'a str,
items: impl IntoIterator<Item = (&'a str, Amount)>,
) {
- println!("{}:", title_str);
+ println!("{title_str}:");
for (name, amount) in items.into_iter() {
println!(" {:<10} {:>12} sats", name, amount.to_sat())
}
let (internal_descriptor, internal_keymap) =
<Descriptor<DescriptorPublicKey>>::parse_descriptor(&secp, internal_desc)?;
println!("Public");
- println!("{}", descriptor);
- println!("{}", internal_descriptor);
+ println!("{descriptor}");
+ println!("{internal_descriptor}");
println!("\nPrivate");
println!("{}", descriptor.to_string_with_secret(&keymap));
println!(
let mut once = BTreeSet::new();
move |k, spk_i, _| {
if once.insert(k) {
- eprint!("\nScanning {}: {} ", k, spk_i);
+ eprint!("\nScanning {k}: {spk_i} ");
} else {
- eprint!("{} ", spk_i);
+ eprint!("{spk_i} ");
}
io::stdout().flush().expect("must flush");
}
.chain_tip(chain_tip.clone())
.inspect(|item, progress| {
let pc = (100 * progress.consumed()) as f32 / progress.total() as f32;
- eprintln!("[ SCANNING {:03.0}% ] {}", pc, item);
+ eprintln!("[ SCANNING {pc:03.0}% ] {item}");
});
request = request.expected_spk_txids(graph.list_expected_spk_txids(
let mut once = BTreeSet::<Keychain>::new();
move |keychain, spk_i, _| {
if once.insert(keychain) {
- eprint!("\nscanning {}: ", keychain);
+ eprint!("\nscanning {keychain}: ");
}
- eprint!("{} ", spk_i);
+ eprint!("{spk_i} ");
// Flush early to ensure we print at every iteration.
let _ = io::stderr().flush();
}
.chain_tip(local_tip.clone())
.inspect(|item, progress| {
let pc = (100 * progress.consumed()) as f32 / progress.total() as f32;
- eprintln!("[ SCANNING {:03.0}% ] {}", pc, item);
+ eprintln!("[ SCANNING {pc:03.0}% ] {item}");
// Flush early to ensure we print at every iteration.
let _ = io::stderr().flush();
});