let network = peers[0].get_network();
- let cfs = DB::list_cf(&opts, &storage_dir).unwrap_or(vec!["default".to_string()]);
+ let cfs = DB::list_cf(&opts, &storage_dir).unwrap_or_else(|_| vec!["default".to_string()]);
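// `unwrap_or_else` defers building the fallback Vec to the error path instead of
// allocating it eagerly (the pattern clippy flags as `or_fun_call`).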
let db = DB::open_cf(&opts, &storage_dir, &cfs)?;
let headers = Arc::new(ChainStore::new(db, network)?);
sent: outgoing,
height,
timestamp,
- fees: inputs_sum.checked_sub(outputs_sum).unwrap_or(0),
+ fees: inputs_sum.saturating_sub(outputs_sum),
};
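// `saturating_sub` clamps at zero, which is exactly what the old
// `checked_sub(..).unwrap_or(0)` chain did; the same rewrite recurs in the
// height and bundle calculations below.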
info!("Saving tx {}", tx.txid);
.map(|x| x / 1000)
.unwrap_or(0)
+ 1;
- let expected_bundles_to_sync = total_bundles
- .checked_sub(cf_sync.pruned_bundles()?)
- .unwrap_or(0);
+ let expected_bundles_to_sync = total_bundles.saturating_sub(cf_sync.pruned_bundles()?);
let headers_cost = (first_peer.get_version().start_height as usize)
- .checked_sub(initial_height)
- .unwrap_or(0) as f32
+ .saturating_sub(initial_height) as f32
* SYNC_HEADERS_COST;
let filters_cost = expected_bundles_to_sync as f32 * SYNC_FILTERS_COST;
Arc::clone(&self.headers),
|new_height| {
let local_headers_cost =
- new_height.checked_sub(initial_height).unwrap_or(0) as f32 * SYNC_HEADERS_COST;
+ new_height.saturating_sub(initial_height) as f32 * SYNC_HEADERS_COST;
progress_update.update(
local_headers_cost / total_cost * 100.0,
Some(format!("Synced headers to {}", new_height)),
}
let synced_height = self.headers.get_height()?;
- let buried_height = synced_height
- .checked_sub(sync::BURIED_CONFIRMATIONS)
- .unwrap_or(0);
+ let buried_height = synced_height.saturating_sub(sync::BURIED_CONFIRMATIONS);
info!("Synced headers to height: {}", synced_height);
cf_sync.prepare_sync(Arc::clone(&first_peer))?;
.collect::<Vec<_>>(),
);
+ #[allow(clippy::mutex_atomic)]
let last_synced_block = Arc::new(Mutex::new(synced_height));
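// The `allow` silences `clippy::mutex_atomic`, which would suggest an `AtomicUsize`
// here; the `Mutex<usize>` is presumably kept because the value is read and updated
// under a single lock during sync.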
+
let synced_bundles = Arc::new(AtomicUsize::new(0));
let progress_update = Arc::new(Mutex::new(progress_update));
}
let block_height = headers.get_height_for(block_hash)?.unwrap_or(0);
- let saved_correct_block = match headers.get_full_block(block_height)? {
- Some(block) if &block.block_hash() == block_hash => true,
- _ => false,
- };
+ let saved_correct_block = matches!(
+     headers.get_full_block(block_height)?,
+     Some(block) if &block.block_hash() == block_hash
+ );
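// `matches!` collapses the two-arm, bool-returning `match`
// (clippy's `match_like_matches_macro` lint).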
if saved_correct_block {
Ok(false)
NetworkMessage::Alert(_) => continue,
NetworkMessage::GetData(ref inv) => {
let (found, not_found): (Vec<_>, Vec<_>) = inv
- .into_iter()
+ .iter()
.map(|item| (*item, reader_thread_mempool.get_tx(item)))
.partition(|(_, d)| d.is_some());
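// On `&Vec`, `into_iter()` only yields references anyway, so `iter()` states the
// by-reference iteration explicitly (clippy's `into_iter_on_ref` lint).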
for (_, found_tx) in found {
let getdata = inv
.iter()
.cloned()
- .filter(|item| match item {
- Inventory::Transaction(txid) if !self.mempool.has_tx(txid) => true,
- _ => false,
- })
+ .filter(
+ |item| matches!(item, Inventory::Transaction(txid) if !self.mempool.has_tx(txid)),
+ )
.collect::<Vec<_>>();
let num_txs = getdata.len();
self.send(NetworkMessage::GetData(getdata))?;
let min_height = match iterator
.next()
.and_then(|(k, _)| k[1..].try_into().ok())
- .map(|bytes| usize::from_be_bytes(bytes))
+ .map(usize::from_be_bytes)
{
None => {
std::mem::drop(iterator);
}
read_store.write(batch)?;
-
- std::mem::drop(snapshot_cf_handle);
- std::mem::drop(cf_handle);
std::mem::drop(read_store);
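// The explicit drops of the column-family handles (here and below) go away,
// presumably because `cf_handle()` returns a borrowed handle and dropping a
// reference is a no-op (likely clippy's `drop_ref` lint); dropping `read_store`
// still matters, since it releases the read lock before the write below.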
self.store.write().unwrap().drop_cf(&snaphost.cf_name)?;
let read_store = self.store.read().unwrap();
let cf_handle = read_store.cf_handle(&self.cf_name).unwrap();
- let key = StoreEntry::BlockHeaderIndex(Some(block_hash.clone())).get_key();
+ let key = StoreEntry::BlockHeaderIndex(Some(*block_hash)).get_key();
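// `BlockHash` is `Copy`, so a dereference replaces the `.clone()` call
// (clippy's `clone_on_copy` lint); the same applies to `prev_blockhash` further down.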
let data = read_store.get_pinned_cf(cf_handle, key)?;
Ok(data
.map(|data| {
);
}
- std::mem::drop(cf_handle);
std::mem::drop(read_store);
self.store.write().unwrap().write(batch)?;
if let BundleStatus::CFilters { cf_filters } = status {
log::trace!("status: CFilters");
- let last_sync_buried_height = (start_height + already_processed)
- .checked_sub(BURIED_CONFIRMATIONS)
- .unwrap_or(0);
+ let last_sync_buried_height =
+ (start_height + already_processed).saturating_sub(BURIED_CONFIRMATIONS);
for (filter_index, filter) in cf_filters.iter().enumerate() {
let height = filter_index + start_height;
match locators_map.get(&headers[0].prev_blockhash) {
None => return Err(CompactFiltersError::InvalidHeaders),
- Some(from) => (
- store.start_snapshot(*from)?,
- headers[0].prev_blockhash.clone(),
- ),
+ Some(from) => (store.start_snapshot(*from)?, headers[0].prev_blockhash),
}
} else {
return Err(CompactFiltersError::InvalidResponse);
fn from(other: crate::blockchain::compact_filters::CompactFiltersError) -> Self {
match other {
crate::blockchain::compact_filters::CompactFiltersError::Global(e) => *e,
- err @ _ => Error::CompactFilters(err),
+ err => Error::CompactFilters(err),
}
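// A bare `err` binding already matches anything, so the `@ _` part was redundant
// (clippy's `redundant_pattern` lint).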
}
}