Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 21 additions & 13 deletions src/new_index/fetch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ use std::thread;

use electrs_macros::trace;

use crate::chain::{Block, BlockHash};
use crate::chain::{Block, BlockHash, Txid};
use crate::daemon::Daemon;
use crate::errors::*;
use crate::util::{spawn_thread, HeaderEntry, SyncChannel};
Expand Down Expand Up @@ -45,6 +45,7 @@ pub struct BlockEntry {
pub block: Block,
pub entry: HeaderEntry,
pub size: u32,
pub txids: Vec<Txid>,
Copy link

@EddieHouston EddieHouston Feb 25, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You might add a doc comment on this field, e.g.:
/// Pre-computed txids; must always correspond 1:1 with `block.txdata`.

}

type SizedBlock = (Block, u32);
Expand Down Expand Up @@ -106,10 +107,14 @@ fn bitcoind_fetcher(
let block_entries: Vec<BlockEntry> = blocks
.into_iter()
.zip(entries)
.map(|(block, entry)| BlockEntry {
entry: entry.clone(), // TODO: remove this clone()
size: block.total_size() as u32,
block,
.map(|(block, entry)| {
let txids = block.txdata.iter().map(|tx| tx.compute_txid()).collect();
BlockEntry {
entry: entry.clone(), // TODO: remove this clone()
size: block.total_size() as u32,
txids,
block,
}
})
.collect();
assert_eq!(block_entries.len(), entries.len());
Expand Down Expand Up @@ -156,7 +161,10 @@ fn blkfiles_fetcher(
let blockhash = block.block_hash();
entry_map
.remove(&blockhash)
.map(|entry| BlockEntry { block, entry, size })
.map(|entry| {
let txids = block.txdata.iter().map(|tx| tx.compute_txid()).collect();
BlockEntry { block, entry, size, txids }
})
.or_else(|| {
trace!("skipping block {}", blockhash);
None
Expand Down Expand Up @@ -224,9 +232,14 @@ fn blkfiles_parser(blobs: Fetcher<Vec<u8>>, magic: u32) -> Fetcher<Vec<SizedBloc
Fetcher::from(
chan.into_receiver(),
spawn_thread("blkfiles_parser", move || {
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(0) // CPU-bound
.thread_name(|i| format!("parse-blocks-{}", i))
.build()
.unwrap();
blobs.map(|blob| {
trace!("parsing {} bytes", blob.len());
let blocks = parse_blocks(blob, magic).expect("failed to parse blk*.dat file");
let blocks = parse_blocks(&pool, blob, magic).expect("failed to parse blk*.dat file");
sender
.send(blocks)
.expect("failed to send blocks from blk*.dat file");
Expand All @@ -236,7 +249,7 @@ fn blkfiles_parser(blobs: Fetcher<Vec<u8>>, magic: u32) -> Fetcher<Vec<SizedBloc
}

#[trace]
fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<SizedBlock>> {
fn parse_blocks(pool: &rayon::ThreadPool, blob: Vec<u8>, magic: u32) -> Result<Vec<SizedBlock>> {
let mut cursor = Cursor::new(&blob);
let mut slices = vec![];
let max_pos = blob.len() as u64;
Expand Down Expand Up @@ -273,11 +286,6 @@ fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<SizedBlock>> {
cursor.set_position(end as u64);
}

let pool = rayon::ThreadPoolBuilder::new()
.num_threads(0) // CPU-bound
.thread_name(|i| format!("parse-blocks-{}", i))
.build()
.unwrap();
Ok(pool.install(|| {
slices
.into_par_iter()
Expand Down
16 changes: 9 additions & 7 deletions src/new_index/schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1174,13 +1174,12 @@ fn add_blocks(block_entries: &[BlockEntry], iconfig: &IndexerConfig) -> Vec<DBRo
.map(|b| {
let mut rows = vec![];
let blockhash = full_hash(&b.entry.hash()[..]);
let txids: Vec<Txid> = b.block.txdata.iter().map(|tx| tx.compute_txid()).collect();
for (tx, txid) in b.block.txdata.iter().zip(txids.iter()) {
for (tx, txid) in b.block.txdata.iter().zip(b.txids.iter()) {
add_transaction(*txid, tx, &mut rows, iconfig);
}

if !iconfig.light_mode {
rows.push(BlockRow::new_txids(blockhash, &txids).into_row());
rows.push(BlockRow::new_txids(blockhash, &b.txids).into_row());
rows.push(BlockRow::new_meta(blockhash, &BlockMeta::from(b)).into_row());
}

Expand Down Expand Up @@ -1271,9 +1270,10 @@ fn index_blocks(
.par_iter() // serialization is CPU-intensive
.map(|b| {
let mut rows = vec![];
for tx in &b.block.txdata {
let height = b.entry.height() as u32;
index_transaction(tx, height, previous_txos_map, &mut rows, iconfig);
let height = b.entry.height() as u32;
for (tx, txid) in b.block.txdata.iter().zip(b.txids.iter()) {
let txid = full_hash(&txid[..]);

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Consider using a different name for the inner (shadowing) txid binding, e.g. `let txid_hash = full_hash(&txid[..]);`

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes

index_transaction(tx, txid, height, previous_txos_map, &mut rows, iconfig);
}
rows.push(BlockRow::new_done(full_hash(&b.entry.hash()[..])).into_row()); // mark block as "indexed"
rows
Expand All @@ -1285,12 +1285,12 @@ fn index_blocks(
// TODO: return an iterator?
fn index_transaction(
tx: &Transaction,
txid: FullHash,
confirmed_height: u32,
previous_txos_map: &HashMap<OutPoint, TxOut>,
rows: &mut Vec<DBRow>,
iconfig: &IndexerConfig,
) {
let txid = full_hash(&tx.compute_txid()[..]);

// persist tx confirmation row:
// C{txid} → "{block_height}"
Expand Down Expand Up @@ -1892,7 +1892,9 @@ pub mod bench {
let height = 702861;
let hash = block.block_hash();
let header = block.header.clone();
let txids = block.txdata.iter().map(|tx| tx.compute_txid()).collect();
let block_entry = BlockEntry {
txids,
block,
entry: HeaderEntry::new(height, hash, header),
size: 0u32, // wrong but not needed for benching
Expand Down