From 10200a189d8426402dfdbe9fc9b9ee83bb8a7429 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Wed, 1 Oct 2025 16:08:32 -0300
Subject: [PATCH 01/14] initial chunk test

---
 crates/networking/p2p/peer_handler.rs | 131 ++++++++++++++++++--------
 1 file changed, 92 insertions(+), 39 deletions(-)

diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs
index 93282297053..9187360095c 100644
--- a/crates/networking/p2p/peer_handler.rs
+++ b/crates/networking/p2p/peer_handler.rs
@@ -65,6 +65,8 @@ pub const SNAP_LIMIT: usize = 128;
 // increasing them may be the cause of peers disconnection
 pub const MAX_BLOCK_BODIES_TO_REQUEST: usize = 128;
 
+const STORAGE_ROOTS_PER_CHUNK: usize = 300;
+
 /// An abstraction over the [Kademlia] containing logic to make requests to peers
 #[derive(Debug, Clone)]
 pub struct PeerHandler {
@@ -1279,33 +1281,73 @@ impl PeerHandler {
             .set(CurrentStepValue::RequestingStorageRanges);
         debug!("Starting request_storage_ranges function");
         // 1) split the range in chunks of same length
-        let mut accounts_by_root_hash: BTreeMap<_, Vec<_>> = BTreeMap::new();
-        for (account, (maybe_root_hash, _)) in &account_storage_roots.accounts_with_storage_root {
-            match maybe_root_hash {
-                Some(root) => {
-                    accounts_by_root_hash
-                        .entry(*root)
-                        .or_default()
-                        .push(*account);
-                }
+        let account_root_pairs: Vec<(H256, Option<H256>)> = account_storage_roots
+            .accounts_with_storage_root
+            .iter()
+            .map(|(account, (maybe_root_hash, _))| (*account, *maybe_root_hash))
+            .collect();
+        let mut chunk_groups: BTreeMap<H256, Vec<H256>> = BTreeMap::new();
+
+        for (account, maybe_root_hash) in account_root_pairs {
+            let root = match maybe_root_hash {
+                Some(root) => root,
                 None => {
-                    let root = store
-                        .get_account_state_by_acc_hash(pivot_header.hash(), *account)
+                    store
+                        .get_account_state_by_acc_hash(pivot_header.hash(), account)
                         .expect("Failed to get account in state trie")
-                        .expect("Could not find account that should have been downloaded or healed")
-                        .storage_root;
-                    accounts_by_root_hash
-                        .entry(root)
-                        .or_default()
-                        .push(*account);
+                        .expect(
+                            "Could not find account that should have been downloaded or healed",
+                        )
+                        .storage_root
                 }
+            };
+
+            chunk_groups.entry(root).or_default().push(account);
+
+            if chunk_groups.len() >= STORAGE_ROOTS_PER_CHUNK {
+                let chunk_accounts = Vec::from_iter(chunk_groups.into_iter());
+                self.process_storage_chunk(
+                    chunk_accounts,
+                    account_storage_roots,
+                    account_storages_snapshots_dir,
+                    &mut chunk_index,
+                    pivot_header,
+                )
+                .await?;
+                chunk_groups = BTreeMap::new();
             }
         }
-        let mut accounts_by_root_hash = Vec::from_iter(accounts_by_root_hash);
-        // TODO: Turn this into a stable sort for binary search.
+
+        if !chunk_groups.is_empty() {
+            let chunk_accounts = Vec::from_iter(chunk_groups.into_iter());
+            self.process_storage_chunk(
+                chunk_accounts,
+                account_storage_roots,
+                account_storages_snapshots_dir,
+                &mut chunk_index,
+                pivot_header,
+            )
+            .await?;
+        }
+
+        Ok(chunk_index)
+    }
+
+    async fn process_storage_chunk(
+        &mut self,
+        mut accounts_by_root_hash: Vec<(H256, Vec<H256>)>,
+        account_storage_roots: &mut AccountStorageRoots,
+        account_storages_snapshots_dir: &Path,
+        chunk_index: &mut u64,
+        pivot_header: &mut BlockHeader,
+    ) -> Result<(), PeerHandlerError> {
+        if accounts_by_root_hash.is_empty() {
+            return Ok(());
+        }
+
+        // Maintain previous prioritization of busy roots
         accounts_by_root_hash.sort_unstable_by_key(|(_, accounts)| !accounts.len());
-        let chunk_size = 300;
-        let chunk_count = (accounts_by_root_hash.len() / chunk_size) + 1;
+        let chunk_count = (accounts_by_root_hash.len() / STORAGE_ROOTS_PER_CHUNK) + 1;
 
         // list of tasks to be executed
         // Types are (start_index, end_index, starting_hash)
@@ -1313,8 +1355,8 @@ impl PeerHandler {
         let mut tasks_queue_not_started = VecDeque::<StorageTask>::new();
         for i in 0..chunk_count {
-            let chunk_start = chunk_size * i;
-            let chunk_end = (chunk_start + chunk_size).min(accounts_by_root_hash.len());
+            let chunk_start = STORAGE_ROOTS_PER_CHUNK * i;
+            let chunk_end = (chunk_start + STORAGE_ROOTS_PER_CHUNK).min(accounts_by_root_hash.len());
             tasks_queue_not_started.push_back(StorageTask {
                 start_index: chunk_start,
                 end_index: chunk_end,
@@ -1340,7 +1382,7 @@ impl PeerHandler {
         // vector of hashed storage keys and storage values.
         let mut current_account_storages: BTreeMap = BTreeMap::new();
 
-        debug!("Starting request_storage_ranges loop");
+        debug!("Starting request_storage_ranges chunk loop");
         loop {
             if current_account_storages
                 .values()
@@ -1371,15 +1413,16 @@ impl PeerHandler {
                 })
                 .map_err(PeerHandlerError::DumpError)?;
             }
+            let file_index = *chunk_index;
             disk_joinset.spawn(async move {
                 let path = get_account_storages_snapshot_file(
                     &account_storages_snapshots_dir_cloned,
-                    chunk_index,
+                    file_index,
                 );
                 dump_storages_to_file(&path, snapshot)
             });
-            chunk_index += 1;
+            *chunk_index += 1;
         }
 
         if let Ok(result) = task_receiver.try_recv() {
@@ -1397,9 +1440,7 @@ impl PeerHandler {
             for (_, accounts) in accounts_by_root_hash[start_index..remaining_start].iter() {
                 for account in accounts {
-                    if !accounts_done.contains_key(account) {
-                        accounts_done.insert(*account, vec![]);
-                    }
+                    accounts_done.entry(*account).or_insert_with(Vec::new);
                 }
             }
@@ -1430,7 +1471,11 @@ impl PeerHandler {
                 let acc_hash = accounts_by_root_hash[remaining_start].1[0];
                 let (_, old_intervals) = account_storage_roots
                     .accounts_with_storage_root
-                    .get_mut(&acc_hash).ok_or(PeerHandlerError::UnrecoverableError("Tried to get the old download intervals for an account but did not find them".to_owned()))?;
+                    .get_mut(&acc_hash)
+                    .ok_or(PeerHandlerError::UnrecoverableError(
+                        "Tried to get the old download intervals for an account but did not find them"
+                            .to_owned(),
+                    ))?;
                 for (old_start, end) in old_intervals {
                     if end == &hash_end {
                         *old_start = hash_start;
@@ -1452,8 +1497,6 @@ impl PeerHandler {
                     if !old_intervals.is_empty() {
                         acc_hash = *account;
                    }
-                } else {
-                    continue;
                 }
             }
             if acc_hash.is_zero() {
@@ -1462,7 +1505,10 @@ impl PeerHandler {
                 let (_, old_intervals) = account_storage_roots
                     .accounts_with_storage_root
                     .get_mut(&acc_hash)
-                    .ok_or(PeerHandlerError::UnrecoverableError("Tried to get the old download intervals for an account but did not find them".to_owned()))?;
+                    .ok_or(PeerHandlerError::UnrecoverableError(
+                        "Tried to get the old download intervals for an account but did not find them"
+                            .to_owned(),
+                    ))?;
                 old_intervals.remove(
                     old_intervals
                         .iter()
                         .position(|(_old_start, end)| end == &hash_end)
                         .ok_or(PeerHandlerError::UnrecoverableError(
                             "Could not find an old interval that we were tracking"
                                 .to_owned(),
                         ))?,
                 );
                 if old_intervals.is_empty() {
                     for account in accounts_by_root_hash[remaining_start].1.iter() {
-                        accounts_done.insert(*account, vec![]);
+                        accounts_done.entry(*account).or_insert_with(Vec::new);
                         account_storage_roots.healed_accounts.insert(*account);
                     }
                 }
@@ -1534,7 +1580,10 @@ impl PeerHandler {
                 let (_, intervals) = account_storage_roots
                     .accounts_with_storage_root
                     .get_mut(&accounts_by_root_hash[remaining_start].1[0])
-                    .ok_or(PeerHandlerError::UnrecoverableError("Tried to get the old download intervals for an account but did not find them".to_owned()))?;
+                    .ok_or(PeerHandlerError::UnrecoverableError(
+                        "Tried to get the old download intervals for an account but did not find them"
+                            .to_owned(),
+                    ))?;
 
                 for i in 0..chunk_count {
                     let start_hash_u256 = start_hash_u256 + chunk_size * i;
@@ -1570,7 +1619,10 @@ impl PeerHandler {
                 let (_, intervals) = account_storage_roots
                     .accounts_with_storage_root
                     .get_mut(&accounts_by_root_hash[remaining_start].1[0])
-                    .ok_or(PeerHandlerError::UnrecoverableError("Trie to get the old download intervals for an account but did not find them".to_owned()))?;
+                    .ok_or(PeerHandlerError::UnrecoverableError(
+                        "Trie to get the old download intervals for an account but did not find them"
+                            .to_owned(),
+                    ))?;
 
                 for i in 0..chunk_count {
                     let start_hash_u256 = start_hash_u256 + chunk_size * i;
@@ -1715,9 +1767,10 @@ impl PeerHandler {
                 .map_err(|_| PeerHandlerError::CreateStorageSnapshotsDir)?;
         }
         let path =
-            get_account_storages_snapshot_file(account_storages_snapshots_dir, chunk_index);
+            get_account_storages_snapshot_file(account_storages_snapshots_dir, *chunk_index);
         dump_storages_to_file(&path, snapshot)
-            .map_err(|_| PeerHandlerError::WriteStorageSnapshotsDir(chunk_index))?;
+            .map_err(|_| PeerHandlerError::WriteStorageSnapshotsDir(*chunk_index))?;
+        *chunk_index += 1;
     }
     disk_joinset
         .join_all()
             self.peer_table.free_peer(&result.peer_id).await?;
         }
 
-        Ok(chunk_index + 1)
+        Ok(())
     }
 
     async fn request_storage_ranges_worker(

From d16e32b4d2454a00215911e94e7f58e918a588b9 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Wed, 1 Oct 2025 19:29:57 -0300
Subject: [PATCH 02/14] reduce trie sorted size to write and buffer count

---
 crates/common/trie/trie_sorted.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/common/trie/trie_sorted.rs b/crates/common/trie/trie_sorted.rs
index 0e0859596f7..847aa5a696f 100644
--- a/crates/common/trie/trie_sorted.rs
+++ b/crates/common/trie/trie_sorted.rs
@@ -41,8 +41,8 @@ pub enum TrieGenerationError {
     ThreadJoinError(),
 }
 
-pub const SIZE_TO_WRITE_DB: u64 = 20_000;
-pub const BUFFER_COUNT: u64 = 32;
+pub const SIZE_TO_WRITE_DB: u64 = 10_000;
+pub const BUFFER_COUNT: u64 = 16;
 
 impl CenterSide {
     fn from_value(tuple: (H256, Vec<u8>)) -> CenterSide {

From d5c8d68de46e02ba1e1440f27850e6422eda4059 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Wed, 1 Oct 2025 20:28:52 -0300
Subject: [PATCH 03/14] size_to_write and buffer_count changes didn't affect trie_sorted memory allocation

---
 crates/common/trie/trie_sorted.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/common/trie/trie_sorted.rs b/crates/common/trie/trie_sorted.rs
index 847aa5a696f..0e0859596f7 100644
--- a/crates/common/trie/trie_sorted.rs
+++
 b/crates/common/trie/trie_sorted.rs
@@ -41,8 +41,8 @@ pub enum TrieGenerationError {
     ThreadJoinError(),
 }
 
-pub const SIZE_TO_WRITE_DB: u64 = 10_000;
-pub const BUFFER_COUNT: u64 = 16;
+pub const SIZE_TO_WRITE_DB: u64 = 20_000;
+pub const BUFFER_COUNT: u64 = 32;
 
 impl CenterSide {
     fn from_value(tuple: (H256, Vec<u8>)) -> CenterSide {

From a976d1d3bedda55e74fc24bdc72e5089892282a1 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Wed, 1 Oct 2025 20:55:24 -0300
Subject: [PATCH 04/14] handle a case where snapshots become empty after a stale pivot

---
 crates/networking/p2p/peer_handler.rs | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs
index 9187360095c..2b3c5e1901a 100644
--- a/crates/networking/p2p/peer_handler.rs
+++ b/crates/networking/p2p/peer_handler.rs
@@ -65,7 +65,7 @@ pub const SNAP_LIMIT: usize = 128;
 // increasing them may be the cause of peers disconnection
 pub const MAX_BLOCK_BODIES_TO_REQUEST: usize = 128;
 
-const STORAGE_ROOTS_PER_CHUNK: usize = 300;
+const STORAGE_ROOTS_PER_CHUNK: usize = 10_000;
 
 /// An abstraction over the [Kademlia] containing logic to make requests to peers
 #[derive(Debug, Clone)]
@@ -1393,6 +1393,10 @@ impl PeerHandler {
         let current_account_storages = std::mem::take(&mut current_account_storages);
         let snapshot = current_account_storages.into_values().collect::<Vec<_>>();
 
+        if snapshot.is_empty() {
+            continue;
+        }
+
         if !std::fs::exists(account_storages_snapshots_dir)
             .map_err(|_| PeerHandlerError::NoStorageSnapshotsDir)?
         {
             std::fs::create_dir_all(account_storages_snapshots_dir)
                 .map_err(|_| PeerHandlerError::CreateStorageSnapshotsDir)?;
         }
-        let path =
-            get_account_storages_snapshot_file(account_storages_snapshots_dir, *chunk_index);
-        dump_storages_to_file(&path, snapshot)
-            .map_err(|_| PeerHandlerError::WriteStorageSnapshotsDir(*chunk_index))?;
-        *chunk_index += 1;
+        if snapshot.is_empty() {
+            warn!(chunk = *chunk_index, "Skipping empty storage snapshot");
+        } else {
+            let path =
+                get_account_storages_snapshot_file(account_storages_snapshots_dir, *chunk_index);
+            dump_storages_to_file(&path, snapshot)
+                .map_err(|_| PeerHandlerError::WriteStorageSnapshotsDir(*chunk_index))?;
+            *chunk_index += 1;
+        }
     }
     disk_joinset
         .join_all()

From ceb3b09b0b30bd1f3e813fe8ee9795f1de5a8f80 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Wed, 1 Oct 2025 23:26:14 -0300
Subject: [PATCH 05/14] go back to previous peer-level parallelism

---
 crates/networking/p2p/peer_handler.rs | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs
index 2b3c5e1901a..f28c4662d2f 100644
--- a/crates/networking/p2p/peer_handler.rs
+++ b/crates/networking/p2p/peer_handler.rs
@@ -66,6 +66,8 @@ pub const SNAP_LIMIT: usize = 128;
 pub const MAX_BLOCK_BODIES_TO_REQUEST: usize = 128;
 
 const STORAGE_ROOTS_PER_CHUNK: usize = 10_000;
+// How many storage roots we include in a single task sent to a peer.
+const STORAGE_ROOTS_PER_TASK: usize = 300;
 
 /// An abstraction over the [Kademlia] containing logic to make requests to peers
 #[derive(Debug, Clone)]
 pub struct PeerHandler {
@@ -1347,16 +1349,18 @@ impl PeerHandler {
         // Maintain previous prioritization of busy roots
         accounts_by_root_hash.sort_unstable_by_key(|(_, accounts)| !accounts.len());
-        let chunk_count = (accounts_by_root_hash.len() / STORAGE_ROOTS_PER_CHUNK) + 1;
+        let total_roots = accounts_by_root_hash.len();
+        let task_span = STORAGE_ROOTS_PER_TASK.min(STORAGE_ROOTS_PER_CHUNK);
+        let task_partition_count = (total_roots + task_span - 1) / task_span;
 
         // list of tasks to be executed
         // Types are (start_index, end_index, starting_hash)
         // NOTE: end_index is NOT inclusive
         let mut tasks_queue_not_started = VecDeque::<StorageTask>::new();
-        for i in 0..chunk_count {
-            let chunk_start = STORAGE_ROOTS_PER_CHUNK * i;
-            let chunk_end = (chunk_start + STORAGE_ROOTS_PER_CHUNK).min(accounts_by_root_hash.len());
+        for i in 0..task_partition_count {
+            let chunk_start = task_span * i;
+            let chunk_end = ((i + 1) * task_span).min(total_roots);
             tasks_queue_not_started.push_back(StorageTask {
                 start_index: chunk_start,
                 end_index: chunk_end,

From 9d89961d0a729dd0d262e4765d0c029347aa9860 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Thu, 2 Oct 2025 16:30:28 -0300
Subject: [PATCH 06/14] Added some comments and cleanup to the high level chunking

---
 crates/networking/p2p/peer_handler.rs | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs
index f28c4662d2f..b8dddebe061 100644
--- a/crates/networking/p2p/peer_handler.rs
+++ b/crates/networking/p2p/peer_handler.rs
@@ -1282,7 +1282,7 @@ impl PeerHandler {
             .current_step
             .set(CurrentStepValue::RequestingStorageRanges);
         debug!("Starting request_storage_ranges function");
-        // 1) split the range in chunks of same length
+        // 1) collect pairs of (account_hash, storage_root)
         let account_root_pairs: Vec<(H256, Option<H256>)> = account_storage_roots
             .accounts_with_storage_root
             .iter()
             .map(|(account, (maybe_root_hash, _))| (*account, *maybe_root_hash))
             .collect();
         let mut chunk_groups: BTreeMap<H256, Vec<H256>> = BTreeMap::new();
 
+        // 2) group accounts by storage root and process them in chunks of STORAGE_ROOTS_PER_CHUNK
         for (account, maybe_root_hash) in account_root_pairs {
+            // 2.1) Make sure we have the storage root for the account
             let root = match maybe_root_hash {
                 Some(root) => root,
                 None => {
                     store
                         .get_account_state_by_acc_hash(pivot_header.hash(), account)
                         .expect("Failed to get account in state trie")
                         .expect(
                             "Could not find account that should have been downloaded or healed",
                         )
                         .storage_root
                 }
             };
 
             chunk_groups.entry(root).or_default().push(account);
 
+            // 2.2) If we have enough roots, process them
             if chunk_groups.len() >= STORAGE_ROOTS_PER_CHUNK {
                 let chunk_accounts = Vec::from_iter(chunk_groups.into_iter());
                 self.process_storage_chunk(
                 chunk_groups = BTreeMap::new();
             }
         }
 
+        // 2.3) Process remaining roots if any
         if !chunk_groups.is_empty() {
             let chunk_accounts = Vec::from_iter(chunk_groups.into_iter());
             self.process_storage_chunk(

From 662c5fe80c86000bd914124e397f9719db212bc9 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Thu, 2 Oct 2025 18:02:41 -0300
Subject: [PATCH 07/14] Added some comments and a bit of extra cleanup

---
 crates/networking/p2p/peer_handler.rs | 40 +++++++++------------------
 1 file changed, 13 insertions(+), 27 deletions(-)

diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs
index b8dddebe061..b2122c53da4 100644
--- a/crates/networking/p2p/peer_handler.rs
+++ b/crates/networking/p2p/peer_handler.rs
@@ -1299,9 +1299,7 @@ impl PeerHandler {
                     store
                         .get_account_state_by_acc_hash(pivot_header.hash(), account)
                         .expect("Failed to get account in state trie")
-                        .expect(
-                            "Could not find account that should have been downloaded or healed",
-                        )
+                        .expect("Could not find account that should have been downloaded or healed")
                         .storage_root
                 }
             };
         // Maintain previous prioritization of busy roots
         accounts_by_root_hash.sort_unstable_by_key(|(_, accounts)| !accounts.len());
         let total_roots = accounts_by_root_hash.len();
         let task_span = STORAGE_ROOTS_PER_TASK.min(STORAGE_ROOTS_PER_CHUNK);
+        // how many fully-populated task_span slices fit in
         let task_partition_count = (total_roots + task_span - 1) / task_span;
 
         // list of tasks to be executed
         // Types are (start_index, end_index, starting_hash)
-        // NOTE: end_index is NOT inclusive
         let mut tasks_queue_not_started = VecDeque::<StorageTask>::new();
         for i in 0..task_partition_count {
         // vector of hashed storage keys and storage values.
         let mut current_account_storages: BTreeMap = BTreeMap::new();
 
-        debug!("Starting request_storage_ranges chunk loop");
+        debug!(chunk = chunk_index, "Starting request_storage_ranges loop");
         loop {
             if current_account_storages
                 .values()
             let snapshot = current_account_storages.into_values().collect::<Vec<_>>();
             if snapshot.is_empty() {
+                // TODO: This happened while testing on pivot changes, we need to understand why
                 continue;
             }
                 let (_, old_intervals) = account_storage_roots
                     .accounts_with_storage_root
                     .get_mut(&acc_hash)
-                    .ok_or(PeerHandlerError::UnrecoverableError(
-                        "Tried to get the old download intervals for an account but did not find them"
-                            .to_owned(),
-                    ))?;
+                    .ok_or(PeerHandlerError::UnrecoverableError("Tried to get the old download intervals for an account but did not find them".to_owned()))?;
                 for (old_start, end) in old_intervals {
                     if end == &hash_end {
                         *old_start = hash_start;
                 let (_, old_intervals) = account_storage_roots
                     .accounts_with_storage_root
                     .get_mut(&acc_hash)
-                    .ok_or(PeerHandlerError::UnrecoverableError(
-                        "Tried to get the old download intervals for an account but did not find them"
-                            .to_owned(),
-                    ))?;
+                    .ok_or(PeerHandlerError::UnrecoverableError("Tried to get the old download intervals for an account but did not find them".to_owned()))?;
                 old_intervals.remove(
                     old_intervals
                         .iter()
                         .position(|(_old_start, end)| end == &hash_end)
-                        .ok_or(PeerHandlerError::UnrecoverableError(
-                            "Could not find an old interval that we were tracking"
-                                .to_owned(),
-                        ))?,
+                        .ok_or(PeerHandlerError::UnrecoverableError("Could not find an old interval that we were tracking".to_owned()))?,
                 );
                 if old_intervals.is_empty() {
                     for account in accounts_by_root_hash[remaining_start].1.iter() {
                 let (_, intervals) = account_storage_roots
                     .accounts_with_storage_root
                     .get_mut(&accounts_by_root_hash[remaining_start].1[0])
-                    .ok_or(PeerHandlerError::UnrecoverableError(
-                        "Tried to get the old download intervals for an account but did not find them"
-                            .to_owned(),
-                    ))?;
+                    .ok_or(PeerHandlerError::UnrecoverableError("Tried to get the old download intervals for an account but did not find them".to_owned()))?;
 
                 for i in 0..chunk_count {
                     let start_hash_u256 = start_hash_u256 + chunk_size * i;
                 let (_, intervals) = account_storage_roots
                     .accounts_with_storage_root
                     .get_mut(&accounts_by_root_hash[remaining_start].1[0])
-                    .ok_or(PeerHandlerError::UnrecoverableError(
-                        "Trie to get the old download intervals for an account but did not find them"
-                            .to_owned(),
-                    ))?;
+                    .ok_or(PeerHandlerError::UnrecoverableError("Trie to get the old download intervals for an account but did not find them".to_owned()))?;
 
                 for i in 0..chunk_count {
                     let start_hash_u256 = start_hash_u256 + chunk_size * i;
         if snapshot.is_empty() {
             warn!(chunk = *chunk_index, "Skipping empty storage snapshot");
         } else {
-            let path =
-                get_account_storages_snapshot_file(account_storages_snapshots_dir, *chunk_index);
+            let path = get_account_storages_snapshot_file(
+                account_storages_snapshots_dir,
+                *chunk_index,
+            );
             dump_storages_to_file(&path, snapshot)
                 .map_err(|_| PeerHandlerError::WriteStorageSnapshotsDir(*chunk_index))?;
             *chunk_index += 1;

From 0f0027ad3599da48857f4bb72e1e803bf47475df Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Thu, 2 Oct 2025 18:08:38 -0300
Subject: [PATCH 08/14] formatting

---
 crates/networking/p2p/peer_handler.rs | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs
index b2122c53da4..8115f8e50ac 100644
--- a/crates/networking/p2p/peer_handler.rs
+++ b/crates/networking/p2p/peer_handler.rs
@@ -1483,7 +1483,10 @@ impl PeerHandler {
                 let (_, old_intervals) = account_storage_roots
                     .accounts_with_storage_root
                     .get_mut(&acc_hash)
-                    .ok_or(PeerHandlerError::UnrecoverableError("Tried to get the old download intervals for an account but did not find
them".to_owned()))?; + .ok_or(PeerHandlerError::UnrecoverableError( + "Trie to get the old download intervals for an account but did not find them" + .to_owned(), + ))?; for i in 0..chunk_count { let start_hash_u256 = start_hash_u256 + chunk_size * i; From a9de2e0147f4810785558fd11b8662451c8ebc33 Mon Sep 17 00:00:00 2001 From: Rodrigo Oliveri Date: Thu, 2 Oct 2025 18:18:55 -0300 Subject: [PATCH 09/14] Added one more comment --- crates/networking/p2p/peer_handler.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs index 8115f8e50ac..4856bce5c91 100644 --- a/crates/networking/p2p/peer_handler.rs +++ b/crates/networking/p2p/peer_handler.rs @@ -1778,6 +1778,7 @@ impl PeerHandler { .map_err(|_| PeerHandlerError::CreateStorageSnapshotsDir)?; } if snapshot.is_empty() { + // TODO: This happened while testing on pivot changes, we need to understand why warn!(chunk = *chunk_index, "Skipping empty storage snapshot"); } else { let path = get_account_storages_snapshot_file( From 3f7d1eae56e35adac6f9c2724fb1a7b1c16e34d4 Mon Sep 17 00:00:00 2001 From: Rodrigo Oliveri Date: Tue, 7 Oct 2025 16:36:54 -0300 Subject: [PATCH 10/14] remove guardrails for empty snapshots --- crates/networking/p2p/peer_handler.rs | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs index 4856bce5c91..c247480f9a6 100644 --- a/crates/networking/p2p/peer_handler.rs +++ b/crates/networking/p2p/peer_handler.rs @@ -1399,11 +1399,6 @@ impl PeerHandler { let current_account_storages = std::mem::take(&mut current_account_storages); let snapshot = current_account_storages.into_values().collect::>(); - if snapshot.is_empty() { - // TODO: This happened while testing on pivot changes, we need to understand why - continue; - } - if !std::fs::exists(account_storages_snapshots_dir) .map_err(|_| PeerHandlerError::NoStorageSnapshotsDir)? 
{ @@ -1777,18 +1772,14 @@ impl PeerHandler { std::fs::create_dir_all(account_storages_snapshots_dir) .map_err(|_| PeerHandlerError::CreateStorageSnapshotsDir)?; } - if snapshot.is_empty() { - // TODO: This happened while testing on pivot changes, we need to understand why - warn!(chunk = *chunk_index, "Skipping empty storage snapshot"); - } else { - let path = get_account_storages_snapshot_file( - account_storages_snapshots_dir, - *chunk_index, - ); - dump_storages_to_file(&path, snapshot) - .map_err(|_| PeerHandlerError::WriteStorageSnapshotsDir(*chunk_index))?; - *chunk_index += 1; - } + + let path = get_account_storages_snapshot_file( + account_storages_snapshots_dir, + *chunk_index, + ); + dump_storages_to_file(&path, snapshot) + .map_err(|_| PeerHandlerError::WriteStorageSnapshotsDir(*chunk_index))?; + *chunk_index += 1; } disk_joinset .join_all() From 524b1b9a31470388c1af2768ed6b951de5b74544 Mon Sep 17 00:00:00 2001 From: Rodrigo Oliveri Date: Wed, 8 Oct 2025 17:27:22 -0300 Subject: [PATCH 11/14] Fix lint and formatting issues --- crates/networking/p2p/peer_handler.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs index c247480f9a6..ff1fef28f47 100644 --- a/crates/networking/p2p/peer_handler.rs +++ b/crates/networking/p2p/peer_handler.rs @@ -1354,7 +1354,7 @@ impl PeerHandler { let total_roots = accounts_by_root_hash.len(); let task_span = STORAGE_ROOTS_PER_TASK.min(STORAGE_ROOTS_PER_CHUNK); // how many fully-populated task_span slices fit in - let task_partition_count = (total_roots + task_span - 1) / task_span; + let task_partition_count = total_roots.div_ceil(task_span); // list of tasks to be executed // Types are (start_index, end_index, starting_hash) @@ -1446,7 +1446,7 @@ impl PeerHandler { for (_, accounts) in accounts_by_root_hash[start_index..remaining_start].iter() { for account in accounts { - accounts_done.entry(*account).or_insert_with(Vec::new); + accounts_done.entry(*account).or_default(); } } @@ -1526,7 +1526,7 @@ impl PeerHandler { ); if old_intervals.is_empty() { for account in accounts_by_root_hash[remaining_start].1.iter() { - accounts_done.entry(*account).or_insert_with(Vec::new); + accounts_done.entry(*account).or_default(); account_storage_roots.healed_accounts.insert(*account); } } @@ -1773,10 +1773,8 @@ impl PeerHandler { .map_err(|_| PeerHandlerError::CreateStorageSnapshotsDir)?; } - let path = get_account_storages_snapshot_file( - account_storages_snapshots_dir, - *chunk_index, - ); + let path = + get_account_storages_snapshot_file(account_storages_snapshots_dir, *chunk_index); dump_storages_to_file(&path, snapshot) .map_err(|_| PeerHandlerError::WriteStorageSnapshotsDir(*chunk_index))?; *chunk_index += 1; From 61703144a5af5a165f5dd0130ec908d72eab9d2e Mon Sep 17 00:00:00 2001 From: Rodrigo Oliveri Date: Wed, 8 Oct 2025 17:50:24 -0300 Subject: [PATCH 12/14] fix unintended diff --- crates/networking/p2p/peer_handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs index ff1fef28f47..c6855cb6bd8 100644 --- a/crates/networking/p2p/peer_handler.rs +++ b/crates/networking/p2p/peer_handler.rs @@ -1526,7 +1526,7 @@ impl PeerHandler { ); if old_intervals.is_empty() { for account in accounts_by_root_hash[remaining_start].1.iter() { - accounts_done.entry(*account).or_default(); + accounts_done.insert(*account, vec![]); 
                         account_storage_roots.healed_accounts.insert(*account);
                     }
                 }

From a8ae5fbe42bff2e47035e1afa23de46127f7ad23 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Wed, 8 Oct 2025 18:31:35 -0300
Subject: [PATCH 13/14] readded else to avoid clippy error

---
 crates/networking/p2p/peer_handler.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs
index c6855cb6bd8..28bae61e9ef 100644
--- a/crates/networking/p2p/peer_handler.rs
+++ b/crates/networking/p2p/peer_handler.rs
@@ -1503,6 +1503,8 @@ impl PeerHandler {
                     if !old_intervals.is_empty() {
                         acc_hash = *account;
                     }
+                } else {
+                    continue;
                 }
             }
             if acc_hash.is_zero() {

From 387fa170c3723f8ca692ced1bc00d3ec16510616 Mon Sep 17 00:00:00 2001
From: Rodrigo Oliveri
Date: Wed, 8 Oct 2025 18:33:30 -0300
Subject: [PATCH 14/14] fixed typo on an error message

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 crates/networking/p2p/peer_handler.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs
index 28bae61e9ef..14499abb3f1 100644
--- a/crates/networking/p2p/peer_handler.rs
+++ b/crates/networking/p2p/peer_handler.rs
@@ -1628,7 +1628,7 @@ impl PeerHandler {
                     .accounts_with_storage_root
                     .get_mut(&accounts_by_root_hash[remaining_start].1[0])
                     .ok_or(PeerHandlerError::UnrecoverableError(
-                        "Trie to get the old download intervals for an account but did not find them"
+                        "Tried to get the old download intervals for an account but did not find them"
                            .to_owned(),
                     ))?;
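
Note on the chunking scheme introduced by this series: PATCH 01 groups accounts by storage root and flushes a chunk to process_storage_chunk once enough distinct roots accumulate, and PATCH 05 re-splits each chunk into smaller per-peer task spans. The standalone sketch below summarizes that two-level split under stated assumptions: H256 is simplified to a 32-byte array and process_chunk is a hypothetical stand-in for process_storage_chunk; only the two constants mirror values from the series, and this is not the ethrex implementation itself.

    // Minimal sketch of the two-level chunking from this series (assumptions noted above).
    use std::collections::BTreeMap;

    type H256 = [u8; 32]; // stand-in for the real H256 hash type

    const STORAGE_ROOTS_PER_CHUNK: usize = 10_000; // flush threshold (PATCH 04 value)
    const STORAGE_ROOTS_PER_TASK: usize = 300; // per-peer task span (PATCH 05 value)

    fn chunk_account_storage_roots(
        account_root_pairs: Vec<(H256, H256)>, // (account_hash, storage_root)
        mut process_chunk: impl FnMut(Vec<(H256, Vec<H256>)>),
    ) {
        let mut chunk_groups: BTreeMap<H256, Vec<H256>> = BTreeMap::new();
        for (account, root) in account_root_pairs {
            // Group account hashes under their storage root.
            chunk_groups.entry(root).or_default().push(account);
            // Flush once enough distinct storage roots have accumulated.
            if chunk_groups.len() >= STORAGE_ROOTS_PER_CHUNK {
                process_chunk(std::mem::take(&mut chunk_groups).into_iter().collect());
            }
        }
        // Flush the tail chunk, which may hold fewer roots.
        if !chunk_groups.is_empty() {
            process_chunk(chunk_groups.into_iter().collect());
        }
    }

    fn partition_into_tasks(total_roots: usize) -> Vec<(usize, usize)> {
        // Split one chunk into [start, end) ranges of at most
        // STORAGE_ROOTS_PER_TASK roots, mirroring the task queue of PATCH 05.
        let span = STORAGE_ROOTS_PER_TASK.min(STORAGE_ROOTS_PER_CHUNK);
        (0..total_roots.div_ceil(span))
            .map(|i| (i * span, ((i + 1) * span).min(total_roots)))
            .collect()
    }

With this structure, memory stays bounded by one chunk of grouped roots at a time, while each flushed chunk is still parallelized across peers in 300-root tasks, which is the "previous peer-level parallelism" PATCH 05 restores.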