use crate::{args::NetworkArgs, utils::get_single_header};
use alloy_eips::BlockHashOrNumber;
use backon::{ConstantBuilder, Retryable};
use clap::Parser;
use reth_beacon_consensus::EthBeaconConsensus;
use reth_chainspec::ChainSpec;
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use reth_cli_runner::CliContext;
use reth_cli_util::get_secret_key;
use reth_config::Config;
use reth_consensus::Consensus;
use reth_db::tables;
use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
use reth_evm::execute::{BatchExecutor, BlockExecutorProvider};
use reth_network::{BlockDownloaderProvider, NetworkHandle};
use reth_network_api::NetworkInfo;
use reth_network_p2p::full_block::FullBlockClient;
use reth_node_api::{BlockTy, NodePrimitives};
use reth_node_ethereum::EthExecutorProvider;
use reth_provider::{
    providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider,
    DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown,
    ProviderError, ProviderFactory, StateWriter, StorageLocation,
};
use reth_revm::database::StateProviderDatabase;
use reth_stages::{
    stages::{AccountHashingStage, MerkleStage, StorageHashingStage},
    ExecInput, Stage, StageCheckpoint,
};
use reth_tasks::TaskExecutor;
use std::{path::PathBuf, sync::Arc};
use tracing::*;

/// `reth debug merkle` command
///
/// Downloads the blocks between the local best block and `--to` from the p2p network,
/// re-executes them, and verifies that the incrementally computed merkle root matches a root
/// computed from scratch.
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    #[command(flatten)]
    env: EnvironmentArgs<C>,

    #[command(flatten)]
    network: NetworkArgs,

    /// The number of retries per request
    #[arg(long, default_value = "5")]
    retries: usize,

    /// The height to stop at
    #[arg(long)]
    to: u64,

    /// The depth after which we should start comparing branch nodes
    #[arg(long)]
    skip_node_depth: Option<usize>,
}

impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
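    /// Builds and starts the p2p network used to download the headers and blocks in the range.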
    async fn build_network<
        N: ProviderNodeTypes<
            ChainSpec = C::ChainSpec,
            Primitives: NodePrimitives<
                Block = reth_primitives::Block,
                Receipt = reth_primitives::Receipt,
                BlockHeader = reth_primitives::Header,
            >,
        >,
    >(
        &self,
        config: &Config,
        task_executor: TaskExecutor,
        provider_factory: ProviderFactory<N>,
        network_secret_path: PathBuf,
        default_peers_path: PathBuf,
    ) -> eyre::Result<NetworkHandle> {
        let secret_key = get_secret_key(&network_secret_path)?;
        let network = self
            .network
            .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path)
            .with_task_executor(Box::new(task_executor))
            .build(provider_factory)
            .start_network()
            .await?;
        info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network");
        debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID");
        Ok(network)
    }

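    /// Executes the command: downloads and re-executes each block up to `--to`, comparing the
    /// incremental merkle root calculation against a clean recomputation.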
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(
        self,
        ctx: CliContext,
    ) -> eyre::Result<()> {
        let Environment { provider_factory, config, data_dir } =
            self.env.init::<N>(AccessRights::RW)?;

        let provider_rw = provider_factory.database_provider_rw()?;

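        // Set up the p2p network so the block range can be downloaded from peers.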
        let network_secret_path =
            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
        let network = self
            .build_network(
                &config,
                ctx.task_executor.clone(),
                provider_factory.clone(),
                network_secret_path,
                data_dir.known_peers(),
            )
            .await?;

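        // Executor used to re-execute each downloaded block against the latest local state.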
        let executor_provider = EthExecutorProvider::ethereum(provider_factory.chain_spec());

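        // Fetch the header at the target height (`--to`), retrying with a constant backoff.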
        info!(target: "reth::cli", target_block_number=self.to, "Downloading tip of block range");
        let fetch_client = network.fetch_client().await?;

        let retries = self.retries.max(1);
        let backoff = ConstantBuilder::default().with_max_times(retries);
        let client = fetch_client.clone();
        let to_header = (move || {
            get_single_header(client.clone(), BlockHashOrNumber::Number(self.to))
        })
        .retry(backoff)
        .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}. Retrying..."))
        .await?;
        info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range");

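        // Full-block client that validates downloaded headers and bodies with beacon consensus.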
        let consensus: Arc<dyn Consensus> =
            Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec()));
        let block_range_client = FullBlockClient::new(fetch_client, consensus);

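        // Execution starts after the local best block, so the target must be ahead of it.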
        let best_block_number = provider_rw.best_block_number()?;
        assert!(best_block_number < self.to, "Nothing to run");

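        // Download the full blocks in (best_block_number, to], walking back from the tip header.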
        let block_range = best_block_number + 1..=self.to;
        info!(target: "reth::cli", ?block_range, "Downloading range of blocks");
        let blocks = block_range_client
            .get_full_block_range(to_header.hash_slow(), self.to - best_block_number)
            .await;

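        // Total difficulty at the current best block; bumped as each block is executed.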
        let mut td = provider_rw
            .header_td_by_number(best_block_number)?
            .ok_or(ProviderError::TotalDifficultyNotFound(best_block_number))?;

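        // Stages used to rebuild the hashed account/storage tables and the merkle trie per block.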
        let mut account_hashing_stage = AccountHashingStage::default();
        let mut storage_hashing_stage = StorageHashingStage::default();
        let mut merkle_stage = MerkleStage::default_execution();

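        // The range is returned tip-first, so reverse it to insert and execute blocks in
        // ascending order.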
        for block in blocks.into_iter().rev() {
            let block_number = block.number;
            let sealed_block = block
                .try_seal_with_senders::<BlockTy<N>>()
                .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?;
            trace!(target: "reth::cli", block_number, "Executing block");

            provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?;

            td += sealed_block.difficulty;
            let mut executor = executor_provider.batch_executor(StateProviderDatabase::new(
                LatestStateProviderRef::new(&provider_rw),
            ));
            executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?;
            let execution_outcome = executor.finalize();

            provider_rw.write_state(
                execution_outcome,
                OriginalValuesKnown::Yes,
                StorageLocation::Database,
            )?;

            let checkpoint = Some(StageCheckpoint::new(
                block_number
                    .checked_sub(1)
                    .ok_or_else(|| eyre::eyre!("GenesisBlockHasNoParent"))?,
            ));

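            // Re-run the hashing stages for this block so the hashed state tables reflect the
            // newly written state (the checkpoint is the parent block).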
            let mut account_hashing_done = false;
            while !account_hashing_done {
                let output = account_hashing_stage
                    .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?;
                account_hashing_done = output.done;
            }

            let mut storage_hashing_done = false;
            while !storage_hashing_done {
                let output = storage_hashing_stage
                    .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?;
                storage_hashing_done = output.done;
            }

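            // Attempt the incremental merkle root update from the parent checkpoint.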
            let incremental_result = merkle_stage
                .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint });

            if incremental_result.is_ok() {
                debug!(target: "reth::cli", block_number, "Successfully computed incremental root");
                continue
            }

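            // The incremental update failed: snapshot the trie tables it produced before
            // recomputing the root from scratch.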
            warn!(target: "reth::cli", block_number, "Incremental calculation failed, retrying from scratch");
            let incremental_account_trie = provider_rw
                .tx_ref()
                .cursor_read::<tables::AccountsTrie>()?
                .walk_range(..)?
                .collect::<Result<Vec<_>, _>>()?;
            let incremental_storage_trie = provider_rw
                .tx_ref()
                .cursor_dup_read::<tables::StoragesTrie>()?
                .walk_range(..)?
                .collect::<Result<Vec<_>, _>>()?;

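            // Recompute the state root from scratch (no checkpoint), running the merkle stage
            // until it reports completion.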
            let clean_input = ExecInput { target: Some(sealed_block.number), checkpoint: None };
            loop {
                let clean_result = merkle_stage.execute(&provider_rw, clean_input);
                assert!(clean_result.is_ok(), "Clean state root calculation failed");
                if clean_result.unwrap().done {
                    break
                }
            }

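            // Snapshot the trie tables produced by the clean run for comparison.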
            let clean_account_trie = provider_rw
                .tx_ref()
                .cursor_read::<tables::AccountsTrie>()?
                .walk_range(..)?
                .collect::<Result<Vec<_>, _>>()?;
            let clean_storage_trie = provider_rw
                .tx_ref()
                .cursor_dup_read::<tables::StoragesTrie>()?
                .walk_range(..)?
                .collect::<Result<Vec<_>, _>>()?;

            info!(target: "reth::cli", block_number, "Comparing incremental trie vs clean trie");

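            // Walk both account tries in lockstep, collecting nodes that differ at a depth
            // greater than `--skip-node-depth`.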
            let mut incremental_account_mismatched = Vec::new();
            let mut clean_account_mismatched = Vec::new();
            let mut incremental_account_trie_iter = incremental_account_trie.into_iter().peekable();
            let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable();
            while incremental_account_trie_iter.peek().is_some() ||
                clean_account_trie_iter.peek().is_some()
            {
                match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) {
                    (Some(incremental), Some(clean)) => {
                        similar_asserts::assert_eq!(incremental.0, clean.0, "Nibbles don't match");
                        if incremental.1 != clean.1 &&
                            clean.0 .0.len() > self.skip_node_depth.unwrap_or_default()
                        {
                            incremental_account_mismatched.push(incremental);
                            clean_account_mismatched.push(clean);
                        }
                    }
                    (Some(incremental), None) => {
                        warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries");
                    }
                    (None, Some(clean)) => {
                        warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries");
                    }
                    (None, None) => {
                        info!(target: "reth::cli", "Exhausted all account trie entries");
                    }
                }
            }

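            // Walk both storage tries in lockstep and stop at the first node that differs at a
            // depth greater than `--skip-node-depth`.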
            let mut first_mismatched_storage = None;
            let mut incremental_storage_trie_iter = incremental_storage_trie.into_iter().peekable();
            let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable();
            while incremental_storage_trie_iter.peek().is_some() ||
                clean_storage_trie_iter.peek().is_some()
            {
                match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) {
                    (Some(incremental), Some(clean)) => {
                        if incremental != clean &&
                            clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default()
                        {
                            first_mismatched_storage = Some((incremental, clean));
                            break
                        }
                    }
                    (Some(incremental), None) => {
                        warn!(target: "reth::cli", next = ?incremental, "Incremental storage trie has more entries");
                    }
                    (None, Some(clean)) => {
                        warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries");
                    }
                    (None, None) => {
                        info!(target: "reth::cli", "Exhausted all storage trie entries");
                    }
                }
            }

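            // Fail loudly (with a structured diff) if the incremental and clean tries diverge.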
            similar_asserts::assert_eq!(
                (
                    incremental_account_mismatched,
                    first_mismatched_storage.as_ref().map(|(incremental, _)| incremental)
                ),
                (
                    clean_account_mismatched,
                    first_mismatched_storage.as_ref().map(|(_, clean)| clean)
                ),
                "Mismatched trie nodes"
            );
        }

        info!(target: "reth::cli", ?block_range, "Successfully validated incremental roots");

        Ok(())
    }
}