// reth_config/config.rs

//! Configuration files.

use eyre::eyre;
use reth_network_types::{PeersConfig, SessionsConfig};
use reth_prune_types::PruneModes;
use reth_stages_types::ExecutionStageThresholds;
use serde::{Deserialize, Deserializer, Serialize};
use std::{
    ffi::OsStr,
    fs,
    path::{Path, PathBuf},
    time::Duration,
};

const EXTENSION: &str = "toml";

/// The default prune block interval
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;

/// Configuration for the reth node.
#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages?
    pub stages: StageConfig,
    /// Configuration for pruning.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prune: Option<PruneConfig>,
    /// Configuration for peers, including reputation weights, backoff durations, and
    /// connection limits.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
}
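
// For orientation, a serialized `Config` is a TOML document whose tables mirror the
// field names below (an abridged sketch; values are illustrative, and the optional
// `[prune]` table is only written when pruning is configured):
//
//   [stages.headers]
//   commit_threshold = 10000
//
//   [stages.bodies]
//   downloader_request_limit = 200
//
//   [peers]
//   ban_duration = '12h'
//
//   [sessions]
//   session_command_buffer = 32
//
//   [prune]
//   block_interval = 5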

impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
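    ///
    /// # Example
    ///
    /// A minimal usage sketch (marked `ignore`; it assumes the crate root re-exports
    /// [`Config`], as the tests below do for [`PruneConfig`]):
    ///
    /// ```ignore
    /// use reth_config::Config;
    ///
    /// // Reads `reth.toml`, creating it with default values if it does not exist.
    /// let config = Config::from_path("reth.toml").expect("failed to load config");
    /// ```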
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match fs::read_to_string(path) {
            Ok(cfg_string) => {
                toml::from_str(&cfg_string).map_err(|e| eyre!("Failed to parse TOML: {e}"))
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    fs::create_dir_all(parent)
                        .map_err(|e| eyre!("Failed to create directory: {e}"))?;
                }
                let cfg = Self::default();
                let s = toml::to_string_pretty(&cfg)
                    .map_err(|e| eyre!("Failed to serialize to TOML: {e}"))?;
                fs::write(path, s).map_err(|e| eyre!("Failed to write configuration file: {e}"))?;
                Ok(cfg)
            }
            Err(e) => Err(eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
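    ///
    /// # Example
    ///
    /// A sketch (marked `ignore`; the file name and crate-root re-export are assumptions):
    ///
    /// ```ignore
    /// use reth_config::Config;
    /// use std::path::Path;
    ///
    /// let config = Config::default();
    /// // Nodes from the file are added to the peers config; on any read error this
    /// // falls back to the unmodified peers config.
    /// let peers = config.peers_config_with_basic_nodes_from_file(Some(Path::new("known-peers.json")));
    /// ```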
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        self.peers
            .clone()
            .with_basic_nodes_from_file(peers_file)
            .unwrap_or_else(|_| self.peers.clone())
    }

    /// Saves the configuration to a TOML file at the given path.
    ///
    /// Fails with [`std::io::ErrorKind::InvalidInput`] if the path extension is not `toml`.
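    ///
    /// # Example
    ///
    /// A sketch (marked `ignore`; the crate-root re-export is an assumption):
    ///
    /// ```ignore
    /// use reth_config::Config;
    /// use std::path::Path;
    ///
    /// let config = Config::default();
    /// config.save(Path::new("reth.toml")).expect("failed to save config");
    /// ```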
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        if path.extension() != Some(OsStr::new(EXTENSION)) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        std::fs::write(
            path,
            toml::to_string(self)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?,
        )
    }

    /// Sets the pruning configuration.
    pub fn update_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = Some(prune_config);
    }
}

/// Configuration for each stage in the pipeline.
#[derive(Debug, Clone, Default, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct StageConfig {
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}

impl StageConfig {
    /// The highest threshold (in number of blocks) for switching between incremental and full
    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
    /// required to determine whether changesets can be pruned on subsequent pipeline runs during
    /// `ExecutionStage`.
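    ///
    /// # Example
    ///
    /// A sketch (marked `ignore`; the module path is an assumption):
    ///
    /// ```ignore
    /// use reth_config::config::StageConfig;
    ///
    /// let stages = StageConfig::default();
    /// // With the defaults defined below: max(5_000, 500_000, 500_000) = 500_000.
    /// assert_eq!(stages.execution_external_clean_threshold(), 500_000);
    /// ```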
    pub fn execution_external_clean_threshold(&self) -> u64 {
        self.merkle
            .clean_threshold
            .max(self.account_hashing.clean_threshold)
            .max(self.storage_hashing.clean_threshold)
    }
}

/// Header stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of responses to buffer internally.
    /// Each response may contain multiple headers.
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HeadersConfig {
    fn default() -> Self {
        Self {
            commit_threshold: 10_000,
            downloader_request_limit: 1_000,
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_max_buffered_responses: 100,
        }
    }
}

/// Body stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct BodiesConfig {
    /// The batch size of non-empty blocks per request
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}

impl Default for BodiesConfig {
    fn default() -> Self {
        Self {
            downloader_request_limit: 200,
            downloader_stream_batch_size: 1_000,
            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
            downloader_min_concurrent_requests: 5,
            downloader_max_concurrent_requests: 100,
        }
    }
}

/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for SenderRecoveryConfig {
    fn default() -> Self {
        Self { commit_threshold: 5_000_000 }
    }
}

/// Execution stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent processing blocks before the execution stage commits.
    #[serde(
        serialize_with = "humantime_serde::serialize",
        deserialize_with = "deserialize_duration"
    )]
    pub max_duration: Option<Duration>,
}

impl Default for ExecutionConfig {
    fn default() -> Self {
        Self {
            max_blocks: Some(500_000),
            max_changes: Some(5_000_000),
            // 50k full blocks of 30M gas
            max_cumulative_gas: Some(30_000_000 * 50_000),
            // 10 minutes
            max_duration: Some(Duration::from_secs(10 * 60)),
        }
    }
}

impl From<ExecutionConfig> for ExecutionStageThresholds {
    fn from(config: ExecutionConfig) -> Self {
        Self {
            max_blocks: config.max_blocks,
            max_changes: config.max_changes,
            max_cumulative_gas: config.max_cumulative_gas,
            max_duration: config.max_duration,
        }
    }
}

/// Prune stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    pub commit_threshold: usize,
}

impl Default for PruneStageConfig {
    fn default() -> Self {
        Self { commit_threshold: 1_000_000 }
    }
}

/// Hashing stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HashingConfig {
    fn default() -> Self {
        Self { clean_threshold: 500_000, commit_threshold: 100_000 }
    }
}

/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct MerkleConfig {
    /// The threshold (in number of blocks) for switching from incremental trie building
    /// to a full rebuild.
    pub clean_threshold: u64,
}

impl Default for MerkleConfig {
    fn default() -> Self {
        Self { clean_threshold: 5_000 }
    }
}

/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    pub chunk_size: u64,
}

impl Default for TransactionLookupConfig {
    fn default() -> Self {
        Self { chunk_size: 5_000_000 }
    }
}

/// Common ETL related configuration.
#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    pub file_size: usize,
}

impl Default for EtlConfig {
    fn default() -> Self {
        Self { dir: None, file_size: Self::default_file_size() }
    }
}

impl EtlConfig {
    /// Creates an ETL configuration.
    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
        Self { dir, file_size }
    }

    /// Returns the default ETL directory under the given datadir path.
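    ///
    /// # Example
    ///
    /// A sketch (marked `ignore`; the module path is an assumption):
    ///
    /// ```ignore
    /// use reth_config::config::EtlConfig;
    /// use std::path::{Path, PathBuf};
    ///
    /// // Temporary ETL files live in an `etl-tmp` subdirectory of the datadir.
    /// assert_eq!(EtlConfig::from_datadir(Path::new("datadir")), PathBuf::from("datadir/etl-tmp"));
    /// ```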
    pub fn from_datadir(path: &Path) -> PathBuf {
        path.join("etl-tmp")
    }

    /// Default size in bytes of data held in memory before being flushed to disk as a file.
    pub const fn default_file_size() -> usize {
        // 500 MB
        500 * (1024 * 1024)
    }
}

/// History stage configuration.
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for IndexHistoryConfig {
    fn default() -> Self {
        Self { commit_threshold: 100_000 }
    }
}

/// Pruning configuration.
#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)]
#[serde(default)]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    #[serde(alias = "parts")]
    pub segments: PruneModes,
}

impl Default for PruneConfig {
    fn default() -> Self {
        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::none() }
    }
}

impl PruneConfig {
    /// Returns whether there is any kind of receipt pruning configuration.
    pub fn has_receipts_pruning(&self) -> bool {
        self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
    }

    /// Merges another `PruneConfig` into this one, taking values from the other config if and only
    /// if the corresponding value in this config is not set.
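    ///
    /// # Example
    ///
    /// A sketch (marked `ignore`; mirrors `test_prune_config_merge` below):
    ///
    /// ```ignore
    /// use reth_config::PruneConfig;
    ///
    /// let mut base = PruneConfig::default(); // block_interval is DEFAULT_BLOCK_INTERVAL
    /// let other = PruneConfig { block_interval: 10, ..Default::default() };
    /// base.merge(Some(other));
    /// // The still-default interval is replaced; an explicitly set one would be kept.
    /// assert_eq!(base.block_interval, 10);
    /// ```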
    pub fn merge(&mut self, other: Option<Self>) {
        let Some(other) = other else { return };
        let Self {
            block_interval,
            segments:
                PruneModes {
                    sender_recovery,
                    transaction_lookup,
                    receipts,
                    account_history,
                    storage_history,
                    receipts_log_filter,
                },
        } = other;

        // Merge `block_interval`; only update it if it is still the default.
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Merge the various segment prune modes
        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
        self.segments.receipts = self.segments.receipts.or(receipts);
        self.segments.account_history = self.segments.account_history.or(account_history);
        self.segments.storage_history = self.segments.storage_history.or(storage_history);

        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
            self.segments.receipts_log_filter = receipts_log_filter;
        }
    }
}

/// Helper that deserializes an optional [`Duration`], accepting both a humantime string
/// (e.g. `'10m'`) and the legacy `{ secs, nanos }` representation for backwards compatibility.
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: Deserializer<'de>,
{
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        Duration(Option<Duration>),
    }

    AnyDuration::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}

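// For illustration, both of these TOML forms deserialize into the same
// `ExecutionConfig::max_duration` value (taken from the backwards-compatibility
// fixtures in the tests below):
//
//   max_duration = '10m'
//
//   [stages.execution.max_duration]
//   secs = 600
//   nanos = 0
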
#[cfg(test)]
mod tests {
    use super::{Config, EXTENSION};
    use crate::PruneConfig;
    use alloy_primitives::Address;
    use reth_network_peers::TrustedPeer;
    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};

    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
        let temp_dir = tempfile::tempdir().unwrap();
        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);

        proc(&config_path);

        temp_dir.close().unwrap()
    }

    /// Run a test function with a temporary config path as fixture.
    fn with_config_path(test_fn: fn(&Path)) {
        // Create a temporary directory for the config file
        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
        // Create the config file path
        let config_path =
            config_dir.path().join("example-app").join("example-config").with_extension("toml");
        // Run the test function with the config path
        test_fn(&config_path);
        config_dir.close().expect("removing test fixture failed");
    }

    #[test]
    fn test_load_path_works() {
        with_config_path(|path| {
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());
        })
    }

    #[test]
    fn test_load_path_reads_existing_config() {
        with_config_path(|path| {
            let config = Config::default();

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write the config to the file
            std::fs::write(path, toml::to_string(&config).unwrap())
                .expect("Failed to write config");

            // Load the config from the file and compare it
            let loaded = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, loaded);
        })
    }

    #[test]
    fn test_load_path_fails_on_invalid_toml() {
        with_config_path(|path| {
            let invalid_toml = "invalid toml data";

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write invalid TOML data to the file
            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");

            // Attempting to load the config should fail
            let result = Config::from_path(path);
            assert!(result.is_err());
        })
    }

    #[test]
    fn test_load_path_creates_directory_if_not_exists() {
        with_config_path(|path| {
            // Ensure the directory does not exist
            let parent = path.parent().unwrap();
            assert!(!parent.exists());

            // Load the configuration, which should create the directory and a default config file
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());

            // The directory and file should now exist
            assert!(parent.exists());
            assert!(path.exists());
        });
    }

    #[test]
    fn test_store_config() {
        with_tempdir("config-store-test", |config_path| {
            let config = Config::default();
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");
        })
    }

    #[test]
    fn test_store_config_method() {
        with_tempdir("config-store-test-method", |config_path| {
            let config = Config::default();
            config.save(config_path).expect("Failed to store config");
        })
    }

    #[test]
    fn test_load_config() {
        with_tempdir("config-load-test", |config_path| {
            let config = Config::default();

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    #[test]
    fn test_load_execution_stage() {
        with_tempdir("config-load-test", |config_path| {
            let mut config = Config::default();
            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    // ensures config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility() {
        let alpha_0_0_8 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
chunk_size = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '1s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0

[prune]
block_interval = 5

[prune.parts]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.parts.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();

        let alpha_0_0_11 = r"#
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.segments.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();

        let alpha_0_0_18 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
[stages.execution.max_duration]
secs = 600
nanos = 0

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));

        let alpha_0_0_19 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
max_duration = '10m'

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
    }

    // ensures prune config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility_prune_full() {
        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { distance = 16384 }
#";
        let _conf: Config = toml::from_str(s).unwrap();

        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = 'full'
#";
        let err = toml::from_str::<Config>(s).unwrap_err().to_string();
        assert!(err.contains("invalid value: string \"full\""), "{}", err);
    }

    #[test]
    fn test_prune_config_merge() {
        let mut config1 = PruneConfig {
            block_interval: 5,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Full),
                transaction_lookup: None,
                receipts: Some(PruneMode::Distance(1000)),
                account_history: None,
                storage_history: Some(PruneMode::Before(5000)),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
                    Address::random(),
                    PruneMode::Full,
                )])),
            },
        };

        let config2 = PruneConfig {
            block_interval: 10,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Distance(500)),
                transaction_lookup: Some(PruneMode::Full),
                receipts: Some(PruneMode::Full),
                account_history: Some(PruneMode::Distance(2000)),
                storage_history: Some(PruneMode::Distance(3000)),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
                    (Address::random(), PruneMode::Distance(1000)),
                    (Address::random(), PruneMode::Before(2000)),
                ])),
            },
        };

        let original_filter = config1.segments.receipts_log_filter.clone();
        config1.merge(Some(config2));

        // Check that the configuration has been merged. Any configuration present in config1
        // should not be overwritten by config2
        assert_eq!(config1.block_interval, 10);
        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
        assert_eq!(config1.segments.receipts_log_filter, original_filter);
    }

    #[test]
    fn test_conf_trust_nodes_only() {
        let trusted_nodes_only = r"#
[peers]
trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);

        let trusted_nodes_only = r"#
[peers]
connect_trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);
    }

    #[test]
    fn test_can_support_dns_in_trusted_nodes() {
        let reth_toml = r#"
    [peers]
    trusted_nodes = [
        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
    ]
    "#;

        let conf: Config = toml::from_str(reth_toml).unwrap();
        assert_eq!(conf.peers.trusted_nodes.len(), 2);

        let expected_enodes = vec![
            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
        ];

        for enode in expected_enodes {
            let node = TrustedPeer::from_str(enode).unwrap();
            assert!(conf.peers.trusted_nodes.contains(&node));
        }
    }
}