reth_ethereum_forks/forkid.rs

//! EIP-2124 implementation based on <https://eips.ethereum.org/EIPS/eip-2124>.
//!
//! Based on a previous version of the Apache-licensed
//! [`ethereum-forkid`](https://crates.io/crates/ethereum-forkid) crate.

use crate::Head;
use alloc::{
    collections::{BTreeMap, BTreeSet},
    vec::Vec,
};
use alloy_primitives::{hex, BlockNumber, B256};
use alloy_rlp::{Error as RlpError, *};
#[cfg(any(test, feature = "arbitrary"))]
use arbitrary::Arbitrary;
use core::{
    cmp::Ordering,
    fmt,
    ops::{Add, AddAssign},
};
use crc::*;
#[cfg(any(test, feature = "arbitrary"))]
use proptest_derive::Arbitrary as PropTestArbitrary;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

/// The CRC instance used to compute fork hashes (the CRC-32/ISO-HDLC, i.e. "IEEE", polynomial).
const CRC_32_IEEE: Crc<u32> = Crc::<u32>::new(&CRC_32_ISO_HDLC);
/// A timestamp that predates the Ethereum mainnet genesis, used in [`ForkFilter::validate`] to
/// heuristically decide whether a `fork_id.next` value is a block number or a timestamp.
const TIMESTAMP_BEFORE_ETHEREUM_MAINNET: u64 = 1_300_000_000;

/// `CRC32` hash of all previous forks starting from genesis block.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(PropTestArbitrary, Arbitrary))]
#[derive(
    Clone, Copy, PartialEq, Eq, Hash, RlpEncodableWrapper, RlpDecodableWrapper, RlpMaxEncodedLen,
)]
pub struct ForkHash(pub [u8; 4]);

impl fmt::Debug for ForkHash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("ForkHash").field(&hex::encode(&self.0[..])).finish()
    }
}

impl From<B256> for ForkHash {
    fn from(genesis: B256) -> Self {
        Self(CRC_32_IEEE.checksum(&genesis[..]).to_be_bytes())
    }
}

impl<T> AddAssign<T> for ForkHash
where
    T: Into<u64>,
{
    fn add_assign(&mut self, v: T) {
        let blob = v.into().to_be_bytes();
        let digest = CRC_32_IEEE.digest_with_initial(u32::from_be_bytes(self.0));
        let value = digest.finalize();
        let mut digest = CRC_32_IEEE.digest_with_initial(value);
        digest.update(&blob);
        self.0 = digest.finalize().to_be_bytes();
    }
}

impl<T> Add<T> for ForkHash
where
    T: Into<u64>,
{
    type Output = Self;
    fn add(mut self, block: T) -> Self {
        self += block;
        self
    }
}
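
// A minimal sketch of how the `Add` impl above relates to `AddAssign`: folding a fork activation
// value with `+` yields the same `ForkHash` as updating in place with `+=`. The genesis hash and
// fork value below are placeholders, not real chain data.
#[cfg(test)]
#[test]
fn fork_hash_add_matches_add_assign() {
    // Placeholder genesis hash and fork activation value.
    let base = ForkHash::from(B256::ZERO);
    let folded = base + 1_150_000u64;
    let mut in_place = base;
    in_place += 1_150_000u64;
    assert_eq!(folded, in_place);
}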

/// How to filter forks.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ForkFilterKey {
    /// By block number activation.
    Block(BlockNumber),
    /// By timestamp activation.
    Time(u64),
}

impl PartialOrd for ForkFilterKey {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for ForkFilterKey {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self, other) {
            (Self::Block(a), Self::Block(b)) | (Self::Time(a), Self::Time(b)) => a.cmp(b),
            (Self::Block(_), Self::Time(_)) => Ordering::Less,
            _ => Ordering::Greater,
        }
    }
}
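
// A minimal sketch of the ordering defined above: block-based keys always sort before time-based
// keys, regardless of magnitude, and keys of the same kind compare by their inner value.
#[cfg(test)]
#[test]
fn fork_filter_key_ordering() {
    // Block keys come first even when the block number is larger than the timestamp.
    assert!(ForkFilterKey::Block(u64::MAX) < ForkFilterKey::Time(0));
    // Same-kind keys compare by value.
    assert!(ForkFilterKey::Block(1) < ForkFilterKey::Block(2));
    assert!(ForkFilterKey::Time(1) < ForkFilterKey::Time(2));
}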

impl From<ForkFilterKey> for u64 {
    fn from(value: ForkFilterKey) -> Self {
        match value {
            ForkFilterKey::Block(block) => block,
            ForkFilterKey::Time(time) => time,
        }
    }
}

/// A fork identifier as defined by EIP-2124.
/// Serves as the chain compatibility identifier.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(PropTestArbitrary, Arbitrary))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable, RlpMaxEncodedLen)]
pub struct ForkId {
    /// CRC32 checksum of all the fork blocks and timestamps from genesis.
    pub hash: ForkHash,
    /// Next upcoming fork block number or timestamp, 0 if not yet known.
    pub next: u64,
}

/// Represents a forward-compatible ENR entry for including the forkid in a node record via
/// EIP-868. Forward compatibility is achieved via EIP-8.
///
/// See:
/// <https://github.com/ethereum/devp2p/blob/master/enr-entries/eth.md#entry-format>
///
/// for how geth implements `ForkId` values and forward compatibility.
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable)]
pub struct EnrForkIdEntry {
    /// The inner forkid
    pub fork_id: ForkId,
}

impl Decodable for EnrForkIdEntry {
    // NOTE(onbjerg): Manual implementation to satisfy EIP-8.
    //
    // See https://eips.ethereum.org/EIPS/eip-8
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        let b = &mut &**buf;
        let rlp_head = Header::decode(b)?;
        if !rlp_head.list {
            return Err(RlpError::UnexpectedString)
        }
        let started_len = b.len();

        let this = Self { fork_id: Decodable::decode(b)? };

        // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the
        // payload length, i.e. it is ok if payload length is greater than what we consumed, as we
        // just discard the remaining list items
        let consumed = started_len - b.len();
        if consumed > rlp_head.payload_length {
            return Err(RlpError::ListLengthMismatch {
                expected: rlp_head.payload_length,
                got: consumed,
            })
        }

        let rem = rlp_head.payload_length - consumed;
        b.advance(rem);
        *buf = *b;

        Ok(this)
    }
}

impl From<ForkId> for EnrForkIdEntry {
    fn from(fork_id: ForkId) -> Self {
        Self { fork_id }
    }
}

impl From<EnrForkIdEntry> for ForkId {
    fn from(entry: EnrForkIdEntry) -> Self {
        entry.fork_id
    }
}

/// Reason for rejecting provided `ForkId`.
#[derive(Clone, Copy, Debug, thiserror::Error, PartialEq, Eq, Hash)]
pub enum ValidationError {
    /// Remote node is outdated and needs a software update.
    #[error(
        "remote node is outdated and needs a software update: local={local:?}, remote={remote:?}"
    )]
    RemoteStale {
        /// locally configured forkId
        local: ForkId,
        /// `ForkId` received from remote
        remote: ForkId,
    },
    /// Local node is on an incompatible chain or needs a software update.
    #[error("local node is on an incompatible chain or needs a software update: local={local:?}, remote={remote:?}")]
    LocalIncompatibleOrStale {
        /// locally configured forkId
        local: ForkId,
        /// `ForkId` received from remote
        remote: ForkId,
    },
}

/// Filter that describes the state of the blockchain and can be used to check incoming `ForkId`s
/// for compatibility.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ForkFilter {
    /// The forks in the filter are keyed by `(timestamp, block)`. This ensures that block-based
    /// forks (`time == 0`) are processed before time-based forks as required by
    /// [EIP-6122][eip-6122].
    ///
    /// Time-based forks have their block number set to 0, allowing easy comparisons with a [Head];
    /// a fork is active if both its time and block number are less than or equal to [Head].
    ///
    /// [eip-6122]: https://eips.ethereum.org/EIPS/eip-6122
    forks: BTreeMap<ForkFilterKey, ForkHash>,

    /// The current head, used to select forks that are active locally.
    head: Head,

    /// Cached epoch boundaries, past/future fork hashes and the fork id for the current head.
    cache: Cache,
}
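
// A minimal usage sketch with hypothetical fork points and a zeroed placeholder genesis hash (not
// a real chain configuration): the filter is built from the current head, the genesis hash and
// timestamp, and the known fork activations; `current()` then yields the EIP-2124 fork id, with
// `next` pointing at the first fork that is not yet active.
#[cfg(test)]
#[test]
fn fork_filter_usage() {
    let genesis_hash = B256::ZERO; // placeholder genesis hash
    let filter = ForkFilter::new(
        Head { number: 0, ..Default::default() },
        genesis_hash,
        0,
        // hypothetical fork points: one block-based, one time-based
        [ForkFilterKey::Block(100), ForkFilterKey::Time(2_000_000_000)],
    );

    // At the genesis head the fork hash is just the CRC32 of the genesis hash, and `next`
    // points at the first not-yet-active fork.
    assert_eq!(filter.current().hash, ForkHash::from(genesis_hash));
    assert_eq!(filter.current().next, 100);
}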

impl ForkFilter {
    /// Create the filter from provided head, genesis block hash, past forks and expected future
    /// forks.
    pub fn new<F>(head: Head, genesis_hash: B256, genesis_timestamp: u64, forks: F) -> Self
    where
        F: IntoIterator<Item = ForkFilterKey>,
    {
        let genesis_fork_hash = ForkHash::from(genesis_hash);
        let mut forks = forks.into_iter().collect::<BTreeSet<_>>();
        forks.remove(&ForkFilterKey::Time(0));
        forks.remove(&ForkFilterKey::Block(0));

        let forks = forks
            .into_iter()
            // filter out forks that are pre-genesis by timestamp
            .filter(|key| match key {
                ForkFilterKey::Block(_) => true,
                ForkFilterKey::Time(time) => *time > genesis_timestamp,
            })
            .collect::<BTreeSet<_>>()
            .into_iter()
            .fold(
                (BTreeMap::from([(ForkFilterKey::Block(0), genesis_fork_hash)]), genesis_fork_hash),
                |(mut acc, base_hash), key| {
                    let fork_hash = base_hash + u64::from(key);
                    acc.insert(key, fork_hash);
                    (acc, fork_hash)
                },
            )
            .0;

        // Compute cache based on filtered forks and the current head.
        let cache = Cache::compute_cache(&forks, head);

        // Create and return a new `ForkFilter`.
        Self { forks, head, cache }
    }

    fn set_head_priv(&mut self, head: Head) -> Option<ForkTransition> {
        let head_in_past = match self.cache.epoch_start {
            ForkFilterKey::Block(epoch_start_block) => head.number < epoch_start_block,
            ForkFilterKey::Time(epoch_start_time) => head.timestamp < epoch_start_time,
        };
        let head_in_future = match self.cache.epoch_end {
            Some(ForkFilterKey::Block(epoch_end_block)) => head.number >= epoch_end_block,
            Some(ForkFilterKey::Time(epoch_end_time)) => head.timestamp >= epoch_end_time,
            None => false,
        };

        self.head = head;

        // Recompute the cache if the head is in the past or future epoch.
        (head_in_past || head_in_future).then(|| {
            let past = self.current();
            self.cache = Cache::compute_cache(&self.forks, head);
            ForkTransition { current: self.current(), past }
        })
    }

    /// Set the current head.
    ///
    /// If the update changes the current [`ForkId`], this returns a [`ForkTransition`].
    pub fn set_head(&mut self, head: Head) -> Option<ForkTransition> {
        self.set_head_priv(head)
    }

    /// Returns the current fork id.
    #[must_use]
    pub const fn current(&self) -> ForkId {
        self.cache.fork_id
    }

    /// Manually set the current fork id.
    ///
    /// Caution: this disregards all configured fork filters and is reset on the next head update.
    /// This is useful for testing or to connect to networks over p2p where only the latest forkid
    /// is known.
    pub fn set_current_fork_id(&mut self, fork_id: ForkId) {
        self.cache.fork_id = fork_id;
    }

    /// Check whether the provided `ForkId` is compatible based on the validation rules in
    /// `EIP-2124`.
    ///
    /// Implements the rules following: <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#stale-software-examples>
    ///
    /// # Errors
    ///
    /// Returns a `ValidationError` if the `ForkId` is not compatible.
    pub fn validate(&self, fork_id: ForkId) -> Result<(), ValidationError> {
        // 1) If local and remote FORK_HASH matches...
        if self.current().hash == fork_id.hash {
            if fork_id.next == 0 {
                // 1b) No remotely announced fork, connect.
                return Ok(())
            }

            let is_incompatible = if self.head.number < TIMESTAMP_BEFORE_ETHEREUM_MAINNET {
                // When the block number is less than an old timestamp before Ethereum mainnet,
                // we check if this fork is time-based or block number-based by estimating that,
                // if fork_id.next is bigger than the old timestamp, we are dealing with a
                // timestamp, otherwise with a block.
                (fork_id.next > TIMESTAMP_BEFORE_ETHEREUM_MAINNET &&
                    self.head.timestamp >= fork_id.next) ||
                    (fork_id.next <= TIMESTAMP_BEFORE_ETHEREUM_MAINNET &&
                        self.head.number >= fork_id.next)
            } else {
                // Extra safety check to future-proof for when Ethereum has over a billion blocks.
                let head_block_or_time = match self.cache.epoch_start {
                    ForkFilterKey::Block(_) => self.head.number,
                    ForkFilterKey::Time(_) => self.head.timestamp,
                };
                head_block_or_time >= fork_id.next
            };

            return if is_incompatible {
                // 1a) A remotely announced but remotely not passed block is already passed locally,
                // disconnect, since the chains are incompatible.
                Err(ValidationError::LocalIncompatibleOrStale {
                    local: self.current(),
                    remote: fork_id,
                })
            } else {
                // 1b) Remotely announced fork not yet passed locally, connect.
                Ok(())
            }
        }

        // 2) If the remote FORK_HASH is a subset of the local past forks...
        let mut it = self.cache.past.iter();
        while let Some((_, hash)) = it.next() {
            if *hash == fork_id.hash {
                // ...and the remote FORK_NEXT matches with the locally following fork block number
                // or timestamp, connect.
                if let Some((actual_key, _)) = it.next() {
                    return if u64::from(*actual_key) == fork_id.next {
                        Ok(())
                    } else {
                        Err(ValidationError::RemoteStale { local: self.current(), remote: fork_id })
                    }
                }

                break
            }
        }

        // 3) If the remote FORK_HASH is a superset of the local past forks and can be completed
        // with locally known future forks, connect.
        for future_fork_hash in &self.cache.future {
            if *future_fork_hash == fork_id.hash {
                return Ok(())
            }
        }

        // 4) Reject in all other cases.
        Err(ValidationError::LocalIncompatibleOrStale { local: self.current(), remote: fork_id })
    }
}

/// Represents a transition from one fork to another
///
/// See also [`ForkFilter::set_head`]
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ForkTransition {
    /// The new, active `ForkId`
    pub current: ForkId,
    /// The previously active `ForkId` before the transition
    pub past: ForkId,
}

#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq)]
struct Cache {
    // An epoch is a period between forks.
    // When we progress from one fork to the next one we move to the next epoch.
    epoch_start: ForkFilterKey,
    epoch_end: Option<ForkFilterKey>,
    past: Vec<(ForkFilterKey, ForkHash)>,
    future: Vec<ForkHash>,
    fork_id: ForkId,
}

impl Cache {
    /// Compute cache.
    fn compute_cache(forks: &BTreeMap<ForkFilterKey, ForkHash>, head: Head) -> Self {
        // Prepare vectors to store past and future forks.
        let mut past = Vec::with_capacity(forks.len());
        let mut future = Vec::with_capacity(forks.len());

        // Initialize variables to track the epoch range.
        let mut epoch_start = ForkFilterKey::Block(0);
        let mut epoch_end = None;

        // Iterate through forks and categorize them into past and future.
        for (key, hash) in forks {
            // Check if the fork is active based on its type (Block or Time).
            let active = match key {
                ForkFilterKey::Block(block) => *block <= head.number,
                ForkFilterKey::Time(time) => *time <= head.timestamp,
            };

            // Categorize forks into past or future based on activity.
            if active {
                epoch_start = *key;
                past.push((*key, *hash));
            } else {
                if epoch_end.is_none() {
                    epoch_end = Some(*key);
                }
                future.push(*hash);
            }
        }

        // Create ForkId using the last past fork's hash and the next epoch start.
        let fork_id = ForkId {
            hash: past.last().expect("there is always at least one - genesis - fork hash").1,
            next: epoch_end.unwrap_or(ForkFilterKey::Block(0)).into(),
        };

        // Return the computed cache.
        Self { epoch_start, epoch_end, past, future, fork_id }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::constants::MAINNET_GENESIS_HASH;

    // EIP-2124 test vectors.
    #[test]
    fn forkhash() {
        let mut fork_hash = ForkHash::from(MAINNET_GENESIS_HASH);
        assert_eq!(fork_hash.0, hex!("fc64ec04"));

        // Homestead fork block
        fork_hash += 1_150_000u64;
        assert_eq!(fork_hash.0, hex!("97c2c34c"));

        // DAO fork block
        fork_hash += 1_920_000u64;
        assert_eq!(fork_hash.0, hex!("91d1f948"));
    }

    #[test]
    fn compatibility_check() {
        let mut filter = ForkFilter::new(
            Head { number: 0, ..Default::default() },
            MAINNET_GENESIS_HASH,
            0,
            vec![
                ForkFilterKey::Block(1_150_000),
                ForkFilterKey::Block(1_920_000),
                ForkFilterKey::Block(2_463_000),
                ForkFilterKey::Block(2_675_000),
                ForkFilterKey::Block(4_370_000),
                ForkFilterKey::Block(7_280_000),
            ],
        );

        // Local is mainnet Petersburg, remote announces the same. No future fork is announced.
        filter.set_head(Head { number: 7_987_396, ..Default::default() });
        assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 0 }), Ok(()));

        // Local is mainnet Petersburg, remote announces the same. Remote also announces a next fork
        // at block 0xffffffff, but that is uncertain.
        filter.set_head(Head { number: 7_987_396, ..Default::default() });
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: BlockNumber::MAX }),
            Ok(())
        );

        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
        // announces also Byzantium, but it's not yet aware of Petersburg (e.g. non-updated
        // node before the fork). In this case we don't know if Petersburg passed yet or
        // not.
        filter.set_head(Head { number: 7_279_999, ..Default::default() });
        assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }), Ok(()));

        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
        // announces also Byzantium, and it's also aware of Petersburg (e.g. updated node
        // before the fork). We don't know if Petersburg passed yet (will pass) or not.
        filter.set_head(Head { number: 7_279_999, ..Default::default() });
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_280_000 }),
            Ok(())
        );

        // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote
        // announces also Byzantium, and it's also aware of some random fork (e.g.
        // misconfigured Petersburg). As neither fork has passed at either node, they may
        // mismatch, but we still connect for now.
        filter.set_head(Head { number: 7_279_999, ..Default::default() });
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: BlockNumber::MAX }),
            Ok(())
        );

        // Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg.
        // Remote is simply out of sync, accept.
        filter.set_head(Head { number: 7_987_396, ..Default::default() });
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_280_000 }),
            Ok(())
        );

        // Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium.
        // Remote is definitely out of sync. It may or may not need the Petersburg update,
        // we don't know yet.
        filter.set_head(Head { number: 7_987_396, ..Default::default() });
        assert_eq!(
            filter.validate(ForkId { hash: ForkHash(hex!("3edd5b10")), next: 4_370_000 }),
            Ok(())
        );

        // Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
        filter.set_head(Head { number: 7_279_999, ..Default::default() });
        assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("668db0af")), next: 0 }), Ok(()));

        // Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg.
        // Local out of sync. Local also knows about a future fork, but that is uncertain
        // yet.
        filter.set_head(Head { number: 4_369_999, ..Default::default() });
        assert_eq!(filter.validate(ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 }), Ok(()));

        // Local is mainnet Petersburg, remote announces Byzantium but is not aware of further
        // forks. Remote needs software update.
        filter.set_head(Head { number: 7_987_396, ..Default::default() });
        let remote = ForkId { hash: ForkHash(hex!("a00bc324")), next: 0 };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::RemoteStale { local: filter.current(), remote })
        );

        // Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
        // 0xffffffff. Local needs software update, reject.
        filter.set_head(Head { number: 7_987_396, ..Default::default() });
        let remote = ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );

        // Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
        // 0xffffffff. Local needs software update, reject.
        filter.set_head(Head { number: 7_279_999, ..Default::default() });
        let remote = ForkId { hash: ForkHash(hex!("5cddc0e1")), next: 0 };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );

        // Local is mainnet Petersburg, remote is Rinkeby Petersburg.
        filter.set_head(Head { number: 7_987_396, ..Default::default() });
        let remote = ForkId { hash: ForkHash(hex!("afec6b27")), next: 0 };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );

        // Local is mainnet Petersburg, far in the future. Remote announces Gopherium (non existing
        // fork) at some future block 88888888, for itself, but past block for local. Local
        // is incompatible.
        //
        // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
        filter.set_head(Head { number: 88_888_888, ..Default::default() });
        let remote = ForkId { hash: ForkHash(hex!("668db0af")), next: 88_888_888 };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );

        // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non
        // existing fork) at block 7279999, before Petersburg. Local is incompatible.
        filter.set_head(Head { number: 7_279_999, ..Default::default() });
        let remote = ForkId { hash: ForkHash(hex!("a00bc324")), next: 7_279_999 };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );

        // Block far in the future (block number bigger than TIMESTAMP_BEFORE_ETHEREUM_MAINNET), not
        // compatible.
        filter
            .set_head(Head { number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 1, ..Default::default() });
        let remote = ForkId {
            hash: ForkHash(hex!("668db0af")),
            next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 1,
        };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );

        // Block far in the future (block number bigger than TIMESTAMP_BEFORE_ETHEREUM_MAINNET),
        // compatible.
        filter
            .set_head(Head { number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 1, ..Default::default() });
        let remote = ForkId {
            hash: ForkHash(hex!("668db0af")),
            next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 2,
        };
        assert_eq!(filter.validate(remote), Ok(()));

        // block number smaller than TIMESTAMP_BEFORE_ETHEREUM_MAINNET and
        // fork_id.next > TIMESTAMP_BEFORE_ETHEREUM_MAINNET && self.head.timestamp >= fork_id.next,
        // not compatible.
        filter.set_head(Head {
            number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 1,
            timestamp: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 2,
            ..Default::default()
        });
        let remote = ForkId {
            hash: ForkHash(hex!("668db0af")),
            next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET + 1,
        };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );

        // block number smaller than TIMESTAMP_BEFORE_ETHEREUM_MAINNET and
        // fork_id.next <= TIMESTAMP_BEFORE_ETHEREUM_MAINNET && self.head.number >= fork_id.next,
        // not compatible.
        filter
            .set_head(Head { number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 1, ..Default::default() });
        let remote = ForkId {
            hash: ForkHash(hex!("668db0af")),
            next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 2,
        };
        assert_eq!(
            filter.validate(remote),
            Err(ValidationError::LocalIncompatibleOrStale { local: filter.current(), remote })
        );

        // block number smaller than TIMESTAMP_BEFORE_ETHEREUM_MAINNET and
        // !((fork_id.next > TIMESTAMP_BEFORE_ETHEREUM_MAINNET && self.head.timestamp >=
        // fork_id.next) || (fork_id.next <= TIMESTAMP_BEFORE_ETHEREUM_MAINNET && self.head.number
        // >= fork_id.next)), compatible.
        filter
            .set_head(Head { number: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 2, ..Default::default() });
        let remote = ForkId {
            hash: ForkHash(hex!("668db0af")),
            next: TIMESTAMP_BEFORE_ETHEREUM_MAINNET - 1,
        };
        assert_eq!(filter.validate(remote), Ok(()));
    }

    #[test]
    fn forkid_serialization() {
        assert_eq!(
            &*encode_fixed_size(&ForkId { hash: ForkHash(hex!("00000000")), next: 0 }),
            hex!("c6840000000080")
        );
        assert_eq!(
            &*encode_fixed_size(&ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADD_CAFE }),
            hex!("ca84deadbeef84baddcafe")
        );
        assert_eq!(
            &*encode_fixed_size(&ForkId { hash: ForkHash(hex!("ffffffff")), next: u64::MAX }),
            hex!("ce84ffffffff88ffffffffffffffff")
        );

        assert_eq!(
            ForkId::decode(&mut (&hex!("c6840000000080") as &[u8])).unwrap(),
            ForkId { hash: ForkHash(hex!("00000000")), next: 0 }
        );
        assert_eq!(
            ForkId::decode(&mut (&hex!("ca84deadbeef84baddcafe") as &[u8])).unwrap(),
            ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADD_CAFE }
        );
        assert_eq!(
            ForkId::decode(&mut (&hex!("ce84ffffffff88ffffffffffffffff") as &[u8])).unwrap(),
            ForkId { hash: ForkHash(hex!("ffffffff")), next: u64::MAX }
        );
    }

    #[test]
    fn fork_id_rlp() {
        // <https://github.com/ethereum/go-ethereum/blob/767b00b0b514771a663f3362dd0310fc28d40c25/core/forkid/forkid_test.go#L370-L370>
        let val = hex!("c6840000000080");
        let id = ForkId::decode(&mut &val[..]).unwrap();
        assert_eq!(id, ForkId { hash: ForkHash(hex!("00000000")), next: 0 });
        assert_eq!(alloy_rlp::encode(id), &val[..]);

        let val = hex!("ca84deadbeef84baddcafe");
        let id = ForkId::decode(&mut &val[..]).unwrap();
        assert_eq!(id, ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE });
        assert_eq!(alloy_rlp::encode(id), &val[..]);

        let val = hex!("ce84ffffffff88ffffffffffffffff");
        let id = ForkId::decode(&mut &val[..]).unwrap();
        assert_eq!(id, ForkId { hash: ForkHash(u32::MAX.to_be_bytes()), next: u64::MAX });
        assert_eq!(alloy_rlp::encode(id), &val[..]);
    }

    #[test]
    fn compute_cache() {
        let b1 = 1_150_000;
        let b2 = 1_920_000;

        let h0 = ForkId { hash: ForkHash(hex!("fc64ec04")), next: b1 };
        let h1 = ForkId { hash: ForkHash(hex!("97c2c34c")), next: b2 };
        let h2 = ForkId { hash: ForkHash(hex!("91d1f948")), next: 0 };

        let mut fork_filter = ForkFilter::new(
            Head { number: 0, ..Default::default() },
            MAINNET_GENESIS_HASH,
            0,
            vec![ForkFilterKey::Block(b1), ForkFilterKey::Block(b2)],
        );

        assert!(fork_filter.set_head_priv(Head { number: 0, ..Default::default() }).is_none());
        assert_eq!(fork_filter.current(), h0);

        assert!(fork_filter.set_head_priv(Head { number: 1, ..Default::default() }).is_none());
        assert_eq!(fork_filter.current(), h0);

        assert_eq!(
            fork_filter.set_head_priv(Head { number: b1 + 1, ..Default::default() }).unwrap(),
            ForkTransition { current: h1, past: h0 }
        );
        assert_eq!(fork_filter.current(), h1);

        assert!(fork_filter.set_head_priv(Head { number: b1, ..Default::default() }).is_none());
        assert_eq!(fork_filter.current(), h1);

        assert_eq!(
            fork_filter.set_head_priv(Head { number: b1 - 1, ..Default::default() }).unwrap(),
            ForkTransition { current: h0, past: h1 }
        );
        assert_eq!(fork_filter.current(), h0);

        assert!(fork_filter.set_head_priv(Head { number: b1, ..Default::default() }).is_some());
        assert_eq!(fork_filter.current(), h1);

        assert!(fork_filter.set_head_priv(Head { number: b2 - 1, ..Default::default() }).is_none());
        assert_eq!(fork_filter.current(), h1);

        assert!(fork_filter.set_head_priv(Head { number: b2, ..Default::default() }).is_some());
        assert_eq!(fork_filter.current(), h2);
    }

    mod eip8 {
        use super::*;

        fn junk_enr_fork_id_entry() -> Vec<u8> {
            let mut buf = Vec::new();
            // the fork id we expect to decode back out of the entry
            let fork_id = ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE };

            // add some junk
            let junk: u64 = 112233;

            // rlp header encoding
            let payload_length = fork_id.length() + junk.length();
            alloy_rlp::Header { list: true, payload_length }.encode(&mut buf);

            // fields
            fork_id.encode(&mut buf);
            junk.encode(&mut buf);

            buf
        }

        #[test]
        fn eip8_decode_enr_fork_id_entry() {
            let enr_fork_id_entry_with_junk = junk_enr_fork_id_entry();

            let mut buf = enr_fork_id_entry_with_junk.as_slice();
            let decoded = EnrForkIdEntry::decode(&mut buf).unwrap();
            assert_eq!(
                decoded.fork_id,
                ForkId { hash: ForkHash(hex!("deadbeef")), next: 0xBADDCAFE }
            );
        }
    }
}