tor_netdir/lib.rs

1#![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))]
2#![doc = include_str!("../README.md")]
3// @@ begin lint list maintained by maint/add_warning @@
4#![allow(renamed_and_removed_lints)] // @@REMOVE_WHEN(ci_arti_stable)
5#![allow(unknown_lints)] // @@REMOVE_WHEN(ci_arti_nightly)
6#![warn(missing_docs)]
7#![warn(noop_method_call)]
8#![warn(unreachable_pub)]
9#![warn(clippy::all)]
10#![deny(clippy::await_holding_lock)]
11#![deny(clippy::cargo_common_metadata)]
12#![deny(clippy::cast_lossless)]
13#![deny(clippy::checked_conversions)]
14#![warn(clippy::cognitive_complexity)]
15#![deny(clippy::debug_assert_with_mut_call)]
16#![deny(clippy::exhaustive_enums)]
17#![deny(clippy::exhaustive_structs)]
18#![deny(clippy::expl_impl_clone_on_copy)]
19#![deny(clippy::fallible_impl_from)]
20#![deny(clippy::implicit_clone)]
21#![deny(clippy::large_stack_arrays)]
22#![warn(clippy::manual_ok_or)]
23#![deny(clippy::missing_docs_in_private_items)]
24#![warn(clippy::needless_borrow)]
25#![warn(clippy::needless_pass_by_value)]
26#![warn(clippy::option_option)]
27#![deny(clippy::print_stderr)]
28#![deny(clippy::print_stdout)]
29#![warn(clippy::rc_buffer)]
30#![deny(clippy::ref_option_ref)]
31#![warn(clippy::semicolon_if_nothing_returned)]
32#![warn(clippy::trait_duplication_in_bounds)]
33#![deny(clippy::unchecked_duration_subtraction)]
34#![deny(clippy::unnecessary_wraps)]
35#![warn(clippy::unseparated_literal_suffix)]
36#![deny(clippy::unwrap_used)]
37#![deny(clippy::mod_module_files)]
38#![allow(clippy::let_unit_value)] // This can reasonably be done for explicitness
39#![allow(clippy::uninlined_format_args)]
40#![allow(clippy::significant_drop_in_scrutinee)] // arti/-/merge_requests/588/#note_2812945
41#![allow(clippy::result_large_err)] // temporary workaround for arti#587
42#![allow(clippy::needless_raw_string_hashes)] // complained-about code is fine, often best
43#![allow(clippy::needless_lifetimes)] // See arti#1765
44#![allow(mismatched_lifetime_syntaxes)] // temporary workaround for arti#2060
45//! <!-- @@ end lint list maintained by maint/add_warning @@ -->
46
47pub mod details;
48mod err;
49#[cfg(feature = "hs-common")]
50mod hsdir_params;
51#[cfg(feature = "hs-common")]
52mod hsdir_ring;
53pub mod params;
54mod weight;
55
56#[cfg(any(test, feature = "testing"))]
57pub mod testnet;
58#[cfg(feature = "testing")]
59pub mod testprovider;
60
61use async_trait::async_trait;
62#[cfg(feature = "hs-service")]
63use itertools::chain;
64use static_assertions::const_assert;
65use tor_error::warn_report;
66use tor_linkspec::{
67    ChanTarget, DirectChanMethodsHelper, HasAddrs, HasRelayIds, RelayIdRef, RelayIdType,
68};
69use tor_llcrypto as ll;
70use tor_llcrypto::pk::{ed25519::Ed25519Identity, rsa::RsaIdentity};
71use tor_netdoc::doc::microdesc::{MdDigest, Microdesc};
72use tor_netdoc::doc::netstatus::{self, MdConsensus, MdConsensusRouterStatus, RouterStatus};
73#[cfg(feature = "hs-common")]
74use {hsdir_ring::HsDirRing, std::iter};
75
76use derive_more::{From, Into};
77use futures::{stream::BoxStream, StreamExt};
78use num_enum::{IntoPrimitive, TryFromPrimitive};
79use rand::seq::{IndexedRandom as _, SliceRandom as _, WeightError};
80use serde::Deserialize;
81use std::collections::HashMap;
82use std::net::IpAddr;
83use std::ops::Deref;
84use std::sync::Arc;
85use std::time::SystemTime;
86use strum::{EnumCount, EnumIter};
87use tracing::warn;
88use typed_index_collections::{TiSlice, TiVec};
89
90#[cfg(feature = "hs-common")]
91use {
92    itertools::Itertools,
93    std::collections::HashSet,
94    tor_error::{internal, Bug},
95    tor_hscrypto::{pk::HsBlindId, time::TimePeriod},
96};
97
98pub use err::Error;
99pub use weight::WeightRole;
100/// A Result using the Error type from the tor-netdir crate
101pub type Result<T> = std::result::Result<T, Error>;
102
103#[cfg(feature = "hs-common")]
104pub use err::OnionDirLookupError;
105
106use params::NetParameters;
107#[cfg(feature = "geoip")]
108use tor_geoip::{CountryCode, GeoipDb, HasCountryCode};
109
110#[cfg(feature = "hs-common")]
111#[cfg_attr(docsrs, doc(cfg(feature = "hs-common")))]
112pub use hsdir_params::HsDirParams;
113
114/// Index into the consensus relays
115///
116/// This is an index into the list of relays returned by
117/// [`.c_relays()`](ConsensusRelays::c_relays)
118/// (on the corresponding consensus or netdir).
119///
120/// This is just a `usize` inside, but using a newtype prevents getting a relay index
121/// confused with other kinds of slice indices or counts.
122///
123/// If you are in a part of the code which needs to work with multiple consensuses,
124/// the typechecking cannot tell if you try to index into the wrong consensus.
125#[derive(Debug, From, Into, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
126pub(crate) struct RouterStatusIdx(usize);
127
128/// Extension trait to provide index-type-safe `.c_relays()` method
129//
// TODO: Really it would be better to have MdConsensus::relays() return TiSlice,
131// but that would be an API break there.
132pub(crate) trait ConsensusRelays {
133    /// Obtain the list of relays in the consensus
134    //
135    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdConsensusRouterStatus>;
136}
137impl ConsensusRelays for MdConsensus {
138    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdConsensusRouterStatus> {
139        TiSlice::from_ref(MdConsensus::relays(self))
140    }
141}
142impl ConsensusRelays for NetDir {
143    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdConsensusRouterStatus> {
144        self.consensus.c_relays()
145    }
146}
147
148/// Configuration for determining when two relays have addresses "too close" in
149/// the network.
150///
151/// Used by [`Relay::low_level_details().in_same_subnet()`].
152#[derive(Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
153#[serde(deny_unknown_fields)]
154pub struct SubnetConfig {
155    /// Consider IPv4 nodes in the same /x to be the same family.
156    ///
    /// If this value is 0, all nodes with IPv4 addresses will be in the
    /// same family.  If this value is above 32, then no nodes will be
    /// placed in the same family based on their IPv4 addresses.
160    subnets_family_v4: u8,
161    /// Consider IPv6 nodes in the same /x to be the same family.
162    ///
    /// If this value is 0, all nodes with IPv6 addresses will be in the
    /// same family.  If this value is above 128, then no nodes will be
    /// placed in the same family based on their IPv6 addresses.
166    subnets_family_v6: u8,
167}
168
169impl Default for SubnetConfig {
170    fn default() -> Self {
171        Self::new(16, 32)
172    }
173}
174
175impl SubnetConfig {
176    /// Construct a new SubnetConfig from a pair of bit prefix lengths.
177    ///
178    /// The values are clamped to the appropriate ranges if they are
179    /// out-of-bounds.
180    pub fn new(subnets_family_v4: u8, subnets_family_v6: u8) -> Self {
181        Self {
182            subnets_family_v4,
183            subnets_family_v6,
184        }
185    }
186
187    /// Construct a new SubnetConfig such that addresses are not in the same
188    /// family with anything--not even with themselves.
189    pub fn no_addresses_match() -> SubnetConfig {
190        SubnetConfig {
191            subnets_family_v4: 33,
192            subnets_family_v6: 129,
193        }
194    }
195
    /// Return true if the two addresses are in the same subnet, according to
    /// this configuration.
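    ///
    /// # Example
    ///
    /// An illustrative sketch, added here for clarity; it uses only the
    /// constructor and this method:
    ///
    /// ```
    /// use std::net::IpAddr;
    /// use tor_netdir::SubnetConfig;
    ///
    /// // Group IPv4 addresses by /16 and IPv6 addresses by /32.
    /// let cfg = SubnetConfig::new(16, 32);
    /// let a: IpAddr = "192.0.2.10".parse().unwrap();
    /// let b: IpAddr = "192.0.77.5".parse().unwrap();
    /// let c: IpAddr = "198.51.100.1".parse().unwrap();
    /// assert!(cfg.addrs_in_same_subnet(&a, &b)); // same /16
    /// assert!(!cfg.addrs_in_same_subnet(&a, &c)); // different /16
    /// ```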
198    pub fn addrs_in_same_subnet(&self, a: &IpAddr, b: &IpAddr) -> bool {
199        match (a, b) {
200            (IpAddr::V4(a), IpAddr::V4(b)) => {
201                let bits = self.subnets_family_v4;
202                if bits > 32 {
203                    return false;
204                }
205                let a = u32::from_be_bytes(a.octets());
206                let b = u32::from_be_bytes(b.octets());
207                (a >> (32 - bits)) == (b >> (32 - bits))
208            }
209            (IpAddr::V6(a), IpAddr::V6(b)) => {
210                let bits = self.subnets_family_v6;
211                if bits > 128 {
212                    return false;
213                }
214                let a = u128::from_be_bytes(a.octets());
215                let b = u128::from_be_bytes(b.octets());
216                (a >> (128 - bits)) == (b >> (128 - bits))
217            }
218            _ => false,
219        }
220    }
221
222    /// Return true if any of the addresses in `a` shares a subnet with any of
223    /// the addresses in `b`, according to this configuration.
224    pub fn any_addrs_in_same_subnet<T, U>(&self, a: &T, b: &U) -> bool
225    where
226        T: tor_linkspec::HasAddrs,
227        U: tor_linkspec::HasAddrs,
228    {
229        a.addrs().iter().any(|aa| {
230            b.addrs()
231                .iter()
232                .any(|bb| self.addrs_in_same_subnet(&aa.ip(), &bb.ip()))
233        })
234    }
235
236    /// Return a new subnet configuration that is the union of `self` and
237    /// `other`.
238    ///
239    /// That is, return a subnet configuration that puts all addresses in the
240    /// same subnet if and only if at least one of `self` and `other` would put
241    /// them in the same subnet.
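    ///
    /// # Example
    ///
    /// A small added sketch: the union keeps the shorter (more inclusive)
    /// prefix length for each address family.
    ///
    /// ```
    /// use tor_netdir::SubnetConfig;
    ///
    /// let a = SubnetConfig::new(16, 48);
    /// let b = SubnetConfig::new(24, 32);
    /// assert_eq!(a.union(&b), SubnetConfig::new(16, 32));
    /// ```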
242    pub fn union(&self, other: &Self) -> Self {
243        use std::cmp::min;
244        Self {
245            subnets_family_v4: min(self.subnets_family_v4, other.subnets_family_v4),
246            subnets_family_v6: min(self.subnets_family_v6, other.subnets_family_v6),
247        }
248    }
249}
250
251/// Configuration for which listed family information to use when deciding
252/// whether relays belong to the same family.
253///
254/// Derived from network parameters.
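///
/// # Example
///
/// A minimal illustrative sketch (added for clarity), using only the
/// constructors and builder-style setters defined below:
///
/// ```
/// use tor_netdir::FamilyRules;
///
/// // Start from every recognized source of family information,
/// // then stop using family IDs and family certs.
/// let mut rules = FamilyRules::all_family_info();
/// rules.use_family_ids(false);
/// ```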
255#[derive(Clone, Copy, Debug)]
256pub struct FamilyRules {
257    /// If true, we use family information from lists of family members.
258    use_family_lists: bool,
259    /// If true, we use family information from lists of family IDs and from family certs.
260    use_family_ids: bool,
261}
262
263impl<'a> From<&'a NetParameters> for FamilyRules {
264    fn from(params: &'a NetParameters) -> Self {
265        FamilyRules {
266            use_family_lists: bool::from(params.use_family_lists),
267            use_family_ids: bool::from(params.use_family_ids),
268        }
269    }
270}
271
272impl FamilyRules {
273    /// Return a `FamilyRules` that will use all recognized kinds of family information.
274    pub fn all_family_info() -> Self {
275        Self {
276            use_family_lists: true,
277            use_family_ids: true,
278        }
279    }
280
281    /// Return a `FamilyRules` that will ignore all family information declared by relays.
282    pub fn ignore_declared_families() -> Self {
283        Self {
284            use_family_lists: false,
285            use_family_ids: false,
286        }
287    }
288
289    /// Configure this `FamilyRules` to use (or not use) family information from
290    /// lists of family members.
291    pub fn use_family_lists(&mut self, val: bool) -> &mut Self {
292        self.use_family_lists = val;
293        self
294    }
295
296    /// Configure this `FamilyRules` to use (or not use) family information from
297    /// family IDs and family certs.
298    pub fn use_family_ids(&mut self, val: bool) -> &mut Self {
299        self.use_family_ids = val;
300        self
301    }
302
303    /// Return a `FamilyRules` that will look at every source of information
304    /// requested by `self` or by `other`.
305    pub fn union(&self, other: &Self) -> Self {
306        Self {
307            use_family_lists: self.use_family_lists || other.use_family_lists,
308            use_family_ids: self.use_family_ids || other.use_family_ids,
309        }
310    }
311}
312
313/// An opaque type representing the weight with which a relay or set of
314/// relays will be selected for a given role.
315///
316/// Most users should ignore this type, and just use pick_relay instead.
317#[derive(
318    Copy,
319    Clone,
320    Debug,
321    derive_more::Add,
322    derive_more::Sum,
323    derive_more::AddAssign,
324    Eq,
325    PartialEq,
326    Ord,
327    PartialOrd,
328)]
329pub struct RelayWeight(u64);
330
331impl RelayWeight {
332    /// Try to divide this weight by `rhs`.
333    ///
334    /// Return a ratio on success, or None on division-by-zero.
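    ///
    /// # Example
    ///
    /// A small added sketch:
    ///
    /// ```
    /// use tor_netdir::RelayWeight;
    ///
    /// let a = RelayWeight::from(100);
    /// let b = RelayWeight::from(400);
    /// assert_eq!(a.checked_div(b), Some(0.25));
    /// assert_eq!(a.checked_div(RelayWeight::from(0)), None);
    /// ```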
335    pub fn checked_div(&self, rhs: RelayWeight) -> Option<f64> {
336        if rhs.0 == 0 {
337            None
338        } else {
339            Some((self.0 as f64) / (rhs.0 as f64))
340        }
341    }
342
343    /// Compute a ratio `frac` of this weight.
344    ///
345    /// Return None if frac is less than zero, since negative weights
346    /// are impossible.
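    ///
    /// # Example
    ///
    /// A small added sketch:
    ///
    /// ```
    /// use tor_netdir::RelayWeight;
    ///
    /// let w = RelayWeight::from(1000);
    /// assert_eq!(w.ratio(0.25), Some(RelayWeight::from(250)));
    /// assert_eq!(w.ratio(-1.0), None);
    /// ```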
347    pub fn ratio(&self, frac: f64) -> Option<RelayWeight> {
348        let product = (self.0 as f64) * frac;
349        if product >= 0.0 && product.is_finite() {
350            Some(RelayWeight(product as u64))
351        } else {
352            None
353        }
354    }
355}
356
357impl From<u64> for RelayWeight {
358    fn from(val: u64) -> Self {
359        RelayWeight(val)
360    }
361}
362
363/// An operation for which we might be requesting a hidden service directory.
364#[derive(Copy, Clone, Debug, PartialEq)]
365// TODO: make this pub(crate) once NetDir::hs_dirs is removed
366#[non_exhaustive]
367pub enum HsDirOp {
368    /// Uploading an onion service descriptor.
369    #[cfg(feature = "hs-service")]
370    Upload,
371    /// Downloading an onion service descriptor.
372    Download,
373}
374
375/// A view of the Tor directory, suitable for use in building circuits.
376///
377/// Abstractly, a [`NetDir`] is a set of usable public [`Relay`]s, each of which
378/// has its own properties, identity, and correct weighted probability for use
379/// under different circumstances.
380///
381/// A [`NetDir`] is constructed by making a [`PartialNetDir`] from a consensus
382/// document, and then adding enough microdescriptors to that `PartialNetDir` so
383/// that it can be used to build paths. (Thus, if you have a NetDir, it is
384/// definitely adequate to build paths.)
385///
386/// # "Usable" relays
387///
388/// Many methods on NetDir are defined in terms of <a name="usable">"Usable"</a> relays.  Unless
389/// otherwise stated, a relay is "usable" if it is listed in the consensus,
390/// if we have full directory information for that relay (including a
391/// microdescriptor), and if that relay does not have any flags indicating that
392/// we should never use it. (Currently, `NoEdConsensus` is the only such flag.)
393///
394/// # Limitations
395///
396/// The current NetDir implementation assumes fairly strongly that every relay
397/// has an Ed25519 identity and an RSA identity, that the consensus is indexed
398/// by RSA identities, and that the Ed25519 identities are stored in
399/// microdescriptors.
400///
401/// If these assumptions someday change, then we'll have to revise the
402/// implementation.
403#[derive(Debug, Clone)]
404pub struct NetDir {
405    /// A microdescriptor consensus that lists the members of the network,
406    /// and maps each one to a 'microdescriptor' that has more information
407    /// about it
408    consensus: Arc<MdConsensus>,
409    /// A map from keys to integer values, distributed in the consensus,
410    /// and clamped to certain defaults.
411    params: NetParameters,
    /// Map from routerstatus index to that routerstatus's microdescriptor (if we have one).
413    mds: TiVec<RouterStatusIdx, Option<Arc<Microdesc>>>,
414    /// Map from SHA256 of _missing_ microdescriptors to the index of their
415    /// corresponding routerstatus.
416    rsidx_by_missing: HashMap<MdDigest, RouterStatusIdx>,
417    /// Map from ed25519 identity to index of the routerstatus.
418    ///
419    /// Note that we don't know the ed25519 identity of a relay until
420    /// we get the microdescriptor for it, so this won't be filled in
421    /// until we get the microdescriptors.
422    ///
423    /// # Implementation note
424    ///
425    /// For this field, and for `rsidx_by_rsa`,
426    /// it might be cool to have references instead.
427    /// But that would make this into a self-referential structure,
428    /// which isn't possible in safe rust.
429    rsidx_by_ed: HashMap<Ed25519Identity, RouterStatusIdx>,
430    /// Map from RSA identity to index of the routerstatus.
431    ///
432    /// This is constructed at the same time as the NetDir object, so it
433    /// can be immutable.
434    rsidx_by_rsa: Arc<HashMap<RsaIdentity, RouterStatusIdx>>,
435
436    /// Hash ring(s) describing the onion service directory.
437    ///
438    /// This is empty in a PartialNetDir, and is filled in before the NetDir is
439    /// built.
440    //
441    // TODO hs: It is ugly to have this exist in a partially constructed state
442    // in a PartialNetDir.
443    // Ideally, a PartialNetDir would contain only an HsDirs<HsDirParams>,
444    // or perhaps nothing at all, here.
445    #[cfg(feature = "hs-common")]
446    hsdir_rings: Arc<HsDirs<HsDirRing>>,
447
448    /// Weight values to apply to a given relay when deciding how frequently
449    /// to choose it for a given role.
450    weights: weight::WeightSet,
451
452    #[cfg(feature = "geoip")]
453    /// Country codes for each router in our consensus.
454    ///
455    /// This is indexed by the `RouterStatusIdx` (i.e. a router idx of zero has
456    /// the country code at position zero in this array).
457    country_codes: Vec<Option<CountryCode>>,
458}
459
460/// Collection of hidden service directories (or parameters for them)
461///
462/// In [`NetDir`] this is used to store the actual hash rings.
/// (But in the `NetDir` inside a [`PartialNetDir`], it contains [`HsDirRing`]s
/// where only the `params` are populated, and the `ring` is empty.)
465///
466/// This same generic type is used as the return type from
467/// [`HsDirParams::compute`](HsDirParams::compute),
468/// where it contains the *parameters* for the primary and secondary rings.
469#[derive(Debug, Clone)]
470#[cfg(feature = "hs-common")]
471pub(crate) struct HsDirs<D> {
472    /// The current ring
473    ///
474    /// It corresponds to the time period containing the `valid-after` time in
475    /// the consensus. Its SRV is whatever SRV was most current at the time when
476    /// that time period began.
477    ///
478    /// This is the hash ring that we should use whenever we are fetching an
479    /// onion service descriptor.
480    current: D,
481
482    /// Secondary rings (based on the parameters for the previous and next time periods)
483    ///
    /// Onion services upload to positions on these rings as well, based on how
    /// far into the current time period this directory is, so that
    /// not-synchronized clients can still find their descriptor.
487    ///
488    /// Note that with the current (2023) network parameters, with
489    /// `hsdir_interval = SRV lifetime = 24 hours` at most one of these
490    /// secondary rings will be active at a time.  We have two here in order
491    /// to conform with a more flexible regime in proposal 342.
492    //
493    // TODO: hs clients never need this; so I've made it not-present for them.
494    // But does that risk too much with respect to side channels?
495    //
496    // TODO: Perhaps we should refactor this so that it is clear that these
497    // are immutable?  On the other hand, the documentation for this type
498    // declares that it is immutable, so we are likely okay.
499    //
500    // TODO: this `Vec` is only ever 0,1,2 elements.
501    // Maybe it should be an ArrayVec or something.
502    #[cfg(feature = "hs-service")]
503    secondary: Vec<D>,
504}
505
506#[cfg(feature = "hs-common")]
507impl<D> HsDirs<D> {
508    /// Convert an `HsDirs<D>` to `HsDirs<D2>` by mapping each contained `D`
509    pub(crate) fn map<D2>(self, mut f: impl FnMut(D) -> D2) -> HsDirs<D2> {
510        HsDirs {
511            current: f(self.current),
512            #[cfg(feature = "hs-service")]
513            secondary: self.secondary.into_iter().map(f).collect(),
514        }
515    }
516
517    /// Iterate over some of the contained hsdirs, according to `secondary`
518    ///
519    /// The current ring is always included.
    /// Secondary rings are included iff `secondary` is true and the `hs-service` feature is enabled.
521    fn iter_filter_secondary(&self, secondary: bool) -> impl Iterator<Item = &D> {
522        let i = iter::once(&self.current);
523
524        // With "hs-service" disabled, there are no secondary rings,
525        // so we don't care.
526        let _ = secondary;
527
528        #[cfg(feature = "hs-service")]
529        let i = chain!(i, self.secondary.iter().filter(move |_| secondary));
530
531        i
532    }
533
534    /// Iterate over all the contained hsdirs
535    pub(crate) fn iter(&self) -> impl Iterator<Item = &D> {
536        self.iter_filter_secondary(true)
537    }
538
539    /// Iterate over the hsdirs relevant for `op`
540    pub(crate) fn iter_for_op(&self, op: HsDirOp) -> impl Iterator<Item = &D> {
541        self.iter_filter_secondary(match op {
542            #[cfg(feature = "hs-service")]
543            HsDirOp::Upload => true,
544            HsDirOp::Download => false,
545        })
546    }
547}
548
/// An event that a [`NetDirProvider`] can broadcast to indicate a change in
/// the status of its directory.
551#[derive(
552    Debug, Clone, Copy, PartialEq, Eq, EnumIter, EnumCount, IntoPrimitive, TryFromPrimitive,
553)]
554#[non_exhaustive]
555#[repr(u16)]
556pub enum DirEvent {
557    /// A new consensus has been received, and has enough information to be
558    /// used.
559    ///
560    /// This event is also broadcast when a new set of consensus parameters is
561    /// available, even if that set of parameters comes from a configuration
562    /// change rather than from the latest consensus.
563    NewConsensus,
564
565    /// New descriptors have been received for the current consensus.
566    ///
567    /// (This event is _not_ broadcast when receiving new descriptors for a
568    /// consensus which is not yet ready to replace the current consensus.)
569    NewDescriptors,
570
    /// We have received updated recommendations and requirements
    /// for which subprotocols we should have in order to use the network.
573    NewProtocolRecommendation,
574}
575
576/// The network directory provider is shutting down without giving us the
577/// netdir we asked for.
578#[derive(Clone, Copy, Debug, thiserror::Error)]
579#[error("Network directory provider is shutting down")]
580#[non_exhaustive]
581pub struct NetdirProviderShutdown;
582
583impl tor_error::HasKind for NetdirProviderShutdown {
584    fn kind(&self) -> tor_error::ErrorKind {
585        tor_error::ErrorKind::ArtiShuttingDown
586    }
587}
588
589/// How "timely" must a network directory be?
590///
591/// This enum is used as an argument when requesting a [`NetDir`] object from
592/// [`NetDirProvider`] and other APIs, to specify how recent the information
593/// must be in order to be useful.
594#[derive(Copy, Clone, Eq, PartialEq, Debug)]
595#[allow(clippy::exhaustive_enums)]
596pub enum Timeliness {
597    /// The network directory must be strictly timely.
598    ///
    /// That is, it must be based on a consensus that is valid right now, with no
    /// tolerance for skew or consensus problems.
601    ///
602    /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
603    Strict,
604    /// The network directory must be roughly timely.
605    ///
    /// That is, it must be based on a consensus that is not _too_ far in the
    /// future, and not _too_ far in the past.
608    ///
609    /// (The tolerances for "too far" will depend on configuration.)
610    ///
611    /// This is almost always the option that you want to use.
612    Timely,
613    /// Any network directory is permissible, regardless of how untimely.
614    ///
615    /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
616    Unchecked,
617}
618
619/// An object that can provide [`NetDir`]s, as well as inform consumers when
620/// they might have changed.
621///
622/// It is the responsibility of the implementor of `NetDirProvider`
623/// to try to obtain an up-to-date `NetDir`,
624/// and continuously to maintain and update it.
625///
626/// In usual configurations, Arti uses `tor_dirmgr::DirMgr`
627/// as its `NetDirProvider`.
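///
/// # Example
///
/// A sketch, added for illustration, of how a consumer typically asks a
/// provider for a directory; any concrete provider (such as
/// `tor_dirmgr::DirMgr`) could be passed in:
///
/// ```
/// use std::sync::Arc;
/// use tor_netdir::{NetDir, NetDirProvider, Timeliness};
///
/// fn current_netdir(provider: &dyn NetDirProvider) -> tor_netdir::Result<Arc<NetDir>> {
///     provider.netdir(Timeliness::Timely)
/// }
/// ```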
628#[async_trait]
629pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
630    /// Return a network directory that's live according to the provided
631    /// `timeliness`.
632    fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>>;
633
634    /// Return a reasonable netdir for general usage.
635    ///
636    /// This is an alias for
637    /// [`NetDirProvider::netdir`]`(`[`Timeliness::Timely`]`)`.
638    fn timely_netdir(&self) -> Result<Arc<NetDir>> {
639        self.netdir(Timeliness::Timely)
640    }
641
642    /// Return a new asynchronous stream that will receive notification
643    /// whenever the consensus has changed.
644    ///
645    /// Multiple events may be batched up into a single item: each time
646    /// this stream yields an event, all you can assume is that the event has
647    /// occurred at least once.
648    fn events(&self) -> BoxStream<'static, DirEvent>;
649
650    /// Return the latest network parameters.
651    ///
652    /// If we have no directory, return a reasonable set of defaults.
653    fn params(&self) -> Arc<dyn AsRef<NetParameters>>;
654
    /// Get a NetDir from this provider, waiting until one exists.
656    async fn wait_for_netdir(
657        &self,
658        timeliness: Timeliness,
659    ) -> std::result::Result<Arc<NetDir>, NetdirProviderShutdown> {
660        if let Ok(nd) = self.netdir(timeliness) {
661            return Ok(nd);
662        }
663
664        let mut stream = self.events();
665        loop {
666            // We need to retry `self.netdir()` before waiting for any stream events, to
667            // avoid deadlock.
668            //
669            // We ignore all errors here: they can all potentially be fixed by
670            // getting a fresh consensus, and they will all get warned about
671            // by the NetDirProvider itself.
672            if let Ok(nd) = self.netdir(timeliness) {
673                return Ok(nd);
674            }
675            match stream.next().await {
676                Some(_) => {}
677                None => {
678                    return Err(NetdirProviderShutdown);
679                }
680            }
681        }
682    }
683
    /// Wait until this provider lists `target`.
    ///
    /// NOTE: This might potentially wait indefinitely, if `target` never actually
    /// becomes listed in the directory.  It will exit if the `NetDirProvider` shuts down.
688    async fn wait_for_netdir_to_list(
689        &self,
690        target: &tor_linkspec::RelayIds,
691        timeliness: Timeliness,
692    ) -> std::result::Result<(), NetdirProviderShutdown> {
693        let mut events = self.events();
694        loop {
695            // See if the desired relay is in the netdir.
696            //
697            // We do this before waiting for any events, to avoid race conditions.
698            {
699                let netdir = self.wait_for_netdir(timeliness).await?;
700                if netdir.ids_listed(target) == Some(true) {
701                    return Ok(());
702                }
703                // If we reach this point, then ids_listed returned `Some(false)`,
704                // meaning "This relay is definitely not in the current directory";
705                // or it returned `None`, meaning "waiting for more information
                // about this network directory."
707                // In both cases, it's reasonable to just wait for another netdir
708                // event and try again.
709            }
710            // We didn't find the relay; wait for the provider to have a new netdir
711            // or more netdir information.
712            if events.next().await.is_none() {
713                // The event stream is closed; the provider has shut down.
714                return Err(NetdirProviderShutdown);
715            }
716        }
717    }
718
719    /// Return the latest set of recommended and required protocols, if there is one.
720    ///
721    /// This may be more recent (or more available) than this provider's associated NetDir.
722    fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)>;
723}
724
725impl<T> NetDirProvider for Arc<T>
726where
727    T: NetDirProvider,
728{
729    fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>> {
730        self.deref().netdir(timeliness)
731    }
732
733    fn timely_netdir(&self) -> Result<Arc<NetDir>> {
734        self.deref().timely_netdir()
735    }
736
737    fn events(&self) -> BoxStream<'static, DirEvent> {
738        self.deref().events()
739    }
740
741    fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
742        self.deref().params()
743    }
744
745    fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)> {
746        self.deref().protocol_statuses()
747    }
748}
749
/// Helper trait: allows any `Arc<X>` to be upcast to an `Arc<dyn
/// NetDirProvider>` if X is an implementation or supertrait of NetDirProvider.
752///
753/// This trait exists to work around a limitation in rust: when trait upcasting
754/// coercion is stable, this will be unnecessary.
755///
756/// The Rust tracking issue is <https://github.com/rust-lang/rust/issues/65991>.
757pub trait UpcastArcNetDirProvider {
758    /// Return a view of this object as an `Arc<dyn NetDirProvider>`
759    fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
760    where
761        Self: 'a;
762}
763
764impl<T> UpcastArcNetDirProvider for T
765where
766    T: NetDirProvider + Sized,
767{
768    fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
769    where
770        Self: 'a,
771    {
772        self
773    }
774}
775
776impl AsRef<NetParameters> for NetDir {
777    fn as_ref(&self) -> &NetParameters {
778        self.params()
779    }
780}
781
/// A partially built NetDir -- it can't be unwrapped until it has
/// enough information to build safe paths.
784#[derive(Debug, Clone)]
785pub struct PartialNetDir {
786    /// The netdir that's under construction.
787    netdir: NetDir,
788
789    /// The previous netdir, if we had one
790    ///
791    /// Used as a cache, so we can reuse information
792    #[cfg(feature = "hs-common")]
793    prev_netdir: Option<Arc<NetDir>>,
794}
795
796/// A view of a relay on the Tor network, suitable for building circuits.
797// TODO: This should probably be a more specific struct, with a trait
798// that implements it.
799#[derive(Clone)]
800pub struct Relay<'a> {
801    /// A router descriptor for this relay.
802    rs: &'a netstatus::MdConsensusRouterStatus,
803    /// A microdescriptor for this relay.
804    md: &'a Microdesc,
805    /// The country code this relay is in, if we know one.
806    #[cfg(feature = "geoip")]
807    cc: Option<CountryCode>,
808}
809
810/// A relay that we haven't checked for validity or usability in
811/// routing.
812#[derive(Debug)]
813pub struct UncheckedRelay<'a> {
814    /// A router descriptor for this relay.
815    rs: &'a netstatus::MdConsensusRouterStatus,
816    /// A microdescriptor for this relay, if there is one.
817    md: Option<&'a Microdesc>,
818    /// The country code this relay is in, if we know one.
819    #[cfg(feature = "geoip")]
820    cc: Option<CountryCode>,
821}
822
823/// A partial or full network directory that we can download
824/// microdescriptors for.
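///
/// # Example
///
/// An added sketch showing how a caller might feed microdescriptors into
/// anything implementing this trait (for instance a [`PartialNetDir`]):
///
/// ```
/// use tor_netdir::MdReceiver;
/// use tor_netdoc::doc::microdesc::Microdesc;
///
/// /// Add every microdescriptor that `dir` still wants; return how many were wanted.
/// fn add_available(dir: &mut dyn MdReceiver, mds: Vec<Microdesc>) -> usize {
///     let mut n_wanted = 0;
///     for md in mds {
///         if dir.add_microdesc(md) {
///             n_wanted += 1;
///         }
///     }
///     n_wanted
/// }
/// ```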
825pub trait MdReceiver {
826    /// Return an iterator over the digests for all of the microdescriptors
827    /// that this netdir is missing.
828    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_>;
829    /// Add a microdescriptor to this netdir, if it was wanted.
830    ///
831    /// Return true if it was indeed wanted.
832    fn add_microdesc(&mut self, md: Microdesc) -> bool;
833    /// Return the number of missing microdescriptors.
834    fn n_missing(&self) -> usize;
835}
836
837impl PartialNetDir {
838    /// Create a new PartialNetDir with a given consensus, and no
839    /// microdescriptors loaded.
840    ///
841    /// If `replacement_params` is provided, override network parameters from
842    /// the consensus with those from `replacement_params`.
843    pub fn new(
844        consensus: MdConsensus,
845        replacement_params: Option<&netstatus::NetParams<i32>>,
846    ) -> Self {
847        Self::new_inner(
848            consensus,
849            replacement_params,
850            #[cfg(feature = "geoip")]
851            None,
852        )
853    }
854
855    /// Create a new PartialNetDir with GeoIP support.
856    ///
857    /// This does the same thing as `new()`, except the provided GeoIP database is used to add
858    /// country codes to relays.
859    #[cfg(feature = "geoip")]
860    #[cfg_attr(docsrs, doc(cfg(feature = "geoip")))]
861    pub fn new_with_geoip(
862        consensus: MdConsensus,
863        replacement_params: Option<&netstatus::NetParams<i32>>,
864        geoip_db: &GeoipDb,
865    ) -> Self {
866        Self::new_inner(consensus, replacement_params, Some(geoip_db))
867    }
868
869    /// Implementation of the `new()` functions.
870    fn new_inner(
871        consensus: MdConsensus,
872        replacement_params: Option<&netstatus::NetParams<i32>>,
873        #[cfg(feature = "geoip")] geoip_db: Option<&GeoipDb>,
874    ) -> Self {
875        let mut params = NetParameters::default();
876
877        // (We ignore unrecognized options here, since they come from
878        // the consensus, and we don't expect to recognize everything
879        // there.)
880        let _ = params.saturating_update(consensus.params().iter());
881
882        // Now see if the user has any parameters to override.
883        // (We have to do this now, or else changes won't be reflected in our
884        // weights.)
885        if let Some(replacement) = replacement_params {
886            for u in params.saturating_update(replacement.iter()) {
887                warn!("Unrecognized option: override_net_params.{}", u);
888            }
889        }
890
891        // Compute the weights we'll want to use for these relays.
892        let weights = weight::WeightSet::from_consensus(&consensus, &params);
893
894        let n_relays = consensus.c_relays().len();
895
896        let rsidx_by_missing = consensus
897            .c_relays()
898            .iter_enumerated()
899            .map(|(rsidx, rs)| (*rs.md_digest(), rsidx))
900            .collect();
901
902        let rsidx_by_rsa = consensus
903            .c_relays()
904            .iter_enumerated()
905            .map(|(rsidx, rs)| (*rs.rsa_identity(), rsidx))
906            .collect();
907
908        #[cfg(feature = "geoip")]
909        let country_codes = if let Some(db) = geoip_db {
910            consensus
911                .c_relays()
912                .iter()
913                .map(|rs| {
914                    let ret = db
915                        .lookup_country_code_multi(rs.addrs().iter().map(|x| x.ip()))
916                        .cloned();
917                    ret
918                })
919                .collect()
920        } else {
921            Default::default()
922        };
923
924        #[cfg(feature = "hs-common")]
925        let hsdir_rings = Arc::new({
926            let params = HsDirParams::compute(&consensus, &params).expect("Invalid consensus!");
927            // TODO: It's a bit ugly to use expect above, but this function does
928            // not return a Result. On the other hand, the error conditions under which
929            // HsDirParams::compute can return Err are _very_ narrow and hard to
930            // hit; see documentation in that function.  As such, we probably
931            // don't need to have this return a Result.
932
933            params.map(HsDirRing::empty_from_params)
934        });
935
936        let netdir = NetDir {
937            consensus: Arc::new(consensus),
938            params,
939            mds: vec![None; n_relays].into(),
940            rsidx_by_missing,
941            rsidx_by_rsa: Arc::new(rsidx_by_rsa),
942            rsidx_by_ed: HashMap::with_capacity(n_relays),
943            #[cfg(feature = "hs-common")]
944            hsdir_rings,
945            weights,
946            #[cfg(feature = "geoip")]
947            country_codes,
948        };
949
950        PartialNetDir {
951            netdir,
952            #[cfg(feature = "hs-common")]
953            prev_netdir: None,
954        }
955    }
956
957    /// Return the declared lifetime of this PartialNetDir.
958    pub fn lifetime(&self) -> &netstatus::Lifetime {
959        self.netdir.lifetime()
960    }
961
962    /// Record a previous netdir, which can be used for reusing cached information
963    //
964    // Fills in as many missing microdescriptors as possible in this
965    // netdir, using the microdescriptors from the previous netdir.
966    //
967    // With HS enabled, stores the netdir for reuse of relay hash ring index values.
968    #[allow(clippy::needless_pass_by_value)] // prev might, or might not, be stored
969    pub fn fill_from_previous_netdir(&mut self, prev: Arc<NetDir>) {
970        for md in prev.mds.iter().flatten() {
971            self.netdir.add_arc_microdesc(md.clone());
972        }
973
974        #[cfg(feature = "hs-common")]
975        {
976            self.prev_netdir = Some(prev);
977        }
978    }
979
980    /// Compute the hash ring(s) for this NetDir
981    #[cfg(feature = "hs-common")]
982    fn compute_rings(&mut self) {
983        let params = HsDirParams::compute(&self.netdir.consensus, &self.netdir.params)
984            .expect("Invalid consensus");
985        // TODO: see TODO by similar expect in new()
986
987        self.netdir.hsdir_rings =
988            Arc::new(params.map(|params| {
989                HsDirRing::compute(params, &self.netdir, self.prev_netdir.as_deref())
990            }));
991    }
992
    /// Return true if there is enough information in this directory
    /// to build multihop paths.
995    pub fn have_enough_paths(&self) -> bool {
996        self.netdir.have_enough_paths()
997    }
998    /// If this directory has enough information to build multihop
999    /// circuits, return it.
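    ///
    /// # Example
    ///
    /// A sketch of the usual "fill in microdescriptors, then upgrade" flow,
    /// added for illustration (obtaining the consensus and the
    /// microdescriptors is out of scope here):
    ///
    /// ```
    /// use tor_netdir::{MdReceiver, NetDir, PartialNetDir};
    /// use tor_netdoc::doc::microdesc::Microdesc;
    ///
    /// fn try_upgrade(mut partial: PartialNetDir, mds: Vec<Microdesc>) -> Option<NetDir> {
    ///     for md in mds {
    ///         partial.add_microdesc(md);
    ///     }
    ///     partial.unwrap_if_sufficient().ok()
    /// }
    /// ```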
1000    pub fn unwrap_if_sufficient(
1001        #[allow(unused_mut)] mut self,
1002    ) -> std::result::Result<NetDir, PartialNetDir> {
1003        if self.netdir.have_enough_paths() {
1004            #[cfg(feature = "hs-common")]
1005            self.compute_rings();
1006            Ok(self.netdir)
1007        } else {
1008            Err(self)
1009        }
1010    }
1011}
1012
1013impl MdReceiver for PartialNetDir {
1014    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1015        self.netdir.missing_microdescs()
1016    }
1017    fn add_microdesc(&mut self, md: Microdesc) -> bool {
1018        self.netdir.add_microdesc(md)
1019    }
1020    fn n_missing(&self) -> usize {
1021        self.netdir.n_missing()
1022    }
1023}
1024
1025impl NetDir {
1026    /// Return the declared lifetime of this NetDir.
1027    pub fn lifetime(&self) -> &netstatus::Lifetime {
1028        self.consensus.lifetime()
1029    }
1030
1031    /// Add `md` to this NetDir.
1032    ///
1033    /// Return true if we wanted it, and false otherwise.
1034    fn add_arc_microdesc(&mut self, md: Arc<Microdesc>) -> bool {
1035        if let Some(rsidx) = self.rsidx_by_missing.remove(md.digest()) {
1036            assert_eq!(self.c_relays()[rsidx].md_digest(), md.digest());
1037
1038            // There should never be two approved MDs in the same
1039            // consensus listing the same ID... but if there is,
1040            // we'll let the most recent one win.
1041            self.rsidx_by_ed.insert(*md.ed25519_id(), rsidx);
1042
1043            // Happy path: we did indeed want this one.
1044            self.mds[rsidx] = Some(md);
1045
1046            // Save some space in the missing-descriptor list.
1047            if self.rsidx_by_missing.len() < self.rsidx_by_missing.capacity() / 4 {
1048                self.rsidx_by_missing.shrink_to_fit();
1049            }
1050
1051            return true;
1052        }
1053
1054        // Either we already had it, or we never wanted it at all.
1055        false
1056    }
1057
1058    /// Construct a (possibly invalid) Relay object from a routerstatus and its
1059    /// index within the consensus.
1060    fn relay_from_rs_and_rsidx<'a>(
1061        &'a self,
1062        rs: &'a netstatus::MdConsensusRouterStatus,
1063        rsidx: RouterStatusIdx,
1064    ) -> UncheckedRelay<'a> {
1065        debug_assert_eq!(self.c_relays()[rsidx].rsa_identity(), rs.rsa_identity());
1066        let md = self.mds[rsidx].as_deref();
1067        if let Some(md) = md {
1068            debug_assert_eq!(rs.md_digest(), md.digest());
1069        }
1070
1071        UncheckedRelay {
1072            rs,
1073            md,
1074            #[cfg(feature = "geoip")]
1075            cc: self.country_codes.get(rsidx.0).copied().flatten(),
1076        }
1077    }
1078
1079    /// Return the value of the hsdir_n_replicas param.
1080    #[cfg(feature = "hs-common")]
1081    fn n_replicas(&self) -> u8 {
1082        self.params
1083            .hsdir_n_replicas
1084            .get()
1085            .try_into()
1086            .expect("BoundedInt did not enforce bounds")
1087    }
1088
1089    /// Return the spread parameter for the specified `op`.
1090    #[cfg(feature = "hs-common")]
1091    fn spread(&self, op: HsDirOp) -> usize {
1092        let spread = match op {
1093            HsDirOp::Download => self.params.hsdir_spread_fetch,
1094            #[cfg(feature = "hs-service")]
1095            HsDirOp::Upload => self.params.hsdir_spread_store,
1096        };
1097
1098        spread
1099            .get()
1100            .try_into()
1101            .expect("BoundedInt did not enforce bounds!")
1102    }
1103
1104    /// Select `spread` hsdir relays for the specified `hsid` from a given `ring`.
1105    ///
1106    /// Algorithm:
1107    ///
    /// for replica in 1..=n_replicas:
    ///       - let H = hsdir_ring::onion_service_index(id, replica, rand,
    ///         period).
    ///       - Find the position of H within hsdir_ring.
    ///       - Take elements from hsdir_ring starting at that position,
    ///         adding them to Dirs until we have added `spread` new elements
    ///         that were not there before.
1115    #[cfg(feature = "hs-common")]
1116    fn select_hsdirs<'h, 'r: 'h>(
1117        &'r self,
1118        hsid: HsBlindId,
1119        ring: &'h HsDirRing,
1120        spread: usize,
1121    ) -> impl Iterator<Item = Relay<'r>> + 'h {
1122        let n_replicas = self.n_replicas();
1123
1124        (1..=n_replicas) // 1-indexed !
1125            .flat_map({
1126                let mut selected_nodes = HashSet::new();
1127
1128                move |replica: u8| {
1129                    let hsdir_idx = hsdir_ring::service_hsdir_index(&hsid, replica, ring.params());
1130
1131                    let items = ring
1132                        .ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
1133                            // According to rend-spec 2.2.3:
1134                            //                                                  ... If any of those
1135                            // nodes have already been selected for a lower-numbered replica of the
1136                            // service, any nodes already chosen are disregarded (i.e. skipped over)
1137                            // when choosing a replica's hsdir_spread_store nodes.
1138                            selected_nodes.insert(*hsdir_idx)
1139                        })
1140                        .collect::<Vec<_>>();
1141
1142                    items
1143                }
1144            })
1145            .filter_map(move |(_hsdir_idx, rs_idx)| {
1146                // This ought not to be None but let's not panic or bail if it is
1147                self.relay_by_rs_idx(*rs_idx)
1148            })
1149    }
1150
1151    /// Replace the overridden parameters in this netdir with `new_replacement`.
1152    ///
1153    /// After this function is done, the netdir's parameters will be those in
1154    /// the consensus, overridden by settings from `new_replacement`.  Any
1155    /// settings in the old replacement parameters will be discarded.
1156    pub fn replace_overridden_parameters(&mut self, new_replacement: &netstatus::NetParams<i32>) {
1157        // TODO(nickm): This is largely duplicate code from PartialNetDir::new().
1158        let mut new_params = NetParameters::default();
1159        let _ = new_params.saturating_update(self.consensus.params().iter());
1160        for u in new_params.saturating_update(new_replacement.iter()) {
1161            warn!("Unrecognized option: override_net_params.{}", u);
1162        }
1163
1164        self.params = new_params;
1165    }
1166
1167    /// Return an iterator over all Relay objects, including invalid ones
1168    /// that we can't use.
1169    pub fn all_relays(&self) -> impl Iterator<Item = UncheckedRelay<'_>> {
1170        // TODO: I'd like if we could memoize this so we don't have to
1171        // do so many hashtable lookups.
1172        self.c_relays()
1173            .iter_enumerated()
1174            .map(move |(rsidx, rs)| self.relay_from_rs_and_rsidx(rs, rsidx))
1175    }
1176    /// Return an iterator over all [usable](NetDir#usable) Relays.
1177    pub fn relays(&self) -> impl Iterator<Item = Relay<'_>> {
1178        self.all_relays().filter_map(UncheckedRelay::into_relay)
1179    }
1180
1181    /// Look up a relay's [`Microdesc`] by its [`RouterStatusIdx`]
1182    #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1183    pub(crate) fn md_by_rsidx(&self, rsidx: RouterStatusIdx) -> Option<&Microdesc> {
1184        self.mds.get(rsidx)?.as_deref()
1185    }
1186
1187    /// Return a relay matching a given identity, if we have a
1188    /// _usable_ relay with that key.
1189    ///
1190    /// (Does not return [unusable](NetDir#usable) relays.)
    ///
1193    /// Note that a `None` answer is not always permanent: if a microdescriptor
1194    /// is subsequently added for a relay with this ID, the ID may become usable
1195    /// even if it was not usable before.
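    ///
    /// # Example
    ///
    /// A small sketch added for illustration; it assumes the usual
    /// `Into<RelayIdRef>` conversion that `tor-linkspec` provides for
    /// `&Ed25519Identity`:
    ///
    /// ```
    /// use tor_llcrypto::pk::ed25519::Ed25519Identity;
    /// use tor_netdir::NetDir;
    ///
    /// fn listed_and_usable(netdir: &NetDir, id: &Ed25519Identity) -> bool {
    ///     netdir.by_id(id).is_some()
    /// }
    /// ```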
1196    pub fn by_id<'a, T>(&self, id: T) -> Option<Relay<'_>>
1197    where
1198        T: Into<RelayIdRef<'a>>,
1199    {
1200        let id = id.into();
1201        let answer = match id {
1202            RelayIdRef::Ed25519(ed25519) => {
1203                let rsidx = *self.rsidx_by_ed.get(ed25519)?;
1204                let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1205
1206                self.relay_from_rs_and_rsidx(rs, rsidx).into_relay()?
1207            }
1208            RelayIdRef::Rsa(rsa) => self
1209                .by_rsa_id_unchecked(rsa)
1210                .and_then(UncheckedRelay::into_relay)?,
1211            other_type => self.relays().find(|r| r.has_identity(other_type))?,
1212        };
1213        assert!(answer.has_identity(id));
1214        Some(answer)
1215    }
1216
1217    /// Obtain a `Relay` given a `RouterStatusIdx`
1218    ///
    /// Differs from `relay_from_rs_and_rsidx` as follows:
1220    ///  * That function expects the caller to already have an `MdConsensusRouterStatus`;
1221    ///    it checks with `debug_assert` that the relay in the netdir matches.
1222    ///  * That function panics if the `RouterStatusIdx` is invalid; this one returns `None`.
1223    ///  * That function returns an `UncheckedRelay`; this one a `Relay`.
1224    ///
1225    /// `None` could be returned here, even with a valid `rsi`,
1226    /// if `rsi` refers to an [unusable](NetDir#usable) relay.
1227    #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1228    pub(crate) fn relay_by_rs_idx(&self, rs_idx: RouterStatusIdx) -> Option<Relay<'_>> {
1229        let rs = self.c_relays().get(rs_idx)?;
1230        let md = self.mds.get(rs_idx)?.as_deref();
1231        UncheckedRelay {
1232            rs,
1233            md,
1234            #[cfg(feature = "geoip")]
1235            cc: self.country_codes.get(rs_idx.0).copied().flatten(),
1236        }
1237        .into_relay()
1238    }
1239
1240    /// Return a relay with the same identities as those in `target`, if one
1241    /// exists.
1242    ///
1243    /// Does not return [unusable](NetDir#usable) relays.
1244    ///
1245    /// Note that a negative result from this method is not necessarily permanent:
1246    /// it may be the case that a relay exists,
1247    /// but we don't yet have enough information about it to know all of its IDs.
1248    /// To test whether a relay is *definitely* absent,
1249    /// use [`by_ids_detailed`](Self::by_ids_detailed)
1250    /// or [`ids_listed`](Self::ids_listed).
1251    ///
1252    /// # Limitations
1253    ///
1254    /// This will be very slow if `target` does not have an Ed25519 or RSA
1255    /// identity.
1256    pub fn by_ids<T>(&self, target: &T) -> Option<Relay<'_>>
1257    where
1258        T: HasRelayIds + ?Sized,
1259    {
1260        let mut identities = target.identities();
1261        // Don't try if there are no identities.
1262        let first_id = identities.next()?;
1263
1264        // Since there is at most one relay with each given ID type,
1265        // we only need to check the first relay we find.
1266        let candidate = self.by_id(first_id)?;
1267        if identities.all(|wanted_id| candidate.has_identity(wanted_id)) {
1268            Some(candidate)
1269        } else {
1270            None
1271        }
1272    }
1273
1274    /// Check whether there is a relay that has at least one identity from
1275    /// `target`, and which _could_ have every identity from `target`.
1276    /// If so, return such a relay.
1277    ///
1278    /// Return `Ok(None)` if we did not find a relay with any identity from `target`.
1279    ///
1280    /// Return `RelayLookupError::Impossible` if we found a relay with at least
1281    /// one identity from `target`, but that relay's other identities contradict
1282    /// what we learned from `target`.
1283    ///
1284    /// Does not return [unusable](NetDir#usable) relays.
1285    ///
1286    /// (This function is only useful if you need to distinguish the
1287    /// "impossible" case from the "no such relay known" case.)
1288    ///
1289    /// # Limitations
1290    ///
1291    /// This will be very slow if `target` does not have an Ed25519 or RSA
1292    /// identity.
1293    //
1294    // TODO HS: This function could use a better name.
1295    //
1296    // TODO: We could remove the feature restriction here once we think this API is
1297    // stable.
1298    #[cfg(feature = "hs-common")]
1299    pub fn by_ids_detailed<T>(
1300        &self,
1301        target: &T,
1302    ) -> std::result::Result<Option<Relay<'_>>, RelayLookupError>
1303    where
1304        T: HasRelayIds + ?Sized,
1305    {
1306        let candidate = target
1307            .identities()
1308            // Find all the relays that share any identity with this set of identities.
1309            .filter_map(|id| self.by_id(id))
1310            // We might find the same relay more than once under a different
1311            // identity, so we remove the duplicates.
1312            //
1313            // Since there is at most one relay per rsa identity per consensus,
1314            // this is a true uniqueness check under current construction rules.
1315            .unique_by(|r| r.rs.rsa_identity())
            // If we find two or more distinct relays, then we have a contradiction.
1317            .at_most_one()
1318            .map_err(|_| RelayLookupError::Impossible)?;
1319
1320        // If we have no candidate, return None early.
1321        let candidate = match candidate {
1322            Some(relay) => relay,
1323            None => return Ok(None),
1324        };
1325
1326        // Now we know we have a single candidate.  Make sure that it does not have any
1327        // identity that does not match the target.
1328        if target
1329            .identities()
1330            .all(|wanted_id| match candidate.identity(wanted_id.id_type()) {
1331                None => true,
1332                Some(id) => id == wanted_id,
1333            })
1334        {
1335            Ok(Some(candidate))
1336        } else {
1337            Err(RelayLookupError::Impossible)
1338        }
1339    }
1340
    /// Check whether this consensus definitely has (or definitely does not
    /// have) a relay matching the listed identities.
1343    ///
1344    /// `Some(true)` indicates that the relay exists.
1345    /// `Some(false)` indicates that the relay definitely does not exist.
1346    /// `None` indicates that we can't yet tell whether such a relay exists,
1347    ///  due to missing information.
1348    fn id_pair_listed(&self, ed_id: &Ed25519Identity, rsa_id: &RsaIdentity) -> Option<bool> {
1349        let r = self.by_rsa_id_unchecked(rsa_id);
1350        match r {
1351            Some(unchecked) => {
1352                if !unchecked.rs.ed25519_id_is_usable() {
1353                    return Some(false);
1354                }
1355                // If md is present, then it's listed iff we have the right
1356                // ed id.  Otherwise we don't know if it's listed.
1357                unchecked.md.map(|md| md.ed25519_id() == ed_id)
1358            }
1359            None => {
1360                // Definitely not listed.
1361                Some(false)
1362            }
1363        }
1364    }
1365
1366    /// Check whether a relay exists (or may exist)
1367    /// with the same identities as those in `target`.
1368    ///
1369    /// `Some(true)` indicates that the relay exists.
1370    /// `Some(false)` indicates that the relay definitely does not exist.
1371    /// `None` indicates that we can't yet tell whether such a relay exists,
1372    ///  due to missing information.
1373    pub fn ids_listed<T>(&self, target: &T) -> Option<bool>
1374    where
1375        T: HasRelayIds + ?Sized,
1376    {
1377        let rsa_id = target.rsa_identity();
1378        let ed25519_id = target.ed_identity();
1379
1380        // TODO: If we later support more identity key types, this will
1381        // become incorrect.  This assertion might help us recognize that case.
1382        const_assert!(RelayIdType::COUNT == 2);
1383
1384        match (rsa_id, ed25519_id) {
1385            (Some(r), Some(e)) => self.id_pair_listed(e, r),
1386            (Some(r), None) => Some(self.rsa_id_is_listed(r)),
1387            (None, Some(e)) => {
1388                if self.rsidx_by_ed.contains_key(e) {
1389                    Some(true)
1390                } else {
1391                    None
1392                }
1393            }
1394            (None, None) => None,
1395        }
1396    }
1397
1398    /// Return a (possibly [unusable](NetDir#usable)) relay with a given RSA identity.
1399    ///
1400    /// This API can be used to find information about a relay that is listed in
1401    /// the current consensus, even if we don't yet have enough information
1402    /// (like a microdescriptor) about the relay to use it.
1403    #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1404    #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1405    fn by_rsa_id_unchecked(&self, rsa_id: &RsaIdentity) -> Option<UncheckedRelay<'_>> {
1406        let rsidx = *self.rsidx_by_rsa.get(rsa_id)?;
1407        let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1408        assert_eq!(rs.rsa_identity(), rsa_id);
1409        Some(self.relay_from_rs_and_rsidx(rs, rsidx))
1410    }
1411    /// Return the relay with a given RSA identity, if we have one
1412    /// and it is [usable](NetDir#usable).
1413    fn by_rsa_id(&self, rsa_id: &RsaIdentity) -> Option<Relay<'_>> {
1414        self.by_rsa_id_unchecked(rsa_id)?.into_relay()
1415    }
1416    /// Return true if `rsa_id` is listed in this directory, even if it isn't
1417    /// currently usable.
1418    ///
1419    /// (An "[unusable](NetDir#usable)" relay in this context is one for which we don't have full
1420    /// directory information.)
1421    #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1422    #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1423    fn rsa_id_is_listed(&self, rsa_id: &RsaIdentity) -> bool {
1424        self.by_rsa_id_unchecked(rsa_id).is_some()
1425    }
1426
1427    /// List the hsdirs in this NetDir that should be in the HSDir rings
1428    ///
1429    /// The results are not returned in any particular order.
1430    #[cfg(feature = "hs-common")]
1431    fn all_hsdirs(&self) -> impl Iterator<Item = (RouterStatusIdx, Relay<'_>)> {
1432        self.c_relays().iter_enumerated().filter_map(|(rsidx, rs)| {
1433            let relay = self.relay_from_rs_and_rsidx(rs, rsidx);
1434            relay.is_hsdir_for_ring().then_some(())?;
1435            let relay = relay.into_relay()?;
1436            Some((rsidx, relay))
1437        })
1438    }
1439
1440    /// Return the parameters from the consensus, clamped to the
1441    /// correct ranges, with defaults filled in.
1442    ///
1443    /// NOTE: Unsupported parameters aren't returned here; only those
1444    /// values configured in the `params` module are available.
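    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled here), assuming a `netdir: &NetDir` is
    /// in scope, showing how to read one clamped parameter value:
    ///
    /// ```ignore
    /// let params = netdir.params();
    /// // `circuit_window` is one of the values defined in the `params` module.
    /// let window = params.circuit_window.get();
    /// assert!(window > 0);
    /// ```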
1445    pub fn params(&self) -> &NetParameters {
1446        &self.params
1447    }
1448
1449    /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1450    /// network's current requirements and recommendations for the list of
1451    /// protocols that every relay must implement.
1452    //
1453    // TODO HS: I am not sure this is the right API; other alternatives would be:
1454    //    * To expose the _required_ relay protocol list instead (since that's all that
1455    //      onion service implementations need).
1456    //    * To expose the client protocol list as well (for symmetry).
1457    //    * To expose the MdConsensus instead (since that's more general, although
1458    //      it restricts the future evolution of this API).
1459    //
1460    // I think that this is a reasonably good compromise for now, but I'm going
1461    // to put it behind the `hs-common` feature to give us time to consider more.
1462    #[cfg(feature = "hs-common")]
1463    pub fn relay_protocol_status(&self) -> &netstatus::ProtoStatus {
1464        self.consensus.relay_protocol_status()
1465    }
1466
1467    /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1468    /// network's current requirements and recommendations for the list of
1469    /// protocols that every relay must implement.
1470    //
1471    // TODO HS: See notes on relay_protocol_status above.
1472    #[cfg(feature = "hs-common")]
1473    pub fn client_protocol_status(&self) -> &netstatus::ProtoStatus {
1474        self.consensus.client_protocol_status()
1475    }
1476
1477    /// Return the weighted fraction of relays we can use.  We only
1478    /// consider relays that match the predicate `usable`.  We weight
1479    /// this bandwidth according to the provided `role`.
1480    ///
1481    /// If _no_ matching relays in the consensus have a nonzero
1482    /// weighted bandwidth value, we fall back to looking at the
1483    /// unweighted fraction of matching relays.
1484    ///
1485    /// If there are no matching relays in the consensus, we return 0.0.
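    ///
    /// For instance, a sketch of internal use (mirroring `frac_usable_paths`
    /// below):
    ///
    /// ```ignore
    /// // Weighted fraction of guard-suitable relays for which we have microdescriptors.
    /// let f_g = self.frac_for_role(WeightRole::Guard, |u| {
    ///     u.low_level_details().is_suitable_as_guard()
    /// });
    /// ```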
1486    fn frac_for_role<'a, F>(&'a self, role: WeightRole, usable: F) -> f64
1487    where
1488        F: Fn(&UncheckedRelay<'a>) -> bool,
1489    {
1490        let mut total_weight = 0_u64;
1491        let mut have_weight = 0_u64;
1492        let mut have_count = 0_usize;
1493        let mut total_count = 0_usize;
1494
1495        for r in self.all_relays() {
1496            if !usable(&r) {
1497                continue;
1498            }
1499            let w = self.weights.weight_rs_for_role(r.rs, role);
1500            total_weight += w;
1501            total_count += 1;
1502            if r.is_usable() {
1503                have_weight += w;
1504                have_count += 1;
1505            }
1506        }
1507
1508        if total_weight > 0 {
1509            // The consensus lists some weighted bandwidth so return the
1510            // fraction of the weighted bandwidth for which we have
1511            // descriptors.
1512            (have_weight as f64) / (total_weight as f64)
1513        } else if total_count > 0 {
1514            // The consensus lists no weighted bandwidth for these relays,
1515            // but at least it does list relays. Return the fraction of
1516            // relays for which we have descriptors.
1517            (have_count as f64) / (total_count as f64)
1518        } else {
1519            // There are no relays of this kind in the consensus.  Return
1520            // 0.0, to avoid dividing by zero and giving NaN.
1521            0.0
1522        }
1523    }
1524    /// Return the estimated fraction of possible paths that we have
1525    /// enough microdescriptors to build.
1526    fn frac_usable_paths(&self) -> f64 {
1527        // TODO #504, TODO SPEC: We may want to add a set of is_flagged_fast() and/or
1528        // is_flagged_stable() checks here.  This will require spec clarification.
1529        let f_g = self.frac_for_role(WeightRole::Guard, |u| {
1530            u.low_level_details().is_suitable_as_guard()
1531        });
1532        let f_m = self.frac_for_role(WeightRole::Middle, |_| true);
1533        let f_e = if self.all_relays().any(|u| u.rs.is_flagged_exit()) {
1534            self.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit())
1535        } else {
1536            // If there are no exits at all, we use f_m here.
1537            f_m
1538        };
1539        f_g * f_m * f_e
1540    }
1541    /// Return true if there is enough information in this NetDir to build
1542    /// multihop circuits.
1543    fn have_enough_paths(&self) -> bool {
1544        // TODO-A001: This should check for our guards as well, and
1545        // make sure that if they're listed in the consensus, we have
1546        // the descriptors for them.
1547
1548        // If we can build a randomly chosen path with at least this
1549        // probability, we know enough information to participate
1550        // on the network.
1551
1552        let min_frac_paths: f64 = self.params().min_circuit_path_threshold.as_fraction();
1553
1554        // What fraction of paths can we build?
1555        let available = self.frac_usable_paths();
1556
1557        available >= min_frac_paths
1558    }
1559    /// Choose a relay at random.
1560    ///
1561    /// Each relay is chosen with probability proportional to its weight
1562    /// in the role `role`, and is only selected if the predicate `usable`
1563    /// returns true for it.
1564    ///
1565    /// This function returns None if (and only if) there are no relays
1566    /// with nonzero weight where `usable` returned true.
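    ///
    /// # Example
    ///
    /// A sketch (not compiled here), assuming a `netdir: &NetDir` and a
    /// suitable `rng` are in scope, of picking a middle relay that can exit
    /// to port 80:
    ///
    /// ```ignore
    /// let relay = netdir.pick_relay(&mut rng, WeightRole::Middle, |r| {
    ///     r.low_level_details().supports_exit_port_ipv4(80)
    /// });
    /// if let Some(r) = relay {
    ///     // use `r` as the next hop of a circuit ...
    /// }
    /// ```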
1567    //
1568    // TODO this API, with the `usable` closure, invites mistakes where we fail to
1569    // check conditions that are implied by the role we have selected for the relay:
1570    // call sites must include a call to `Relay::is_polarity_inverter()` or whatever.
1571    // IMO the `WeightRole` ought to imply a condition (and it should therefore probably
1572    // be renamed.)  -Diziet
1573    pub fn pick_relay<'a, R, P>(
1574        &'a self,
1575        rng: &mut R,
1576        role: WeightRole,
1577        usable: P,
1578    ) -> Option<Relay<'a>>
1579    where
1580        R: rand::Rng,
1581        P: FnMut(&Relay<'a>) -> bool,
1582    {
1583        let relays: Vec<_> = self.relays().filter(usable).collect();
1584        // This algorithm uses rand::distr::WeightedIndex, which gives
1585        // O(n) time and space to build the index, plus O(log n)
1586        // sampling time.
1587        //
1588        // We might be better off building a WeightedIndex in advance
1589        // for each `role`, and then sampling it repeatedly until we
1590        // get a relay that satisfies `usable`.  Or we might not --
1591        // that depends heavily on the actual particulars of our
1592        // inputs.  We probably shouldn't make any changes there
1593        // unless profiling tells us that this function is in a hot
1594        // path.
1595        //
1596        // The C Tor sampling implementation goes through some trouble
1597        // here to try to make its path selection constant-time.  I
1598        // believe that there is no actual remotely exploitable
1599        // side-channel here however.  It could be worth analyzing in
1600        // the future.
1601        //
1602        // This code will give the wrong result if the total of all weights
1603        // can exceed u64::MAX.  We make sure that can't happen when we
1604        // set up `self.weights`.
1605        match relays[..].choose_weighted(rng, |r| self.weights.weight_rs_for_role(r.rs, role)) {
1606            Ok(relay) => Some(relay.clone()),
1607            Err(WeightError::InsufficientNonZero) => {
1608                if relays.is_empty() {
1609                    None
1610                } else {
1611                    warn!(?self.weights, ?role,
1612                          "After filtering, all {} relays had zero weight. Choosing one at random. See bug #1907.",
1613                          relays.len());
1614                    relays.choose(rng).cloned()
1615                }
1616            }
1617            Err(e) => {
1618                warn_report!(e, "Unexpected error while sampling a relay");
1619                None
1620            }
1621        }
1622    }
1623
1624    /// Choose `n` relays at random.
1625    ///
1626    /// Each relay is chosen with probability proportional to its weight
1627    /// in the role `role`, and is only selected if the predicate `usable`
1628    /// returns true for it.
1629    ///
1630    /// Relays are chosen without replacement: no relay will be
1631    /// returned twice. Therefore, the resulting vector may be smaller
1632    /// than `n` if we happen to have fewer than `n` appropriate relays.
1633    ///
1634    /// This function returns an empty vector if (and only if) there
1635    /// are no relays with nonzero weight where `usable` returned
1636    /// true.
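    ///
    /// # Example
    ///
    /// A sketch (not compiled here), assuming a `netdir: &NetDir` and an `rng`
    /// are in scope; note that fewer than the requested number of relays may
    /// be returned.
    ///
    /// ```ignore
    /// let relays = netdir.pick_n_relays(&mut rng, 3, WeightRole::Middle, |_| true);
    /// assert!(relays.len() <= 3);
    /// ```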
1637    #[allow(clippy::cognitive_complexity)] // all due to tracing crate.
1638    pub fn pick_n_relays<'a, R, P>(
1639        &'a self,
1640        rng: &mut R,
1641        n: usize,
1642        role: WeightRole,
1643        usable: P,
1644    ) -> Vec<Relay<'a>>
1645    where
1646        R: rand::Rng,
1647        P: FnMut(&Relay<'a>) -> bool,
1648    {
1649        let relays: Vec<_> = self.relays().filter(usable).collect();
1650        // NOTE: See discussion in pick_relay().
1651        let mut relays = match relays[..].choose_multiple_weighted(rng, n, |r| {
1652            self.weights.weight_rs_for_role(r.rs, role) as f64
1653        }) {
1654            Err(WeightError::InsufficientNonZero) => {
1655                // Too few relays had nonzero weights: return all of those that are okay.
1656                // (This behavior used to come up with rand 0.9; it no longer does,
1657                // but we still detect it.)
1658                let remaining: Vec<_> = relays
1659                    .iter()
1660                    .filter(|r| self.weights.weight_rs_for_role(r.rs, role) > 0)
1661                    .cloned()
1662                    .collect();
1663                if remaining.is_empty() {
1664                    warn!(?self.weights, ?role,
1665                          "After filtering, all {} relays had zero weight! Picking some at random. See bug #1907.",
1666                          relays.len());
1667                    if relays.len() >= n {
1668                        relays.choose_multiple(rng, n).cloned().collect()
1669                    } else {
1670                        relays
1671                    }
1672                } else {
1673                    warn!(?self.weights, ?role,
1674                          "After filtering, only had {}/{} relays with nonzero weight. Returning them all. See bug #1907.",
1675                           remaining.len(), relays.len());
1676                    remaining
1677                }
1678            }
1679            Err(e) => {
1680                warn_report!(e, "Unexpected error while sampling a set of relays");
1681                Vec::new()
1682            }
1683            Ok(iter) => {
1684                let selection: Vec<_> = iter.map(Relay::clone).collect();
1685                if selection.len() < n && selection.len() < relays.len() {
1686                    warn!(?self.weights, ?role,
1687                          "choose_multiple_weighted returned only {returned}, despite requesting {n}, \
1688                          and having {filtered_len} available after filtering. See bug #1907.",
1689                          returned=selection.len(), filtered_len=relays.len());
1690                }
1691                selection
1692            }
1693        };
1694        relays.shuffle(rng);
1695        relays
1696    }
1697
1698    /// Compute the weight with which `relay` will be selected for a given
1699    /// `role`.
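    ///
    /// For example, a sketch (not compiled here) assuming a `netdir: &NetDir`
    /// and a `relay` obtained from it:
    ///
    /// ```ignore
    /// let w = netdir.relay_weight(&relay, WeightRole::Guard);
    /// ```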
1700    pub fn relay_weight<'a>(&'a self, relay: &Relay<'a>, role: WeightRole) -> RelayWeight {
1701        RelayWeight(self.weights.weight_rs_for_role(relay.rs, role))
1702    }
1703
1704    /// Compute the total weight with which any relay matching `usable`
1705    /// will be selected for a given `role`.
1706    ///
1707    /// Note: because this function is used to assess the total
1708    /// properties of the consensus, the `usable` predicate takes an
1709    /// [`UncheckedRelay`] rather than a [`Relay`].
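    ///
    /// For example, a sketch (not compiled here) assuming a `netdir: &NetDir`
    /// is in scope:
    ///
    /// ```ignore
    /// // Total guard-role weight over every relay listed in the consensus.
    /// let total = netdir.total_weight(WeightRole::Guard, |_| true);
    /// ```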
1710    pub fn total_weight<P>(&self, role: WeightRole, usable: P) -> RelayWeight
1711    where
1712        P: Fn(&UncheckedRelay<'_>) -> bool,
1713    {
1714        self.all_relays()
1715            .filter_map(|unchecked| {
1716                if usable(&unchecked) {
1717                    Some(RelayWeight(
1718                        self.weights.weight_rs_for_role(unchecked.rs, role),
1719                    ))
1720                } else {
1721                    None
1722                }
1723            })
1724            .sum()
1725    }
1726
1727    /// Compute the weight with which a relay with ID `rsa_id` would be
1728    /// selected for a given `role`.
1729    ///
1730    /// Note that the weight returned by this function assumes that the
1731    /// relay with that ID is actually [usable](NetDir#usable); if it isn't usable,
1732    /// then other weight-related functions will treat its weight as zero.
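    ///
    /// For example, a sketch (not compiled here) assuming a `netdir: &NetDir`
    /// and an `rsa_id: RsaIdentity` are in scope:
    ///
    /// ```ignore
    /// if let Some(w) = netdir.weight_by_rsa_id(&rsa_id, WeightRole::Guard) {
    ///     // `w` is the weight the relay would have if it were usable.
    /// }
    /// ```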
1733    pub fn weight_by_rsa_id(&self, rsa_id: &RsaIdentity, role: WeightRole) -> Option<RelayWeight> {
1734        self.by_rsa_id_unchecked(rsa_id)
1735            .map(|unchecked| RelayWeight(self.weights.weight_rs_for_role(unchecked.rs, role)))
1736    }
1737
1738    /// Return all relays in this NetDir known to be in the same family as
1739    /// `relay`.
1740    ///
1741    /// This list of members will **not** necessarily include `relay` itself.
1742    ///
1743    /// # Limitations
1744    ///
1745    /// Two relays only belong to the same family if _each_ relay
1746    /// claims to share a family with the other.  But if we are
1747    /// missing a microdescriptor for one of the relays listed by this
1748    /// relay, we cannot know whether it acknowledges family
1749    /// membership with this relay or not.  Therefore, this function
1750    /// can omit family members for which there is not (as yet) any
1751    /// Relay object.
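    ///
    /// # Example
    ///
    /// A sketch (not compiled here), assuming a `netdir: &NetDir` and a `relay`
    /// drawn from it:
    ///
    /// ```ignore
    /// for member in netdir.known_family_members(&relay) {
    ///     // `member` and `relay` each claim family membership with the other.
    /// }
    /// ```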
1752    pub fn known_family_members<'a>(
1753        &'a self,
1754        relay: &'a Relay<'a>,
1755    ) -> impl Iterator<Item = Relay<'a>> {
1756        let relay_rsa_id = relay.rsa_id();
1757        relay.md.family().members().filter_map(move |other_rsa_id| {
1758            self.by_rsa_id(other_rsa_id)
1759                .filter(|other_relay| other_relay.md.family().contains(relay_rsa_id))
1760        })
1761    }
1762
1763    /// Return the current hidden service directory "time period".
1764    ///
1765    /// Specifically, this returns the time period that contains the beginning
1766    /// of the validity period of this `NetDir`'s consensus.  That time period
1767    /// is the one we use when acting as a hidden service client.
1768    #[cfg(feature = "hs-common")]
1769    pub fn hs_time_period(&self) -> TimePeriod {
1770        self.hsdir_rings.current.time_period()
1771    }
1772
1773    /// Return the [`HsDirParams`] of all the relevant hidden service directory "time periods"
1774    ///
1775    /// This includes the current time period (as from
1776    /// [`.hs_time_period`](NetDir::hs_time_period))
1777    /// plus additional time periods that we publish descriptors for when we are
1778    /// acting as a hidden service.
1779    #[cfg(feature = "hs-service")]
1780    pub fn hs_all_time_periods(&self) -> Vec<HsDirParams> {
1781        self.hsdir_rings
1782            .iter()
1783            .map(|r| r.params().clone())
1784            .collect()
1785    }
1786
1787    /// Return the relays in this network directory that will be used as hidden service directories
1788    ///
1789    /// These are suitable to retrieve a given onion service's descriptor at a given time period.
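    ///
    /// # Example
    ///
    /// A sketch (not compiled here), assuming a `netdir: &NetDir`, an
    /// `hsid: HsBlindId`, and an `rng` are in scope:
    ///
    /// ```ignore
    /// let period = netdir.hs_time_period();
    /// let hsdirs = netdir.hs_dirs_download(hsid, period, &mut rng)?;
    /// // `hsdirs` is a randomly ordered list of directories to try fetching from.
    /// ```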
1790    #[cfg(feature = "hs-common")]
1791    pub fn hs_dirs_download<'r, R>(
1792        &'r self,
1793        hsid: HsBlindId,
1794        period: TimePeriod,
1795        rng: &mut R,
1796    ) -> std::result::Result<Vec<Relay<'r>>, Bug>
1797    where
1798        R: rand::Rng,
1799    {
1800        // Algorithm:
1801        //
1802        // 1. Determine which HsDirRing to use, based on the time period.
1803        // 2. Find the shared random value that's associated with that HsDirRing.
1804        // 3. Choose spread = the parameter `hsdir_spread_fetch`
1805        // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1806        // 5. Initialize Dirs = []
1807        // 6. for idx in 1..=n_replicas:
1808        //       - let H = hsdir_ring::onion_service_index(id, replica, rand,
1809        //         period).
1810        //       - Find the position of H within hsdir_ring.
1811        //       - Take elements from hsdir_ring starting at that position,
1812        //         adding them to Dirs until we have added `spread` new elements
1813        //         that were not there before.
1814        // 7. Shuffle Dirs
1815        // 8. return Dirs.
1816
1817        let spread = self.spread(HsDirOp::Download);
1818
1819        // When downloading, only look at relays on current ring.
1820        let ring = &self.hsdir_rings.current;
1821
1822        if ring.params().time_period != period {
1823            return Err(internal!(
1824                "our current ring is not associated with the requested time period!"
1825            ));
1826        }
1827
1828        let mut hs_dirs = self.select_hsdirs(hsid, ring, spread).collect_vec();
1829
1830        // When downloading, the order of the returned relays is random.
1831        hs_dirs.shuffle(rng);
1832
1833        Ok(hs_dirs)
1834    }
1835
1836    /// Return the relays in this network directory that will be used as hidden service directories
1837    ///
1838    /// Returns the relays that are suitable for storing a given onion service's descriptors at the
1839    /// given time period.
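    ///
    /// # Example
    ///
    /// A sketch (not compiled here), assuming a `netdir: &NetDir` and an
    /// `hsid: HsBlindId` are in scope; descriptors are uploaded for every
    /// relevant time period:
    ///
    /// ```ignore
    /// for params in netdir.hs_all_time_periods() {
    ///     let hsdirs = netdir.hs_dirs_upload(hsid.clone(), params.time_period())?;
    ///     for hsdir in hsdirs {
    ///         // upload the descriptor built for this time period to `hsdir` ...
    ///     }
    /// }
    /// ```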
1840    #[cfg(feature = "hs-service")]
1841    pub fn hs_dirs_upload(
1842        &self,
1843        hsid: HsBlindId,
1844        period: TimePeriod,
1845    ) -> std::result::Result<impl Iterator<Item = Relay<'_>>, Bug> {
1846        // Algorithm:
1847        //
1848        // 1. Choose spread = the parameter `hsdir_spread_store`
1849        // 2. Determine which HsDirRing to use, based on the time period.
1850        // 3. Find the shared random value that's associated with that HsDirRing.
1851        // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1852        // 5. Initialize Dirs = []
1853        // 6. for idx in 1..=n_replicas:
1854        //       - let H = hsdir_ring::onion_service_index(id, replica, rand,
1855        //         period).
1856        //       - Find the position of H within hsdir_ring.
1857        //       - Take elements from hsdir_ring starting at that position,
1858        //         adding them to Dirs until we have added `spread` new elements
1859        //         that were not there before.
1860        // 7. return Dirs.
1861        let spread = self.spread(HsDirOp::Upload);
1862
1863        // For each HsBlindId, determine which HsDirRing to use.
1864        let rings = self
1865            .hsdir_rings
1866            .iter()
1867            .filter_map(move |ring| {
1868                // Make sure the ring matches the TP of the hsid it's matched with.
1869                (ring.params().time_period == period).then_some((ring, hsid, period))
1870            })
1871            .collect::<Vec<_>>();
1872
1873        // The specified period should have an associated ring.
1874        if !rings.iter().any(|(_, _, tp)| *tp == period) {
1875            return Err(internal!(
1876                "the specified time period does not have an associated ring"
1877            ));
1878        };
1879
1880        // Now that we've matched each `hsid` with the ring associated with its TP, we can start
1881        // selecting replicas from each ring.
1882        Ok(rings.into_iter().flat_map(move |(ring, hsid, period)| {
1883            assert_eq!(period, ring.params().time_period());
1884            self.select_hsdirs(hsid, ring, spread)
1885        }))
1886    }
1887
1888    /// Return the relays in this network directory that will be used as hidden service directories
1889    ///
1890    /// Depending on `op`,
1891    /// these are suitable either to store or to retrieve a
1892    /// given onion service's descriptor at a given time period.
1893    ///
1894    /// When `op` is `Download`, the order is random.
1895    /// When `op` is `Upload`, the order is not specified.
1896    ///
1897    /// Return an error if the time period is not one returned by
1898    /// `onion_service_time_period` or `onion_service_secondary_time_periods`.
1899    //
1900    // TODO: make HsDirOp pub(crate) once this is removed
1901    #[cfg(feature = "hs-common")]
1902    #[deprecated(note = "Use hs_dirs_upload or hs_dirs_download instead")]
1903    pub fn hs_dirs<'r, R>(&'r self, hsid: &HsBlindId, op: HsDirOp, rng: &mut R) -> Vec<Relay<'r>>
1904    where
1905        R: rand::Rng,
1906    {
1907        // Algorithm:
1908        //
1909        // 1. Determine which HsDirRing to use, based on the time period.
1910        // 2. Find the shared random value that's associated with that HsDirRing.
1911        // 3. Choose spread = the parameter `hsdir_spread_store` or
1912        //    `hsdir_spread_fetch` based on `op`.
1913        // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1914        // 5. Initialize Dirs = []
1915        // 6. for idx in 1..=n_replicas:
1916        //       - let H = hsdir_ring::onion_service_index(id, replica, rand,
1917        //         period).
1918        //       - Find the position of H within hsdir_ring.
1919        //       - Take elements from hsdir_ring starting at that position,
1920        //         adding them to Dirs until we have added `spread` new elements
1921        //         that were not there before.
1922        // 7. return Dirs.
1923        let n_replicas = self
1924            .params
1925            .hsdir_n_replicas
1926            .get()
1927            .try_into()
1928            .expect("BoundedInt did not enforce bounds");
1929
1930        let spread = match op {
1931            HsDirOp::Download => self.params.hsdir_spread_fetch,
1932            #[cfg(feature = "hs-service")]
1933            HsDirOp::Upload => self.params.hsdir_spread_store,
1934        };
1935
1936        let spread = spread
1937            .get()
1938            .try_into()
1939            .expect("BoundedInt did not enforce bounds!");
1940
1941        // TODO: I may be wrong here but I suspect that this function may
1942        // need refactoring so that it does not look at _all_ of the HsDirRings,
1943        // but only at the ones that correspond to time periods for which
1944        // the HsBlindId is valid.  Or I could be mistaken, in which case we should
1945        // have a comment to explain why I am, since the logic is subtle.
1946        // (For clients, there is only one ring.) -nickm
1947        //
1948        // (Actually, there is no need to follow through with the above TODO,
1949        // since this function is deprecated, and not used anywhere but the
1950        // tests.)
1951
1952        let mut hs_dirs = self
1953            .hsdir_rings
1954            .iter_for_op(op)
1955            .cartesian_product(1..=n_replicas) // 1-indexed !
1956            .flat_map({
1957                let mut selected_nodes = HashSet::new();
1958
1959                move |(ring, replica): (&HsDirRing, u8)| {
1960                    let hsdir_idx = hsdir_ring::service_hsdir_index(hsid, replica, ring.params());
1961
1962                    let items = ring
1963                        .ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
1964                            // According to rend-spec 2.2.3:
1965                            //                                                  ... If any of those
1966                            // nodes have already been selected for a lower-numbered replica of the
1967                            // service, any nodes already chosen are disregarded (i.e. skipped over)
1968                            // when choosing a replica's hsdir_spread_store nodes.
1969                            selected_nodes.insert(*hsdir_idx)
1970                        })
1971                        .collect::<Vec<_>>();
1972
1973                    items
1974                }
1975            })
1976            .filter_map(|(_hsdir_idx, rs_idx)| {
1977                // This ought not to be None but let's not panic or bail if it is
1978                self.relay_by_rs_idx(*rs_idx)
1979            })
1980            .collect_vec();
1981
1982        match op {
1983            HsDirOp::Download => {
1984                // When `op` is `Download`, the order is random.
1985                hs_dirs.shuffle(rng);
1986            }
1987            #[cfg(feature = "hs-service")]
1988            HsDirOp::Upload => {
1989                // When `op` is `Upload`, the order is not specified.
1990            }
1991        }
1992
1993        hs_dirs
1994    }
1995}
1996
1997impl MdReceiver for NetDir {
1998    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1999        Box::new(self.rsidx_by_missing.keys())
2000    }
2001    fn add_microdesc(&mut self, md: Microdesc) -> bool {
2002        self.add_arc_microdesc(Arc::new(md))
2003    }
2004    fn n_missing(&self) -> usize {
2005        self.rsidx_by_missing.len()
2006    }
2007}
2008
2009impl<'a> UncheckedRelay<'a> {
2010    /// Return an [`UncheckedRelayDetails`](details::UncheckedRelayDetails) for this relay.
2011    ///
2012    /// Callers should generally avoid using this information directly if they can;
2013    /// it's better to use a higher-level function that exposes semantic information
2014    /// rather than these properties.
2015    pub fn low_level_details(&self) -> details::UncheckedRelayDetails<'_> {
2016        details::UncheckedRelayDetails(self)
2017    }
2018
2019    /// Return true if this relay is valid and [usable](NetDir#usable).
2020    ///
2021    /// This function should return `true` for every Relay we expose
2022    /// to the user.
2023    pub fn is_usable(&self) -> bool {
2024        // No need to check for 'valid' or 'running': they are implicit.
2025        self.md.is_some() && self.rs.ed25519_id_is_usable()
2026    }
2027    /// If this is [usable](NetDir#usable), return a corresponding Relay object.
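    ///
    /// For example, a sketch (not compiled here) assuming an
    /// `unchecked: UncheckedRelay` obtained from a `NetDir`:
    ///
    /// ```ignore
    /// if let Some(relay) = unchecked.into_relay() {
    ///     // `relay` has both a consensus entry and a microdescriptor.
    /// }
    /// ```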
2028    pub fn into_relay(self) -> Option<Relay<'a>> {
2029        if self.is_usable() {
2030            Some(Relay {
2031                rs: self.rs,
2032                md: self.md?,
2033                #[cfg(feature = "geoip")]
2034                cc: self.cc,
2035            })
2036        } else {
2037            None
2038        }
2039    }
2040
2041    /// Return true if this relay is a hidden service directory
2042    ///
2043    /// Ie, if it is to be included in the hsdir ring.
2044    #[cfg(feature = "hs-common")]
2045    pub(crate) fn is_hsdir_for_ring(&self) -> bool {
2046        // TODO are there any other flags we should check?
2047        // rend-spec-v3 2.2.3 says just
2048        //   "each node listed in the current consensus with the HSDir flag"
2049        // Do we need to check ed25519_id_is_usable ?
2050        // See also https://gitlab.torproject.org/tpo/core/arti/-/issues/504
2051        self.rs.is_flagged_hsdir()
2052    }
2053}
2054
2055impl<'a> Relay<'a> {
2056    /// Return a [`RelayDetails`](details::RelayDetails) for this relay.
2057    ///
2058    /// Callers should generally avoid using this information directly if they can;
2059    /// it's better to use a higher-level function that exposes semantic information
2060    /// rather than these properties.
2061    pub fn low_level_details(&self) -> details::RelayDetails<'_> {
2062        details::RelayDetails(self)
2063    }
2064
2065    /// Return the Ed25519 ID for this relay.
2066    pub fn id(&self) -> &Ed25519Identity {
2067        self.md.ed25519_id()
2068    }
2069    /// Return the RsaIdentity for this relay.
2070    pub fn rsa_id(&self) -> &RsaIdentity {
2071        self.rs.rsa_identity()
2072    }
2073
2074    /// Return a reference to this relay's "router status" entry in
2075    /// the consensus.
2076    ///
2077    /// The router status entry contains information about the relay
2078    /// that the authorities voted on directly.  For most use cases,
2079    /// you shouldn't need them.
2080    ///
2081    /// This function is only available if the crate was built with
2082    /// its `experimental-api` feature.
2083    #[cfg(feature = "experimental-api")]
2084    pub fn rs(&self) -> &netstatus::MdConsensusRouterStatus {
2085        self.rs
2086    }
2087    /// Return a reference to this relay's "microdescriptor" entry in
2088    /// the consensus.
2089    ///
2090    /// A "microdescriptor" is a synopsis of the information about a relay,
2091    /// used to determine its capabilities and route traffic through it.
2092    /// For most use cases, you shouldn't need it.
2093    ///
2094    /// This function is only available if the crate was built with
2095    /// its `experimental-api` feature.
2096    #[cfg(feature = "experimental-api")]
2097    pub fn md(&self) -> &Microdesc {
2098        self.md
2099    }
2100}
2101
2102/// An error value returned from [`NetDir::by_ids_detailed`].
2103#[cfg(feature = "hs-common")]
2104#[derive(Clone, Debug, thiserror::Error)]
2105#[non_exhaustive]
2106pub enum RelayLookupError {
2107    /// We found a relay whose presence indicates that the provided set of
2108    /// identities is impossible to resolve.
2109    #[error("Provided set of identities is impossible according to consensus.")]
2110    Impossible,
2111}
2112
2113impl<'a> HasAddrs for Relay<'a> {
2114    fn addrs(&self) -> &[std::net::SocketAddr] {
2115        self.rs.addrs()
2116    }
2117}
2118#[cfg(feature = "geoip")]
2119#[cfg_attr(docsrs, doc(cfg(feature = "geoip")))]
2120impl<'a> HasCountryCode for Relay<'a> {
2121    fn country_code(&self) -> Option<CountryCode> {
2122        self.cc
2123    }
2124}
2125impl<'a> tor_linkspec::HasRelayIdsLegacy for Relay<'a> {
2126    fn ed_identity(&self) -> &Ed25519Identity {
2127        self.id()
2128    }
2129    fn rsa_identity(&self) -> &RsaIdentity {
2130        self.rsa_id()
2131    }
2132}
2133
2134impl<'a> HasRelayIds for UncheckedRelay<'a> {
2135    fn identity(&self, key_type: RelayIdType) -> Option<RelayIdRef<'_>> {
2136        match key_type {
2137            RelayIdType::Ed25519 if self.rs.ed25519_id_is_usable() => {
2138                self.md.map(|m| m.ed25519_id().into())
2139            }
2140            RelayIdType::Rsa => Some(self.rs.rsa_identity().into()),
2141            _ => None,
2142        }
2143    }
2144}
2145#[cfg(feature = "geoip")]
2146impl<'a> HasCountryCode for UncheckedRelay<'a> {
2147    fn country_code(&self) -> Option<CountryCode> {
2148        self.cc
2149    }
2150}
2151
2152impl<'a> DirectChanMethodsHelper for Relay<'a> {}
2153impl<'a> ChanTarget for Relay<'a> {}
2154
2155impl<'a> tor_linkspec::CircTarget for Relay<'a> {
2156    fn ntor_onion_key(&self) -> &ll::pk::curve25519::PublicKey {
2157        self.md.ntor_key()
2158    }
2159    fn protovers(&self) -> &tor_protover::Protocols {
2160        self.rs.protovers()
2161    }
2162}
2163
2164#[cfg(test)]
2165mod test {
2166    // @@ begin test lint list maintained by maint/add_warning @@
2167    #![allow(clippy::bool_assert_comparison)]
2168    #![allow(clippy::clone_on_copy)]
2169    #![allow(clippy::dbg_macro)]
2170    #![allow(clippy::mixed_attributes_style)]
2171    #![allow(clippy::print_stderr)]
2172    #![allow(clippy::print_stdout)]
2173    #![allow(clippy::single_char_pattern)]
2174    #![allow(clippy::unwrap_used)]
2175    #![allow(clippy::unchecked_duration_subtraction)]
2176    #![allow(clippy::useless_vec)]
2177    #![allow(clippy::needless_pass_by_value)]
2178    //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
2179    #![allow(clippy::cognitive_complexity)]
2180    use super::*;
2181    use crate::testnet::*;
2182    use float_eq::assert_float_eq;
2183    use std::collections::HashSet;
2184    use std::time::Duration;
2185    use tor_basic_utils::test_rng::{self, testing_rng};
2186    use tor_linkspec::{RelayIdType, RelayIds};
2187
2188    #[cfg(feature = "hs-common")]
2189    fn dummy_hs_blind_id() -> HsBlindId {
2190        let hsid = [2, 1, 1, 1].iter().cycle().take(32).cloned().collect_vec();
2191        let hsid = Ed25519Identity::new(hsid[..].try_into().unwrap());
2192        HsBlindId::from(hsid)
2193    }
2194
2195    // Basic functionality for a partial netdir: Add microdescriptors,
2196    // then you have a netdir.
2197    #[test]
2198    fn partial_netdir() {
2199        let (consensus, microdescs) = construct_network().unwrap();
2200        let dir = PartialNetDir::new(consensus, None);
2201
2202        // Check the lifetime
2203        let lifetime = dir.lifetime();
2204        assert_eq!(
2205            lifetime
2206                .valid_until()
2207                .duration_since(lifetime.valid_after())
2208                .unwrap(),
2209            Duration::new(86400, 0)
2210        );
2211
2212        // No microdescriptors, so we don't have enough paths, and can't
2213        // advance.
2214        assert!(!dir.have_enough_paths());
2215        let mut dir = match dir.unwrap_if_sufficient() {
2216            Ok(_) => panic!(),
2217            Err(d) => d,
2218        };
2219
2220        let missing: HashSet<_> = dir.missing_microdescs().collect();
2221        assert_eq!(missing.len(), 40);
2222        assert_eq!(missing.len(), dir.netdir.c_relays().len());
2223        for md in &microdescs {
2224            assert!(missing.contains(md.digest()));
2225        }
2226
2227        // Now add all the mds and try again.
2228        for md in microdescs {
2229            let wanted = dir.add_microdesc(md);
2230            assert!(wanted);
2231        }
2232
2233        let missing: HashSet<_> = dir.missing_microdescs().collect();
2234        assert!(missing.is_empty());
2235        assert!(dir.have_enough_paths());
2236        let _complete = match dir.unwrap_if_sufficient() {
2237            Ok(d) => d,
2238            Err(_) => panic!(),
2239        };
2240    }
2241
2242    #[test]
2243    fn override_params() {
2244        let (consensus, _microdescs) = construct_network().unwrap();
2245        let override_p = "bwweightscale=2 doesnotexist=77 circwindow=500"
2246            .parse()
2247            .unwrap();
2248        let dir = PartialNetDir::new(consensus.clone(), Some(&override_p));
2249        let params = &dir.netdir.params;
2250        assert_eq!(params.bw_weight_scale.get(), 2);
2251        assert_eq!(params.circuit_window.get(), 500_i32);
2252
2253        // try again without the override.
2254        let dir = PartialNetDir::new(consensus, None);
2255        let params = &dir.netdir.params;
2256        assert_eq!(params.bw_weight_scale.get(), 1_i32);
2257        assert_eq!(params.circuit_window.get(), 1000_i32);
2258    }
2259
2260    #[test]
2261    fn fill_from_previous() {
2262        let (consensus, microdescs) = construct_network().unwrap();
2263
2264        let mut dir = PartialNetDir::new(consensus.clone(), None);
2265        for md in microdescs.iter().skip(2) {
2266            let wanted = dir.add_microdesc(md.clone());
2267            assert!(wanted);
2268        }
2269        let dir1 = dir.unwrap_if_sufficient().unwrap();
2270        assert_eq!(dir1.missing_microdescs().count(), 2);
2271
2272        let mut dir = PartialNetDir::new(consensus, None);
2273        assert_eq!(dir.missing_microdescs().count(), 40);
2274        dir.fill_from_previous_netdir(Arc::new(dir1));
2275        assert_eq!(dir.missing_microdescs().count(), 2);
2276    }
2277
2278    #[test]
2279    fn path_count() {
2280        let low_threshold = "min_paths_for_circs_pct=64".parse().unwrap();
2281        let high_threshold = "min_paths_for_circs_pct=65".parse().unwrap();
2282
2283        let (consensus, microdescs) = construct_network().unwrap();
2284
2285        let mut dir = PartialNetDir::new(consensus.clone(), Some(&low_threshold));
2286        for (pos, md) in microdescs.iter().enumerate() {
2287            if pos % 7 == 2 {
2288                continue; // skip a few relays.
2289            }
2290            dir.add_microdesc(md.clone());
2291        }
2292        let dir = dir.unwrap_if_sufficient().unwrap();
2293
2294        // We have 40 relays that we know about from the consensus.
2295        assert_eq!(dir.all_relays().count(), 40);
2296
2297        // But only 34 are usable.
2298        assert_eq!(dir.relays().count(), 34);
2299
2300        // For guards: mds 20..=39 correspond to Guard relays.
2301        // Their bandwidth is 2*(1000+2000+...10000) = 110_000.
2302        // We skipped 23, 30, and 37.  They have bandwidth
2303        // 4000 + 1000 + 8000 = 13_000.  So our fractional bandwidth
2304        // should be (110-13)/110.
2305        let f = dir.frac_for_role(WeightRole::Guard, |u| u.rs.is_flagged_guard());
2306        assert!(((97.0 / 110.0) - f).abs() < 0.000001);
2307
2308        // For exits: mds 10..=19 and 30..=39 correspond to Exit relays.
2309        // We skipped 16, 30, and 37. Per above our fractional bandwidth is
2310        // (110-16)/110.
2311        let f = dir.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit());
2312        assert!(((94.0 / 110.0) - f).abs() < 0.000001);
2313
2314        // For middles: all relays are middles. We skipped 2, 9, 16,
2315        // 23, 30, and 37. Per above our fractional bandwidth is
2316        // (220-33)/220
2317        let f = dir.frac_for_role(WeightRole::Middle, |_| true);
2318        assert!(((187.0 / 220.0) - f).abs() < 0.000001);
2319
2320        // Multiplying those together, we get the fraction of paths we can
2321        // build at ~0.64052066, which is above the threshold we set above for
2322        // MinPathsForCircsPct.
2323        let f = dir.frac_usable_paths();
2324        assert!((f - 0.64052066).abs() < 0.000001);
2325
2326        // But if we try again with a slightly higher threshold...
2327        let mut dir = PartialNetDir::new(consensus, Some(&high_threshold));
2328        for (pos, md) in microdescs.into_iter().enumerate() {
2329            if pos % 7 == 2 {
2330                continue; // skip a few relays.
2331            }
2332            dir.add_microdesc(md);
2333        }
2334        assert!(dir.unwrap_if_sufficient().is_err());
2335    }
2336
2337    /// Return a 3-tuple for use by `test_pick_*()` of an Rng, a number of
2338    /// iterations, and a tolerance.
2339    ///
2340    /// If the Rng is deterministic (the default), we can use a faster setup,
2341    /// with a higher tolerance and fewer iterations.  But if you've explicitly
2342    /// opted into randomization (or are replaying a seed from an earlier
2343    /// randomized test), we give you more iterations and a tighter tolerance.
2344    fn testing_rng_with_tolerances() -> (impl rand::Rng, usize, f64) {
2345        // Use a deterministic RNG if none is specified, since this is slow otherwise.
2346        let config = test_rng::Config::from_env().unwrap_or(test_rng::Config::Deterministic);
2347        let (iters, tolerance) = match config {
2348            test_rng::Config::Deterministic => (5000, 0.02),
2349            _ => (50000, 0.01),
2350        };
2351        (config.into_rng(), iters, tolerance)
2352    }
2353
2354    #[test]
2355    fn test_pick() {
2356        let (consensus, microdescs) = construct_network().unwrap();
2357        let mut dir = PartialNetDir::new(consensus, None);
2358        for md in microdescs.into_iter() {
2359            let wanted = dir.add_microdesc(md.clone());
2360            assert!(wanted);
2361        }
2362        let dir = dir.unwrap_if_sufficient().unwrap();
2363
2364        let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2365
2366        let mut picked = [0_isize; 40];
2367        for _ in 0..total {
2368            let r = dir.pick_relay(&mut rng, WeightRole::Middle, |r| {
2369                r.low_level_details().supports_exit_port_ipv4(80)
2370            });
2371            let r = r.unwrap();
2372            let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2373            picked[id_byte as usize] += 1;
2374        }
2375        // non-exits should never get picked.
2376        picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2377        picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2378
2379        let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2380
2381        // We didn't use any non-default weights, so the other relays get
2382        // weighted proportionally to their bandwidth.
2383        assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2384        assert_float_eq!(picked_f[38], (9.0 / 110.0), abs <= tolerance);
2385        assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2386    }
2387
2388    #[test]
2389    fn test_pick_multiple() {
2390        // This is mostly a copy of test_pick, except that it uses
2391        // pick_n_relays to pick several relays at once.
2392
2393        let dir = construct_netdir().unwrap_if_sufficient().unwrap();
2394
2395        let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2396
2397        let mut picked = [0_isize; 40];
2398        for _ in 0..total / 4 {
2399            let relays = dir.pick_n_relays(&mut rng, 4, WeightRole::Middle, |r| {
2400                r.low_level_details().supports_exit_port_ipv4(80)
2401            });
2402            assert_eq!(relays.len(), 4);
2403            for r in relays {
2404                let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2405                picked[id_byte as usize] += 1;
2406            }
2407        }
2408        // non-exits should never get picked.
2409        picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2410        picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2411
2412        let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2413
2414        // We didn't use any non-default weights, so the other relays get
2415        // weighted proportionally to their bandwidth.
2416        assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2417        assert_float_eq!(picked_f[36], (7.0 / 110.0), abs <= tolerance);
2418        assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2419    }
2420
2421    #[test]
2422    fn subnets() {
2423        let cfg = SubnetConfig::default();
2424
2425        fn same_net(cfg: &SubnetConfig, a: &str, b: &str) -> bool {
2426            cfg.addrs_in_same_subnet(&a.parse().unwrap(), &b.parse().unwrap())
2427        }
2428
2429        assert!(same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2430        assert!(!same_net(&cfg, "127.15.3.3", "127.16.9.9"));
2431
2432        assert!(!same_net(&cfg, "127.15.3.3", "127::"));
2433
2434        assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2435        assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:fffe:91:34::"));
2436
2437        let cfg = SubnetConfig {
2438            subnets_family_v4: 32,
2439            subnets_family_v6: 128,
2440        };
2441        assert!(!same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2442        assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2443
2444        assert!(same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2445        assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.2"));
2446        assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:90:33::"));
2447
2448        let cfg = SubnetConfig {
2449            subnets_family_v4: 33,
2450            subnets_family_v6: 129,
2451        };
2452        assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2453        assert!(!same_net(&cfg, "::", "::"));
2454    }
2455
2456    #[test]
2457    fn subnet_union() {
2458        let cfg1 = SubnetConfig {
2459            subnets_family_v4: 16,
2460            subnets_family_v6: 64,
2461        };
2462        let cfg2 = SubnetConfig {
2463            subnets_family_v4: 24,
2464            subnets_family_v6: 32,
2465        };
2466        let a1 = "1.2.3.4".parse().unwrap();
2467        let a2 = "1.2.10.10".parse().unwrap();
2468
2469        let a3 = "ffff:ffff::7".parse().unwrap();
2470        let a4 = "ffff:ffff:1234::8".parse().unwrap();
2471
2472        assert_eq!(cfg1.addrs_in_same_subnet(&a1, &a2), true);
2473        assert_eq!(cfg2.addrs_in_same_subnet(&a1, &a2), false);
2474
2475        assert_eq!(cfg1.addrs_in_same_subnet(&a3, &a4), false);
2476        assert_eq!(cfg2.addrs_in_same_subnet(&a3, &a4), true);
2477
2478        let cfg_u = cfg1.union(&cfg2);
2479        assert_eq!(
2480            cfg_u,
2481            SubnetConfig {
2482                subnets_family_v4: 16,
2483                subnets_family_v6: 32,
2484            }
2485        );
2486        assert_eq!(cfg_u.addrs_in_same_subnet(&a1, &a2), true);
2487        assert_eq!(cfg_u.addrs_in_same_subnet(&a3, &a4), true);
2488
2489        assert_eq!(cfg1.union(&cfg1), cfg1);
2490
2491        assert_eq!(cfg1.union(&SubnetConfig::no_addresses_match()), cfg1);
2492    }
2493
2494    #[test]
2495    fn relay_funcs() {
2496        let (consensus, microdescs) = construct_custom_network(
2497            |pos, nb, _| {
2498                if pos == 15 {
2499                    nb.rs.add_or_port("[f0f0::30]:9001".parse().unwrap());
2500                } else if pos == 20 {
2501                    nb.rs.add_or_port("[f0f0::3131]:9001".parse().unwrap());
2502                }
2503            },
2504            None,
2505        )
2506        .unwrap();
2507        let subnet_config = SubnetConfig::default();
2508        let all_family_info = FamilyRules::all_family_info();
2509        let mut dir = PartialNetDir::new(consensus, None);
2510        for md in microdescs.into_iter() {
2511            let wanted = dir.add_microdesc(md.clone());
2512            assert!(wanted);
2513        }
2514        let dir = dir.unwrap_if_sufficient().unwrap();
2515
2516        // Pick out a few relays by ID.
2517        let k0 = Ed25519Identity::from([0; 32]);
2518        let k1 = Ed25519Identity::from([1; 32]);
2519        let k2 = Ed25519Identity::from([2; 32]);
2520        let k3 = Ed25519Identity::from([3; 32]);
2521        let k10 = Ed25519Identity::from([10; 32]);
2522        let k15 = Ed25519Identity::from([15; 32]);
2523        let k20 = Ed25519Identity::from([20; 32]);
2524
2525        let r0 = dir.by_id(&k0).unwrap();
2526        let r1 = dir.by_id(&k1).unwrap();
2527        let r2 = dir.by_id(&k2).unwrap();
2528        let r3 = dir.by_id(&k3).unwrap();
2529        let r10 = dir.by_id(&k10).unwrap();
2530        let r15 = dir.by_id(&k15).unwrap();
2531        let r20 = dir.by_id(&k20).unwrap();
2532
2533        assert_eq!(r0.id(), &[0; 32].into());
2534        assert_eq!(r0.rsa_id(), &[0; 20].into());
2535        assert_eq!(r1.id(), &[1; 32].into());
2536        assert_eq!(r1.rsa_id(), &[1; 20].into());
2537
2538        assert!(r0.same_relay_ids(&r0));
2539        assert!(r1.same_relay_ids(&r1));
2540        assert!(!r1.same_relay_ids(&r0));
2541
2542        assert!(r0.low_level_details().is_dir_cache());
2543        assert!(!r1.low_level_details().is_dir_cache());
2544        assert!(r2.low_level_details().is_dir_cache());
2545        assert!(!r3.low_level_details().is_dir_cache());
2546
2547        assert!(!r0.low_level_details().supports_exit_port_ipv4(80));
2548        assert!(!r1.low_level_details().supports_exit_port_ipv4(80));
2549        assert!(!r2.low_level_details().supports_exit_port_ipv4(80));
2550        assert!(!r3.low_level_details().supports_exit_port_ipv4(80));
2551
2552        assert!(!r0.low_level_details().policies_allow_some_port());
2553        assert!(!r1.low_level_details().policies_allow_some_port());
2554        assert!(!r2.low_level_details().policies_allow_some_port());
2555        assert!(!r3.low_level_details().policies_allow_some_port());
2556        assert!(r10.low_level_details().policies_allow_some_port());
2557
2558        assert!(r0.low_level_details().in_same_family(&r0, all_family_info));
2559        assert!(r0.low_level_details().in_same_family(&r1, all_family_info));
2560        assert!(r1.low_level_details().in_same_family(&r0, all_family_info));
2561        assert!(r1.low_level_details().in_same_family(&r1, all_family_info));
2562        assert!(!r0.low_level_details().in_same_family(&r2, all_family_info));
2563        assert!(!r2.low_level_details().in_same_family(&r0, all_family_info));
2564        assert!(r2.low_level_details().in_same_family(&r2, all_family_info));
2565        assert!(r2.low_level_details().in_same_family(&r3, all_family_info));
2566
2567        assert!(r0.low_level_details().in_same_subnet(&r10, &subnet_config));
2568        assert!(r10.low_level_details().in_same_subnet(&r10, &subnet_config));
2569        assert!(r0.low_level_details().in_same_subnet(&r0, &subnet_config));
2570        assert!(r1.low_level_details().in_same_subnet(&r1, &subnet_config));
2571        assert!(!r1.low_level_details().in_same_subnet(&r2, &subnet_config));
2572        assert!(!r2.low_level_details().in_same_subnet(&r3, &subnet_config));
2573
2574        // Make sure IPv6 families work.
2575        let subnet_config = SubnetConfig {
2576            subnets_family_v4: 128,
2577            subnets_family_v6: 96,
2578        };
2579        assert!(r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2580        assert!(!r15.low_level_details().in_same_subnet(&r1, &subnet_config));
2581
2582        // Make sure that subnet configs can be disabled.
2583        let subnet_config = SubnetConfig {
2584            subnets_family_v4: 255,
2585            subnets_family_v6: 255,
2586        };
2587        assert!(!r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2588    }
2589
2590    #[test]
2591    fn test_badexit() {
2592        // make a netdir where relays 10-19 are badexit, and everybody
2593        // exits to 443 on IPv6.
2594        use tor_netdoc::doc::netstatus::RelayFlags;
2595        let netdir = construct_custom_netdir(|pos, nb, _| {
2596            if (10..20).contains(&pos) {
2597                nb.rs.add_flags(RelayFlags::BAD_EXIT);
2598            }
2599            nb.md.parse_ipv6_policy("accept 443").unwrap();
2600        })
2601        .unwrap()
2602        .unwrap_if_sufficient()
2603        .unwrap();
2604
2605        let e12 = netdir.by_id(&Ed25519Identity::from([12; 32])).unwrap();
2606        let e32 = netdir.by_id(&Ed25519Identity::from([32; 32])).unwrap();
2607
2608        assert!(!e12.low_level_details().supports_exit_port_ipv4(80));
2609        assert!(e32.low_level_details().supports_exit_port_ipv4(80));
2610
2611        assert!(!e12.low_level_details().supports_exit_port_ipv6(443));
2612        assert!(e32.low_level_details().supports_exit_port_ipv6(443));
2613        assert!(!e32.low_level_details().supports_exit_port_ipv6(555));
2614
2615        assert!(!e12.low_level_details().policies_allow_some_port());
2616        assert!(e32.low_level_details().policies_allow_some_port());
2617
2618        assert!(!e12.low_level_details().ipv4_policy().allows_some_port());
2619        assert!(!e12.low_level_details().ipv6_policy().allows_some_port());
2620        assert!(e32.low_level_details().ipv4_policy().allows_some_port());
2621        assert!(e32.low_level_details().ipv6_policy().allows_some_port());
2622
2623        assert!(e12
2624            .low_level_details()
2625            .ipv4_declared_policy()
2626            .allows_some_port());
2627        assert!(e12
2628            .low_level_details()
2629            .ipv6_declared_policy()
2630            .allows_some_port());
2631    }
2632
2633    #[cfg(feature = "experimental-api")]
2634    #[test]
2635    fn test_accessors() {
2636        let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2637
2638        let r4 = netdir.by_id(&Ed25519Identity::from([4; 32])).unwrap();
2639        let r16 = netdir.by_id(&Ed25519Identity::from([16; 32])).unwrap();
2640
2641        assert!(!r4.md().ipv4_policy().allows_some_port());
2642        assert!(r16.md().ipv4_policy().allows_some_port());
2643
2644        assert!(!r4.rs().is_flagged_exit());
2645        assert!(r16.rs().is_flagged_exit());
2646    }
2647
2648    #[test]
2649    fn test_by_id() {
2650        // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2651        let netdir = construct_custom_netdir(|pos, nb, _| {
2652            nb.omit_md = pos == 13;
2653        })
2654        .unwrap();
2655
2656        let netdir = netdir.unwrap_if_sufficient().unwrap();
2657
2658        let r = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2659        assert_eq!(r.id().as_bytes(), &[0; 32]);
2660
2661        assert!(netdir.by_id(&Ed25519Identity::from([13; 32])).is_none());
2662
2663        let r = netdir.by_rsa_id(&[12; 20].into()).unwrap();
2664        assert_eq!(r.rsa_id().as_bytes(), &[12; 20]);
2665        assert!(netdir.rsa_id_is_listed(&[12; 20].into()));
2666
2667        assert!(netdir.by_rsa_id(&[13; 20].into()).is_none());
2668
2669        assert!(netdir.by_rsa_id_unchecked(&[99; 20].into()).is_none());
2670        assert!(!netdir.rsa_id_is_listed(&[99; 20].into()));
2671
2672        let r = netdir.by_rsa_id_unchecked(&[13; 20].into()).unwrap();
2673        assert_eq!(r.rs.rsa_identity().as_bytes(), &[13; 20]);
2674        assert!(netdir.rsa_id_is_listed(&[13; 20].into()));
2675
2676        let pair_13_13 = RelayIds::builder()
2677            .ed_identity([13; 32].into())
2678            .rsa_identity([13; 20].into())
2679            .build()
2680            .unwrap();
2681        let pair_14_14 = RelayIds::builder()
2682            .ed_identity([14; 32].into())
2683            .rsa_identity([14; 20].into())
2684            .build()
2685            .unwrap();
2686        let pair_14_99 = RelayIds::builder()
2687            .ed_identity([14; 32].into())
2688            .rsa_identity([99; 20].into())
2689            .build()
2690            .unwrap();
2691
2692        let r = netdir.by_ids(&pair_13_13);
2693        assert!(r.is_none());
2694        let r = netdir.by_ids(&pair_14_14).unwrap();
2695        assert_eq!(r.identity(RelayIdType::Rsa).unwrap().as_bytes(), &[14; 20]);
2696        assert_eq!(
2697            r.identity(RelayIdType::Ed25519).unwrap().as_bytes(),
2698            &[14; 32]
2699        );
2700        let r = netdir.by_ids(&pair_14_99);
2701        assert!(r.is_none());
2702
2703        assert_eq!(
2704            netdir.id_pair_listed(&[13; 32].into(), &[13; 20].into()),
2705            None
2706        );
2707        assert_eq!(
2708            netdir.id_pair_listed(&[15; 32].into(), &[15; 20].into()),
2709            Some(true)
2710        );
2711        assert_eq!(
2712            netdir.id_pair_listed(&[15; 32].into(), &[99; 20].into()),
2713            Some(false)
2714        );
2715    }
2716
2717    #[test]
2718    #[cfg(feature = "hs-common")]
2719    fn test_by_ids_detailed() {
2720        // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2721        let netdir = construct_custom_netdir(|pos, nb, _| {
2722            nb.omit_md = pos == 13;
2723        })
2724        .unwrap();
2725
2726        let netdir = netdir.unwrap_if_sufficient().unwrap();
2727
2728        let id13_13 = RelayIds::builder()
2729            .ed_identity([13; 32].into())
2730            .rsa_identity([13; 20].into())
2731            .build()
2732            .unwrap();
2733        let id15_15 = RelayIds::builder()
2734            .ed_identity([15; 32].into())
2735            .rsa_identity([15; 20].into())
2736            .build()
2737            .unwrap();
2738        let id15_99 = RelayIds::builder()
2739            .ed_identity([15; 32].into())
2740            .rsa_identity([99; 20].into())
2741            .build()
2742            .unwrap();
2743        let id99_15 = RelayIds::builder()
2744            .ed_identity([99; 32].into())
2745            .rsa_identity([15; 20].into())
2746            .build()
2747            .unwrap();
2748        let id99_99 = RelayIds::builder()
2749            .ed_identity([99; 32].into())
2750            .rsa_identity([99; 20].into())
2751            .build()
2752            .unwrap();
2753        let id15_xx = RelayIds::builder()
2754            .ed_identity([15; 32].into())
2755            .build()
2756            .unwrap();
2757        let idxx_15 = RelayIds::builder()
2758            .rsa_identity([15; 20].into())
2759            .build()
2760            .unwrap();
2761
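        // by_ids_detailed distinguishes three outcomes, exercised below: Ok(Some(_)) when a
        // usable relay matches every given identity, Ok(None) when nothing matches but the
        // identities aren't contradictory, and Err(Impossible) when the identities can't all
        // belong to the same relay (e.g. relay 15's Ed25519 identity paired with an RSA
        // identity that isn't relay 15's).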
2762        assert!(matches!(netdir.by_ids_detailed(&id13_13), Ok(None)));
2763        assert!(matches!(netdir.by_ids_detailed(&id15_15), Ok(Some(_))));
2764        assert!(matches!(
2765            netdir.by_ids_detailed(&id15_99),
2766            Err(RelayLookupError::Impossible)
2767        ));
2768        assert!(matches!(
2769            netdir.by_ids_detailed(&id99_15),
2770            Err(RelayLookupError::Impossible)
2771        ));
2772        assert!(matches!(netdir.by_ids_detailed(&id99_99), Ok(None)));
2773        assert!(matches!(netdir.by_ids_detailed(&id15_xx), Ok(Some(_))));
2774        assert!(matches!(netdir.by_ids_detailed(&idxx_15), Ok(Some(_))));
2775    }
2776
2777    #[test]
2778    fn weight_type() {
2779        let r0 = RelayWeight(0);
2780        let r100 = RelayWeight(100);
2781        let r200 = RelayWeight(200);
2782        let r300 = RelayWeight(300);
2783        assert_eq!(r100 + r200, r300);
2784        assert_eq!(r100.checked_div(r200), Some(0.5));
2785        assert!(r100.checked_div(r0).is_none());
2786        assert_eq!(r200.ratio(0.5), Some(r100));
2787        assert!(r200.ratio(-1.0).is_none());
2788    }
2789
2790    #[test]
2791    fn weight_accessors() {
2792        // Make a standard testing netdir (nothing omitted).
2793        let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2794
2795        let g_total = netdir.total_weight(WeightRole::Guard, |r| r.rs.is_flagged_guard());
2796        // This is just the total guard weight, since all our Wxy = 1.
2797        assert_eq!(g_total, RelayWeight(110_000));
2798
2799        let g_total = netdir.total_weight(WeightRole::Guard, |_| false);
2800        assert_eq!(g_total, RelayWeight(0));
2801
2802        let relay = netdir.by_id(&Ed25519Identity::from([35; 32])).unwrap();
2803        assert!(relay.rs.is_flagged_guard());
2804        let w = netdir.relay_weight(&relay, WeightRole::Guard);
2805        assert_eq!(w, RelayWeight(6_000));
2806
2807        let w = netdir
2808            .weight_by_rsa_id(&[33; 20].into(), WeightRole::Guard)
2809            .unwrap();
2810        assert_eq!(w, RelayWeight(4_000));
2811
2812        assert!(netdir
2813            .weight_by_rsa_id(&[99; 20].into(), WeightRole::Guard)
2814            .is_none());
2815    }
2816
2817    #[test]
2818    fn family_list() {
2819        let netdir = construct_custom_netdir(|pos, n, _| {
2820            if pos == 0x0a {
2821                n.md.family(
2822                    "$0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B \
2823                     $0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C \
2824                     $0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D"
2825                        .parse()
2826                        .unwrap(),
2827                );
2828            } else if pos == 0x0c {
2829                n.md.family("$0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A".parse().unwrap());
2830            }
2831        })
2832        .unwrap()
2833        .unwrap_if_sufficient()
2834        .unwrap();
2835
2836        // In the testing netdir, adjacent members are in the same family by default...
2837        let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2838        let family: Vec<_> = netdir.known_family_members(&r0).collect();
2839        assert_eq!(family.len(), 1);
2840        assert_eq!(family[0].id(), &Ed25519Identity::from([1; 32]));
2841
2842        // But we've made this relay claim membership with several others.
2843        let r10 = netdir.by_id(&Ed25519Identity::from([10; 32])).unwrap();
2844        let family: HashSet<_> = netdir.known_family_members(&r10).map(|r| *r.id()).collect();
2845        assert_eq!(family.len(), 2);
2846        assert!(family.contains(&Ed25519Identity::from([11; 32])));
2847        assert!(family.contains(&Ed25519Identity::from([12; 32])));
2848        // Note that relay 13 isn't included, even though relay 10 lists it, because 13
2849        // doesn't claim membership with 10.
2850    }
2851    #[test]
2852    #[cfg(feature = "geoip")]
2853    fn relay_has_country_code() {
2854        let src_v6 = r#"
2855        fe80:dead:beef::,fe80:dead:ffff::,US
2856        fe80:feed:eeee::1,fe80:feed:eeee::2,AT
2857        fe80:feed:eeee::2,fe80:feed:ffff::,DE
2858        "#;
2859        let db = GeoipDb::new_from_legacy_format("", src_v6).unwrap();
2860
2861        let netdir = construct_custom_netdir_with_geoip(
2862            |pos, n, _| {
2863                if pos == 0x01 {
2864                    n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2865                }
2866                if pos == 0x02 {
2867                    n.rs.add_or_port("[fe80:feed:eeee::1]:42".parse().unwrap());
2868                    n.rs.add_or_port("[fe80:feed:eeee::2]:42".parse().unwrap());
2869                }
2870                if pos == 0x03 {
2871                    n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2872                    n.rs.add_or_port("[fe80:dead:beef::2]:42".parse().unwrap());
2873                }
2874            },
2875            &db,
2876        )
2877        .unwrap()
2878        .unwrap_if_sufficient()
2879        .unwrap();
2880
2881        // No GeoIP data available -> None
2882        let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2883        assert_eq!(r0.cc, None);
2884
2885        // Exactly one match -> Some
2886        let r1 = netdir.by_id(&Ed25519Identity::from([1; 32])).unwrap();
2887        assert_eq!(r1.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2888
2889        // Conflicting matches -> None
2890        let r2 = netdir.by_id(&Ed25519Identity::from([2; 32])).unwrap();
2891        assert_eq!(r2.cc, None);
2892
2893        // Multiple agreeing matches -> Some
2894        let r3 = netdir.by_id(&Ed25519Identity::from([3; 32])).unwrap();
2895        assert_eq!(r3.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2896    }
2897
2898    #[test]
2899    #[cfg(feature = "hs-common")]
2900    #[allow(deprecated)]
2901    fn hs_dirs_selection() {
2902        use tor_basic_utils::test_rng::testing_rng;
2903
2904        const HSDIR_SPREAD_STORE: i32 = 6;
2905        const HSDIR_SPREAD_FETCH: i32 = 2;
2906        const PARAMS: [(&str, i32); 2] = [
2907            ("hsdir_spread_store", HSDIR_SPREAD_STORE),
2908            ("hsdir_spread_fetch", HSDIR_SPREAD_FETCH),
2909        ];
2910
2911        let netdir: Arc<NetDir> =
2912            crate::testnet::construct_custom_netdir_with_params(|_, _, _| {}, PARAMS, None)
2913                .unwrap()
2914                .unwrap_if_sufficient()
2915                .unwrap()
2916                .into();
2917        let hsid = dummy_hs_blind_id();
2918
2919        const OP_RELAY_COUNT: &[(HsDirOp, usize)] = &[
2920            // We can't upload to (hsdir_n_replicas * hsdir_spread_store) = 12 relays, because
2921            // there are only 10 relays with the HsDir flag in the consensus.
2922            #[cfg(feature = "hs-service")]
2923            (HsDirOp::Upload, 10),
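            // Downloads use (hsdir_n_replicas * hsdir_spread_fetch) = 2 * 2 = 4 relays.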
2924            (HsDirOp::Download, 4),
2925        ];
2926
2927        for (op, relay_count) in OP_RELAY_COUNT {
2928            let relays = netdir.hs_dirs(&hsid, *op, &mut testing_rng());
2929
2930            assert_eq!(relays.len(), *relay_count);
2931
2932            // There should be no duplicates (the filtering function passed to
2933            // HsDirRing::ring_items_at() ensures the relays that are already in use for
2934            // lower-numbered replicas aren't considered a second time for a higher-numbered
2935            // replica).
2936            let unique = relays
2937                .iter()
2938                .map(|relay| relay.ed_identity())
2939                .collect::<HashSet<_>>();
2940            assert_eq!(unique.len(), relays.len());
2941        }
2942
2943        // TODO: come up with a test that checks that HsDirRing::ring_items_at() skips over the
2944        // expected relays.
2945        //
2946        // For example, let's say we have the following hsdir ring:
2947        //
2948        //         A  -  B
2949        //        /       \
2950        //       F         C
2951        //        \       /
2952        //         E  -  D
2953        //
2954        // Let's also assume that:
2955        //
2956        //   * hsdir_spread_store = 3
2957        //   * the ordering of the relays on the ring is [A, B, C, D, E, F]
2958        //
2959        // If we use relays [A, B, C] for replica 1, and hs_index(2) = E, then replica 2 _must_ get
2960        // relays [E, F, D]. We should have a test that checks this.
2961    }
2962
2963    #[test]
2964    fn zero_weights() {
2965        // Here we check the behavior of IndexedRandom::{choose_weighted, choose_multiple_weighted}
2966        // in the presence of items whose weight is 0.
2967        //
2968        // We think that the behavior is:
2969        //   - An item with weight 0 is never returned.
2970        //   - If all items have weight 0, choose_weighted returns an error.
2971        //   - If all items have weight 0, choose_multiple_weighted returns an empty list.
2972        //   - If we request n items from choose_multiple_weighted,
2973        //     but only m<n items have nonzero weight, we return all m of those items.
2974        //   - For choose_weighted (which asks for a single item), if no item has weight > 0,
2975        //     we get InsufficientNonZero.
2976        let items = vec![1, 2, 3];
2977        let mut rng = testing_rng();
2978
2979        let a = items.choose_weighted(&mut rng, |_| 0);
2980        assert!(matches!(a, Err(WeightError::InsufficientNonZero)));
2981
2982        let x = items.choose_multiple_weighted(&mut rng, 2, |_| 0);
2983        let xs: Vec<_> = x.unwrap().collect();
2984        assert!(xs.is_empty());
2985
2986        let only_one = |n: &i32| if *n == 1 { 1 } else { 0 };
2987        let x = items.choose_multiple_weighted(&mut rng, 2, only_one);
2988        let xs: Vec<_> = x.unwrap().collect();
2989        assert_eq!(&xs[..], &[&1]);
2990
2991        for _ in 0..100 {
2992            let a = items.choose_weighted(&mut rng, only_one);
2993            assert_eq!(a.unwrap(), &1);
2994
2995            let x = items
2996                .choose_multiple_weighted(&mut rng, 1, only_one)
2997                .unwrap()
2998                .collect::<Vec<_>>();
2999            assert_eq!(x, vec![&1]);
3000        }
3001    }
3002
3003    #[test]
3004    fn insufficient_but_nonzero() {
3005        // Here we check IndexedRandom::choose_multiple_weighted when no weight is zero,
3006        // but there are fewer items than requested.
3007        // (If this behavior changes, we need to change our usage.)
3008
3009        let items = vec![1, 2, 3];
3010        let mut rng = testing_rng();
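        // Ask for 10 items when only 3 exist (all with nonzero weight): all 3 come back,
        // in some arbitrary order.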
3011        let mut a = items
3012            .choose_multiple_weighted(&mut rng, 10, |_| 1)
3013            .unwrap()
3014            .copied()
3015            .collect::<Vec<_>>();
3016        a.sort();
3017        assert_eq!(a, items);
3018    }
3019}