// tor_netdir/lib.rs
1#![cfg_attr(docsrs, feature(doc_cfg))]
2#![doc = include_str!("../README.md")]
3// @@ begin lint list maintained by maint/add_warning @@
4#![allow(renamed_and_removed_lints)] // @@REMOVE_WHEN(ci_arti_stable)
5#![allow(unknown_lints)] // @@REMOVE_WHEN(ci_arti_nightly)
6#![warn(missing_docs)]
7#![warn(noop_method_call)]
8#![warn(unreachable_pub)]
9#![warn(clippy::all)]
10#![deny(clippy::await_holding_lock)]
11#![deny(clippy::cargo_common_metadata)]
12#![deny(clippy::cast_lossless)]
13#![deny(clippy::checked_conversions)]
14#![warn(clippy::cognitive_complexity)]
15#![deny(clippy::debug_assert_with_mut_call)]
16#![deny(clippy::exhaustive_enums)]
17#![deny(clippy::exhaustive_structs)]
18#![deny(clippy::expl_impl_clone_on_copy)]
19#![deny(clippy::fallible_impl_from)]
20#![deny(clippy::implicit_clone)]
21#![deny(clippy::large_stack_arrays)]
22#![warn(clippy::manual_ok_or)]
23#![deny(clippy::missing_docs_in_private_items)]
24#![warn(clippy::needless_borrow)]
25#![warn(clippy::needless_pass_by_value)]
26#![warn(clippy::option_option)]
27#![deny(clippy::print_stderr)]
28#![deny(clippy::print_stdout)]
29#![warn(clippy::rc_buffer)]
30#![deny(clippy::ref_option_ref)]
31#![warn(clippy::semicolon_if_nothing_returned)]
32#![warn(clippy::trait_duplication_in_bounds)]
33#![deny(clippy::unchecked_time_subtraction)]
34#![deny(clippy::unnecessary_wraps)]
35#![warn(clippy::unseparated_literal_suffix)]
36#![deny(clippy::unwrap_used)]
37#![deny(clippy::mod_module_files)]
38#![allow(clippy::let_unit_value)] // This can reasonably be done for explicitness
39#![allow(clippy::uninlined_format_args)]
40#![allow(clippy::significant_drop_in_scrutinee)] // arti/-/merge_requests/588/#note_2812945
41#![allow(clippy::result_large_err)] // temporary workaround for arti#587
42#![allow(clippy::needless_raw_string_hashes)] // complained-about code is fine, often best
43#![allow(clippy::needless_lifetimes)] // See arti#1765
44#![allow(mismatched_lifetime_syntaxes)] // temporary workaround for arti#2060
45//! <!-- @@ end lint list maintained by maint/add_warning @@ -->
46
47pub mod details;
48mod err;
49#[cfg(feature = "hs-common")]
50mod hsdir_params;
51#[cfg(feature = "hs-common")]
52mod hsdir_ring;
53pub mod params;
54mod weight;
55
56#[cfg(any(test, feature = "testing"))]
57pub mod testnet;
58#[cfg(feature = "testing")]
59pub mod testprovider;
60
61use async_trait::async_trait;
62#[cfg(feature = "hs-service")]
63use itertools::chain;
64use tor_error::warn_report;
65use tor_linkspec::{
66 ChanTarget, DirectChanMethodsHelper, HasAddrs, HasRelayIds, RelayIdRef, RelayIdType,
67};
68use tor_llcrypto as ll;
69use tor_llcrypto::pk::{ed25519::Ed25519Identity, rsa::RsaIdentity};
70use tor_netdoc::doc::microdesc::{MdDigest, Microdesc};
71use tor_netdoc::doc::netstatus::{self, MdConsensus, MdRouterStatus};
72#[cfg(feature = "hs-common")]
73use {hsdir_ring::HsDirRing, std::iter};
74
75use derive_more::{From, Into};
76use futures::{StreamExt, stream::BoxStream};
77use num_enum::{IntoPrimitive, TryFromPrimitive};
78use rand::seq::{IndexedRandom as _, SliceRandom as _, WeightError};
79use serde::Deserialize;
80use std::collections::HashMap;
81use std::net::IpAddr;
82use std::ops::Deref;
83use std::sync::Arc;
84use std::time::SystemTime;
85use strum::{EnumCount, EnumIter};
86use tracing::warn;
87use typed_index_collections::{TiSlice, TiVec};
88
89#[cfg(feature = "hs-common")]
90use {
91 itertools::Itertools,
92 std::collections::HashSet,
93 tor_error::{Bug, internal},
94 tor_hscrypto::{pk::HsBlindId, time::TimePeriod},
95};
96
97pub use err::Error;
98pub use weight::WeightRole;
99/// A Result using the Error type from the tor-netdir crate
100pub type Result<T> = std::result::Result<T, Error>;
101
102#[cfg(feature = "hs-common")]
103pub use err::OnionDirLookupError;
104
105use params::NetParameters;
106#[cfg(feature = "geoip")]
107use tor_geoip::{CountryCode, GeoipDb, HasCountryCode};
108
109#[cfg(feature = "hs-common")]
110pub use hsdir_params::HsDirParams;
111
/// Index into the consensus relays
///
/// This is an index into the list of relays returned by
/// [`.c_relays()`](ConsensusRelays::c_relays)
/// (on the corresponding consensus or netdir).
///
/// The same indexing scheme is used for the per-relay side tables in
/// [`NetDir`] (such as the microdescriptor list `mds`).
///
/// This is just a `usize` inside, but using a newtype prevents getting a relay index
/// confused with other kinds of slice indices or counts.
///
/// If you are in a part of the code which needs to work with multiple consensuses,
/// the typechecking cannot tell if you try to index into the wrong consensus.
#[derive(Debug, From, Into, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub(crate) struct RouterStatusIdx(usize);
125
/// Extension trait to provide index-type-safe `.c_relays()` method
//
// TODO: Really it would be better to have MdConsensus::relays() return TiSlice,
// but that would be an API break there.
pub(crate) trait ConsensusRelays {
    /// Obtain the list of relays in the consensus
    ///
    /// The returned slice is indexed by [`RouterStatusIdx`].
    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus>;
}
135impl ConsensusRelays for MdConsensus {
136 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
137 TiSlice::from_ref(MdConsensus::relays(self))
138 }
139}
140impl ConsensusRelays for NetDir {
141 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
142 self.consensus.c_relays()
143 }
144}
145
/// Configuration for determining when two relays have addresses "too close" in
/// the network.
///
/// Used by `Relay::low_level_details().in_same_subnet()`.
#[derive(Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
#[serde(deny_unknown_fields)]
pub struct SubnetConfig {
    /// Consider IPv4 nodes in the same /x to be the same family.
    ///
    /// If this value is 0, all nodes with IPv4 addresses will be in the
    /// same family. If this value is above 32, then no nodes will be
    /// placed in the same family based on their IPv4 addresses.
    subnets_family_v4: u8,
    /// Consider IPv6 nodes in the same /x to be the same family.
    ///
    /// If this value is 0, all nodes with IPv6 addresses will be in the
    /// same family. If this value is above 128, then no nodes will be
    /// placed in the same family based on their IPv6 addresses.
    subnets_family_v6: u8,
}
166
167impl Default for SubnetConfig {
168 fn default() -> Self {
169 Self::new(16, 32)
170 }
171}
172
173impl SubnetConfig {
174 /// Construct a new SubnetConfig from a pair of bit prefix lengths.
175 ///
176 /// The values are clamped to the appropriate ranges if they are
177 /// out-of-bounds.
178 pub fn new(subnets_family_v4: u8, subnets_family_v6: u8) -> Self {
179 Self {
180 subnets_family_v4,
181 subnets_family_v6,
182 }
183 }
184
185 /// Construct a new SubnetConfig such that addresses are not in the same
186 /// family with anything--not even with themselves.
187 pub fn no_addresses_match() -> SubnetConfig {
188 SubnetConfig {
189 subnets_family_v4: 33,
190 subnets_family_v6: 129,
191 }
192 }
193
194 /// Return true if the two addresses in the same subnet, according to this
195 /// configuration.
196 pub fn addrs_in_same_subnet(&self, a: &IpAddr, b: &IpAddr) -> bool {
197 match (a, b) {
198 (IpAddr::V4(a), IpAddr::V4(b)) => {
199 let bits = self.subnets_family_v4;
200 if bits > 32 {
201 return false;
202 }
203 let a = u32::from_be_bytes(a.octets());
204 let b = u32::from_be_bytes(b.octets());
205 (a >> (32 - bits)) == (b >> (32 - bits))
206 }
207 (IpAddr::V6(a), IpAddr::V6(b)) => {
208 let bits = self.subnets_family_v6;
209 if bits > 128 {
210 return false;
211 }
212 let a = u128::from_be_bytes(a.octets());
213 let b = u128::from_be_bytes(b.octets());
214 (a >> (128 - bits)) == (b >> (128 - bits))
215 }
216 _ => false,
217 }
218 }
219
220 /// Return true if any of the addresses in `a` shares a subnet with any of
221 /// the addresses in `b`, according to this configuration.
222 pub fn any_addrs_in_same_subnet<T, U>(&self, a: &T, b: &U) -> bool
223 where
224 T: tor_linkspec::HasAddrs,
225 U: tor_linkspec::HasAddrs,
226 {
227 a.addrs().any(|aa| {
228 b.addrs()
229 .any(|bb| self.addrs_in_same_subnet(&aa.ip(), &bb.ip()))
230 })
231 }
232
233 /// Return a new subnet configuration that is the union of `self` and
234 /// `other`.
235 ///
236 /// That is, return a subnet configuration that puts all addresses in the
237 /// same subnet if and only if at least one of `self` and `other` would put
238 /// them in the same subnet.
239 pub fn union(&self, other: &Self) -> Self {
240 use std::cmp::min;
241 Self {
242 subnets_family_v4: min(self.subnets_family_v4, other.subnets_family_v4),
243 subnets_family_v6: min(self.subnets_family_v6, other.subnets_family_v6),
244 }
245 }
246}
247
/// Configuration for which listed family information to use when deciding
/// whether relays belong to the same family.
///
/// Derived from network parameters
/// (see the `From<&NetParameters>` impl below).
#[derive(Clone, Copy, Debug)]
pub struct FamilyRules {
    /// If true, we use family information from lists of family members.
    use_family_lists: bool,
    /// If true, we use family information from lists of family IDs and from family certs.
    use_family_ids: bool,
}
259
260impl<'a> From<&'a NetParameters> for FamilyRules {
261 fn from(params: &'a NetParameters) -> Self {
262 FamilyRules {
263 use_family_lists: bool::from(params.use_family_lists),
264 use_family_ids: bool::from(params.use_family_ids),
265 }
266 }
267}
268
269impl FamilyRules {
270 /// Return a `FamilyRules` that will use all recognized kinds of family information.
271 pub fn all_family_info() -> Self {
272 Self {
273 use_family_lists: true,
274 use_family_ids: true,
275 }
276 }
277
278 /// Return a `FamilyRules` that will ignore all family information declared by relays.
279 pub fn ignore_declared_families() -> Self {
280 Self {
281 use_family_lists: false,
282 use_family_ids: false,
283 }
284 }
285
286 /// Configure this `FamilyRules` to use (or not use) family information from
287 /// lists of family members.
288 pub fn use_family_lists(&mut self, val: bool) -> &mut Self {
289 self.use_family_lists = val;
290 self
291 }
292
293 /// Configure this `FamilyRules` to use (or not use) family information from
294 /// family IDs and family certs.
295 pub fn use_family_ids(&mut self, val: bool) -> &mut Self {
296 self.use_family_ids = val;
297 self
298 }
299
300 /// Return a `FamilyRules` that will look at every source of information
301 /// requested by `self` or by `other`.
302 pub fn union(&self, other: &Self) -> Self {
303 Self {
304 use_family_lists: self.use_family_lists || other.use_family_lists,
305 use_family_ids: self.use_family_ids || other.use_family_ids,
306 }
307 }
308}
309
/// An opaque type representing the weight with which a relay or set of
/// relays will be selected for a given role.
///
/// Most users should ignore this type, and just use pick_relay instead.
///
/// (Internally this is a unitless `u64`; weights are only meaningful
/// relative to one another, which is why they are comparable and summable
/// but expose no absolute value.)
#[derive(
    Copy,
    Clone,
    Debug,
    derive_more::Add,
    derive_more::Sum,
    derive_more::AddAssign,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
)]
pub struct RelayWeight(u64);
327
328impl RelayWeight {
329 /// Try to divide this weight by `rhs`.
330 ///
331 /// Return a ratio on success, or None on division-by-zero.
332 pub fn checked_div(&self, rhs: RelayWeight) -> Option<f64> {
333 if rhs.0 == 0 {
334 None
335 } else {
336 Some((self.0 as f64) / (rhs.0 as f64))
337 }
338 }
339
340 /// Compute a ratio `frac` of this weight.
341 ///
342 /// Return None if frac is less than zero, since negative weights
343 /// are impossible.
344 pub fn ratio(&self, frac: f64) -> Option<RelayWeight> {
345 let product = (self.0 as f64) * frac;
346 if product >= 0.0 && product.is_finite() {
347 Some(RelayWeight(product as u64))
348 } else {
349 None
350 }
351 }
352}
353
354impl From<u64> for RelayWeight {
355 fn from(val: u64) -> Self {
356 RelayWeight(val)
357 }
358}
359
/// An operation for which we might be requesting a hidden service directory.
#[derive(Copy, Clone, Debug, PartialEq)]
// TODO: make this pub(crate) once NetDir::hs_dirs is removed
#[non_exhaustive]
pub enum HsDirOp {
    /// Uploading an onion service descriptor.
    ///
    /// (Only present when the `hs-service` feature is enabled.)
    #[cfg(feature = "hs-service")]
    Upload,
    /// Downloading an onion service descriptor.
    Download,
}
371
/// A view of the Tor directory, suitable for use in building circuits.
///
/// Abstractly, a [`NetDir`] is a set of usable public [`Relay`]s, each of which
/// has its own properties, identity, and correct weighted probability for use
/// under different circumstances.
///
/// A [`NetDir`] is constructed by making a [`PartialNetDir`] from a consensus
/// document, and then adding enough microdescriptors to that `PartialNetDir` so
/// that it can be used to build paths. (Thus, if you have a NetDir, it is
/// definitely adequate to build paths.)
///
/// # "Usable" relays
///
/// Many methods on NetDir are defined in terms of <a name="usable">"Usable"</a> relays. Unless
/// otherwise stated, a relay is "usable" if it is listed in the consensus,
/// if we have full directory information for that relay (including a
/// microdescriptor), and if that relay does not have any flags indicating that
/// we should never use it. (Currently, `NoEdConsensus` is the only such flag.)
///
/// # Limitations
///
/// The current NetDir implementation assumes fairly strongly that every relay
/// has an Ed25519 identity and an RSA identity, that the consensus is indexed
/// by RSA identities, and that the Ed25519 identities are stored in
/// microdescriptors.
///
/// If these assumptions someday change, then we'll have to revise the
/// implementation.
#[derive(Debug, Clone)]
pub struct NetDir {
    /// A microdescriptor consensus that lists the members of the network,
    /// and maps each one to a 'microdescriptor' that has more information
    /// about it
    consensus: Arc<MdConsensus>,
    /// A map from keys to integer values, distributed in the consensus,
    /// and clamped to certain defaults.
    params: NetParameters,
    /// Map from routerstatus index, to that routerstatus's microdescriptor (if we have one.)
    ///
    /// (This has the same length, and uses the same indices, as the relay
    /// list in `consensus`.)
    mds: TiVec<RouterStatusIdx, Option<Arc<Microdesc>>>,
    /// Map from SHA256 of _missing_ microdescriptors to the index of their
    /// corresponding routerstatus.
    rsidx_by_missing: HashMap<MdDigest, RouterStatusIdx>,
    /// Map from ed25519 identity to index of the routerstatus.
    ///
    /// Note that we don't know the ed25519 identity of a relay until
    /// we get the microdescriptor for it, so this won't be filled in
    /// until we get the microdescriptors.
    ///
    /// # Implementation note
    ///
    /// For this field, and for `rsidx_by_rsa`,
    /// it might be cool to have references instead.
    /// But that would make this into a self-referential structure,
    /// which isn't possible in safe rust.
    rsidx_by_ed: HashMap<Ed25519Identity, RouterStatusIdx>,
    /// Map from RSA identity to index of the routerstatus.
    ///
    /// This is constructed at the same time as the NetDir object, so it
    /// can be immutable.
    rsidx_by_rsa: Arc<HashMap<RsaIdentity, RouterStatusIdx>>,

    /// Hash ring(s) describing the onion service directory.
    ///
    /// This is empty in a PartialNetDir, and is filled in before the NetDir is
    /// built.
    //
    // TODO hs: It is ugly to have this exist in a partially constructed state
    // in a PartialNetDir.
    // Ideally, a PartialNetDir would contain only an HsDirs<HsDirParams>,
    // or perhaps nothing at all, here.
    #[cfg(feature = "hs-common")]
    hsdir_rings: Arc<HsDirs<HsDirRing>>,

    /// Weight values to apply to a given relay when deciding how frequently
    /// to choose it for a given role.
    weights: weight::WeightSet,

    #[cfg(feature = "geoip")]
    /// Country codes for each router in our consensus.
    ///
    /// This is indexed by the `RouterStatusIdx` (i.e. a router idx of zero has
    /// the country code at position zero in this array).
    country_codes: Vec<Option<CountryCode>>,
}
456
/// Collection of hidden service directories (or parameters for them)
///
/// In [`NetDir`] this is used to store the actual hash rings.
/// (But, in a NetDir in a [`PartialNetDir`], it contains [`HsDirRing`]s
/// where only the `params` are populated, and the `ring` is empty.)
///
/// This same generic type is used as the return type from
/// [`HsDirParams::compute`](HsDirParams::compute),
/// where it contains the *parameters* for the primary and secondary rings.
#[derive(Debug, Clone)]
#[cfg(feature = "hs-common")]
pub(crate) struct HsDirs<D> {
    /// The current ring
    ///
    /// It corresponds to the time period containing the `valid-after` time in
    /// the consensus. Its SRV is whatever SRV was most current at the time when
    /// that time period began.
    ///
    /// This is the hash ring that we should use whenever we are fetching an
    /// onion service descriptor.
    current: D,

    /// Secondary rings (based on the parameters for the previous and next time periods)
    ///
    /// Onion services upload to positions on these rings as well, based on how
    /// far into the current time period this directory is, so that
    /// not-synchronized clients can still find their descriptor.
    ///
    /// Note that with the current (2023) network parameters, with
    /// `hsdir_interval = SRV lifetime = 24 hours` at most one of these
    /// secondary rings will be active at a time. We have two here in order
    /// to conform with a more flexible regime in proposal 342.
    //
    // TODO: hs clients never need this; so I've made it not-present for them.
    // But does that risk too much with respect to side channels?
    //
    // TODO: Perhaps we should refactor this so that it is clear that these
    // are immutable? On the other hand, the documentation for this type
    // declares that it is immutable, so we are likely okay.
    //
    // TODO: this `Vec` is only ever 0,1,2 elements.
    // Maybe it should be an ArrayVec or something.
    #[cfg(feature = "hs-service")]
    secondary: Vec<D>,
}
502
#[cfg(feature = "hs-common")]
impl<D> HsDirs<D> {
    /// Convert an `HsDirs<D>` to `HsDirs<D2>` by mapping each contained `D`
    pub(crate) fn map<D2>(self, mut f: impl FnMut(D) -> D2) -> HsDirs<D2> {
        HsDirs {
            current: f(self.current),
            #[cfg(feature = "hs-service")]
            secondary: self.secondary.into_iter().map(f).collect(),
        }
    }

    /// Iterate over some of the contained hsdirs, according to `secondary`
    ///
    /// The current ring is always included.
    /// Secondary rings are included iff `secondary` and the `hs-service` feature is enabled.
    fn iter_filter_secondary(&self, secondary: bool) -> impl Iterator<Item = &D> {
        // Start with the current ring, which is always yielded.
        let i = iter::once(&self.current);

        // With "hs-service" disabled, there are no secondary rings,
        // so we don't care.
        // (The `let _` suppresses an unused-variable warning in that case.)
        let _ = secondary;

        // Rebind `i` to a chained iterator that appends the secondary rings
        // when they were requested.
        #[cfg(feature = "hs-service")]
        let i = chain!(i, self.secondary.iter().filter(move |_| secondary));

        i
    }

    /// Iterate over all the contained hsdirs
    pub(crate) fn iter(&self) -> impl Iterator<Item = &D> {
        self.iter_filter_secondary(true)
    }

    /// Iterate over the hsdirs relevant for `op`
    ///
    /// Uploads consult the secondary rings as well; downloads use only the
    /// current ring.
    pub(crate) fn iter_for_op(&self, op: HsDirOp) -> impl Iterator<Item = &D> {
        self.iter_filter_secondary(match op {
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => true,
            HsDirOp::Download => false,
        })
    }
}
545
/// An event that a [`NetDirProvider`] can broadcast to indicate a change in
/// the status of its directory.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, EnumIter, EnumCount, IntoPrimitive, TryFromPrimitive,
)]
#[non_exhaustive]
#[repr(u16)]
pub enum DirEvent {
    /// A new consensus has been received, and has enough information to be
    /// used.
    ///
    /// This event is also broadcast when a new set of consensus parameters is
    /// available, even if that set of parameters comes from a configuration
    /// change rather than from the latest consensus.
    NewConsensus,

    /// New descriptors have been received for the current consensus.
    ///
    /// (This event is _not_ broadcast when receiving new descriptors for a
    /// consensus which is not yet ready to replace the current consensus.)
    NewDescriptors,

    /// We have received updated recommendations and requirements
    /// for which subprotocols we should have to use the network.
    NewProtocolRecommendation,
}
572
/// The network directory provider is shutting down without giving us the
/// netdir we asked for.
///
/// Returned by [`NetDirProvider::wait_for_netdir`] and
/// [`NetDirProvider::wait_for_netdir_to_list`] when the provider's event
/// stream closes.
#[derive(Clone, Copy, Debug, thiserror::Error)]
#[error("Network directory provider is shutting down")]
#[non_exhaustive]
pub struct NetdirProviderShutdown;
579
impl tor_error::HasKind for NetdirProviderShutdown {
    fn kind(&self) -> tor_error::ErrorKind {
        // Classify this error as part of an orderly Arti shutdown.
        tor_error::ErrorKind::ArtiShuttingDown
    }
}
585
/// How "timely" must a network directory be?
///
/// This enum is used as an argument when requesting a [`NetDir`] object from
/// [`NetDirProvider`] and other APIs, to specify how recent the information
/// must be in order to be useful.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[allow(clippy::exhaustive_enums)]
pub enum Timeliness {
    /// The network directory must be strictly timely.
    ///
    /// That is, it must be based on a consensus that is valid right now, with no
    /// tolerance for skew or consensus problems.
    ///
    /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
    Strict,
    /// The network directory must be roughly timely.
    ///
    /// That is, it must be based on a consensus that is not _too_ far in the
    /// future, and not _too_ far in the past.
    ///
    /// (The tolerances for "too far" will depend on configuration.)
    ///
    /// This is almost always the option that you want to use.
    Timely,
    /// Any network directory is permissible, regardless of how untimely.
    ///
    /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
    Unchecked,
}
615
/// An object that can provide [`NetDir`]s, as well as inform consumers when
/// they might have changed.
///
/// It is the responsibility of the implementor of `NetDirProvider`
/// to try to obtain an up-to-date `NetDir`,
/// and continuously to maintain and update it.
///
/// In usual configurations, Arti uses `tor_dirmgr::DirMgr`
/// as its `NetDirProvider`.
#[async_trait]
pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
    /// Return a network directory that's live according to the provided
    /// `timeliness`.
    fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>>;

    /// Return a reasonable netdir for general usage.
    ///
    /// This is an alias for
    /// [`NetDirProvider::netdir`]`(`[`Timeliness::Timely`]`)`.
    fn timely_netdir(&self) -> Result<Arc<NetDir>> {
        self.netdir(Timeliness::Timely)
    }

    /// Return a new asynchronous stream that will receive notification
    /// whenever the consensus has changed.
    ///
    /// Multiple events may be batched up into a single item: each time
    /// this stream yields an event, all you can assume is that the event has
    /// occurred at least once.
    fn events(&self) -> BoxStream<'static, DirEvent>;

    /// Return the latest network parameters.
    ///
    /// If we have no directory, return a reasonable set of defaults.
    fn params(&self) -> Arc<dyn AsRef<NetParameters>>;

    /// Get a NetDir from `provider`, waiting until one exists.
    ///
    /// Returns [`NetdirProviderShutdown`] if the provider's event stream
    /// closes before a netdir becomes available.
    async fn wait_for_netdir(
        &self,
        timeliness: Timeliness,
    ) -> std::result::Result<Arc<NetDir>, NetdirProviderShutdown> {
        // Fast path: a suitable netdir may already exist.
        if let Ok(nd) = self.netdir(timeliness) {
            return Ok(nd);
        }

        let mut stream = self.events();
        loop {
            // We need to retry `self.netdir()` before waiting for any stream events, to
            // avoid deadlock.
            //
            // We ignore all errors here: they can all potentially be fixed by
            // getting a fresh consensus, and they will all get warned about
            // by the NetDirProvider itself.
            if let Ok(nd) = self.netdir(timeliness) {
                return Ok(nd);
            }
            match stream.next().await {
                Some(_) => {}
                None => {
                    // Event stream closed: the provider is shutting down.
                    return Err(NetdirProviderShutdown);
                }
            }
        }
    }

    /// Wait until `provider` lists `target`.
    ///
    /// NOTE: This might potentially wait indefinitely, if `target` never actually
    /// becomes listed in the directory. It will exit if the `NetDirProvider` shuts down.
    async fn wait_for_netdir_to_list(
        &self,
        target: &tor_linkspec::RelayIds,
        timeliness: Timeliness,
    ) -> std::result::Result<(), NetdirProviderShutdown> {
        let mut events = self.events();
        loop {
            // See if the desired relay is in the netdir.
            //
            // We do this before waiting for any events, to avoid race conditions.
            {
                let netdir = self.wait_for_netdir(timeliness).await?;
                if netdir.ids_listed(target) == Some(true) {
                    return Ok(());
                }
                // If we reach this point, then ids_listed returned `Some(false)`,
                // meaning "This relay is definitely not in the current directory",
                // or it returned `None`, meaning "waiting for more information
                // about this network directory".
                // In both cases, it's reasonable to just wait for another netdir
                // event and try again.
            }
            // We didn't find the relay; wait for the provider to have a new netdir
            // or more netdir information.
            if events.next().await.is_none() {
                // The event stream is closed; the provider has shut down.
                return Err(NetdirProviderShutdown);
            }
        }
    }

    /// Return the latest set of recommended and required protocols, if there is one.
    ///
    /// This may be more recent (or more available) than this provider's associated NetDir.
    fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)>;
}
721
/// Forwarding impl: an `Arc<T>` is itself a `NetDirProvider` whenever `T` is,
/// delegating every required method to the inner provider.
///
/// (The async helper methods are not overridden here; their default bodies
/// call the forwarded methods below, so they behave identically.)
impl<T> NetDirProvider for Arc<T>
where
    T: NetDirProvider,
{
    fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>> {
        self.deref().netdir(timeliness)
    }

    fn timely_netdir(&self) -> Result<Arc<NetDir>> {
        self.deref().timely_netdir()
    }

    fn events(&self) -> BoxStream<'static, DirEvent> {
        self.deref().events()
    }

    fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
        self.deref().params()
    }

    fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)> {
        self.deref().protocol_statuses()
    }
}
746
/// Helper trait: allows any `Arc<X>` to be upcast to a `Arc<dyn
/// NetDirProvider>` if X is an implementation or supertrait of NetDirProvider.
///
/// This trait exists to work around a limitation in rust: when trait upcasting
/// coercion is stable, this will be unnecessary.
///
/// The Rust tracking issue is <https://github.com/rust-lang/rust/issues/65991>.
//
// NOTE(review): dyn-trait upcasting has since been stabilized upstream; once
// this crate's MSRV permits it, this helper can likely be retired — verify.
pub trait UpcastArcNetDirProvider {
    /// Return a view of this object as an `Arc<dyn NetDirProvider>`
    fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
    where
        Self: 'a;
}
760
impl<T> UpcastArcNetDirProvider for T
where
    T: NetDirProvider + Sized,
{
    fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
    where
        Self: 'a,
    {
        // An `Arc<T>` coerces directly to `Arc<dyn NetDirProvider>` here,
        // since `T: NetDirProvider` and the concrete type is `Sized`.
        self
    }
}
772
impl AsRef<NetParameters> for NetDir {
    fn as_ref(&self) -> &NetParameters {
        // Delegates to `NetDir::params()`.
        self.params()
    }
}
778
/// A partially built NetDir -- it can't be unwrapped until it has
/// enough information to build safe paths.
#[derive(Debug, Clone)]
pub struct PartialNetDir {
    /// The netdir that's under construction.
    netdir: NetDir,

    /// The previous netdir, if we had one
    ///
    /// Used as a cache, so we can reuse information
    #[cfg(feature = "hs-common")]
    prev_netdir: Option<Arc<NetDir>>,
}
792
/// A view of a relay on the Tor network, suitable for building circuits.
// TODO: This should probably be a more specific struct, with a trait
// that implements it.
#[derive(Clone)]
pub struct Relay<'a> {
    /// The consensus routerstatus entry for this relay.
    rs: &'a netstatus::MdRouterStatus,
    /// A microdescriptor for this relay.
    md: &'a Microdesc,
    /// The country code this relay is in, if we know one.
    #[cfg(feature = "geoip")]
    cc: Option<CountryCode>,
}
806
/// A relay that we haven't checked for validity or usability in
/// routing.
#[derive(Debug)]
pub struct UncheckedRelay<'a> {
    /// The consensus routerstatus entry for this relay.
    rs: &'a netstatus::MdRouterStatus,
    /// A microdescriptor for this relay, if there is one.
    md: Option<&'a Microdesc>,
    /// The country code this relay is in, if we know one.
    #[cfg(feature = "geoip")]
    cc: Option<CountryCode>,
}
819
/// A partial or full network directory that we can download
/// microdescriptors for.
pub trait MdReceiver {
    /// Return an iterator over the digests for all of the microdescriptors
    /// that this netdir is missing.
    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_>;
    /// Add a microdescriptor to this netdir, if it was wanted.
    ///
    /// Return true if it was indeed wanted.
    /// (A microdescriptor is "wanted" if its digest is among those reported
    /// by [`missing_microdescs`](MdReceiver::missing_microdescs).)
    fn add_microdesc(&mut self, md: Microdesc) -> bool;
    /// Return the number of missing microdescriptors.
    fn n_missing(&self) -> usize;
}
833
834impl PartialNetDir {
    /// Create a new PartialNetDir with a given consensus, and no
    /// microdescriptors loaded.
    ///
    /// If `replacement_params` is provided, override network parameters from
    /// the consensus with those from `replacement_params`.
    pub fn new(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
    ) -> Self {
        // With the "geoip" feature enabled, `new_inner` takes an extra
        // argument; pass `None` so that no country codes are looked up.
        Self::new_inner(
            consensus,
            replacement_params,
            #[cfg(feature = "geoip")]
            None,
        )
    }
851
    /// Create a new PartialNetDir with GeoIP support.
    ///
    /// This does the same thing as `new()`, except the provided GeoIP database is used to add
    /// country codes to relays.
    #[cfg(feature = "geoip")]
    pub fn new_with_geoip(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
        geoip_db: &GeoipDb,
    ) -> Self {
        Self::new_inner(consensus, replacement_params, Some(geoip_db))
    }
864
    /// Implementation of the `new()` functions.
    ///
    /// Builds the skeleton `NetDir` (consensus, parameters, weights, and the
    /// lookup tables) with no microdescriptors yet loaded, and wraps it in a
    /// `PartialNetDir`.
    fn new_inner(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
        #[cfg(feature = "geoip")] geoip_db: Option<&GeoipDb>,
    ) -> Self {
        let mut params = NetParameters::default();

        // (We ignore unrecognized options here, since they come from
        // the consensus, and we don't expect to recognize everything
        // there.)
        let _ = params.saturating_update(consensus.params().iter());

        // Now see if the user has any parameters to override.
        // (We have to do this now, or else changes won't be reflected in our
        // weights.)
        if let Some(replacement) = replacement_params {
            for u in params.saturating_update(replacement.iter()) {
                warn!("Unrecognized option: override_net_params.{}", u);
            }
        }

        // Compute the weights we'll want to use for these relays.
        let weights = weight::WeightSet::from_consensus(&consensus, &params);

        let n_relays = consensus.c_relays().len();

        // Initially, *every* microdescriptor is missing: map each relay's
        // md digest back to its routerstatus index.
        let rsidx_by_missing = consensus
            .c_relays()
            .iter_enumerated()
            .map(|(rsidx, rs)| (*rs.md_digest(), rsidx))
            .collect();

        // The RSA identities all come from the consensus itself, so this
        // table can be built (and frozen) right away.
        let rsidx_by_rsa = consensus
            .c_relays()
            .iter_enumerated()
            .map(|(rsidx, rs)| (*rs.rsa_identity(), rsidx))
            .collect();

        // Look up a country code for each relay, in routerstatus-index order.
        #[cfg(feature = "geoip")]
        let country_codes = if let Some(db) = geoip_db {
            consensus
                .c_relays()
                .iter()
                .map(|rs| {
                    db.lookup_country_code_multi(rs.addrs().map(|x| x.ip()))
                        .cloned()
                })
                .collect()
        } else {
            Default::default()
        };

        #[cfg(feature = "hs-common")]
        let hsdir_rings = Arc::new({
            let params = HsDirParams::compute(&consensus, &params).expect("Invalid consensus!");
            // TODO: It's a bit ugly to use expect above, but this function does
            // not return a Result. On the other hand, the error conditions under which
            // HsDirParams::compute can return Err are _very_ narrow and hard to
            // hit; see documentation in that function. As such, we probably
            // don't need to have this return a Result.

            params.map(HsDirRing::empty_from_params)
        });

        let netdir = NetDir {
            consensus: Arc::new(consensus),
            params,
            // One (empty) microdescriptor slot per relay.
            mds: vec![None; n_relays].into(),
            rsidx_by_missing,
            rsidx_by_rsa: Arc::new(rsidx_by_rsa),
            // Ed25519 identities only become known as microdescriptors arrive.
            rsidx_by_ed: HashMap::with_capacity(n_relays),
            #[cfg(feature = "hs-common")]
            hsdir_rings,
            weights,
            #[cfg(feature = "geoip")]
            country_codes,
        };

        PartialNetDir {
            netdir,
            #[cfg(feature = "hs-common")]
            prev_netdir: None,
        }
    }
950
    /// Return the declared lifetime of this PartialNetDir.
    ///
    /// (This is the lifetime declared by the underlying consensus.)
    pub fn lifetime(&self) -> &netstatus::Lifetime {
        self.netdir.lifetime()
    }
955
956 /// Record a previous netdir, which can be used for reusing cached information
957 //
958 // Fills in as many missing microdescriptors as possible in this
959 // netdir, using the microdescriptors from the previous netdir.
960 //
961 // With HS enabled, stores the netdir for reuse of relay hash ring index values.
962 #[allow(clippy::needless_pass_by_value)] // prev might, or might not, be stored
963 pub fn fill_from_previous_netdir(&mut self, prev: Arc<NetDir>) {
964 for md in prev.mds.iter().flatten() {
965 self.netdir.add_arc_microdesc(md.clone());
966 }
967
968 #[cfg(feature = "hs-common")]
969 {
970 self.prev_netdir = Some(prev);
971 }
972 }
973
974 /// Compute the hash ring(s) for this NetDir
975 #[cfg(feature = "hs-common")]
976 fn compute_rings(&mut self) {
977 let params = HsDirParams::compute(&self.netdir.consensus, &self.netdir.params)
978 .expect("Invalid consensus");
979 // TODO: see TODO by similar expect in new()
980
981 self.netdir.hsdir_rings =
982 Arc::new(params.map(|params| {
983 HsDirRing::compute(params, &self.netdir, self.prev_netdir.as_deref())
984 }));
985 }
986
    /// Return true if there is enough information in this directory
    /// to build multihop paths.
    pub fn have_enough_paths(&self) -> bool {
        self.netdir.have_enough_paths()
    }
992 /// If this directory has enough information to build multihop
993 /// circuits, return it.
994 pub fn unwrap_if_sufficient(
995 #[allow(unused_mut)] mut self,
996 ) -> std::result::Result<NetDir, PartialNetDir> {
997 if self.netdir.have_enough_paths() {
998 #[cfg(feature = "hs-common")]
999 self.compute_rings();
1000 Ok(self.netdir)
1001 } else {
1002 Err(self)
1003 }
1004 }
1005}
1006
impl MdReceiver for PartialNetDir {
    // All three methods delegate to the inner (under-construction) NetDir.
    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
        self.netdir.missing_microdescs()
    }
    fn add_microdesc(&mut self, md: Microdesc) -> bool {
        self.netdir.add_microdesc(md)
    }
    fn n_missing(&self) -> usize {
        self.netdir.n_missing()
    }
}
1018
1019impl NetDir {
    /// Return the declared lifetime of this NetDir.
    pub fn lifetime(&self) -> &netstatus::Lifetime {
        // The lifetime comes straight from the consensus document.
        self.consensus.lifetime()
    }
1024
1025 /// Add `md` to this NetDir.
1026 ///
1027 /// Return true if we wanted it, and false otherwise.
1028 fn add_arc_microdesc(&mut self, md: Arc<Microdesc>) -> bool {
1029 if let Some(rsidx) = self.rsidx_by_missing.remove(md.digest()) {
1030 assert_eq!(self.c_relays()[rsidx].md_digest(), md.digest());
1031
1032 // There should never be two approved MDs in the same
1033 // consensus listing the same ID... but if there is,
1034 // we'll let the most recent one win.
1035 self.rsidx_by_ed.insert(*md.ed25519_id(), rsidx);
1036
1037 // Happy path: we did indeed want this one.
1038 self.mds[rsidx] = Some(md);
1039
1040 // Save some space in the missing-descriptor list.
1041 if self.rsidx_by_missing.len() < self.rsidx_by_missing.capacity() / 4 {
1042 self.rsidx_by_missing.shrink_to_fit();
1043 }
1044
1045 return true;
1046 }
1047
1048 // Either we already had it, or we never wanted it at all.
1049 false
1050 }
1051
    /// Construct a (possibly invalid) Relay object from a routerstatus and its
    /// index within the consensus.
    ///
    /// The caller must pass the correct `rsidx` for `rs`; in debug builds we
    /// check that the routerstatus at that index really is `rs`.
    fn relay_from_rs_and_rsidx<'a>(
        &'a self,
        rs: &'a netstatus::MdRouterStatus,
        rsidx: RouterStatusIdx,
    ) -> UncheckedRelay<'a> {
        debug_assert_eq!(self.c_relays()[rsidx].rsa_identity(), rs.rsa_identity());
        let md = self.mds[rsidx].as_deref();
        if let Some(md) = md {
            // Any stored microdescriptor must be the one this routerstatus
            // asked for.
            debug_assert_eq!(rs.md_digest(), md.digest());
        }

        UncheckedRelay {
            rs,
            md,
            #[cfg(feature = "geoip")]
            cc: self.country_codes.get(rsidx.0).copied().flatten(),
        }
    }
1072
1073 /// Return the value of the hsdir_n_replicas param.
1074 #[cfg(feature = "hs-common")]
1075 fn n_replicas(&self) -> u8 {
1076 self.params
1077 .hsdir_n_replicas
1078 .get()
1079 .try_into()
1080 .expect("BoundedInt did not enforce bounds")
1081 }
1082
1083 /// Return the spread parameter for the specified `op`.
1084 #[cfg(feature = "hs-common")]
1085 fn spread(&self, op: HsDirOp) -> usize {
1086 let spread = match op {
1087 HsDirOp::Download => self.params.hsdir_spread_fetch,
1088 #[cfg(feature = "hs-service")]
1089 HsDirOp::Upload => self.params.hsdir_spread_store,
1090 };
1091
1092 spread
1093 .get()
1094 .try_into()
1095 .expect("BoundedInt did not enforce bounds!")
1096 }
1097
    /// Select `spread` hsdir relays for the specified `hsid` from a given `ring`.
    ///
    /// Algorithm:
    ///
    /// for idx in 1..=n_replicas:
    ///       - let H = hsdir_ring::onion_service_index(id, replica, rand,
    ///         period).
    ///       - Find the position of H within hsdir_ring.
    ///       - Take elements from hsdir_ring starting at that position,
    ///         adding them to Dirs until we have added `spread` new elements
    ///         that were not there before.
    #[cfg(feature = "hs-common")]
    fn select_hsdirs<'h, 'r: 'h>(
        &'r self,
        hsid: HsBlindId,
        ring: &'h HsDirRing,
        spread: usize,
    ) -> impl Iterator<Item = Relay<'r>> + 'h {
        let n_replicas = self.n_replicas();

        (1..=n_replicas) // 1-indexed !
            .flat_map({
                // Ring positions already chosen for a lower-numbered replica;
                // this set is captured by (and persists across calls of) the
                // closure below.
                let mut selected_nodes = HashSet::new();

                move |replica: u8| {
                    let hsdir_idx = hsdir_ring::service_hsdir_index(&hsid, replica, ring.params());

                    ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
                        // According to rend-spec 2.2.3:
                        //                    ... If any of those
                        // nodes have already been selected for a lower-numbered replica of the
                        // service, any nodes already chosen are disregarded (i.e. skipped over)
                        // when choosing a replica's hsdir_spread_store nodes.
                        //
                        // (`insert` returns false for an already-present index;
                        // presumably ring_items_at skips entries for which this
                        // closure returns false — matching the spec text above.)
                        selected_nodes.insert(*hsdir_idx)
                    })
                    .collect::<Vec<_>>()
                }
            })
            .filter_map(move |(_hsdir_idx, rs_idx)| {
                // This ought not to be None but let's not panic or bail if it is
                self.relay_by_rs_idx(*rs_idx)
            })
    }
1141
1142 /// Replace the overridden parameters in this netdir with `new_replacement`.
1143 ///
1144 /// After this function is done, the netdir's parameters will be those in
1145 /// the consensus, overridden by settings from `new_replacement`. Any
1146 /// settings in the old replacement parameters will be discarded.
1147 pub fn replace_overridden_parameters(&mut self, new_replacement: &netstatus::NetParams<i32>) {
1148 // TODO(nickm): This is largely duplicate code from PartialNetDir::new().
1149 let mut new_params = NetParameters::default();
1150 let _ = new_params.saturating_update(self.consensus.params().iter());
1151 for u in new_params.saturating_update(new_replacement.iter()) {
1152 warn!("Unrecognized option: override_net_params.{}", u);
1153 }
1154
1155 self.params = new_params;
1156 }
1157
    /// Return an iterator over all Relay objects, including invalid ones
    /// that we can't use.
    pub fn all_relays(&self) -> impl Iterator<Item = UncheckedRelay<'_>> {
        // TODO: I'd like if we could memoize this so we don't have to
        // do so many hashtable lookups.
        self.c_relays()
            .iter_enumerated()
            .map(move |(rsidx, rs)| self.relay_from_rs_and_rsidx(rs, rsidx))
    }
    /// Return an iterator over all [usable](NetDir#usable) Relays.
    pub fn relays(&self) -> impl Iterator<Item = Relay<'_>> {
        // Keep only relays for which we have full information.
        self.all_relays().filter_map(UncheckedRelay::into_relay)
    }
1171
    /// Look up a relay's [`Microdesc`] by its [`RouterStatusIdx`]
    ///
    /// Returns `None` if the index is out of range, or if we don't (yet)
    /// have a microdescriptor for that relay.
    #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
    pub(crate) fn md_by_rsidx(&self, rsidx: RouterStatusIdx) -> Option<&Microdesc> {
        self.mds.get(rsidx)?.as_deref()
    }
1177
    /// Return a relay matching a given identity, if we have a
    /// _usable_ relay with that key.
    ///
    /// (Does not return [unusable](NetDir#usable) relays.)
    ///
    /// Note that a `None` answer is not always permanent: if a microdescriptor
    /// is subsequently added for a relay with this ID, the ID may become usable
    /// even if it was not usable before.
    pub fn by_id<'a, T>(&self, id: T) -> Option<Relay<'_>>
    where
        T: Into<RelayIdRef<'a>>,
    {
        let id = id.into();
        let answer = match id {
            RelayIdRef::Ed25519(ed25519) => {
                // Fast path: direct lookup in the Ed25519 index.
                let rsidx = *self.rsidx_by_ed.get(ed25519)?;
                let rs = self.c_relays().get(rsidx).expect("Corrupt index");

                self.relay_from_rs_and_rsidx(rs, rsidx).into_relay()?
            }
            // Fast path: direct lookup in the RSA index.
            RelayIdRef::Rsa(rsa) => self
                .by_rsa_id_unchecked(rsa)
                .and_then(UncheckedRelay::into_relay)?,
            // Slow path for any other identity type: linear scan.
            other_type => self.relays().find(|r| r.has_identity(other_type))?,
        };
        // Whatever path we took, the result must actually match the query.
        assert!(answer.has_identity(id));
        Some(answer)
    }
1207
    /// Obtain a `Relay` given a `RouterStatusIdx`
    ///
    /// Differs from `relay_from_rs_and_rsidx` as follows:
    /// * That function expects the caller to already have an `MdRouterStatus`;
    ///   it checks with `debug_assert` that the relay in the netdir matches.
    /// * That function panics if the `RouterStatusIdx` is invalid; this one returns `None`.
    /// * That function returns an `UncheckedRelay`; this one a `Relay`.
    ///
    /// `None` could be returned here, even with a valid `rsi`,
    /// if `rsi` refers to an [unusable](NetDir#usable) relay.
    #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
    pub(crate) fn relay_by_rs_idx(&self, rs_idx: RouterStatusIdx) -> Option<Relay<'_>> {
        // Checked lookups: an out-of-range index yields None rather than a panic.
        let rs = self.c_relays().get(rs_idx)?;
        let md = self.mds.get(rs_idx)?.as_deref();
        UncheckedRelay {
            rs,
            md,
            #[cfg(feature = "geoip")]
            cc: self.country_codes.get(rs_idx.0).copied().flatten(),
        }
        .into_relay()
    }
1230
1231 /// Return a relay with the same identities as those in `target`, if one
1232 /// exists.
1233 ///
1234 /// Does not return [unusable](NetDir#usable) relays.
1235 ///
1236 /// Note that a negative result from this method is not necessarily permanent:
1237 /// it may be the case that a relay exists,
1238 /// but we don't yet have enough information about it to know all of its IDs.
1239 /// To test whether a relay is *definitely* absent,
1240 /// use [`by_ids_detailed`](Self::by_ids_detailed)
1241 /// or [`ids_listed`](Self::ids_listed).
1242 ///
1243 /// # Limitations
1244 ///
1245 /// This will be very slow if `target` does not have an Ed25519 or RSA
1246 /// identity.
1247 pub fn by_ids<T>(&self, target: &T) -> Option<Relay<'_>>
1248 where
1249 T: HasRelayIds + ?Sized,
1250 {
1251 let mut identities = target.identities();
1252 // Don't try if there are no identities.
1253 let first_id = identities.next()?;
1254
1255 // Since there is at most one relay with each given ID type,
1256 // we only need to check the first relay we find.
1257 let candidate = self.by_id(first_id)?;
1258 if identities.all(|wanted_id| candidate.has_identity(wanted_id)) {
1259 Some(candidate)
1260 } else {
1261 None
1262 }
1263 }
1264
    /// Check whether there is a relay that has at least one identity from
    /// `target`, and which _could_ have every identity from `target`.
    /// If so, return such a relay.
    ///
    /// Return `Ok(None)` if we did not find a relay with any identity from `target`.
    ///
    /// Return `RelayLookupError::Impossible` if we found a relay with at least
    /// one identity from `target`, but that relay's other identities contradict
    /// what we learned from `target`.
    ///
    /// Does not return [unusable](NetDir#usable) relays.
    ///
    /// (This function is only useful if you need to distinguish the
    /// "impossible" case from the "no such relay known" case.)
    ///
    /// # Limitations
    ///
    /// This will be very slow if `target` does not have an Ed25519 or RSA
    /// identity.
    //
    // TODO HS: This function could use a better name.
    //
    // TODO: We could remove the feature restriction here once we think this API is
    // stable.
    #[cfg(feature = "hs-common")]
    pub fn by_ids_detailed<T>(
        &self,
        target: &T,
    ) -> std::result::Result<Option<Relay<'_>>, RelayLookupError>
    where
        T: HasRelayIds + ?Sized,
    {
        let candidate = target
            .identities()
            // Find all the relays that share any identity with this set of identities.
            .filter_map(|id| self.by_id(id))
            // We might find the same relay more than once under a different
            // identity, so we remove the duplicates.
            //
            // Since there is at most one relay per rsa identity per consensus,
            // this is a true uniqueness check under current construction rules.
            .unique_by(|r| r.rs.rsa_identity())
            // If we find two or more distinct relays, then we have a contradiction.
            .at_most_one()
            .map_err(|_| RelayLookupError::Impossible)?;

        // If we have no candidate, return None early.
        let candidate = match candidate {
            Some(relay) => relay,
            None => return Ok(None),
        };

        // Now we know we have a single candidate. Make sure that it does not have any
        // identity that does not match the target.
        if target
            .identities()
            .all(|wanted_id| match candidate.identity(wanted_id.id_type()) {
                None => true,
                Some(id) => id == wanted_id,
            })
        {
            Ok(Some(candidate))
        } else {
            Err(RelayLookupError::Impossible)
        }
    }
1331
1332 /// Return a boolean if this consensus definitely has (or does not have) a
1333 /// relay matching the listed identities.
1334 ///
1335 /// `Some(true)` indicates that the relay exists.
1336 /// `Some(false)` indicates that the relay definitely does not exist.
1337 /// `None` indicates that we can't yet tell whether such a relay exists,
1338 /// due to missing information.
1339 fn id_pair_listed(&self, ed_id: &Ed25519Identity, rsa_id: &RsaIdentity) -> Option<bool> {
1340 let r = self.by_rsa_id_unchecked(rsa_id);
1341 match r {
1342 Some(unchecked) => {
1343 if !unchecked.rs.ed25519_id_is_usable() {
1344 return Some(false);
1345 }
1346 // If md is present, then it's listed iff we have the right
1347 // ed id. Otherwise we don't know if it's listed.
1348 unchecked.md.map(|md| md.ed25519_id() == ed_id)
1349 }
1350 None => {
1351 // Definitely not listed.
1352 Some(false)
1353 }
1354 }
1355 }
1356
1357 /// Check whether a relay exists (or may exist)
1358 /// with the same identities as those in `target`.
1359 ///
1360 /// `Some(true)` indicates that the relay exists.
1361 /// `Some(false)` indicates that the relay definitely does not exist.
1362 /// `None` indicates that we can't yet tell whether such a relay exists,
1363 /// due to missing information.
1364 pub fn ids_listed<T>(&self, target: &T) -> Option<bool>
1365 where
1366 T: HasRelayIds + ?Sized,
1367 {
1368 let rsa_id = target.rsa_identity();
1369 let ed25519_id = target.ed_identity();
1370
1371 // TODO: If we later support more identity key types, this will
1372 // become incorrect. This assertion might help us recognize that case.
1373 const _: () = assert!(RelayIdType::COUNT == 2);
1374
1375 match (rsa_id, ed25519_id) {
1376 (Some(r), Some(e)) => self.id_pair_listed(e, r),
1377 (Some(r), None) => Some(self.rsa_id_is_listed(r)),
1378 (None, Some(e)) => {
1379 if self.rsidx_by_ed.contains_key(e) {
1380 Some(true)
1381 } else {
1382 None
1383 }
1384 }
1385 (None, None) => None,
1386 }
1387 }
1388
    /// Return a (possibly [unusable](NetDir#usable)) relay with a given RSA identity.
    ///
    /// This API can be used to find information about a relay that is listed in
    /// the current consensus, even if we don't yet have enough information
    /// (like a microdescriptor) about the relay to use it.
    #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
    #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
    fn by_rsa_id_unchecked(&self, rsa_id: &RsaIdentity) -> Option<UncheckedRelay<'_>> {
        let rsidx = *self.rsidx_by_rsa.get(rsa_id)?;
        let rs = self.c_relays().get(rsidx).expect("Corrupt index");
        // The index entry must agree with the relay it points at.
        assert_eq!(rs.rsa_identity(), rsa_id);
        Some(self.relay_from_rs_and_rsidx(rs, rsidx))
    }
    /// Return the relay with a given RSA identity, if we have one
    /// and it is [usable](NetDir#usable).
    fn by_rsa_id(&self, rsa_id: &RsaIdentity) -> Option<Relay<'_>> {
        // into_relay() performs the usability check.
        self.by_rsa_id_unchecked(rsa_id)?.into_relay()
    }
    /// Return true if `rsa_id` is listed in this directory, even if it isn't
    /// currently usable.
    ///
    /// (An "[unusable](NetDir#usable)" relay in this context is one for which we don't have full
    /// directory information.)
    #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
    #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
    fn rsa_id_is_listed(&self, rsa_id: &RsaIdentity) -> bool {
        self.by_rsa_id_unchecked(rsa_id).is_some()
    }
1417
1418 /// List the hsdirs in this NetDir, that should be in the HSDir rings
1419 ///
1420 /// The results are not returned in any particular order.
1421 #[cfg(feature = "hs-common")]
1422 fn all_hsdirs(&self) -> impl Iterator<Item = (RouterStatusIdx, Relay<'_>)> {
1423 self.c_relays().iter_enumerated().filter_map(|(rsidx, rs)| {
1424 let relay = self.relay_from_rs_and_rsidx(rs, rsidx);
1425 relay.is_hsdir_for_ring().then_some(())?;
1426 let relay = relay.into_relay()?;
1427 Some((rsidx, relay))
1428 })
1429 }
1430
    /// Return the parameters from the consensus, clamped to the
    /// correct ranges, with defaults filled in.
    ///
    /// NOTE: unsupported parameters aren't returned here; only those
    /// values configured in the `params` module are available.
    pub fn params(&self) -> &NetParameters {
        &self.params
    }
1439
    /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
    /// network's current requirements and recommendations for the list of
    /// protocols that every relay must implement.
    //
    // TODO HS: I am not sure this is the right API; other alternatives would be:
    //    * To expose the _required_ relay protocol list instead (since that's all that
    //      onion service implementations need).
    //    * To expose the client protocol list as well (for symmetry).
    //    * To expose the MdConsensus instead (since that's more general, although
    //      it restricts the future evolution of this API).
    //
    // I think that this is a reasonably good compromise for now, but I'm going
    // to put it behind the `hs-common` feature to give us time to consider more.
    #[cfg(feature = "hs-common")]
    pub fn relay_protocol_status(&self) -> &netstatus::ProtoStatus {
        self.consensus.relay_protocol_status()
    }
1457
    /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
    /// network's current requirements and recommendations for the list of
    /// protocols that every client must implement.
    //
    // (The previous doc said "relay" here, but this method returns the
    // *client* protocol status from the consensus.)
    //
    // TODO HS: See notes on relay_protocol_status above.
    #[cfg(feature = "hs-common")]
    pub fn client_protocol_status(&self) -> &netstatus::ProtoStatus {
        self.consensus.client_protocol_status()
    }
1467
    /// Return the weighted fraction of relays we can use. We only
    /// consider relays that match the predicate `usable`. We weight
    /// this bandwidth according to the provided `role`.
    ///
    /// If _no_ matching relays in the consensus have a nonzero
    /// weighted bandwidth value, we fall back to looking at the
    /// unweighted fraction of matching relays.
    ///
    /// If there are no matching relays in the consensus, we return 0.0.
    fn frac_for_role<'a, F>(&'a self, role: WeightRole, usable: F) -> f64
    where
        F: Fn(&UncheckedRelay<'a>) -> bool,
    {
        // Totals over all matching relays; "have" counts only those for
        // which we also have a microdescriptor (i.e. usable relays).
        let mut total_weight = 0_u64;
        let mut have_weight = 0_u64;
        let mut have_count = 0_usize;
        let mut total_count = 0_usize;

        for r in self.all_relays() {
            if !usable(&r) {
                continue;
            }
            let w = self.weights.weight_rs_for_role(r.rs, role);
            total_weight += w;
            total_count += 1;
            if r.is_usable() {
                have_weight += w;
                have_count += 1;
            }
        }

        if total_weight > 0 {
            // The consensus lists some weighted bandwidth so return the
            // fraction of the weighted bandwidth for which we have
            // descriptors.
            (have_weight as f64) / (total_weight as f64)
        } else if total_count > 0 {
            // The consensus lists no weighted bandwidth for these relays,
            // but at least it does list relays. Return the fraction of
            // relays for which we have descriptors.
            (have_count as f64) / (total_count as f64)
        } else {
            // There are no relays of this kind in the consensus. Return
            // 0.0, to avoid dividing by zero and giving NaN.
            0.0
        }
    }
    /// Return the estimated fraction of possible paths that we have
    /// enough microdescriptors to build.
    ///
    /// (Computed as the product of the usable fractions of guards,
    /// middles, and exits.)
    fn frac_usable_paths(&self) -> f64 {
        // TODO #504, TODO SPEC: We may want to add a set of is_flagged_fast() and/or
        // is_flagged_stable() checks here. This will require spec clarification.
        let f_g = self.frac_for_role(WeightRole::Guard, |u| {
            u.low_level_details().is_suitable_as_guard()
        });
        let f_m = self.frac_for_role(WeightRole::Middle, |_| true);
        let f_e = if self.all_relays().any(|u| u.rs.is_flagged_exit()) {
            self.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit())
        } else {
            // If there are no exits at all, we use f_m here.
            f_m
        };
        f_g * f_m * f_e
    }
    /// Return true if there is enough information in this NetDir to build
    /// multihop circuits.
    fn have_enough_paths(&self) -> bool {
        // TODO-A001: This should check for our guards as well, and
        // make sure that if they're listed in the consensus, we have
        // the descriptors for them.

        // If we can build a randomly chosen path with at least this
        // probability, we know enough information to participate
        // on the network.

        let min_frac_paths: f64 = self.params().min_circuit_path_threshold.as_fraction();

        // What fraction of paths can we build?
        let available = self.frac_usable_paths();

        available >= min_frac_paths
    }
    /// Choose a relay at random.
    ///
    /// Each relay is chosen with probability proportional to its weight
    /// in the role `role`, and is only selected if the predicate `usable`
    /// returns true for it.
    ///
    /// This function returns None if (and only if) there are no relays
    /// with nonzero weight where `usable` returned true.
    //
    // TODO this API, with the `usable` closure, invites mistakes where we fail to
    // check conditions that are implied by the role we have selected for the relay:
    // call sites must include a call to `Relay::is_polarity_inverter()` or whatever.
    // IMO the `WeightRole` ought to imply a condition (and it should therefore probably
    // be renamed.) -Diziet
    pub fn pick_relay<'a, R, P>(
        &'a self,
        rng: &mut R,
        role: WeightRole,
        usable: P,
    ) -> Option<Relay<'a>>
    where
        R: rand::Rng,
        P: FnMut(&Relay<'a>) -> bool,
    {
        let relays: Vec<_> = self.relays().filter(usable).collect();
        // This algorithm uses rand::distr::WeightedIndex, which
        // gives O(n) time and space to build the index, plus O(log n)
        // sampling time.
        //
        // We might be better off building a WeightedIndex in advance
        // for each `role`, and then sampling it repeatedly until we
        // get a relay that satisfies `usable`.  Or we might not --
        // that depends heavily on the actual particulars of our
        // inputs.  We probably shouldn't make any changes there
        // unless profiling tells us that this function is in a hot
        // path.
        //
        // The C Tor sampling implementation goes through some trouble
        // here to try to make its path selection constant-time.  I
        // believe that there is no actual remotely exploitable
        // side-channel here however.  It could be worth analyzing in
        // the future.
        //
        // This code will give the wrong result if the total of all weights
        // can exceed u64::MAX.  We make sure that can't happen when we
        // set up `self.weights`.
        match relays[..].choose_weighted(rng, |r| self.weights.weight_rs_for_role(r.rs, role)) {
            Ok(relay) => Some(relay.clone()),
            Err(WeightError::InsufficientNonZero) => {
                if relays.is_empty() {
                    None
                } else {
                    // Every candidate had zero weight: fall back to a
                    // uniform random choice rather than returning nothing.
                    warn!(?self.weights, ?role,
                          "After filtering, all {} relays had zero weight. Choosing one at random. See bug #1907.",
                          relays.len());
                    relays.choose(rng).cloned()
                }
            }
            Err(e) => {
                warn_report!(e, "Unexpected error while sampling a relay");
                None
            }
        }
    }
1614
    /// Choose `n` relay at random.
    ///
    /// Each relay is chosen with probability proportional to its weight
    /// in the role `role`, and is only selected if the predicate `usable`
    /// returns true for it.
    ///
    /// Relays are chosen without replacement: no relay will be
    /// returned twice. Therefore, the resulting vector may be smaller
    /// than `n` if we happen to have fewer than `n` appropriate relays.
    ///
    /// This function returns an empty vector if (and only if) there
    /// are no relays with nonzero weight where `usable` returned
    /// true.
    #[allow(clippy::cognitive_complexity)] // all due to tracing crate.
    pub fn pick_n_relays<'a, R, P>(
        &'a self,
        rng: &mut R,
        n: usize,
        role: WeightRole,
        usable: P,
    ) -> Vec<Relay<'a>>
    where
        R: rand::Rng,
        P: FnMut(&Relay<'a>) -> bool,
    {
        let relays: Vec<_> = self.relays().filter(usable).collect();
        // NOTE: See discussion in pick_relay().
        let mut relays = match relays[..].choose_multiple_weighted(rng, n, |r| {
            self.weights.weight_rs_for_role(r.rs, role) as f64
        }) {
            Err(WeightError::InsufficientNonZero) => {
                // Too few relays had nonzero weights: return all of those that are okay.
                // (Rand 0.9 used to exhibit this behavior; it no longer does.
                // We still detect it.)
                let remaining: Vec<_> = relays
                    .iter()
                    .filter(|r| self.weights.weight_rs_for_role(r.rs, role) > 0)
                    .cloned()
                    .collect();
                if remaining.is_empty() {
                    warn!(?self.weights, ?role,
                          "After filtering, all {} relays had zero weight! Picking some at random. See bug #1907.",
                          relays.len());
                    if relays.len() >= n {
                        relays.choose_multiple(rng, n).cloned().collect()
                    } else {
                        relays
                    }
                } else {
                    warn!(?self.weights, ?role,
                          "After filtering, only had {}/{} relays with nonzero weight. Returning them all. See bug #1907.",
                          remaining.len(), relays.len());
                    remaining
                }
            }
            Err(e) => {
                warn_report!(e, "Unexpected error while sampling a set of relays");
                Vec::new()
            }
            Ok(iter) => {
                let selection: Vec<_> = iter.map(Relay::clone).collect();
                if selection.len() < n && selection.len() < relays.len() {
                    warn!(?self.weights, ?role,
                          "choose_multiple_weighted returned only {returned}, despite requesting {n}, \
                           and having {filtered_len} available after filtering. See bug #1907.",
                          returned=selection.len(), filtered_len=relays.len());
                }
                selection
            }
        };
        // choose_multiple_weighted doesn't necessarily randomize the order
        // of its output, so shuffle before returning.
        relays.shuffle(rng);
        relays
    }
1688
    /// Compute the weight with which `relay` will be selected for a given
    /// `role`.
    pub fn relay_weight<'a>(&'a self, relay: &Relay<'a>, role: WeightRole) -> RelayWeight {
        RelayWeight(self.weights.weight_rs_for_role(relay.rs, role))
    }
1694
    /// Compute the total weight with which any relay matching `usable`
    /// will be selected for a given `role`.
    ///
    /// Note: because this function is used to assess the total
    /// properties of the consensus, the `usable` predicate takes an
    /// [`UncheckedRelay`] rather than a [`Relay`].
    pub fn total_weight<P>(&self, role: WeightRole, usable: P) -> RelayWeight
    where
        P: Fn(&UncheckedRelay<'_>) -> bool,
    {
        self.all_relays()
            .filter_map(|unchecked| {
                if usable(&unchecked) {
                    Some(RelayWeight(
                        self.weights.weight_rs_for_role(unchecked.rs, role),
                    ))
                } else {
                    None
                }
            })
            .sum()
    }
1717
    /// Compute the weight with which a relay with ID `rsa_id` would be
    /// selected for a given `role`.
    ///
    /// Note that weight returned by this function assumes that the
    /// relay with that ID is actually [usable](NetDir#usable); if it isn't usable,
    /// then other weight-related functions will treat its weight as zero.
    pub fn weight_by_rsa_id(&self, rsa_id: &RsaIdentity, role: WeightRole) -> Option<RelayWeight> {
        self.by_rsa_id_unchecked(rsa_id)
            .map(|unchecked| RelayWeight(self.weights.weight_rs_for_role(unchecked.rs, role)))
    }
1728
    /// Return all relays in this NetDir known to be in the same family as
    /// `relay`.
    ///
    /// This list of members will **not** necessarily include `relay` itself.
    ///
    /// # Limitations
    ///
    /// Two relays only belong to the same family if _each_ relay
    /// claims to share a family with the other.  But if we are
    /// missing a microdescriptor for one of the relays listed by this
    /// relay, we cannot know whether it acknowledges family
    /// membership with this relay or not.  Therefore, this function
    /// can omit family members for which there is not (as yet) any
    /// Relay object.
    pub fn known_family_members<'a>(
        &'a self,
        relay: &'a Relay<'a>,
    ) -> impl Iterator<Item = Relay<'a>> {
        let relay_rsa_id = relay.rsa_id();
        relay.md.family().members().filter_map(move |other_rsa_id| {
            // Only keep members whose own family list reciprocates the claim.
            self.by_rsa_id(other_rsa_id)
                .filter(|other_relay| other_relay.md.family().contains(relay_rsa_id))
        })
    }
1753
    /// Return the current hidden service directory "time period".
    ///
    /// Specifically, this returns the time period that contains the beginning
    /// of the validity period of this `NetDir`'s consensus.  That time period
    /// is the one we use when acting as a hidden service client.
    #[cfg(feature = "hs-common")]
    pub fn hs_time_period(&self) -> TimePeriod {
        self.hsdir_rings.current.time_period()
    }
1763
1764 /// Return the [`HsDirParams`] of all the relevant hidden service directory "time periods"
1765 ///
1766 /// This includes the current time period (as from
1767 /// [`.hs_time_period`](NetDir::hs_time_period))
1768 /// plus additional time periods that we publish descriptors for when we are
1769 /// acting as a hidden service.
1770 #[cfg(feature = "hs-service")]
1771 pub fn hs_all_time_periods(&self) -> Vec<HsDirParams> {
1772 self.hsdir_rings
1773 .iter()
1774 .map(|r| r.params().clone())
1775 .collect()
1776 }
1777
1778 /// Return the relays in this network directory that will be used as hidden service directories
1779 ///
1780 /// These are suitable to retrieve a given onion service's descriptor at a given time period.
1781 #[cfg(feature = "hs-common")]
1782 pub fn hs_dirs_download<'r, R>(
1783 &'r self,
1784 hsid: HsBlindId,
1785 period: TimePeriod,
1786 rng: &mut R,
1787 ) -> std::result::Result<Vec<Relay<'r>>, Bug>
1788 where
1789 R: rand::Rng,
1790 {
1791 // Algorithm:
1792 //
1793 // 1. Determine which HsDirRing to use, based on the time period.
1794 // 2. Find the shared random value that's associated with that HsDirRing.
1795 // 3. Choose spread = the parameter `hsdir_spread_fetch`
1796 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1797 // 5. Initialize Dirs = []
1798 // 6. for idx in 1..=n_replicas:
1799 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1800 // period).
1801 // - Find the position of H within hsdir_ring.
1802 // - Take elements from hsdir_ring starting at that position,
1803 // adding them to Dirs until we have added `spread` new elements
1804 // that were not there before.
1805 // 7. Shuffle Dirs
1806 // 8. return Dirs.
1807
1808 let spread = self.spread(HsDirOp::Download);
1809
1810 // When downloading, only look at relays on current ring.
1811 let ring = &self.hsdir_rings.current;
1812
1813 if ring.params().time_period != period {
1814 return Err(internal!(
1815 "our current ring is not associated with the requested time period!"
1816 ));
1817 }
1818
1819 let mut hs_dirs = self.select_hsdirs(hsid, ring, spread).collect_vec();
1820
1821 // When downloading, the order of the returned relays is random.
1822 hs_dirs.shuffle(rng);
1823
1824 Ok(hs_dirs)
1825 }
1826
1827 /// Return the relays in this network directory that will be used as hidden service directories
1828 ///
1829 /// Returns the relays that are suitable for storing a given onion service's descriptors at the
1830 /// given time period.
1831 #[cfg(feature = "hs-service")]
1832 pub fn hs_dirs_upload(
1833 &self,
1834 hsid: HsBlindId,
1835 period: TimePeriod,
1836 ) -> std::result::Result<impl Iterator<Item = Relay<'_>>, Bug> {
1837 // Algorithm:
1838 //
1839 // 1. Choose spread = the parameter `hsdir_spread_store`
1840 // 2. Determine which HsDirRing to use, based on the time period.
1841 // 3. Find the shared random value that's associated with that HsDirRing.
1842 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1843 // 5. Initialize Dirs = []
1844 // 6. for idx in 1..=n_replicas:
1845 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1846 // period).
1847 // - Find the position of H within hsdir_ring.
1848 // - Take elements from hsdir_ring starting at that position,
1849 // adding them to Dirs until we have added `spread` new elements
1850 // that were not there before.
1851 // 3. return Dirs.
1852 let spread = self.spread(HsDirOp::Upload);
1853
1854 // For each HsBlindId, determine which HsDirRing to use.
1855 let rings = self
1856 .hsdir_rings
1857 .iter()
1858 .filter_map(move |ring| {
1859 // Make sure the ring matches the TP of the hsid it's matched with.
1860 (ring.params().time_period == period).then_some((ring, hsid, period))
1861 })
1862 .collect::<Vec<_>>();
1863
1864 // The specified period should have an associated ring.
1865 if !rings.iter().any(|(_, _, tp)| *tp == period) {
1866 return Err(internal!(
1867 "the specified time period does not have an associated ring"
1868 ));
1869 };
1870
1871 // Now that we've matched each `hsid` with the ring associated with its TP, we can start
1872 // selecting replicas from each ring.
1873 Ok(rings.into_iter().flat_map(move |(ring, hsid, period)| {
1874 assert_eq!(period, ring.params().time_period());
1875 self.select_hsdirs(hsid, ring, spread)
1876 }))
1877 }
1878
1879 /// Return the relays in this network directory that will be used as hidden service directories
1880 ///
1881 /// Depending on `op`,
1882 /// these are suitable to either store, or retrieve, a
1883 /// given onion service's descriptor at a given time period.
1884 ///
1885 /// When `op` is `Download`, the order is random.
1886 /// When `op` is `Upload`, the order is not specified.
1887 ///
1888 /// Return an error if the time period is not one returned by
1889 /// `onion_service_time_period` or `onion_service_secondary_time_periods`.
1890 //
1891 // TODO: make HsDirOp pub(crate) once this is removed
1892 #[cfg(feature = "hs-common")]
1893 #[deprecated(note = "Use hs_dirs_upload or hs_dirs_download instead")]
    pub fn hs_dirs<'r, R>(&'r self, hsid: &HsBlindId, op: HsDirOp, rng: &mut R) -> Vec<Relay<'r>>
    where
        R: rand::Rng,
    {
        // Algorithm:
        //
        // 1. Determine which HsDirRing to use, based on the time period.
        // 2. Find the shared random value that's associated with that HsDirRing.
        // 3. Choose spread = the parameter `hsdir_spread_store` or
        //    `hsdir_spread_fetch` based on `op`.
        // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
        // 5. Initialize Dirs = []
        // 6. for idx in 1..=n_replicas:
        //       - let H = hsdir_ring::onion_service_index(id, replica, rand,
        //         period).
        //       - Find the position of H within hsdir_ring.
        //       - Take elements from hsdir_ring starting at that position,
        //         adding them to Dirs until we have added `spread` new elements
        //         that were not there before.
        // 7. return Dirs.
        let n_replicas = self
            .params
            .hsdir_n_replicas
            .get()
            .try_into()
            .expect("BoundedInt did not enforce bounds");

        // `spread` is how many fresh ring entries each replica contributes.
        let spread = match op {
            HsDirOp::Download => self.params.hsdir_spread_fetch,
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => self.params.hsdir_spread_store,
        };

        let spread = spread
            .get()
            .try_into()
            .expect("BoundedInt did not enforce bounds!");

        // TODO: I may be wrong here but I suspect that this function may
        // need refactoring so that it does not look at _all_ of the HsDirRings,
        // but only at the ones that correspond to time periods for which
        // HsBlindId is valid. Or I could be mistaken, in which case we should
        // have a comment to explain why I am, since the logic is subtle.
        // (For clients, there is only one ring.) -nickm
        //
        // (Actually, there is no need to follow through with the above TODO,
        // since this function is deprecated, and not used anywhere but the
        // tests.)

        let mut hs_dirs = self
            .hsdir_rings
            .iter_for_op(op)
            .cartesian_product(1..=n_replicas) // 1-indexed !
            .flat_map({
                // Ring positions already chosen for a lower-numbered replica;
                // shared across all (ring, replica) pairs so later replicas
                // skip over them.
                let mut selected_nodes = HashSet::new();

                move |(ring, replica): (&HsDirRing, u8)| {
                    let hsdir_idx = hsdir_ring::service_hsdir_index(hsid, replica, ring.params());

                    ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
                        // According to rend-spec 2.2.3:
                        //       ... If any of those
                        // nodes have already been selected for a lower-numbered replica of the
                        // service, any nodes already chosen are disregarded (i.e. skipped over)
                        // when choosing a replica's hsdir_spread_store nodes.
                        selected_nodes.insert(*hsdir_idx)
                    })
                    .collect::<Vec<_>>()
                }
            })
            .filter_map(|(_hsdir_idx, rs_idx)| {
                // This ought not to be None but let's not panic or bail if it is
                self.relay_by_rs_idx(*rs_idx)
            })
            .collect_vec();

        match op {
            HsDirOp::Download => {
                // When `op` is `Download`, the order is random.
                hs_dirs.shuffle(rng);
            }
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => {
                // When `op` is `Upload`, the order is not specified.
            }
        }

        hs_dirs
    }
1983}
1984
1985impl MdReceiver for NetDir {
1986 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1987 Box::new(self.rsidx_by_missing.keys())
1988 }
1989 fn add_microdesc(&mut self, md: Microdesc) -> bool {
1990 self.add_arc_microdesc(Arc::new(md))
1991 }
1992 fn n_missing(&self) -> usize {
1993 self.rsidx_by_missing.len()
1994 }
1995}
1996
1997impl<'a> UncheckedRelay<'a> {
1998 /// Return an [`UncheckedRelayDetails`](details::UncheckedRelayDetails) for this relay.
1999 ///
2000 /// Callers should generally avoid using this information directly if they can;
2001 /// it's better to use a higher-level function that exposes semantic information
2002 /// rather than these properties.
2003 pub fn low_level_details(&self) -> details::UncheckedRelayDetails<'_> {
2004 details::UncheckedRelayDetails(self)
2005 }
2006
2007 /// Return true if this relay is valid and [usable](NetDir#usable).
2008 ///
2009 /// This function should return `true` for every Relay we expose
2010 /// to the user.
2011 pub fn is_usable(&self) -> bool {
2012 // No need to check for 'valid' or 'running': they are implicit.
2013 self.md.is_some() && self.rs.ed25519_id_is_usable()
2014 }
2015 /// If this is [usable](NetDir#usable), return a corresponding Relay object.
2016 pub fn into_relay(self) -> Option<Relay<'a>> {
2017 if self.is_usable() {
2018 Some(Relay {
2019 rs: self.rs,
2020 md: self.md?,
2021 #[cfg(feature = "geoip")]
2022 cc: self.cc,
2023 })
2024 } else {
2025 None
2026 }
2027 }
2028
2029 /// Return true if this relay is a hidden service directory
2030 ///
2031 /// Ie, if it is to be included in the hsdir ring.
2032 #[cfg(feature = "hs-common")]
2033 pub(crate) fn is_hsdir_for_ring(&self) -> bool {
2034 // TODO are there any other flags should we check?
2035 // rend-spec-v3 2.2.3 says just
2036 // "each node listed in the current consensus with the HSDir flag"
2037 // Do we need to check ed25519_id_is_usable ?
2038 // See also https://gitlab.torproject.org/tpo/core/arti/-/issues/504
2039 self.rs.is_flagged_hsdir()
2040 }
2041}
2042
2043impl<'a> Relay<'a> {
2044 /// Return a [`RelayDetails`](details::RelayDetails) for this relay.
2045 ///
2046 /// Callers should generally avoid using this information directly if they can;
2047 /// it's better to use a higher-level function that exposes semantic information
2048 /// rather than these properties.
2049 pub fn low_level_details(&self) -> details::RelayDetails<'_> {
2050 details::RelayDetails(self)
2051 }
2052
2053 /// Return the Ed25519 ID for this relay.
2054 pub fn id(&self) -> &Ed25519Identity {
2055 self.md.ed25519_id()
2056 }
2057 /// Return the RsaIdentity for this relay.
2058 pub fn rsa_id(&self) -> &RsaIdentity {
2059 self.rs.rsa_identity()
2060 }
2061
2062 /// Return a reference to this relay's "router status" entry in
2063 /// the consensus.
2064 ///
2065 /// The router status entry contains information about the relay
2066 /// that the authorities voted on directly. For most use cases,
2067 /// you shouldn't need them.
2068 ///
2069 /// This function is only available if the crate was built with
2070 /// its `experimental-api` feature.
2071 #[cfg(feature = "experimental-api")]
2072 pub fn rs(&self) -> &netstatus::MdRouterStatus {
2073 self.rs
2074 }
2075 /// Return a reference to this relay's "microdescriptor" entry in
2076 /// the consensus.
2077 ///
2078 /// A "microdescriptor" is a synopsis of the information about a relay,
2079 /// used to determine its capabilities and route traffic through it.
2080 /// For most use cases, you shouldn't need it.
2081 ///
2082 /// This function is only available if the crate was built with
2083 /// its `experimental-api` feature.
2084 #[cfg(feature = "experimental-api")]
2085 pub fn md(&self) -> &Microdesc {
2086 self.md
2087 }
2088}
2089
/// An error value returned from [`NetDir::by_ids_detailed`].
///
/// (See [`NetDir::by_ids_detailed`] for the lookup that can produce this.)
#[cfg(feature = "hs-common")]
#[derive(Clone, Debug, thiserror::Error)]
#[non_exhaustive]
pub enum RelayLookupError {
    /// We found a relay whose presence indicates that the provided set of
    /// identities is impossible to resolve.
    #[error("Provided set of identities is impossible according to consensus.")]
    Impossible,
}
2100
impl<'a> HasAddrs for Relay<'a> {
    fn addrs(&self) -> impl Iterator<Item = std::net::SocketAddr> {
        // Delegate to the consensus router-status entry.
        self.rs.addrs()
    }
}
#[cfg(feature = "geoip")]
impl<'a> HasCountryCode for Relay<'a> {
    fn country_code(&self) -> Option<CountryCode> {
        // Return the country code stored on this Relay, if any.
        self.cc
    }
}
impl<'a> tor_linkspec::HasRelayIdsLegacy for Relay<'a> {
    fn ed_identity(&self) -> &Ed25519Identity {
        // Same value as [`Relay::id`].
        self.id()
    }
    fn rsa_identity(&self) -> &RsaIdentity {
        // Same value as [`Relay::rsa_id`].
        self.rsa_id()
    }
}
2120
2121impl<'a> HasRelayIds for UncheckedRelay<'a> {
2122 fn identity(&self, key_type: RelayIdType) -> Option<RelayIdRef<'_>> {
2123 match key_type {
2124 RelayIdType::Ed25519 if self.rs.ed25519_id_is_usable() => {
2125 self.md.map(|m| m.ed25519_id().into())
2126 }
2127 RelayIdType::Rsa => Some(self.rs.rsa_identity().into()),
2128 _ => None,
2129 }
2130 }
2131}
#[cfg(feature = "geoip")]
impl<'a> HasCountryCode for UncheckedRelay<'a> {
    fn country_code(&self) -> Option<CountryCode> {
        // Return the country code stored on this UncheckedRelay, if any.
        self.cc
    }
}
2138
// Marker impls: a Relay provides enough identity and address information to
// serve as a direct channel target.
impl<'a> DirectChanMethodsHelper for Relay<'a> {}
impl<'a> ChanTarget for Relay<'a> {}
2141
impl<'a> tor_linkspec::CircTarget for Relay<'a> {
    fn ntor_onion_key(&self) -> &ll::pk::curve25519::PublicKey {
        // The ntor onion key is published in the microdescriptor.
        self.md.ntor_key()
    }
    fn protovers(&self) -> &tor_protover::Protocols {
        // Supported subprotocol versions come from the consensus entry.
        self.rs.protovers()
    }
}
2150
2151#[cfg(test)]
2152mod test {
2153 // @@ begin test lint list maintained by maint/add_warning @@
2154 #![allow(clippy::bool_assert_comparison)]
2155 #![allow(clippy::clone_on_copy)]
2156 #![allow(clippy::dbg_macro)]
2157 #![allow(clippy::mixed_attributes_style)]
2158 #![allow(clippy::print_stderr)]
2159 #![allow(clippy::print_stdout)]
2160 #![allow(clippy::single_char_pattern)]
2161 #![allow(clippy::unwrap_used)]
2162 #![allow(clippy::unchecked_time_subtraction)]
2163 #![allow(clippy::useless_vec)]
2164 #![allow(clippy::needless_pass_by_value)]
2165 //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
2166 #![allow(clippy::cognitive_complexity)]
2167 use super::*;
2168 use crate::testnet::*;
2169 use float_eq::assert_float_eq;
2170 use std::collections::HashSet;
2171 use std::time::Duration;
2172 use tor_basic_utils::test_rng::{self, testing_rng};
2173 use tor_linkspec::{RelayIdType, RelayIds};
2174
2175 #[cfg(feature = "hs-common")]
2176 fn dummy_hs_blind_id() -> HsBlindId {
2177 let hsid = [2, 1, 1, 1].iter().cycle().take(32).cloned().collect_vec();
2178 let hsid = Ed25519Identity::new(hsid[..].try_into().unwrap());
2179 HsBlindId::from(hsid)
2180 }
2181
2182 // Basic functionality for a partial netdir: Add microdescriptors,
2183 // then you have a netdir.
    #[test]
    fn partial_netdir() {
        let (consensus, microdescs) = construct_network().unwrap();
        let dir = PartialNetDir::new(consensus, None);

        // Check the lifetime
        let lifetime = dir.lifetime();
        assert_eq!(
            lifetime
                .valid_until()
                .duration_since(lifetime.valid_after())
                .unwrap(),
            Duration::new(86400, 0)
        );

        // No microdescriptors, so we don't have enough paths, and can't
        // advance.
        assert!(!dir.have_enough_paths());
        let mut dir = match dir.unwrap_if_sufficient() {
            Ok(_) => panic!(),
            Err(d) => d,
        };

        // Every relay in the consensus should report a missing microdescriptor.
        let missing: HashSet<_> = dir.missing_microdescs().collect();
        assert_eq!(missing.len(), 40);
        assert_eq!(missing.len(), dir.netdir.c_relays().len());
        for md in &microdescs {
            assert!(missing.contains(md.digest()));
        }

        // Now add all the mds and try again.
        for md in microdescs {
            let wanted = dir.add_microdesc(md);
            assert!(wanted);
        }

        // With every microdescriptor present, the directory is sufficient.
        let missing: HashSet<_> = dir.missing_microdescs().collect();
        assert!(missing.is_empty());
        assert!(dir.have_enough_paths());
        let _complete = match dir.unwrap_if_sufficient() {
            Ok(d) => d,
            Err(_) => panic!(),
        };
    }
2228
    #[test]
    fn override_params() {
        let (consensus, _microdescs) = construct_network().unwrap();
        let override_p = "bwweightscale=2 doesnotexist=77 circwindow=500"
            .parse()
            .unwrap();
        let dir = PartialNetDir::new(consensus.clone(), Some(&override_p));
        let params = &dir.netdir.params;
        // Recognized parameters are overridden; unrecognized ones are ignored.
        assert_eq!(params.bw_weight_scale.get(), 2);
        assert_eq!(params.circuit_window.get(), 500_i32);

        // try again without the override.
        let dir = PartialNetDir::new(consensus, None);
        let params = &dir.netdir.params;
        assert_eq!(params.bw_weight_scale.get(), 1_i32);
        assert_eq!(params.circuit_window.get(), 1000_i32);
    }
2246
    #[test]
    fn fill_from_previous() {
        let (consensus, microdescs) = construct_network().unwrap();

        // Build a sufficient netdir that is missing exactly two mds.
        let mut dir = PartialNetDir::new(consensus.clone(), None);
        for md in microdescs.iter().skip(2) {
            let wanted = dir.add_microdesc(md.clone());
            assert!(wanted);
        }
        let dir1 = dir.unwrap_if_sufficient().unwrap();
        assert_eq!(dir1.missing_microdescs().count(), 2);

        // A fresh PartialNetDir can reuse the mds held by the old netdir.
        let mut dir = PartialNetDir::new(consensus, None);
        assert_eq!(dir.missing_microdescs().count(), 40);
        dir.fill_from_previous_netdir(Arc::new(dir1));
        assert_eq!(dir.missing_microdescs().count(), 2);
    }
2264
    #[test]
    fn path_count() {
        let low_threshold = "min_paths_for_circs_pct=64".parse().unwrap();
        let high_threshold = "min_paths_for_circs_pct=65".parse().unwrap();

        let (consensus, microdescs) = construct_network().unwrap();

        // Add every md except those at positions congruent to 2 mod 7.
        let mut dir = PartialNetDir::new(consensus.clone(), Some(&low_threshold));
        for (pos, md) in microdescs.iter().enumerate() {
            if pos % 7 == 2 {
                continue; // skip a few relays.
            }
            dir.add_microdesc(md.clone());
        }
        let dir = dir.unwrap_if_sufficient().unwrap();

        // We have 40 relays that we know about from the consensus.
        assert_eq!(dir.all_relays().count(), 40);

        // But only 34 are usable.
        assert_eq!(dir.relays().count(), 34);

        // For guards: mds 20..=39 correspond to Guard relays.
        // Their bandwidth is 2*(1000+2000+...10000) = 110_000.
        // We skipped 23, 30, and 37. They have bandwidth
        // 4000 + 1000 + 8000 = 13_000. So our fractional bandwidth
        // should be (110-13)/110.
        let f = dir.frac_for_role(WeightRole::Guard, |u| u.rs.is_flagged_guard());
        assert!(((97.0 / 110.0) - f).abs() < 0.000001);

        // For exits: mds 10..=19 and 30..=39 correspond to Exit relays.
        // We skipped 16, 30, and 37. Per above our fractional bandwidth is
        // (110-16)/110.
        let f = dir.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit());
        assert!(((94.0 / 110.0) - f).abs() < 0.000001);

        // For middles: all relays are middles. We skipped 2, 9, 16,
        // 23, 30, and 37. Per above our fractional bandwidth is
        // (220-33)/220
        let f = dir.frac_for_role(WeightRole::Middle, |_| true);
        assert!(((187.0 / 220.0) - f).abs() < 0.000001);

        // Multiplying those together, we get the fraction of paths we can
        // build at ~0.64052066, which is above the threshold we set above for
        // MinPathsForCircsPct.
        let f = dir.frac_usable_paths();
        assert!((f - 0.64052066).abs() < 0.000001);

        // But if we try again with a slightly higher threshold...
        let mut dir = PartialNetDir::new(consensus, Some(&high_threshold));
        for (pos, md) in microdescs.into_iter().enumerate() {
            if pos % 7 == 2 {
                continue; // skip a few relays.
            }
            dir.add_microdesc(md);
        }
        assert!(dir.unwrap_if_sufficient().is_err());
    }
2323
2324 /// Return a 3-tuple for use by `test_pick_*()` of an Rng, a number of
2325 /// iterations, and a tolerance.
2326 ///
2327 /// If the Rng is deterministic (the default), we can use a faster setup,
2328 /// with a higher tolerance and fewer iterations. But if you've explicitly
2329 /// opted into randomization (or are replaying a seed from an earlier
2330 /// randomized test), we give you more iterations and a tighter tolerance.
2331 fn testing_rng_with_tolerances() -> (impl rand::Rng, usize, f64) {
2332 // Use a deterministic RNG if none is specified, since this is slow otherwise.
2333 let config = test_rng::Config::from_env().unwrap_or(test_rng::Config::Deterministic);
2334 let (iters, tolerance) = match config {
2335 test_rng::Config::Deterministic => (5000, 0.02),
2336 _ => (50000, 0.01),
2337 };
2338 (config.into_rng(), iters, tolerance)
2339 }
2340
    #[test]
    fn test_pick() {
        let (consensus, microdescs) = construct_network().unwrap();
        let mut dir = PartialNetDir::new(consensus, None);
        for md in microdescs.into_iter() {
            let wanted = dir.add_microdesc(md.clone());
            assert!(wanted);
        }
        let dir = dir.unwrap_if_sufficient().unwrap();

        let (mut rng, total, tolerance) = testing_rng_with_tolerances();

        // Tally how often each relay (keyed by the first byte of its RSA id)
        // gets picked.
        let mut picked = [0_isize; 40];
        for _ in 0..total {
            let r = dir.pick_relay(&mut rng, WeightRole::Middle, |r| {
                r.low_level_details().supports_exit_port_ipv4(80)
            });
            let r = r.unwrap();
            let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
            picked[id_byte as usize] += 1;
        }
        // non-exits should never get picked.
        picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
        picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));

        let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();

        // We didn't use any non-default weights, so the other relays get
        // weighted proportional to their bandwidth.
        assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
        assert_float_eq!(picked_f[38], (9.0 / 110.0), abs <= tolerance);
        assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
    }
2374
    #[test]
    fn test_pick_multiple() {
        // This is mostly a copy of test_pick, except that it uses
        // pick_n_relays to pick several relays at once.

        let dir = construct_netdir().unwrap_if_sufficient().unwrap();

        let (mut rng, total, tolerance) = testing_rng_with_tolerances();

        // Tally how often each relay (keyed by the first byte of its RSA id)
        // gets picked, drawing 4 relays per call.
        let mut picked = [0_isize; 40];
        for _ in 0..total / 4 {
            let relays = dir.pick_n_relays(&mut rng, 4, WeightRole::Middle, |r| {
                r.low_level_details().supports_exit_port_ipv4(80)
            });
            assert_eq!(relays.len(), 4);
            for r in relays {
                let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
                picked[id_byte as usize] += 1;
            }
        }
        // non-exits should never get picked.
        picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
        picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));

        let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();

        // We didn't use any non-default weights, so the other relays get
        // weighted proportional to their bandwidth.
        assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
        assert_float_eq!(picked_f[36], (7.0 / 110.0), abs <= tolerance);
        assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
    }
2407
    #[test]
    fn subnets() {
        let cfg = SubnetConfig::default();

        // Helper: do these two (parsed) addresses fall in the same subnet
        // under `cfg`?
        fn same_net(cfg: &SubnetConfig, a: &str, b: &str) -> bool {
            cfg.addrs_in_same_subnet(&a.parse().unwrap(), &b.parse().unwrap())
        }

        // With the default configuration:
        assert!(same_net(&cfg, "127.15.3.3", "127.15.9.9"));
        assert!(!same_net(&cfg, "127.15.3.3", "127.16.9.9"));

        assert!(!same_net(&cfg, "127.15.3.3", "127::"));

        assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
        assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:fffe:91:34::"));

        // With full-length prefixes, only exact matches count.
        let cfg = SubnetConfig {
            subnets_family_v4: 32,
            subnets_family_v6: 128,
        };
        assert!(!same_net(&cfg, "127.15.3.3", "127.15.9.9"));
        assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));

        assert!(same_net(&cfg, "127.0.0.1", "127.0.0.1"));
        assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.2"));
        assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:90:33::"));

        // Prefixes longer than the address length never match anything.
        let cfg = SubnetConfig {
            subnets_family_v4: 33,
            subnets_family_v6: 129,
        };
        assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.1"));
        assert!(!same_net(&cfg, "::", "::"));
    }
2442
    #[test]
    fn subnet_union() {
        let cfg1 = SubnetConfig {
            subnets_family_v4: 16,
            subnets_family_v6: 64,
        };
        let cfg2 = SubnetConfig {
            subnets_family_v4: 24,
            subnets_family_v6: 32,
        };
        let a1 = "1.2.3.4".parse().unwrap();
        let a2 = "1.2.10.10".parse().unwrap();

        let a3 = "ffff:ffff::7".parse().unwrap();
        let a4 = "ffff:ffff:1234::8".parse().unwrap();

        assert_eq!(cfg1.addrs_in_same_subnet(&a1, &a2), true);
        assert_eq!(cfg2.addrs_in_same_subnet(&a1, &a2), false);

        assert_eq!(cfg1.addrs_in_same_subnet(&a3, &a4), false);
        assert_eq!(cfg2.addrs_in_same_subnet(&a3, &a4), true);

        // The union keeps the shorter (more permissive) prefix per family...
        let cfg_u = cfg1.union(&cfg2);
        assert_eq!(
            cfg_u,
            SubnetConfig {
                subnets_family_v4: 16,
                subnets_family_v6: 32,
            }
        );
        // ...so it matches whatever either input matched.
        assert_eq!(cfg_u.addrs_in_same_subnet(&a1, &a2), true);
        assert_eq!(cfg_u.addrs_in_same_subnet(&a3, &a4), true);

        // Union with itself changes nothing...
        assert_eq!(cfg1.union(&cfg1), cfg1);

        // ...and union with no_addresses_match leaves the config unchanged.
        assert_eq!(cfg1.union(&SubnetConfig::no_addresses_match()), cfg1);
    }
2480
    #[test]
    fn relay_funcs() {
        // Build a network, adding an IPv6 ORPort to relays 15 and 20 so we
        // can exercise IPv6 subnet logic below.
        let (consensus, microdescs) = construct_custom_network(
            |pos, nb, _| {
                if pos == 15 {
                    nb.rs.add_or_port("[f0f0::30]:9001".parse().unwrap());
                } else if pos == 20 {
                    nb.rs.add_or_port("[f0f0::3131]:9001".parse().unwrap());
                }
            },
            None,
        )
        .unwrap();
        let subnet_config = SubnetConfig::default();
        let all_family_info = FamilyRules::all_family_info();
        let mut dir = PartialNetDir::new(consensus, None);
        for md in microdescs.into_iter() {
            let wanted = dir.add_microdesc(md.clone());
            assert!(wanted);
        }
        let dir = dir.unwrap_if_sufficient().unwrap();

        // Pick out a few relays by ID.
        let k0 = Ed25519Identity::from([0; 32]);
        let k1 = Ed25519Identity::from([1; 32]);
        let k2 = Ed25519Identity::from([2; 32]);
        let k3 = Ed25519Identity::from([3; 32]);
        let k10 = Ed25519Identity::from([10; 32]);
        let k15 = Ed25519Identity::from([15; 32]);
        let k20 = Ed25519Identity::from([20; 32]);

        let r0 = dir.by_id(&k0).unwrap();
        let r1 = dir.by_id(&k1).unwrap();
        let r2 = dir.by_id(&k2).unwrap();
        let r3 = dir.by_id(&k3).unwrap();
        let r10 = dir.by_id(&k10).unwrap();
        let r15 = dir.by_id(&k15).unwrap();
        let r20 = dir.by_id(&k20).unwrap();

        // Identity accessors report the constructed ids.
        assert_eq!(r0.id(), &[0; 32].into());
        assert_eq!(r0.rsa_id(), &[0; 20].into());
        assert_eq!(r1.id(), &[1; 32].into());
        assert_eq!(r1.rsa_id(), &[1; 20].into());

        assert!(r0.same_relay_ids(&r0));
        assert!(r1.same_relay_ids(&r1));
        assert!(!r1.same_relay_ids(&r0));

        assert!(r0.low_level_details().is_dir_cache());
        assert!(!r1.low_level_details().is_dir_cache());
        assert!(r2.low_level_details().is_dir_cache());
        assert!(!r3.low_level_details().is_dir_cache());

        assert!(!r0.low_level_details().supports_exit_port_ipv4(80));
        assert!(!r1.low_level_details().supports_exit_port_ipv4(80));
        assert!(!r2.low_level_details().supports_exit_port_ipv4(80));
        assert!(!r3.low_level_details().supports_exit_port_ipv4(80));

        assert!(!r0.low_level_details().policies_allow_some_port());
        assert!(!r1.low_level_details().policies_allow_some_port());
        assert!(!r2.low_level_details().policies_allow_some_port());
        assert!(!r3.low_level_details().policies_allow_some_port());
        assert!(r10.low_level_details().policies_allow_some_port());

        // Family membership is symmetric within {r0, r1} and {r2, r3}.
        assert!(r0.low_level_details().in_same_family(&r0, all_family_info));
        assert!(r0.low_level_details().in_same_family(&r1, all_family_info));
        assert!(r1.low_level_details().in_same_family(&r0, all_family_info));
        assert!(r1.low_level_details().in_same_family(&r1, all_family_info));
        assert!(!r0.low_level_details().in_same_family(&r2, all_family_info));
        assert!(!r2.low_level_details().in_same_family(&r0, all_family_info));
        assert!(r2.low_level_details().in_same_family(&r2, all_family_info));
        assert!(r2.low_level_details().in_same_family(&r3, all_family_info));

        assert!(r0.low_level_details().in_same_subnet(&r10, &subnet_config));
        assert!(r10.low_level_details().in_same_subnet(&r10, &subnet_config));
        assert!(r0.low_level_details().in_same_subnet(&r0, &subnet_config));
        assert!(r1.low_level_details().in_same_subnet(&r1, &subnet_config));
        assert!(!r1.low_level_details().in_same_subnet(&r2, &subnet_config));
        assert!(!r2.low_level_details().in_same_subnet(&r3, &subnet_config));

        // Make sure IPv6 families work.
        let subnet_config = SubnetConfig {
            subnets_family_v4: 128,
            subnets_family_v6: 96,
        };
        assert!(r15.low_level_details().in_same_subnet(&r20, &subnet_config));
        assert!(!r15.low_level_details().in_same_subnet(&r1, &subnet_config));

        // Make sure that subnet configs can be disabled.
        let subnet_config = SubnetConfig {
            subnets_family_v4: 255,
            subnets_family_v6: 255,
        };
        assert!(!r15.low_level_details().in_same_subnet(&r20, &subnet_config));
    }
2576
    #[test]
    fn test_badexit() {
        // make a netdir where relays 10-19 are badexit, and everybody
        // exits to 443 on IPv6.
        use tor_netdoc::types::relay_flags::RelayFlag;
        let netdir = construct_custom_netdir(|pos, nb, _| {
            if (10..20).contains(&pos) {
                nb.rs.add_flags(RelayFlag::BadExit);
            }
            nb.md.parse_ipv6_policy("accept 443").unwrap();
        })
        .unwrap()
        .unwrap_if_sufficient()
        .unwrap();

        // e12 is flagged BadExit; e32 is not.
        let e12 = netdir.by_id(&Ed25519Identity::from([12; 32])).unwrap();
        let e32 = netdir.by_id(&Ed25519Identity::from([32; 32])).unwrap();

        assert!(!e12.low_level_details().supports_exit_port_ipv4(80));
        assert!(e32.low_level_details().supports_exit_port_ipv4(80));

        assert!(!e12.low_level_details().supports_exit_port_ipv6(443));
        assert!(e32.low_level_details().supports_exit_port_ipv6(443));
        assert!(!e32.low_level_details().supports_exit_port_ipv6(555));

        assert!(!e12.low_level_details().policies_allow_some_port());
        assert!(e32.low_level_details().policies_allow_some_port());

        // The effective policies treat a BadExit relay as allowing nothing...
        assert!(!e12.low_level_details().ipv4_policy().allows_some_port());
        assert!(!e12.low_level_details().ipv6_policy().allows_some_port());
        assert!(e32.low_level_details().ipv4_policy().allows_some_port());
        assert!(e32.low_level_details().ipv6_policy().allows_some_port());

        // ...while the declared policies still show what the relay published.
        assert!(
            e12.low_level_details()
                .ipv4_declared_policy()
                .allows_some_port()
        );
        assert!(
            e12.low_level_details()
                .ipv6_declared_policy()
                .allows_some_port()
        );
    }
2621
    #[cfg(feature = "experimental-api")]
    #[test]
    fn test_accessors() {
        let netdir = construct_netdir().unwrap_if_sufficient().unwrap();

        let r4 = netdir.by_id(&Ed25519Identity::from([4; 32])).unwrap();
        let r16 = netdir.by_id(&Ed25519Identity::from([16; 32])).unwrap();

        // The experimental md()/rs() accessors expose the raw documents.
        assert!(!r4.md().ipv4_policy().allows_some_port());
        assert!(r16.md().ipv4_policy().allows_some_port());

        assert!(!r4.rs().is_flagged_exit());
        assert!(r16.rs().is_flagged_exit());
    }
2636
2637 #[test]
2638 fn test_by_id() {
2639 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2640 let netdir = construct_custom_netdir(|pos, nb, _| {
2641 nb.omit_md = pos == 13;
2642 })
2643 .unwrap();
2644
2645 let netdir = netdir.unwrap_if_sufficient().unwrap();
2646
2647 let r = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2648 assert_eq!(r.id().as_bytes(), &[0; 32]);
2649
2650 assert!(netdir.by_id(&Ed25519Identity::from([13; 32])).is_none());
2651
2652 let r = netdir.by_rsa_id(&[12; 20].into()).unwrap();
2653 assert_eq!(r.rsa_id().as_bytes(), &[12; 20]);
2654 assert!(netdir.rsa_id_is_listed(&[12; 20].into()));
2655
2656 assert!(netdir.by_rsa_id(&[13; 20].into()).is_none());
2657
2658 assert!(netdir.by_rsa_id_unchecked(&[99; 20].into()).is_none());
2659 assert!(!netdir.rsa_id_is_listed(&[99; 20].into()));
2660
2661 let r = netdir.by_rsa_id_unchecked(&[13; 20].into()).unwrap();
2662 assert_eq!(r.rs.rsa_identity().as_bytes(), &[13; 20]);
2663 assert!(netdir.rsa_id_is_listed(&[13; 20].into()));
2664
2665 let pair_13_13 = RelayIds::builder()
2666 .ed_identity([13; 32].into())
2667 .rsa_identity([13; 20].into())
2668 .build()
2669 .unwrap();
2670 let pair_14_14 = RelayIds::builder()
2671 .ed_identity([14; 32].into())
2672 .rsa_identity([14; 20].into())
2673 .build()
2674 .unwrap();
2675 let pair_14_99 = RelayIds::builder()
2676 .ed_identity([14; 32].into())
2677 .rsa_identity([99; 20].into())
2678 .build()
2679 .unwrap();
2680
2681 let r = netdir.by_ids(&pair_13_13);
2682 assert!(r.is_none());
2683 let r = netdir.by_ids(&pair_14_14).unwrap();
2684 assert_eq!(r.identity(RelayIdType::Rsa).unwrap().as_bytes(), &[14; 20]);
2685 assert_eq!(
2686 r.identity(RelayIdType::Ed25519).unwrap().as_bytes(),
2687 &[14; 32]
2688 );
2689 let r = netdir.by_ids(&pair_14_99);
2690 assert!(r.is_none());
2691
2692 assert_eq!(
2693 netdir.id_pair_listed(&[13; 32].into(), &[13; 20].into()),
2694 None
2695 );
2696 assert_eq!(
2697 netdir.id_pair_listed(&[15; 32].into(), &[15; 20].into()),
2698 Some(true)
2699 );
2700 assert_eq!(
2701 netdir.id_pair_listed(&[15; 32].into(), &[99; 20].into()),
2702 Some(false)
2703 );
2704 }
2705
2706 #[test]
2707 #[cfg(feature = "hs-common")]
2708 fn test_by_ids_detailed() {
2709 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2710 let netdir = construct_custom_netdir(|pos, nb, _| {
2711 nb.omit_md = pos == 13;
2712 })
2713 .unwrap();
2714
2715 let netdir = netdir.unwrap_if_sufficient().unwrap();
2716
2717 let id13_13 = RelayIds::builder()
2718 .ed_identity([13; 32].into())
2719 .rsa_identity([13; 20].into())
2720 .build()
2721 .unwrap();
2722 let id15_15 = RelayIds::builder()
2723 .ed_identity([15; 32].into())
2724 .rsa_identity([15; 20].into())
2725 .build()
2726 .unwrap();
2727 let id15_99 = RelayIds::builder()
2728 .ed_identity([15; 32].into())
2729 .rsa_identity([99; 20].into())
2730 .build()
2731 .unwrap();
2732 let id99_15 = RelayIds::builder()
2733 .ed_identity([99; 32].into())
2734 .rsa_identity([15; 20].into())
2735 .build()
2736 .unwrap();
2737 let id99_99 = RelayIds::builder()
2738 .ed_identity([99; 32].into())
2739 .rsa_identity([99; 20].into())
2740 .build()
2741 .unwrap();
2742 let id15_xx = RelayIds::builder()
2743 .ed_identity([15; 32].into())
2744 .build()
2745 .unwrap();
2746 let idxx_15 = RelayIds::builder()
2747 .rsa_identity([15; 20].into())
2748 .build()
2749 .unwrap();
2750
2751 assert!(matches!(netdir.by_ids_detailed(&id13_13), Ok(None)));
2752 assert!(matches!(netdir.by_ids_detailed(&id15_15), Ok(Some(_))));
2753 assert!(matches!(
2754 netdir.by_ids_detailed(&id15_99),
2755 Err(RelayLookupError::Impossible)
2756 ));
2757 assert!(matches!(
2758 netdir.by_ids_detailed(&id99_15),
2759 Err(RelayLookupError::Impossible)
2760 ));
2761 assert!(matches!(netdir.by_ids_detailed(&id99_99), Ok(None)));
2762 assert!(matches!(netdir.by_ids_detailed(&id15_xx), Ok(Some(_))));
2763 assert!(matches!(netdir.by_ids_detailed(&idxx_15), Ok(Some(_))));
2764 }
2765
2766 #[test]
2767 fn weight_type() {
2768 let r0 = RelayWeight(0);
2769 let r100 = RelayWeight(100);
2770 let r200 = RelayWeight(200);
2771 let r300 = RelayWeight(300);
2772 assert_eq!(r100 + r200, r300);
2773 assert_eq!(r100.checked_div(r200), Some(0.5));
2774 assert!(r100.checked_div(r0).is_none());
2775 assert_eq!(r200.ratio(0.5), Some(r100));
2776 assert!(r200.ratio(-1.0).is_none());
2777 }
2778
2779 #[test]
2780 fn weight_accessors() {
2781 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2782 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2783
2784 let g_total = netdir.total_weight(WeightRole::Guard, |r| r.rs.is_flagged_guard());
2785 // This is just the total guard weight, since all our Wxy = 1.
2786 assert_eq!(g_total, RelayWeight(110_000));
2787
2788 let g_total = netdir.total_weight(WeightRole::Guard, |_| false);
2789 assert_eq!(g_total, RelayWeight(0));
2790
2791 let relay = netdir.by_id(&Ed25519Identity::from([35; 32])).unwrap();
2792 assert!(relay.rs.is_flagged_guard());
2793 let w = netdir.relay_weight(&relay, WeightRole::Guard);
2794 assert_eq!(w, RelayWeight(6_000));
2795
2796 let w = netdir
2797 .weight_by_rsa_id(&[33; 20].into(), WeightRole::Guard)
2798 .unwrap();
2799 assert_eq!(w, RelayWeight(4_000));
2800
2801 assert!(
2802 netdir
2803 .weight_by_rsa_id(&[99; 20].into(), WeightRole::Guard)
2804 .is_none()
2805 );
2806 }
2807
2808 #[test]
2809 fn family_list() {
2810 let netdir = construct_custom_netdir(|pos, n, _| {
2811 if pos == 0x0a {
2812 n.md.family(
2813 "$0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B \
2814 $0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C \
2815 $0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D"
2816 .parse()
2817 .unwrap(),
2818 );
2819 } else if pos == 0x0c {
2820 n.md.family("$0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A".parse().unwrap());
2821 }
2822 })
2823 .unwrap()
2824 .unwrap_if_sufficient()
2825 .unwrap();
2826
2827 // In the testing netdir, adjacent members are in the same family by default...
2828 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2829 let family: Vec<_> = netdir.known_family_members(&r0).collect();
2830 assert_eq!(family.len(), 1);
2831 assert_eq!(family[0].id(), &Ed25519Identity::from([1; 32]));
2832
2833 // But we've made this relay claim membership with several others.
2834 let r10 = netdir.by_id(&Ed25519Identity::from([10; 32])).unwrap();
2835 let family: HashSet<_> = netdir.known_family_members(&r10).map(|r| *r.id()).collect();
2836 assert_eq!(family.len(), 2);
2837 assert!(family.contains(&Ed25519Identity::from([11; 32])));
2838 assert!(family.contains(&Ed25519Identity::from([12; 32])));
2839 // Note that 13 doesn't get put in, even though it's listed, since it doesn't claim
2840 // membership with 10.
2841 }
2842 #[test]
2843 #[cfg(feature = "geoip")]
2844 fn relay_has_country_code() {
2845 let src_v6 = r#"
2846 fe80:dead:beef::,fe80:dead:ffff::,US
2847 fe80:feed:eeee::1,fe80:feed:eeee::2,AT
2848 fe80:feed:eeee::2,fe80:feed:ffff::,DE
2849 "#;
2850 let db = GeoipDb::new_from_legacy_format("", src_v6).unwrap();
2851
2852 let netdir = construct_custom_netdir_with_geoip(
2853 |pos, n, _| {
2854 if pos == 0x01 {
2855 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2856 }
2857 if pos == 0x02 {
2858 n.rs.add_or_port("[fe80:feed:eeee::1]:42".parse().unwrap());
2859 n.rs.add_or_port("[fe80:feed:eeee::2]:42".parse().unwrap());
2860 }
2861 if pos == 0x03 {
2862 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2863 n.rs.add_or_port("[fe80:dead:beef::2]:42".parse().unwrap());
2864 }
2865 },
2866 &db,
2867 )
2868 .unwrap()
2869 .unwrap_if_sufficient()
2870 .unwrap();
2871
2872 // No GeoIP data available -> None
2873 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2874 assert_eq!(r0.cc, None);
2875
2876 // Exactly one match -> Some
2877 let r1 = netdir.by_id(&Ed25519Identity::from([1; 32])).unwrap();
2878 assert_eq!(r1.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2879
2880 // Conflicting matches -> None
2881 let r2 = netdir.by_id(&Ed25519Identity::from([2; 32])).unwrap();
2882 assert_eq!(r2.cc, None);
2883
2884 // Multiple agreeing matches -> Some
2885 let r3 = netdir.by_id(&Ed25519Identity::from([3; 32])).unwrap();
2886 assert_eq!(r3.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2887 }
2888
    #[test]
    #[cfg(feature = "hs-common")]
    #[allow(deprecated)]
    fn hs_dirs_selection() {
        use tor_basic_utils::test_rng::testing_rng;

        // Consensus parameters overriding the hsdir spread values used below.
        const HSDIR_SPREAD_STORE: i32 = 6;
        const HSDIR_SPREAD_FETCH: i32 = 2;
        const PARAMS: [(&str, i32); 2] = [
            ("hsdir_spread_store", HSDIR_SPREAD_STORE),
            ("hsdir_spread_fetch", HSDIR_SPREAD_FETCH),
        ];

        // Build a testing netdir with those parameters; no per-relay changes.
        let netdir: Arc<NetDir> =
            crate::testnet::construct_custom_netdir_with_params(|_, _, _| {}, PARAMS, None)
                .unwrap()
                .unwrap_if_sufficient()
                .unwrap()
                .into();
        let hsid = dummy_hs_blind_id();

        // Expected number of HsDirs returned for each operation.
        const OP_RELAY_COUNT: &[(HsDirOp, usize)] = &[
            // We can't upload to (hsdir_n_replicas * hsdir_spread_store) = 12 relays, because there
            // are only 10 relays with the HsDir flag in the consensus.
            #[cfg(feature = "hs-service")]
            (HsDirOp::Upload, 10),
            (HsDirOp::Download, 4),
        ];

        for (op, relay_count) in OP_RELAY_COUNT {
            let relays = netdir.hs_dirs(&hsid, *op, &mut testing_rng());

            // We got exactly the expected number of directories...
            assert_eq!(relays.len(), *relay_count);

            // ...and there should be no duplicates (the filtering function passed to
            // HsDirRing::ring_items_at() ensures the relays that are already in use for
            // lower-numbered replicas aren't considered a second time for a higher-numbered
            // replica).
            let unique = relays
                .iter()
                .map(|relay| relay.ed_identity())
                .collect::<HashSet<_>>();
            assert_eq!(unique.len(), relays.len());
        }

        // TODO: come up with a test that checks that HsDirRing::ring_items_at() skips over the
        // expected relays.
        //
        // For example, let's say we have the following hsdir ring:
        //
        //         A  -  B
        //       /         \
        //      F           C
        //       \         /
        //         E  -  D
        //
        // Let's also assume that:
        //
        //    * hsdir_spread_store = 3
        //    * the ordering of the relays on the ring is [A, B, C, D, E, F]
        //
        // If we use relays [A, B, C] for replica 1, and hs_index(2) = E, then replica 2 _must_ get
        // relays [E, F, D]. We should have a test that checks this.
    }
2953
2954 #[test]
2955 fn zero_weights() {
2956 // Here we check the behavior of IndexedRandom::{choose_weighted, choose_multiple_weighted}
2957 // in the presence of items whose weight is 0.
2958 //
2959 // We think that the behavior is:
2960 // - An item with weight 0 is never returned.
2961 // - If all items have weight 0, choose_weighted returns an error.
2962 // - If all items have weight 0, choose_multiple_weighted returns an empty list.
2963 // - If we request n items from choose_multiple_weighted,
2964 // but only m<n items have nonzero weight, we return all m of those items.
2965 // - if the request for n items can't be completely satisfied with n items of weight >= 0,
2966 // we get InsufficientNonZero.
2967 let items = vec![1, 2, 3];
2968 let mut rng = testing_rng();
2969
2970 let a = items.choose_weighted(&mut rng, |_| 0);
2971 assert!(matches!(a, Err(WeightError::InsufficientNonZero)));
2972
2973 let x = items.choose_multiple_weighted(&mut rng, 2, |_| 0);
2974 let xs: Vec<_> = x.unwrap().collect();
2975 assert!(xs.is_empty());
2976
2977 let only_one = |n: &i32| if *n == 1 { 1 } else { 0 };
2978 let x = items.choose_multiple_weighted(&mut rng, 2, only_one);
2979 let xs: Vec<_> = x.unwrap().collect();
2980 assert_eq!(&xs[..], &[&1]);
2981
2982 for _ in 0..100 {
2983 let a = items.choose_weighted(&mut rng, only_one);
2984 assert_eq!(a.unwrap(), &1);
2985
2986 let x = items
2987 .choose_multiple_weighted(&mut rng, 1, only_one)
2988 .unwrap()
2989 .collect::<Vec<_>>();
2990 assert_eq!(x, vec![&1]);
2991 }
2992 }
2993
2994 #[test]
2995 fn insufficient_but_nonzero() {
2996 // Here we check IndexedRandom::choose_multiple_weighted when there no zero values,
2997 // but there are insufficient values.
2998 // (If this behavior changes, we need to change our usage.)
2999
3000 let items = vec![1, 2, 3];
3001 let mut rng = testing_rng();
3002 let mut a = items
3003 .choose_multiple_weighted(&mut rng, 10, |_| 1)
3004 .unwrap()
3005 .copied()
3006 .collect::<Vec<_>>();
3007 a.sort();
3008 assert_eq!(a, items);
3009 }
3010}