tor_netdir/lib.rs
1#![cfg_attr(docsrs, feature(doc_cfg))]
2#![doc = include_str!("../README.md")]
3// @@ begin lint list maintained by maint/add_warning @@
4#![allow(renamed_and_removed_lints)] // @@REMOVE_WHEN(ci_arti_stable)
5#![allow(unknown_lints)] // @@REMOVE_WHEN(ci_arti_nightly)
6#![warn(missing_docs)]
7#![warn(noop_method_call)]
8#![warn(unreachable_pub)]
9#![warn(clippy::all)]
10#![deny(clippy::await_holding_lock)]
11#![deny(clippy::cargo_common_metadata)]
12#![deny(clippy::cast_lossless)]
13#![deny(clippy::checked_conversions)]
14#![warn(clippy::cognitive_complexity)]
15#![deny(clippy::debug_assert_with_mut_call)]
16#![deny(clippy::exhaustive_enums)]
17#![deny(clippy::exhaustive_structs)]
18#![deny(clippy::expl_impl_clone_on_copy)]
19#![deny(clippy::fallible_impl_from)]
20#![deny(clippy::implicit_clone)]
21#![deny(clippy::large_stack_arrays)]
22#![warn(clippy::manual_ok_or)]
23#![deny(clippy::missing_docs_in_private_items)]
24#![warn(clippy::needless_borrow)]
25#![warn(clippy::needless_pass_by_value)]
26#![warn(clippy::option_option)]
27#![deny(clippy::print_stderr)]
28#![deny(clippy::print_stdout)]
29#![warn(clippy::rc_buffer)]
30#![deny(clippy::ref_option_ref)]
31#![warn(clippy::semicolon_if_nothing_returned)]
32#![warn(clippy::trait_duplication_in_bounds)]
33#![deny(clippy::unchecked_duration_subtraction)]
34#![deny(clippy::unnecessary_wraps)]
35#![warn(clippy::unseparated_literal_suffix)]
36#![deny(clippy::unwrap_used)]
37#![deny(clippy::mod_module_files)]
38#![allow(clippy::let_unit_value)] // This can reasonably be done for explicitness
39#![allow(clippy::uninlined_format_args)]
40#![allow(clippy::significant_drop_in_scrutinee)] // arti/-/merge_requests/588/#note_2812945
41#![allow(clippy::result_large_err)] // temporary workaround for arti#587
42#![allow(clippy::needless_raw_string_hashes)] // complained-about code is fine, often best
43#![allow(clippy::needless_lifetimes)] // See arti#1765
44#![allow(mismatched_lifetime_syntaxes)] // temporary workaround for arti#2060
45//! <!-- @@ end lint list maintained by maint/add_warning @@ -->
46
47pub mod details;
48mod err;
49#[cfg(feature = "hs-common")]
50mod hsdir_params;
51#[cfg(feature = "hs-common")]
52mod hsdir_ring;
53pub mod params;
54mod weight;
55
56#[cfg(any(test, feature = "testing"))]
57pub mod testnet;
58#[cfg(feature = "testing")]
59pub mod testprovider;
60
61use async_trait::async_trait;
62#[cfg(feature = "hs-service")]
63use itertools::chain;
64use tor_error::warn_report;
65use tor_linkspec::{
66 ChanTarget, DirectChanMethodsHelper, HasAddrs, HasRelayIds, RelayIdRef, RelayIdType,
67};
68use tor_llcrypto as ll;
69use tor_llcrypto::pk::{ed25519::Ed25519Identity, rsa::RsaIdentity};
70use tor_netdoc::doc::microdesc::{MdDigest, Microdesc};
71use tor_netdoc::doc::netstatus::{self, MdConsensus, MdRouterStatus};
72#[cfg(feature = "hs-common")]
73use {hsdir_ring::HsDirRing, std::iter};
74
75use derive_more::{From, Into};
76use futures::{StreamExt, stream::BoxStream};
77use num_enum::{IntoPrimitive, TryFromPrimitive};
78use rand::seq::{IndexedRandom as _, SliceRandom as _, WeightError};
79use serde::Deserialize;
80use std::collections::HashMap;
81use std::net::IpAddr;
82use std::ops::Deref;
83use std::sync::Arc;
84use std::time::SystemTime;
85use strum::{EnumCount, EnumIter};
86use tracing::warn;
87use typed_index_collections::{TiSlice, TiVec};
88
89#[cfg(feature = "hs-common")]
90use {
91 itertools::Itertools,
92 std::collections::HashSet,
93 tor_error::{Bug, internal},
94 tor_hscrypto::{pk::HsBlindId, time::TimePeriod},
95};
96
97pub use err::Error;
98pub use weight::WeightRole;
99/// A Result using the Error type from the tor-netdir crate
100pub type Result<T> = std::result::Result<T, Error>;
101
102#[cfg(feature = "hs-common")]
103pub use err::OnionDirLookupError;
104
105use params::NetParameters;
106#[cfg(feature = "geoip")]
107use tor_geoip::{CountryCode, GeoipDb, HasCountryCode};
108
109#[cfg(feature = "hs-common")]
110#[cfg_attr(docsrs, doc(cfg(feature = "hs-common")))]
111pub use hsdir_params::HsDirParams;
112
113/// Index into the consensus relays
114///
115/// This is an index into the list of relays returned by
116/// [`.c_relays()`](ConsensusRelays::c_relays)
117/// (on the corresponding consensus or netdir).
118///
119/// This is just a `usize` inside, but using a newtype prevents getting a relay index
120/// confused with other kinds of slice indices or counts.
121///
122/// If you are in a part of the code which needs to work with multiple consensuses,
123/// the typechecking cannot tell if you try to index into the wrong consensus.
124#[derive(Debug, From, Into, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
125pub(crate) struct RouterStatusIdx(usize);
126
127/// Extension trait to provide index-type-safe `.c_relays()` method
128//
// TODO: Really it would be better to have MdConsensus::relays() return TiSlice,
130// but that would be an API break there.
131pub(crate) trait ConsensusRelays {
132 /// Obtain the list of relays in the consensus
133 //
134 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus>;
135}
136impl ConsensusRelays for MdConsensus {
137 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
138 TiSlice::from_ref(MdConsensus::relays(self))
139 }
140}
141impl ConsensusRelays for NetDir {
142 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
143 self.consensus.c_relays()
144 }
145}
146
147/// Configuration for determining when two relays have addresses "too close" in
148/// the network.
149///
150/// Used by `Relay::low_level_details().in_same_subnet()`.
151#[derive(Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
152#[serde(deny_unknown_fields)]
153pub struct SubnetConfig {
154 /// Consider IPv4 nodes in the same /x to be the same family.
155 ///
156 /// If this value is 0, all nodes with IPv4 addresses will be in the
157 /// same family. If this value is above 32, then no nodes will be
/// placed in the same family based on their IPv4 addresses.
159 subnets_family_v4: u8,
160 /// Consider IPv6 nodes in the same /x to be the same family.
161 ///
162 /// If this value is 0, all nodes with IPv6 addresses will be in the
163 /// same family. If this value is above 128, then no nodes will be
/// placed in the same family based on their IPv6 addresses.
165 subnets_family_v6: u8,
166}
167
168impl Default for SubnetConfig {
169 fn default() -> Self {
170 Self::new(16, 32)
171 }
172}
173
174impl SubnetConfig {
175 /// Construct a new SubnetConfig from a pair of bit prefix lengths.
176 ///
177 /// The values are clamped to the appropriate ranges if they are
178 /// out-of-bounds.
179 pub fn new(subnets_family_v4: u8, subnets_family_v6: u8) -> Self {
180 Self {
181 subnets_family_v4,
182 subnets_family_v6,
183 }
184 }
185
186 /// Construct a new SubnetConfig such that addresses are not in the same
187 /// family with anything--not even with themselves.
188 pub fn no_addresses_match() -> SubnetConfig {
189 SubnetConfig {
190 subnets_family_v4: 33,
191 subnets_family_v6: 129,
192 }
193 }
194
/// Return true if the two addresses are in the same subnet, according to this
196 /// configuration.
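///
/// # Example
///
/// A small illustrative example, using documentation addresses and the
/// default configuration (/16 for IPv4, /32 for IPv6):
///
/// ```
/// use std::net::IpAddr;
/// use tor_netdir::SubnetConfig;
///
/// let cfg = SubnetConfig::default();
/// let a: IpAddr = "192.0.2.10".parse().unwrap();
/// let b: IpAddr = "192.0.2.200".parse().unwrap();
/// // Both addresses share the 192.0.0.0/16 prefix.
/// assert!(cfg.addrs_in_same_subnet(&a, &b));
/// ```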
197 pub fn addrs_in_same_subnet(&self, a: &IpAddr, b: &IpAddr) -> bool {
198 match (a, b) {
199 (IpAddr::V4(a), IpAddr::V4(b)) => {
200 let bits = self.subnets_family_v4;
201 if bits > 32 {
202 return false;
203 }
204 let a = u32::from_be_bytes(a.octets());
205 let b = u32::from_be_bytes(b.octets());
206 (a >> (32 - bits)) == (b >> (32 - bits))
207 }
208 (IpAddr::V6(a), IpAddr::V6(b)) => {
209 let bits = self.subnets_family_v6;
210 if bits > 128 {
211 return false;
212 }
213 let a = u128::from_be_bytes(a.octets());
214 let b = u128::from_be_bytes(b.octets());
215 (a >> (128 - bits)) == (b >> (128 - bits))
216 }
217 _ => false,
218 }
219 }
220
221 /// Return true if any of the addresses in `a` shares a subnet with any of
222 /// the addresses in `b`, according to this configuration.
223 pub fn any_addrs_in_same_subnet<T, U>(&self, a: &T, b: &U) -> bool
224 where
225 T: tor_linkspec::HasAddrs,
226 U: tor_linkspec::HasAddrs,
227 {
228 a.addrs().any(|aa| {
229 b.addrs()
230 .any(|bb| self.addrs_in_same_subnet(&aa.ip(), &bb.ip()))
231 })
232 }
233
234 /// Return a new subnet configuration that is the union of `self` and
235 /// `other`.
236 ///
237 /// That is, return a subnet configuration that puts all addresses in the
238 /// same subnet if and only if at least one of `self` and `other` would put
239 /// them in the same subnet.
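///
/// # Example
///
/// A small illustrative example: the union keeps the smaller (more
/// inclusive) prefix length for each address family.
///
/// ```
/// use tor_netdir::SubnetConfig;
///
/// let a = SubnetConfig::new(16, 64);
/// let b = SubnetConfig::new(24, 48);
/// assert_eq!(a.union(&b), SubnetConfig::new(16, 48));
/// ```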
240 pub fn union(&self, other: &Self) -> Self {
241 use std::cmp::min;
242 Self {
243 subnets_family_v4: min(self.subnets_family_v4, other.subnets_family_v4),
244 subnets_family_v6: min(self.subnets_family_v6, other.subnets_family_v6),
245 }
246 }
247}
248
249/// Configuration for which listed family information to use when deciding
250/// whether relays belong to the same family.
251///
252/// Derived from network parameters.
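///
/// # Example
///
/// A small illustrative example of combining the builder-style setters with
/// the provided constructors:
///
/// ```
/// use tor_netdir::FamilyRules;
///
/// // Start from "ignore everything", then opt back in to family IDs only.
/// let mut rules = FamilyRules::ignore_declared_families();
/// rules.use_family_ids(true);
///
/// // Combining with `all_family_info()` re-enables every source.
/// let _combined = rules.union(&FamilyRules::all_family_info());
/// ```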
253#[derive(Clone, Copy, Debug)]
254pub struct FamilyRules {
255 /// If true, we use family information from lists of family members.
256 use_family_lists: bool,
257 /// If true, we use family information from lists of family IDs and from family certs.
258 use_family_ids: bool,
259}
260
261impl<'a> From<&'a NetParameters> for FamilyRules {
262 fn from(params: &'a NetParameters) -> Self {
263 FamilyRules {
264 use_family_lists: bool::from(params.use_family_lists),
265 use_family_ids: bool::from(params.use_family_ids),
266 }
267 }
268}
269
270impl FamilyRules {
271 /// Return a `FamilyRules` that will use all recognized kinds of family information.
272 pub fn all_family_info() -> Self {
273 Self {
274 use_family_lists: true,
275 use_family_ids: true,
276 }
277 }
278
279 /// Return a `FamilyRules` that will ignore all family information declared by relays.
280 pub fn ignore_declared_families() -> Self {
281 Self {
282 use_family_lists: false,
283 use_family_ids: false,
284 }
285 }
286
287 /// Configure this `FamilyRules` to use (or not use) family information from
288 /// lists of family members.
289 pub fn use_family_lists(&mut self, val: bool) -> &mut Self {
290 self.use_family_lists = val;
291 self
292 }
293
294 /// Configure this `FamilyRules` to use (or not use) family information from
295 /// family IDs and family certs.
296 pub fn use_family_ids(&mut self, val: bool) -> &mut Self {
297 self.use_family_ids = val;
298 self
299 }
300
301 /// Return a `FamilyRules` that will look at every source of information
302 /// requested by `self` or by `other`.
303 pub fn union(&self, other: &Self) -> Self {
304 Self {
305 use_family_lists: self.use_family_lists || other.use_family_lists,
306 use_family_ids: self.use_family_ids || other.use_family_ids,
307 }
308 }
309}
310
311/// An opaque type representing the weight with which a relay or set of
312/// relays will be selected for a given role.
313///
314/// Most users should ignore this type, and just use pick_relay instead.
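///
/// # Example
///
/// A small illustrative example of the arithmetic helpers on this type:
///
/// ```
/// use tor_netdir::RelayWeight;
///
/// let w = RelayWeight::from(1000_u64);
/// // Take half of this weight.
/// assert_eq!(w.ratio(0.5), Some(RelayWeight::from(500_u64)));
/// // Dividing by a zero weight yields None.
/// assert_eq!(w.checked_div(RelayWeight::from(0_u64)), None);
/// ```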
315#[derive(
316 Copy,
317 Clone,
318 Debug,
319 derive_more::Add,
320 derive_more::Sum,
321 derive_more::AddAssign,
322 Eq,
323 PartialEq,
324 Ord,
325 PartialOrd,
326)]
327pub struct RelayWeight(u64);
328
329impl RelayWeight {
330 /// Try to divide this weight by `rhs`.
331 ///
332 /// Return a ratio on success, or None on division-by-zero.
333 pub fn checked_div(&self, rhs: RelayWeight) -> Option<f64> {
334 if rhs.0 == 0 {
335 None
336 } else {
337 Some((self.0 as f64) / (rhs.0 as f64))
338 }
339 }
340
341 /// Compute a ratio `frac` of this weight.
342 ///
343 /// Return None if frac is less than zero, since negative weights
344 /// are impossible.
345 pub fn ratio(&self, frac: f64) -> Option<RelayWeight> {
346 let product = (self.0 as f64) * frac;
347 if product >= 0.0 && product.is_finite() {
348 Some(RelayWeight(product as u64))
349 } else {
350 None
351 }
352 }
353}
354
355impl From<u64> for RelayWeight {
356 fn from(val: u64) -> Self {
357 RelayWeight(val)
358 }
359}
360
361/// An operation for which we might be requesting a hidden service directory.
362#[derive(Copy, Clone, Debug, PartialEq)]
363// TODO: make this pub(crate) once NetDir::hs_dirs is removed
364#[non_exhaustive]
365pub enum HsDirOp {
366 /// Uploading an onion service descriptor.
367 #[cfg(feature = "hs-service")]
368 Upload,
369 /// Downloading an onion service descriptor.
370 Download,
371}
372
373/// A view of the Tor directory, suitable for use in building circuits.
374///
375/// Abstractly, a [`NetDir`] is a set of usable public [`Relay`]s, each of which
376/// has its own properties, identity, and correct weighted probability for use
377/// under different circumstances.
378///
379/// A [`NetDir`] is constructed by making a [`PartialNetDir`] from a consensus
380/// document, and then adding enough microdescriptors to that `PartialNetDir` so
381/// that it can be used to build paths. (Thus, if you have a NetDir, it is
382/// definitely adequate to build paths.)
383///
384/// # "Usable" relays
385///
386/// Many methods on NetDir are defined in terms of <a name="usable">"Usable"</a> relays. Unless
387/// otherwise stated, a relay is "usable" if it is listed in the consensus,
388/// if we have full directory information for that relay (including a
389/// microdescriptor), and if that relay does not have any flags indicating that
390/// we should never use it. (Currently, `NoEdConsensus` is the only such flag.)
391///
392/// # Limitations
393///
394/// The current NetDir implementation assumes fairly strongly that every relay
395/// has an Ed25519 identity and an RSA identity, that the consensus is indexed
396/// by RSA identities, and that the Ed25519 identities are stored in
397/// microdescriptors.
398///
399/// If these assumptions someday change, then we'll have to revise the
400/// implementation.
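///
/// # Example
///
/// An illustrative sketch (not compiled here) of the construction flow,
/// assuming you already have a validated [`MdConsensus`] in `consensus` and
/// an iterator of [`Microdesc`]s in `mds`:
///
/// ```ignore
/// use tor_netdir::{MdReceiver, PartialNetDir};
///
/// let mut partial = PartialNetDir::new(consensus, None);
/// for md in mds {
///     partial.add_microdesc(md);
/// }
/// // Succeeds only once enough microdescriptors are present to build paths.
/// let netdir = partial.unwrap_if_sufficient().expect("not enough microdescriptors yet");
/// ```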
401#[derive(Debug, Clone)]
402pub struct NetDir {
403 /// A microdescriptor consensus that lists the members of the network,
404 /// and maps each one to a 'microdescriptor' that has more information
405 /// about it
406 consensus: Arc<MdConsensus>,
407 /// A map from keys to integer values, distributed in the consensus,
408 /// and clamped to certain defaults.
409 params: NetParameters,
410 /// Map from routerstatus index, to that routerstatus's microdescriptor (if we have one.)
411 mds: TiVec<RouterStatusIdx, Option<Arc<Microdesc>>>,
412 /// Map from SHA256 of _missing_ microdescriptors to the index of their
413 /// corresponding routerstatus.
414 rsidx_by_missing: HashMap<MdDigest, RouterStatusIdx>,
415 /// Map from ed25519 identity to index of the routerstatus.
416 ///
417 /// Note that we don't know the ed25519 identity of a relay until
418 /// we get the microdescriptor for it, so this won't be filled in
419 /// until we get the microdescriptors.
420 ///
421 /// # Implementation note
422 ///
423 /// For this field, and for `rsidx_by_rsa`,
424 /// it might be cool to have references instead.
425 /// But that would make this into a self-referential structure,
426 /// which isn't possible in safe rust.
427 rsidx_by_ed: HashMap<Ed25519Identity, RouterStatusIdx>,
428 /// Map from RSA identity to index of the routerstatus.
429 ///
430 /// This is constructed at the same time as the NetDir object, so it
431 /// can be immutable.
432 rsidx_by_rsa: Arc<HashMap<RsaIdentity, RouterStatusIdx>>,
433
434 /// Hash ring(s) describing the onion service directory.
435 ///
436 /// This is empty in a PartialNetDir, and is filled in before the NetDir is
437 /// built.
438 //
439 // TODO hs: It is ugly to have this exist in a partially constructed state
440 // in a PartialNetDir.
441 // Ideally, a PartialNetDir would contain only an HsDirs<HsDirParams>,
442 // or perhaps nothing at all, here.
443 #[cfg(feature = "hs-common")]
444 hsdir_rings: Arc<HsDirs<HsDirRing>>,
445
446 /// Weight values to apply to a given relay when deciding how frequently
447 /// to choose it for a given role.
448 weights: weight::WeightSet,
449
450 #[cfg(feature = "geoip")]
451 /// Country codes for each router in our consensus.
452 ///
453 /// This is indexed by the `RouterStatusIdx` (i.e. a router idx of zero has
454 /// the country code at position zero in this array).
455 country_codes: Vec<Option<CountryCode>>,
456}
457
458/// Collection of hidden service directories (or parameters for them)
459///
460/// In [`NetDir`] this is used to store the actual hash rings.
/// (But in the `NetDir` within a [`PartialNetDir`], it contains [`HsDirRing`]s
462/// where only the `params` are populated, and the `ring` is empty.)
463///
464/// This same generic type is used as the return type from
465/// [`HsDirParams::compute`](HsDirParams::compute),
466/// where it contains the *parameters* for the primary and secondary rings.
467#[derive(Debug, Clone)]
468#[cfg(feature = "hs-common")]
469pub(crate) struct HsDirs<D> {
470 /// The current ring
471 ///
472 /// It corresponds to the time period containing the `valid-after` time in
473 /// the consensus. Its SRV is whatever SRV was most current at the time when
474 /// that time period began.
475 ///
476 /// This is the hash ring that we should use whenever we are fetching an
477 /// onion service descriptor.
478 current: D,
479
480 /// Secondary rings (based on the parameters for the previous and next time periods)
481 ///
/// Onion services upload to positions on these rings as well, based on how
483 /// far into the current time period this directory is, so that
484 /// not-synchronized clients can still find their descriptor.
485 ///
486 /// Note that with the current (2023) network parameters, with
487 /// `hsdir_interval = SRV lifetime = 24 hours` at most one of these
488 /// secondary rings will be active at a time. We have two here in order
489 /// to conform with a more flexible regime in proposal 342.
490 //
491 // TODO: hs clients never need this; so I've made it not-present for them.
492 // But does that risk too much with respect to side channels?
493 //
494 // TODO: Perhaps we should refactor this so that it is clear that these
495 // are immutable? On the other hand, the documentation for this type
496 // declares that it is immutable, so we are likely okay.
497 //
498 // TODO: this `Vec` is only ever 0,1,2 elements.
499 // Maybe it should be an ArrayVec or something.
500 #[cfg(feature = "hs-service")]
501 secondary: Vec<D>,
502}
503
504#[cfg(feature = "hs-common")]
505impl<D> HsDirs<D> {
506 /// Convert an `HsDirs<D>` to `HsDirs<D2>` by mapping each contained `D`
507 pub(crate) fn map<D2>(self, mut f: impl FnMut(D) -> D2) -> HsDirs<D2> {
508 HsDirs {
509 current: f(self.current),
510 #[cfg(feature = "hs-service")]
511 secondary: self.secondary.into_iter().map(f).collect(),
512 }
513 }
514
515 /// Iterate over some of the contained hsdirs, according to `secondary`
516 ///
517 /// The current ring is always included.
/// Secondary rings are included iff `secondary` is true and the `hs-service` feature is enabled.
519 fn iter_filter_secondary(&self, secondary: bool) -> impl Iterator<Item = &D> {
520 let i = iter::once(&self.current);
521
522 // With "hs-service" disabled, there are no secondary rings,
523 // so we don't care.
524 let _ = secondary;
525
526 #[cfg(feature = "hs-service")]
527 let i = chain!(i, self.secondary.iter().filter(move |_| secondary));
528
529 i
530 }
531
532 /// Iterate over all the contained hsdirs
533 pub(crate) fn iter(&self) -> impl Iterator<Item = &D> {
534 self.iter_filter_secondary(true)
535 }
536
537 /// Iterate over the hsdirs relevant for `op`
538 pub(crate) fn iter_for_op(&self, op: HsDirOp) -> impl Iterator<Item = &D> {
539 self.iter_filter_secondary(match op {
540 #[cfg(feature = "hs-service")]
541 HsDirOp::Upload => true,
542 HsDirOp::Download => false,
543 })
544 }
545}
546
/// An event that a [`NetDirProvider`] can broadcast to indicate a change in
548/// the status of its directory.
549#[derive(
550 Debug, Clone, Copy, PartialEq, Eq, EnumIter, EnumCount, IntoPrimitive, TryFromPrimitive,
551)]
552#[non_exhaustive]
553#[repr(u16)]
554pub enum DirEvent {
555 /// A new consensus has been received, and has enough information to be
556 /// used.
557 ///
558 /// This event is also broadcast when a new set of consensus parameters is
559 /// available, even if that set of parameters comes from a configuration
560 /// change rather than from the latest consensus.
561 NewConsensus,
562
563 /// New descriptors have been received for the current consensus.
564 ///
565 /// (This event is _not_ broadcast when receiving new descriptors for a
566 /// consensus which is not yet ready to replace the current consensus.)
567 NewDescriptors,
568
569 /// We have received updated recommendations and requirements
/// for which subprotocols we should have in order to use the network.
571 NewProtocolRecommendation,
572}
573
574/// The network directory provider is shutting down without giving us the
575/// netdir we asked for.
576#[derive(Clone, Copy, Debug, thiserror::Error)]
577#[error("Network directory provider is shutting down")]
578#[non_exhaustive]
579pub struct NetdirProviderShutdown;
580
581impl tor_error::HasKind for NetdirProviderShutdown {
582 fn kind(&self) -> tor_error::ErrorKind {
583 tor_error::ErrorKind::ArtiShuttingDown
584 }
585}
586
587/// How "timely" must a network directory be?
588///
589/// This enum is used as an argument when requesting a [`NetDir`] object from
590/// [`NetDirProvider`] and other APIs, to specify how recent the information
591/// must be in order to be useful.
592#[derive(Copy, Clone, Eq, PartialEq, Debug)]
593#[allow(clippy::exhaustive_enums)]
594pub enum Timeliness {
595 /// The network directory must be strictly timely.
596 ///
/// That is, it must be based on a consensus that is valid right now, with no
598 /// tolerance for skew or consensus problems.
599 ///
600 /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
601 Strict,
602 /// The network directory must be roughly timely.
603 ///
/// That is, it must be based on a consensus that is not _too_ far in the
605 /// future, and not _too_ far in the past.
606 ///
607 /// (The tolerances for "too far" will depend on configuration.)
608 ///
609 /// This is almost always the option that you want to use.
610 Timely,
611 /// Any network directory is permissible, regardless of how untimely.
612 ///
613 /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
614 Unchecked,
615}
616
617/// An object that can provide [`NetDir`]s, as well as inform consumers when
618/// they might have changed.
619///
620/// It is the responsibility of the implementor of `NetDirProvider`
621/// to try to obtain an up-to-date `NetDir`,
622/// and continuously to maintain and update it.
623///
624/// In usual configurations, Arti uses `tor_dirmgr::DirMgr`
625/// as its `NetDirProvider`.
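///
/// # Example
///
/// An illustrative sketch (not compiled here) of typical consumer usage,
/// assuming `provider` is something that implements `NetDirProvider`:
///
/// ```ignore
/// // Wait (if needed) for a reasonably timely directory.
/// let netdir = provider.wait_for_netdir(Timeliness::Timely).await?;
/// // Then use it, e.g. to count the usable relays.
/// let n_usable = netdir.relays().count();
/// ```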
626#[async_trait]
627pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
628 /// Return a network directory that's live according to the provided
629 /// `timeliness`.
630 fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>>;
631
632 /// Return a reasonable netdir for general usage.
633 ///
634 /// This is an alias for
635 /// [`NetDirProvider::netdir`]`(`[`Timeliness::Timely`]`)`.
636 fn timely_netdir(&self) -> Result<Arc<NetDir>> {
637 self.netdir(Timeliness::Timely)
638 }
639
640 /// Return a new asynchronous stream that will receive notification
641 /// whenever the consensus has changed.
642 ///
643 /// Multiple events may be batched up into a single item: each time
644 /// this stream yields an event, all you can assume is that the event has
645 /// occurred at least once.
646 fn events(&self) -> BoxStream<'static, DirEvent>;
647
648 /// Return the latest network parameters.
649 ///
650 /// If we have no directory, return a reasonable set of defaults.
651 fn params(&self) -> Arc<dyn AsRef<NetParameters>>;
652
/// Get a NetDir from this provider, waiting until one exists.
654 async fn wait_for_netdir(
655 &self,
656 timeliness: Timeliness,
657 ) -> std::result::Result<Arc<NetDir>, NetdirProviderShutdown> {
658 if let Ok(nd) = self.netdir(timeliness) {
659 return Ok(nd);
660 }
661
662 let mut stream = self.events();
663 loop {
664 // We need to retry `self.netdir()` before waiting for any stream events, to
665 // avoid deadlock.
666 //
667 // We ignore all errors here: they can all potentially be fixed by
668 // getting a fresh consensus, and they will all get warned about
669 // by the NetDirProvider itself.
670 if let Ok(nd) = self.netdir(timeliness) {
671 return Ok(nd);
672 }
673 match stream.next().await {
674 Some(_) => {}
675 None => {
676 return Err(NetdirProviderShutdown);
677 }
678 }
679 }
680 }
681
/// Wait until this provider lists `target`.
683 ///
/// NOTE: This might potentially wait indefinitely, if `target` never actually
685 /// becomes listed in the directory. It will exit if the `NetDirProvider` shuts down.
686 async fn wait_for_netdir_to_list(
687 &self,
688 target: &tor_linkspec::RelayIds,
689 timeliness: Timeliness,
690 ) -> std::result::Result<(), NetdirProviderShutdown> {
691 let mut events = self.events();
692 loop {
693 // See if the desired relay is in the netdir.
694 //
695 // We do this before waiting for any events, to avoid race conditions.
696 {
697 let netdir = self.wait_for_netdir(timeliness).await?;
698 if netdir.ids_listed(target) == Some(true) {
699 return Ok(());
700 }
701 // If we reach this point, then ids_listed returned `Some(false)`,
702 // meaning "This relay is definitely not in the current directory";
703 // or it returned `None`, meaning "waiting for more information
// about this network directory."
705 // In both cases, it's reasonable to just wait for another netdir
706 // event and try again.
707 }
708 // We didn't find the relay; wait for the provider to have a new netdir
709 // or more netdir information.
710 if events.next().await.is_none() {
711 // The event stream is closed; the provider has shut down.
712 return Err(NetdirProviderShutdown);
713 }
714 }
715 }
716
717 /// Return the latest set of recommended and required protocols, if there is one.
718 ///
719 /// This may be more recent (or more available) than this provider's associated NetDir.
720 fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)>;
721}
722
723impl<T> NetDirProvider for Arc<T>
724where
725 T: NetDirProvider,
726{
727 fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>> {
728 self.deref().netdir(timeliness)
729 }
730
731 fn timely_netdir(&self) -> Result<Arc<NetDir>> {
732 self.deref().timely_netdir()
733 }
734
735 fn events(&self) -> BoxStream<'static, DirEvent> {
736 self.deref().events()
737 }
738
739 fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
740 self.deref().params()
741 }
742
743 fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)> {
744 self.deref().protocol_statuses()
745 }
746}
747
/// Helper trait: allows any `Arc<X>` to be upcast to an `Arc<dyn
749/// NetDirProvider>` if X is an implementation or supertrait of NetDirProvider.
750///
751/// This trait exists to work around a limitation in rust: when trait upcasting
752/// coercion is stable, this will be unnecessary.
753///
754/// The Rust tracking issue is <https://github.com/rust-lang/rust/issues/65991>.
755pub trait UpcastArcNetDirProvider {
756 /// Return a view of this object as an `Arc<dyn NetDirProvider>`
757 fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
758 where
759 Self: 'a;
760}
761
762impl<T> UpcastArcNetDirProvider for T
763where
764 T: NetDirProvider + Sized,
765{
766 fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
767 where
768 Self: 'a,
769 {
770 self
771 }
772}
773
774impl AsRef<NetParameters> for NetDir {
775 fn as_ref(&self) -> &NetParameters {
776 self.params()
777 }
778}
779
/// A partially built NetDir -- it can't be unwrapped until it has
781/// enough information to build safe paths.
782#[derive(Debug, Clone)]
783pub struct PartialNetDir {
784 /// The netdir that's under construction.
785 netdir: NetDir,
786
787 /// The previous netdir, if we had one
788 ///
789 /// Used as a cache, so we can reuse information
790 #[cfg(feature = "hs-common")]
791 prev_netdir: Option<Arc<NetDir>>,
792}
793
794/// A view of a relay on the Tor network, suitable for building circuits.
795// TODO: This should probably be a more specific struct, with a trait
796// that implements it.
797#[derive(Clone)]
798pub struct Relay<'a> {
799 /// A router descriptor for this relay.
800 rs: &'a netstatus::MdRouterStatus,
801 /// A microdescriptor for this relay.
802 md: &'a Microdesc,
803 /// The country code this relay is in, if we know one.
804 #[cfg(feature = "geoip")]
805 cc: Option<CountryCode>,
806}
807
808/// A relay that we haven't checked for validity or usability in
809/// routing.
810#[derive(Debug)]
811pub struct UncheckedRelay<'a> {
812 /// A router descriptor for this relay.
813 rs: &'a netstatus::MdRouterStatus,
814 /// A microdescriptor for this relay, if there is one.
815 md: Option<&'a Microdesc>,
816 /// The country code this relay is in, if we know one.
817 #[cfg(feature = "geoip")]
818 cc: Option<CountryCode>,
819}
820
821/// A partial or full network directory that we can download
822/// microdescriptors for.
823pub trait MdReceiver {
824 /// Return an iterator over the digests for all of the microdescriptors
825 /// that this netdir is missing.
826 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_>;
827 /// Add a microdescriptor to this netdir, if it was wanted.
828 ///
829 /// Return true if it was indeed wanted.
830 fn add_microdesc(&mut self, md: Microdesc) -> bool;
831 /// Return the number of missing microdescriptors.
832 fn n_missing(&self) -> usize;
833}
834
835impl PartialNetDir {
836 /// Create a new PartialNetDir with a given consensus, and no
837 /// microdescriptors loaded.
838 ///
839 /// If `replacement_params` is provided, override network parameters from
840 /// the consensus with those from `replacement_params`.
841 pub fn new(
842 consensus: MdConsensus,
843 replacement_params: Option<&netstatus::NetParams<i32>>,
844 ) -> Self {
845 Self::new_inner(
846 consensus,
847 replacement_params,
848 #[cfg(feature = "geoip")]
849 None,
850 )
851 }
852
853 /// Create a new PartialNetDir with GeoIP support.
854 ///
855 /// This does the same thing as `new()`, except the provided GeoIP database is used to add
856 /// country codes to relays.
857 #[cfg(feature = "geoip")]
858 #[cfg_attr(docsrs, doc(cfg(feature = "geoip")))]
859 pub fn new_with_geoip(
860 consensus: MdConsensus,
861 replacement_params: Option<&netstatus::NetParams<i32>>,
862 geoip_db: &GeoipDb,
863 ) -> Self {
864 Self::new_inner(consensus, replacement_params, Some(geoip_db))
865 }
866
867 /// Implementation of the `new()` functions.
868 fn new_inner(
869 consensus: MdConsensus,
870 replacement_params: Option<&netstatus::NetParams<i32>>,
871 #[cfg(feature = "geoip")] geoip_db: Option<&GeoipDb>,
872 ) -> Self {
873 let mut params = NetParameters::default();
874
875 // (We ignore unrecognized options here, since they come from
876 // the consensus, and we don't expect to recognize everything
877 // there.)
878 let _ = params.saturating_update(consensus.params().iter());
879
880 // Now see if the user has any parameters to override.
881 // (We have to do this now, or else changes won't be reflected in our
882 // weights.)
883 if let Some(replacement) = replacement_params {
884 for u in params.saturating_update(replacement.iter()) {
885 warn!("Unrecognized option: override_net_params.{}", u);
886 }
887 }
888
889 // Compute the weights we'll want to use for these relays.
890 let weights = weight::WeightSet::from_consensus(&consensus, ¶ms);
891
892 let n_relays = consensus.c_relays().len();
893
894 let rsidx_by_missing = consensus
895 .c_relays()
896 .iter_enumerated()
897 .map(|(rsidx, rs)| (*rs.md_digest(), rsidx))
898 .collect();
899
900 let rsidx_by_rsa = consensus
901 .c_relays()
902 .iter_enumerated()
903 .map(|(rsidx, rs)| (*rs.rsa_identity(), rsidx))
904 .collect();
905
906 #[cfg(feature = "geoip")]
907 let country_codes = if let Some(db) = geoip_db {
908 consensus
909 .c_relays()
910 .iter()
911 .map(|rs| {
912 db.lookup_country_code_multi(rs.addrs().map(|x| x.ip()))
913 .cloned()
914 })
915 .collect()
916 } else {
917 Default::default()
918 };
919
920 #[cfg(feature = "hs-common")]
921 let hsdir_rings = Arc::new({
922 let params = HsDirParams::compute(&consensus, ¶ms).expect("Invalid consensus!");
923 // TODO: It's a bit ugly to use expect above, but this function does
924 // not return a Result. On the other hand, the error conditions under which
925 // HsDirParams::compute can return Err are _very_ narrow and hard to
926 // hit; see documentation in that function. As such, we probably
927 // don't need to have this return a Result.
928
929 params.map(HsDirRing::empty_from_params)
930 });
931
932 let netdir = NetDir {
933 consensus: Arc::new(consensus),
934 params,
935 mds: vec![None; n_relays].into(),
936 rsidx_by_missing,
937 rsidx_by_rsa: Arc::new(rsidx_by_rsa),
938 rsidx_by_ed: HashMap::with_capacity(n_relays),
939 #[cfg(feature = "hs-common")]
940 hsdir_rings,
941 weights,
942 #[cfg(feature = "geoip")]
943 country_codes,
944 };
945
946 PartialNetDir {
947 netdir,
948 #[cfg(feature = "hs-common")]
949 prev_netdir: None,
950 }
951 }
952
953 /// Return the declared lifetime of this PartialNetDir.
954 pub fn lifetime(&self) -> &netstatus::Lifetime {
955 self.netdir.lifetime()
956 }
957
958 /// Record a previous netdir, which can be used for reusing cached information
959 //
960 // Fills in as many missing microdescriptors as possible in this
961 // netdir, using the microdescriptors from the previous netdir.
962 //
963 // With HS enabled, stores the netdir for reuse of relay hash ring index values.
964 #[allow(clippy::needless_pass_by_value)] // prev might, or might not, be stored
965 pub fn fill_from_previous_netdir(&mut self, prev: Arc<NetDir>) {
966 for md in prev.mds.iter().flatten() {
967 self.netdir.add_arc_microdesc(md.clone());
968 }
969
970 #[cfg(feature = "hs-common")]
971 {
972 self.prev_netdir = Some(prev);
973 }
974 }
975
976 /// Compute the hash ring(s) for this NetDir
977 #[cfg(feature = "hs-common")]
978 fn compute_rings(&mut self) {
979 let params = HsDirParams::compute(&self.netdir.consensus, &self.netdir.params)
980 .expect("Invalid consensus");
981 // TODO: see TODO by similar expect in new()
982
983 self.netdir.hsdir_rings =
984 Arc::new(params.map(|params| {
985 HsDirRing::compute(params, &self.netdir, self.prev_netdir.as_deref())
986 }));
987 }
988
/// Return true if there is enough information in this directory
990 /// to build multihop paths.
991 pub fn have_enough_paths(&self) -> bool {
992 self.netdir.have_enough_paths()
993 }
994 /// If this directory has enough information to build multihop
995 /// circuits, return it.
996 pub fn unwrap_if_sufficient(
997 #[allow(unused_mut)] mut self,
998 ) -> std::result::Result<NetDir, PartialNetDir> {
999 if self.netdir.have_enough_paths() {
1000 #[cfg(feature = "hs-common")]
1001 self.compute_rings();
1002 Ok(self.netdir)
1003 } else {
1004 Err(self)
1005 }
1006 }
1007}
1008
1009impl MdReceiver for PartialNetDir {
1010 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1011 self.netdir.missing_microdescs()
1012 }
1013 fn add_microdesc(&mut self, md: Microdesc) -> bool {
1014 self.netdir.add_microdesc(md)
1015 }
1016 fn n_missing(&self) -> usize {
1017 self.netdir.n_missing()
1018 }
1019}
1020
1021impl NetDir {
1022 /// Return the declared lifetime of this NetDir.
1023 pub fn lifetime(&self) -> &netstatus::Lifetime {
1024 self.consensus.lifetime()
1025 }
1026
1027 /// Add `md` to this NetDir.
1028 ///
1029 /// Return true if we wanted it, and false otherwise.
1030 fn add_arc_microdesc(&mut self, md: Arc<Microdesc>) -> bool {
1031 if let Some(rsidx) = self.rsidx_by_missing.remove(md.digest()) {
1032 assert_eq!(self.c_relays()[rsidx].md_digest(), md.digest());
1033
1034 // There should never be two approved MDs in the same
1035 // consensus listing the same ID... but if there is,
1036 // we'll let the most recent one win.
1037 self.rsidx_by_ed.insert(*md.ed25519_id(), rsidx);
1038
1039 // Happy path: we did indeed want this one.
1040 self.mds[rsidx] = Some(md);
1041
1042 // Save some space in the missing-descriptor list.
1043 if self.rsidx_by_missing.len() < self.rsidx_by_missing.capacity() / 4 {
1044 self.rsidx_by_missing.shrink_to_fit();
1045 }
1046
1047 return true;
1048 }
1049
1050 // Either we already had it, or we never wanted it at all.
1051 false
1052 }
1053
1054 /// Construct a (possibly invalid) Relay object from a routerstatus and its
1055 /// index within the consensus.
1056 fn relay_from_rs_and_rsidx<'a>(
1057 &'a self,
1058 rs: &'a netstatus::MdRouterStatus,
1059 rsidx: RouterStatusIdx,
1060 ) -> UncheckedRelay<'a> {
1061 debug_assert_eq!(self.c_relays()[rsidx].rsa_identity(), rs.rsa_identity());
1062 let md = self.mds[rsidx].as_deref();
1063 if let Some(md) = md {
1064 debug_assert_eq!(rs.md_digest(), md.digest());
1065 }
1066
1067 UncheckedRelay {
1068 rs,
1069 md,
1070 #[cfg(feature = "geoip")]
1071 cc: self.country_codes.get(rsidx.0).copied().flatten(),
1072 }
1073 }
1074
1075 /// Return the value of the hsdir_n_replicas param.
1076 #[cfg(feature = "hs-common")]
1077 fn n_replicas(&self) -> u8 {
1078 self.params
1079 .hsdir_n_replicas
1080 .get()
1081 .try_into()
1082 .expect("BoundedInt did not enforce bounds")
1083 }
1084
1085 /// Return the spread parameter for the specified `op`.
1086 #[cfg(feature = "hs-common")]
1087 fn spread(&self, op: HsDirOp) -> usize {
1088 let spread = match op {
1089 HsDirOp::Download => self.params.hsdir_spread_fetch,
1090 #[cfg(feature = "hs-service")]
1091 HsDirOp::Upload => self.params.hsdir_spread_store,
1092 };
1093
1094 spread
1095 .get()
1096 .try_into()
1097 .expect("BoundedInt did not enforce bounds!")
1098 }
1099
1100 /// Select `spread` hsdir relays for the specified `hsid` from a given `ring`.
1101 ///
1102 /// Algorithm:
1103 ///
1104 /// for idx in 1..=n_replicas:
1105 /// - let H = hsdir_ring::onion_service_index(id, replica, rand,
1106 /// period).
1107 /// - Find the position of H within hsdir_ring.
1108 /// - Take elements from hsdir_ring starting at that position,
1109 /// adding them to Dirs until we have added `spread` new elements
1110 /// that were not there before.
1111 #[cfg(feature = "hs-common")]
1112 fn select_hsdirs<'h, 'r: 'h>(
1113 &'r self,
1114 hsid: HsBlindId,
1115 ring: &'h HsDirRing,
1116 spread: usize,
1117 ) -> impl Iterator<Item = Relay<'r>> + 'h {
1118 let n_replicas = self.n_replicas();
1119
1120 (1..=n_replicas) // 1-indexed !
1121 .flat_map({
1122 let mut selected_nodes = HashSet::new();
1123
1124 move |replica: u8| {
1125 let hsdir_idx = hsdir_ring::service_hsdir_index(&hsid, replica, ring.params());
1126
1127 ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
1128 // According to rend-spec 2.2.3:
1129 // ... If any of those
1130 // nodes have already been selected for a lower-numbered replica of the
1131 // service, any nodes already chosen are disregarded (i.e. skipped over)
1132 // when choosing a replica's hsdir_spread_store nodes.
1133 selected_nodes.insert(*hsdir_idx)
1134 })
1135 .collect::<Vec<_>>()
1136 }
1137 })
1138 .filter_map(move |(_hsdir_idx, rs_idx)| {
1139 // This ought not to be None but let's not panic or bail if it is
1140 self.relay_by_rs_idx(*rs_idx)
1141 })
1142 }
1143
1144 /// Replace the overridden parameters in this netdir with `new_replacement`.
1145 ///
1146 /// After this function is done, the netdir's parameters will be those in
1147 /// the consensus, overridden by settings from `new_replacement`. Any
1148 /// settings in the old replacement parameters will be discarded.
1149 pub fn replace_overridden_parameters(&mut self, new_replacement: &netstatus::NetParams<i32>) {
1150 // TODO(nickm): This is largely duplicate code from PartialNetDir::new().
1151 let mut new_params = NetParameters::default();
1152 let _ = new_params.saturating_update(self.consensus.params().iter());
1153 for u in new_params.saturating_update(new_replacement.iter()) {
1154 warn!("Unrecognized option: override_net_params.{}", u);
1155 }
1156
1157 self.params = new_params;
1158 }
1159
1160 /// Return an iterator over all Relay objects, including invalid ones
1161 /// that we can't use.
1162 pub fn all_relays(&self) -> impl Iterator<Item = UncheckedRelay<'_>> {
1163 // TODO: I'd like if we could memoize this so we don't have to
1164 // do so many hashtable lookups.
1165 self.c_relays()
1166 .iter_enumerated()
1167 .map(move |(rsidx, rs)| self.relay_from_rs_and_rsidx(rs, rsidx))
1168 }
1169 /// Return an iterator over all [usable](NetDir#usable) Relays.
1170 pub fn relays(&self) -> impl Iterator<Item = Relay<'_>> {
1171 self.all_relays().filter_map(UncheckedRelay::into_relay)
1172 }
1173
1174 /// Look up a relay's [`Microdesc`] by its [`RouterStatusIdx`]
1175 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1176 pub(crate) fn md_by_rsidx(&self, rsidx: RouterStatusIdx) -> Option<&Microdesc> {
1177 self.mds.get(rsidx)?.as_deref()
1178 }
1179
1180 /// Return a relay matching a given identity, if we have a
1181 /// _usable_ relay with that key.
1182 ///
1183 /// (Does not return [unusable](NetDir#usable) relays.)
///
1186 /// Note that a `None` answer is not always permanent: if a microdescriptor
1187 /// is subsequently added for a relay with this ID, the ID may become usable
1188 /// even if it was not usable before.
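///
/// # Example
///
/// An illustrative sketch (not compiled here), assuming `netdir` is a
/// [`NetDir`] and `ed_id` is an [`Ed25519Identity`]:
///
/// ```ignore
/// if let Some(relay) = netdir.by_id(&ed_id) {
///     // We have a usable relay with this Ed25519 identity.
///     assert!(relay.has_identity((&ed_id).into()));
/// }
/// ```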
1189 pub fn by_id<'a, T>(&self, id: T) -> Option<Relay<'_>>
1190 where
1191 T: Into<RelayIdRef<'a>>,
1192 {
1193 let id = id.into();
1194 let answer = match id {
1195 RelayIdRef::Ed25519(ed25519) => {
1196 let rsidx = *self.rsidx_by_ed.get(ed25519)?;
1197 let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1198
1199 self.relay_from_rs_and_rsidx(rs, rsidx).into_relay()?
1200 }
1201 RelayIdRef::Rsa(rsa) => self
1202 .by_rsa_id_unchecked(rsa)
1203 .and_then(UncheckedRelay::into_relay)?,
1204 other_type => self.relays().find(|r| r.has_identity(other_type))?,
1205 };
1206 assert!(answer.has_identity(id));
1207 Some(answer)
1208 }
1209
1210 /// Obtain a `Relay` given a `RouterStatusIdx`
1211 ///
/// Differs from `relay_from_rs_and_rsidx` as follows:
1213 /// * That function expects the caller to already have an `MdRouterStatus`;
1214 /// it checks with `debug_assert` that the relay in the netdir matches.
1215 /// * That function panics if the `RouterStatusIdx` is invalid; this one returns `None`.
1216 /// * That function returns an `UncheckedRelay`; this one a `Relay`.
1217 ///
/// `None` could be returned here, even with a valid `rs_idx`,
/// if `rs_idx` refers to an [unusable](NetDir#usable) relay.
1220 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1221 pub(crate) fn relay_by_rs_idx(&self, rs_idx: RouterStatusIdx) -> Option<Relay<'_>> {
1222 let rs = self.c_relays().get(rs_idx)?;
1223 let md = self.mds.get(rs_idx)?.as_deref();
1224 UncheckedRelay {
1225 rs,
1226 md,
1227 #[cfg(feature = "geoip")]
1228 cc: self.country_codes.get(rs_idx.0).copied().flatten(),
1229 }
1230 .into_relay()
1231 }
1232
1233 /// Return a relay with the same identities as those in `target`, if one
1234 /// exists.
1235 ///
1236 /// Does not return [unusable](NetDir#usable) relays.
1237 ///
1238 /// Note that a negative result from this method is not necessarily permanent:
1239 /// it may be the case that a relay exists,
1240 /// but we don't yet have enough information about it to know all of its IDs.
1241 /// To test whether a relay is *definitely* absent,
1242 /// use [`by_ids_detailed`](Self::by_ids_detailed)
1243 /// or [`ids_listed`](Self::ids_listed).
1244 ///
1245 /// # Limitations
1246 ///
1247 /// This will be very slow if `target` does not have an Ed25519 or RSA
1248 /// identity.
1249 pub fn by_ids<T>(&self, target: &T) -> Option<Relay<'_>>
1250 where
1251 T: HasRelayIds + ?Sized,
1252 {
1253 let mut identities = target.identities();
1254 // Don't try if there are no identities.
1255 let first_id = identities.next()?;
1256
1257 // Since there is at most one relay with each given ID type,
1258 // we only need to check the first relay we find.
1259 let candidate = self.by_id(first_id)?;
1260 if identities.all(|wanted_id| candidate.has_identity(wanted_id)) {
1261 Some(candidate)
1262 } else {
1263 None
1264 }
1265 }
1266
1267 /// Check whether there is a relay that has at least one identity from
1268 /// `target`, and which _could_ have every identity from `target`.
1269 /// If so, return such a relay.
1270 ///
1271 /// Return `Ok(None)` if we did not find a relay with any identity from `target`.
1272 ///
1273 /// Return `RelayLookupError::Impossible` if we found a relay with at least
1274 /// one identity from `target`, but that relay's other identities contradict
1275 /// what we learned from `target`.
1276 ///
1277 /// Does not return [unusable](NetDir#usable) relays.
1278 ///
1279 /// (This function is only useful if you need to distinguish the
1280 /// "impossible" case from the "no such relay known" case.)
1281 ///
1282 /// # Limitations
1283 ///
1284 /// This will be very slow if `target` does not have an Ed25519 or RSA
1285 /// identity.
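///
/// # Example
///
/// An illustrative sketch (not compiled here) of handling the three possible
/// outcomes, assuming `netdir` is a [`NetDir`] and `target` implements
/// [`HasRelayIds`]:
///
/// ```ignore
/// match netdir.by_ids_detailed(&target) {
///     Ok(Some(relay)) => { /* a usable relay consistent with `target` */ }
///     Ok(None) => { /* no relay shares any identity with `target` */ }
///     Err(RelayLookupError::Impossible) => { /* contradictory identities */ }
/// }
/// ```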
1286 //
1287 // TODO HS: This function could use a better name.
1288 //
1289 // TODO: We could remove the feature restriction here once we think this API is
1290 // stable.
1291 #[cfg(feature = "hs-common")]
1292 pub fn by_ids_detailed<T>(
1293 &self,
1294 target: &T,
1295 ) -> std::result::Result<Option<Relay<'_>>, RelayLookupError>
1296 where
1297 T: HasRelayIds + ?Sized,
1298 {
1299 let candidate = target
1300 .identities()
1301 // Find all the relays that share any identity with this set of identities.
1302 .filter_map(|id| self.by_id(id))
1303 // We might find the same relay more than once under a different
1304 // identity, so we remove the duplicates.
1305 //
1306 // Since there is at most one relay per rsa identity per consensus,
1307 // this is a true uniqueness check under current construction rules.
1308 .unique_by(|r| r.rs.rsa_identity())
// If we find two or more distinct relays, then we have a contradiction.
1310 .at_most_one()
1311 .map_err(|_| RelayLookupError::Impossible)?;
1312
1313 // If we have no candidate, return None early.
1314 let candidate = match candidate {
1315 Some(relay) => relay,
1316 None => return Ok(None),
1317 };
1318
1319 // Now we know we have a single candidate. Make sure that it does not have any
1320 // identity that does not match the target.
1321 if target
1322 .identities()
1323 .all(|wanted_id| match candidate.identity(wanted_id.id_type()) {
1324 None => true,
1325 Some(id) => id == wanted_id,
1326 })
1327 {
1328 Ok(Some(candidate))
1329 } else {
1330 Err(RelayLookupError::Impossible)
1331 }
1332 }
1333
/// Return whether this consensus definitely has (or definitely does not have)
/// a relay matching the listed identities.
1336 ///
1337 /// `Some(true)` indicates that the relay exists.
1338 /// `Some(false)` indicates that the relay definitely does not exist.
1339 /// `None` indicates that we can't yet tell whether such a relay exists,
1340 /// due to missing information.
1341 fn id_pair_listed(&self, ed_id: &Ed25519Identity, rsa_id: &RsaIdentity) -> Option<bool> {
1342 let r = self.by_rsa_id_unchecked(rsa_id);
1343 match r {
1344 Some(unchecked) => {
1345 if !unchecked.rs.ed25519_id_is_usable() {
1346 return Some(false);
1347 }
1348 // If md is present, then it's listed iff we have the right
1349 // ed id. Otherwise we don't know if it's listed.
1350 unchecked.md.map(|md| md.ed25519_id() == ed_id)
1351 }
1352 None => {
1353 // Definitely not listed.
1354 Some(false)
1355 }
1356 }
1357 }
1358
1359 /// Check whether a relay exists (or may exist)
1360 /// with the same identities as those in `target`.
1361 ///
1362 /// `Some(true)` indicates that the relay exists.
1363 /// `Some(false)` indicates that the relay definitely does not exist.
1364 /// `None` indicates that we can't yet tell whether such a relay exists,
1365 /// due to missing information.
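///
/// # Example
///
/// An illustrative sketch (not compiled here), assuming `netdir` is a
/// [`NetDir`] and `target` implements [`HasRelayIds`]:
///
/// ```ignore
/// match netdir.ids_listed(&target) {
///     Some(true) => { /* definitely listed */ }
///     Some(false) => { /* definitely not listed */ }
///     None => { /* not enough information yet, e.g. a microdescriptor is missing */ }
/// }
/// ```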
1366 pub fn ids_listed<T>(&self, target: &T) -> Option<bool>
1367 where
1368 T: HasRelayIds + ?Sized,
1369 {
1370 let rsa_id = target.rsa_identity();
1371 let ed25519_id = target.ed_identity();
1372
1373 // TODO: If we later support more identity key types, this will
1374 // become incorrect. This assertion might help us recognize that case.
1375 const _: () = assert!(RelayIdType::COUNT == 2);
1376
1377 match (rsa_id, ed25519_id) {
1378 (Some(r), Some(e)) => self.id_pair_listed(e, r),
1379 (Some(r), None) => Some(self.rsa_id_is_listed(r)),
1380 (None, Some(e)) => {
1381 if self.rsidx_by_ed.contains_key(e) {
1382 Some(true)
1383 } else {
1384 None
1385 }
1386 }
1387 (None, None) => None,
1388 }
1389 }
1390
1391 /// Return a (possibly [unusable](NetDir#usable)) relay with a given RSA identity.
1392 ///
1393 /// This API can be used to find information about a relay that is listed in
1394 /// the current consensus, even if we don't yet have enough information
1395 /// (like a microdescriptor) about the relay to use it.
1396 #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1397 #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1398 fn by_rsa_id_unchecked(&self, rsa_id: &RsaIdentity) -> Option<UncheckedRelay<'_>> {
1399 let rsidx = *self.rsidx_by_rsa.get(rsa_id)?;
1400 let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1401 assert_eq!(rs.rsa_identity(), rsa_id);
1402 Some(self.relay_from_rs_and_rsidx(rs, rsidx))
1403 }
1404 /// Return the relay with a given RSA identity, if we have one
1405 /// and it is [usable](NetDir#usable).
1406 fn by_rsa_id(&self, rsa_id: &RsaIdentity) -> Option<Relay<'_>> {
1407 self.by_rsa_id_unchecked(rsa_id)?.into_relay()
1408 }
1409 /// Return true if `rsa_id` is listed in this directory, even if it isn't
1410 /// currently usable.
1411 ///
1412 /// (An "[unusable](NetDir#usable)" relay in this context is one for which we don't have full
1413 /// directory information.)
1414 #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1415 #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1416 fn rsa_id_is_listed(&self, rsa_id: &RsaIdentity) -> bool {
1417 self.by_rsa_id_unchecked(rsa_id).is_some()
1418 }
1419
/// List the hsdirs in this NetDir that should be in the HSDir rings
1421 ///
1422 /// The results are not returned in any particular order.
1423 #[cfg(feature = "hs-common")]
1424 fn all_hsdirs(&self) -> impl Iterator<Item = (RouterStatusIdx, Relay<'_>)> {
1425 self.c_relays().iter_enumerated().filter_map(|(rsidx, rs)| {
1426 let relay = self.relay_from_rs_and_rsidx(rs, rsidx);
1427 relay.is_hsdir_for_ring().then_some(())?;
1428 let relay = relay.into_relay()?;
1429 Some((rsidx, relay))
1430 })
1431 }
1432
1433 /// Return the parameters from the consensus, clamped to the
1434 /// correct ranges, with defaults filled in.
1435 ///
/// NOTE: unsupported parameters aren't returned here; only those
1437 /// values configured in the `params` module are available.
1438 pub fn params(&self) -> &NetParameters {
1439 &self.params
1440 }
1441
1442 /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1443 /// network's current requirements and recommendations for the list of
1444 /// protocols that every relay must implement.
1445 //
1446 // TODO HS: I am not sure this is the right API; other alternatives would be:
1447 // * To expose the _required_ relay protocol list instead (since that's all that
1448 // onion service implementations need).
1449 // * To expose the client protocol list as well (for symmetry).
1450 // * To expose the MdConsensus instead (since that's more general, although
1451 // it restricts the future evolution of this API).
1452 //
1453 // I think that this is a reasonably good compromise for now, but I'm going
1454 // to put it behind the `hs-common` feature to give us time to consider more.
1455 #[cfg(feature = "hs-common")]
1456 pub fn relay_protocol_status(&self) -> &netstatus::ProtoStatus {
1457 self.consensus.relay_protocol_status()
1458 }
1459
1460 /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1461 /// network's current requirements and recommendations for the list of
/// protocols that every client must implement.
1463 //
1464 // TODO HS: See notes on relay_protocol_status above.
1465 #[cfg(feature = "hs-common")]
1466 pub fn client_protocol_status(&self) -> &netstatus::ProtoStatus {
1467 self.consensus.client_protocol_status()
1468 }
1469
/// Return the weighted fraction of relays we can use. We only
1471 /// consider relays that match the predicate `usable`. We weight
1472 /// this bandwidth according to the provided `role`.
1473 ///
1474 /// If _no_ matching relays in the consensus have a nonzero
1475 /// weighted bandwidth value, we fall back to looking at the
1476 /// unweighted fraction of matching relays.
1477 ///
1478 /// If there are no matching relays in the consensus, we return 0.0.
1479 fn frac_for_role<'a, F>(&'a self, role: WeightRole, usable: F) -> f64
1480 where
1481 F: Fn(&UncheckedRelay<'a>) -> bool,
1482 {
1483 let mut total_weight = 0_u64;
1484 let mut have_weight = 0_u64;
1485 let mut have_count = 0_usize;
1486 let mut total_count = 0_usize;
1487
1488 for r in self.all_relays() {
1489 if !usable(&r) {
1490 continue;
1491 }
1492 let w = self.weights.weight_rs_for_role(r.rs, role);
1493 total_weight += w;
1494 total_count += 1;
1495 if r.is_usable() {
1496 have_weight += w;
1497 have_count += 1;
1498 }
1499 }
1500
1501 if total_weight > 0 {
1502 // The consensus lists some weighted bandwidth so return the
1503 // fraction of the weighted bandwidth for which we have
1504 // descriptors.
1505 (have_weight as f64) / (total_weight as f64)
1506 } else if total_count > 0 {
1507 // The consensus lists no weighted bandwidth for these relays,
1508 // but at least it does list relays. Return the fraction of
// relays for which we have descriptors.
1510 (have_count as f64) / (total_count as f64)
1511 } else {
1512 // There are no relays of this kind in the consensus. Return
1513 // 0.0, to avoid dividing by zero and giving NaN.
1514 0.0
1515 }
1516 }
1517 /// Return the estimated fraction of possible paths that we have
1518 /// enough microdescriptors to build.
1519 fn frac_usable_paths(&self) -> f64 {
1520 // TODO #504, TODO SPEC: We may want to add a set of is_flagged_fast() and/or
1521 // is_flagged_stable() checks here. This will require spec clarification.
1522 let f_g = self.frac_for_role(WeightRole::Guard, |u| {
1523 u.low_level_details().is_suitable_as_guard()
1524 });
1525 let f_m = self.frac_for_role(WeightRole::Middle, |_| true);
1526 let f_e = if self.all_relays().any(|u| u.rs.is_flagged_exit()) {
1527 self.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit())
1528 } else {
1529 // If there are no exits at all, we use f_m here.
1530 f_m
1531 };
1532 f_g * f_m * f_e
1533 }
1534 /// Return true if there is enough information in this NetDir to build
1535 /// multihop circuits.
1536 fn have_enough_paths(&self) -> bool {
1537 // TODO-A001: This should check for our guards as well, and
1538 // make sure that if they're listed in the consensus, we have
1539 // the descriptors for them.
1540
1541 // If we can build a randomly chosen path with at least this
1542 // probability, we know enough information to participate
1543 // on the network.
1544
1545 let min_frac_paths: f64 = self.params().min_circuit_path_threshold.as_fraction();
1546
1547 // What fraction of paths can we build?
1548 let available = self.frac_usable_paths();
1549
1550 available >= min_frac_paths
1551 }
1552 /// Choose a relay at random.
1553 ///
1554 /// Each relay is chosen with probability proportional to its weight
1555 /// in the role `role`, and is only selected if the predicate `usable`
1556 /// returns true for it.
1557 ///
1558 /// This function returns None if (and only if) there are no relays
1559 /// with nonzero weight where `usable` returned true.
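///
/// # Example
///
/// An illustrative sketch (not compiled here), assuming `netdir` is a
/// [`NetDir`] and an RNG from the `rand` crate; the trivial predicate here
/// accepts every usable relay:
///
/// ```ignore
/// use tor_netdir::WeightRole;
///
/// let mut rng = rand::rng();
/// let relay = netdir.pick_relay(&mut rng, WeightRole::Middle, |_r| true);
/// ```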
1560 //
1561 // TODO this API, with the `usable` closure, invites mistakes where we fail to
1562 // check conditions that are implied by the role we have selected for the relay:
1563 // call sites must include a call to `Relay::is_polarity_inverter()` or whatever.
1564 // IMO the `WeightRole` ought to imply a condition (and it should therefore probably
1565 // be renamed.) -Diziet
1566 pub fn pick_relay<'a, R, P>(
1567 &'a self,
1568 rng: &mut R,
1569 role: WeightRole,
1570 usable: P,
1571 ) -> Option<Relay<'a>>
1572 where
1573 R: rand::Rng,
1574 P: FnMut(&Relay<'a>) -> bool,
1575 {
1576 let relays: Vec<_> = self.relays().filter(usable).collect();
// This algorithm uses rand::distr::WeightedIndex, which
1578 // gives O(n) time and space to build the index, plus O(log n)
1579 // sampling time.
1580 //
1581 // We might be better off building a WeightedIndex in advance
1582 // for each `role`, and then sampling it repeatedly until we
1583 // get a relay that satisfies `usable`. Or we might not --
1584 // that depends heavily on the actual particulars of our
1585 // inputs. We probably shouldn't make any changes there
1586 // unless profiling tells us that this function is in a hot
1587 // path.
1588 //
1589 // The C Tor sampling implementation goes through some trouble
1590 // here to try to make its path selection constant-time. I
1591 // believe that there is no actual remotely exploitable
1592 // side-channel here however. It could be worth analyzing in
1593 // the future.
1594 //
1595 // This code will give the wrong result if the total of all weights
1596 // can exceed u64::MAX. We make sure that can't happen when we
1597 // set up `self.weights`.
1598 match relays[..].choose_weighted(rng, |r| self.weights.weight_rs_for_role(r.rs, role)) {
1599 Ok(relay) => Some(relay.clone()),
1600 Err(WeightError::InsufficientNonZero) => {
1601 if relays.is_empty() {
1602 None
1603 } else {
1604 warn!(?self.weights, ?role,
1605 "After filtering, all {} relays had zero weight. Choosing one at random. See bug #1907.",
1606 relays.len());
1607 relays.choose(rng).cloned()
1608 }
1609 }
1610 Err(e) => {
1611 warn_report!(e, "Unexpected error while sampling a relay");
1612 None
1613 }
1614 }
1615 }
1616
1617 /// Choose `n` relays at random.
1618 ///
1619 /// Each relay is chosen with probability proportional to its weight
1620 /// in the role `role`, and is only selected if the predicate `usable`
1621 /// returns true for it.
1622 ///
1623 /// Relays are chosen without replacement: no relay will be
1624 /// returned twice. Therefore, the resulting vector may be smaller
1625 /// than `n` if we happen to have fewer than `n` appropriate relays.
1626 ///
1627 /// This function returns an empty vector if (and only if) there
1628 /// are no relays with nonzero weight where `usable` returned
1629 /// true.
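///
/// # Example
///
/// A minimal sketch, not compiled as a doctest: `netdir` is assumed to be an
/// existing [`NetDir`], and `rng` any [`rand::Rng`].
///
/// ```ignore
/// // Ask for up to three distinct relays, weighted for the Middle position.
/// let middles = netdir.pick_n_relays(&mut rng, 3, WeightRole::Middle, |_| true);
/// assert!(middles.len() <= 3);
/// ```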
1630 #[allow(clippy::cognitive_complexity)] // all due to tracing crate.
1631 pub fn pick_n_relays<'a, R, P>(
1632 &'a self,
1633 rng: &mut R,
1634 n: usize,
1635 role: WeightRole,
1636 usable: P,
1637 ) -> Vec<Relay<'a>>
1638 where
1639 R: rand::Rng,
1640 P: FnMut(&Relay<'a>) -> bool,
1641 {
1642 let relays: Vec<_> = self.relays().filter(usable).collect();
1643 // NOTE: See discussion in pick_relay().
1644 let mut relays = match relays[..].choose_multiple_weighted(rng, n, |r| {
1645 self.weights.weight_rs_for_role(r.rs, role) as f64
1646 }) {
1647 Err(WeightError::InsufficientNonZero) => {
1648 // Too few relays had nonzero weights: return all of those that are okay.
1649 // (This behavior used to come up before rand 0.9; it no longer does,
1650 // but we still detect it.)
1651 let remaining: Vec<_> = relays
1652 .iter()
1653 .filter(|r| self.weights.weight_rs_for_role(r.rs, role) > 0)
1654 .cloned()
1655 .collect();
1656 if remaining.is_empty() {
1657 warn!(?self.weights, ?role,
1658 "After filtering, all {} relays had zero weight! Picking some at random. See bug #1907.",
1659 relays.len());
1660 if relays.len() >= n {
1661 relays.choose_multiple(rng, n).cloned().collect()
1662 } else {
1663 relays
1664 }
1665 } else {
1666 warn!(?self.weights, ?role,
1667 "After filtering, only had {}/{} relays with nonzero weight. Returning them all. See bug #1907.",
1668 remaining.len(), relays.len());
1669 remaining
1670 }
1671 }
1672 Err(e) => {
1673 warn_report!(e, "Unexpected error while sampling a set of relays");
1674 Vec::new()
1675 }
1676 Ok(iter) => {
1677 let selection: Vec<_> = iter.map(Relay::clone).collect();
1678 if selection.len() < n && selection.len() < relays.len() {
1679 warn!(?self.weights, ?role,
1680 "choose_multiple_weighted returned only {returned}, despite requesting {n}, \
1681 and having {filtered_len} available after filtering. See bug #1907.",
1682 returned=selection.len(), filtered_len=relays.len());
1683 }
1684 selection
1685 }
1686 };
1687 relays.shuffle(rng);
1688 relays
1689 }
1690
1691 /// Compute the weight with which `relay` will be selected for a given
1692 /// `role`.
1693 pub fn relay_weight<'a>(&'a self, relay: &Relay<'a>, role: WeightRole) -> RelayWeight {
1694 RelayWeight(self.weights.weight_rs_for_role(relay.rs, role))
1695 }
1696
1697 /// Compute the total weight with which any relay matching `usable`
1698 /// will be selected for a given `role`.
1699 ///
1700 /// Note: because this function is used to assess properties of the
1701 /// consensus as a whole, the `usable` predicate takes an
1702 /// [`UncheckedRelay`] rather than a [`Relay`].
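///
/// # Example
///
/// A minimal sketch, not compiled as a doctest: `netdir` is assumed to be an
/// existing [`NetDir`].
///
/// ```ignore
/// // Total Guard-position weight of every relay that looks suitable as a guard,
/// // whether or not we have its microdescriptor.
/// let guard_weight = netdir.total_weight(WeightRole::Guard, |u| {
///     u.low_level_details().is_suitable_as_guard()
/// });
/// ```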
1703 pub fn total_weight<P>(&self, role: WeightRole, usable: P) -> RelayWeight
1704 where
1705 P: Fn(&UncheckedRelay<'_>) -> bool,
1706 {
1707 self.all_relays()
1708 .filter_map(|unchecked| {
1709 if usable(&unchecked) {
1710 Some(RelayWeight(
1711 self.weights.weight_rs_for_role(unchecked.rs, role),
1712 ))
1713 } else {
1714 None
1715 }
1716 })
1717 .sum()
1718 }
1719
1720 /// Compute the weight with which a relay with ID `rsa_id` would be
1721 /// selected for a given `role`.
1722 ///
1723 /// Note that the weight returned by this function assumes that the
1724 /// relay with that ID is actually [usable](NetDir#usable); if it isn't usable,
1725 /// then other weight-related functions will treat its weight as zero.
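///
/// # Example
///
/// A minimal sketch, not compiled as a doctest: `netdir` is assumed to be an
/// existing [`NetDir`], and `rsa_id` an [`RsaIdentity`].
///
/// ```ignore
/// match netdir.weight_by_rsa_id(&rsa_id, WeightRole::Guard) {
///     Some(w) => println!("guard-position weight: {:?}", w),
///     None => println!("relay is not listed in the consensus"),
/// }
/// ```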
1726 pub fn weight_by_rsa_id(&self, rsa_id: &RsaIdentity, role: WeightRole) -> Option<RelayWeight> {
1727 self.by_rsa_id_unchecked(rsa_id)
1728 .map(|unchecked| RelayWeight(self.weights.weight_rs_for_role(unchecked.rs, role)))
1729 }
1730
1731 /// Return all relays in this NetDir known to be in the same family as
1732 /// `relay`.
1733 ///
1734 /// This list of members will **not** necessarily include `relay` itself.
1735 ///
1736 /// # Limitations
1737 ///
1738 /// Two relays only belong to the same family if _each_ relay
1739 /// claims to share a family with the other. But if we are
1740 /// missing a microdescriptor for one of the relays listed by this
1741 /// relay, we cannot know whether it acknowledges family
1742 /// membership with this relay or not. Therefore, this function
1743 /// can omit family members for which there is not (as yet) any
1744 /// Relay object.
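///
/// # Example
///
/// A minimal sketch, not compiled as a doctest: `netdir` is assumed to be an
/// existing [`NetDir`], and `relay` a [`Relay`] taken from it.
///
/// ```ignore
/// for member in netdir.known_family_members(&relay) {
///     println!("{} is in the same family as {}", member.id(), relay.id());
/// }
/// ```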
1745 pub fn known_family_members<'a>(
1746 &'a self,
1747 relay: &'a Relay<'a>,
1748 ) -> impl Iterator<Item = Relay<'a>> {
1749 let relay_rsa_id = relay.rsa_id();
1750 relay.md.family().members().filter_map(move |other_rsa_id| {
1751 self.by_rsa_id(other_rsa_id)
1752 .filter(|other_relay| other_relay.md.family().contains(relay_rsa_id))
1753 })
1754 }
1755
1756 /// Return the current hidden service directory "time period".
1757 ///
1758 /// Specifically, this returns the time period that contains the beginning
1759 /// of the validity period of this `NetDir`'s consensus. That time period
1760 /// is the one we use when acting as a hidden service client.
1761 #[cfg(feature = "hs-common")]
1762 pub fn hs_time_period(&self) -> TimePeriod {
1763 self.hsdir_rings.current.time_period()
1764 }
1765
1766 /// Return the [`HsDirParams`] of all the relevant hidden service directory "time periods"
1767 ///
1768 /// This includes the current time period (as from
1769 /// [`.hs_time_period`](NetDir::hs_time_period))
1770 /// plus additional time periods that we publish descriptors for when we are
1771 /// acting as a hidden service.
1772 #[cfg(feature = "hs-service")]
1773 pub fn hs_all_time_periods(&self) -> Vec<HsDirParams> {
1774 self.hsdir_rings
1775 .iter()
1776 .map(|r| r.params().clone())
1777 .collect()
1778 }
1779
1780 /// Return the relays in this network directory that will be used as hidden service directories
1781 ///
1782 /// These are suitable to retrieve a given onion service's descriptor at a given time period.
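///
/// # Example
///
/// A minimal sketch, not compiled as a doctest: `netdir` is assumed to be an
/// existing [`NetDir`], `hsid` an [`HsBlindId`], and `rng` any [`rand::Rng`].
///
/// ```ignore
/// let period = netdir.hs_time_period();
/// let hsdirs = netdir.hs_dirs_download(hsid, period, &mut rng)?;
/// // `hsdirs` is already shuffled; try its members in order.
/// ```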
1783 #[cfg(feature = "hs-common")]
1784 pub fn hs_dirs_download<'r, R>(
1785 &'r self,
1786 hsid: HsBlindId,
1787 period: TimePeriod,
1788 rng: &mut R,
1789 ) -> std::result::Result<Vec<Relay<'r>>, Bug>
1790 where
1791 R: rand::Rng,
1792 {
1793 // Algorithm:
1794 //
1795 // 1. Determine which HsDirRing to use, based on the time period.
1796 // 2. Find the shared random value that's associated with that HsDirRing.
1797 // 3. Choose spread = the parameter `hsdir_spread_fetch`
1798 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1799 // 5. Initialize Dirs = []
1800 // 6. for idx in 1..=n_replicas:
1801 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1802 // period).
1803 // - Find the position of H within hsdir_ring.
1804 // - Take elements from hsdir_ring starting at that position,
1805 // adding them to Dirs until we have added `spread` new elements
1806 // that were not there before.
1807 // 7. Shuffle Dirs
1808 // 8. return Dirs.
1809
1810 let spread = self.spread(HsDirOp::Download);
1811
1812 // When downloading, only look at relays on current ring.
1813 let ring = &self.hsdir_rings.current;
1814
1815 if ring.params().time_period != period {
1816 return Err(internal!(
1817 "our current ring is not associated with the requested time period!"
1818 ));
1819 }
1820
1821 let mut hs_dirs = self.select_hsdirs(hsid, ring, spread).collect_vec();
1822
1823 // When downloading, the order of the returned relays is random.
1824 hs_dirs.shuffle(rng);
1825
1826 Ok(hs_dirs)
1827 }
1828
1829 /// Return the relays in this network directory that will be used as hidden service directories
1830 ///
1831 /// Returns the relays that are suitable for storing a given onion service's descriptors at the
1832 /// given time period.
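///
/// # Example
///
/// A minimal sketch, not compiled as a doctest: `netdir` is assumed to be an
/// existing [`NetDir`], and `hsid` an [`HsBlindId`].
///
/// ```ignore
/// let period = netdir.hs_time_period();
/// let hsdirs: Vec<_> = netdir.hs_dirs_upload(hsid, period)?.collect();
/// // Upload the service descriptor for `period` to each relay in `hsdirs`.
/// ```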
1833 #[cfg(feature = "hs-service")]
1834 pub fn hs_dirs_upload(
1835 &self,
1836 hsid: HsBlindId,
1837 period: TimePeriod,
1838 ) -> std::result::Result<impl Iterator<Item = Relay<'_>>, Bug> {
1839 // Algorithm:
1840 //
1841 // 1. Choose spread = the parameter `hsdir_spread_store`
1842 // 2. Determine which HsDirRing to use, based on the time period.
1843 // 3. Find the shared random value that's associated with that HsDirRing.
1844 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1845 // 5. Initialize Dirs = []
1846 // 6. for idx in 1..=n_replicas:
1847 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1848 // period).
1849 // - Find the position of H within hsdir_ring.
1850 // - Take elements from hsdir_ring starting at that position,
1851 // adding them to Dirs until we have added `spread` new elements
1852 // that were not there before.
1853 // 7. return Dirs.
1854 let spread = self.spread(HsDirOp::Upload);
1855
1856 // Determine which HsDirRing(s) to use for this hsid, based on the time period.
1857 let rings = self
1858 .hsdir_rings
1859 .iter()
1860 .filter_map(move |ring| {
1861 // Make sure the ring matches the TP of the hsid it's matched with.
1862 (ring.params().time_period == period).then_some((ring, hsid, period))
1863 })
1864 .collect::<Vec<_>>();
1865
1866 // The specified period should have an associated ring.
1867 if !rings.iter().any(|(_, _, tp)| *tp == period) {
1868 return Err(internal!(
1869 "the specified time period does not have an associated ring"
1870 ));
1871 };
1872
1873 // Now that we've matched each `hsid` with the ring associated with its TP, we can start
1874 // selecting replicas from each ring.
1875 Ok(rings.into_iter().flat_map(move |(ring, hsid, period)| {
1876 assert_eq!(period, ring.params().time_period());
1877 self.select_hsdirs(hsid, ring, spread)
1878 }))
1879 }
1880
1881 /// Return the relays in this network directory that will be used as hidden service directories
1882 ///
1883 /// Depending on `op`,
1884 /// these are suitable to either store, or retrieve, a
1885 /// given onion service's descriptor at a given time period.
1886 ///
1887 /// When `op` is `Download`, the order is random.
1888 /// When `op` is `Upload`, the order is not specified.
1889 ///
1890 /// Return an error if the time period is not one returned by
1891 /// `onion_service_time_period` or `onion_service_secondary_time_periods`.
1892 //
1893 // TODO: make HsDirOp pub(crate) once this is removed
1894 #[cfg(feature = "hs-common")]
1895 #[deprecated(note = "Use hs_dirs_upload or hs_dirs_download instead")]
1896 pub fn hs_dirs<'r, R>(&'r self, hsid: &HsBlindId, op: HsDirOp, rng: &mut R) -> Vec<Relay<'r>>
1897 where
1898 R: rand::Rng,
1899 {
1900 // Algorithm:
1901 //
1902 // 1. Determine which HsDirRing to use, based on the time period.
1903 // 2. Find the shared random value that's associated with that HsDirRing.
1904 // 3. Choose spread = the parameter `hsdir_spread_store` or
1905 // `hsdir_spread_fetch` based on `op`.
1906 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1907 // 5. Initialize Dirs = []
1908 // 6. for idx in 1..=n_replicas:
1909 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1910 // period).
1911 // - Find the position of H within hsdir_ring.
1912 // - Take elements from hsdir_ring starting at that position,
1913 // adding them to Dirs until we have added `spread` new elements
1914 // that were not there before.
1915 // 7. return Dirs.
1916 let n_replicas = self
1917 .params
1918 .hsdir_n_replicas
1919 .get()
1920 .try_into()
1921 .expect("BoundedInt did not enforce bounds");
1922
1923 let spread = match op {
1924 HsDirOp::Download => self.params.hsdir_spread_fetch,
1925 #[cfg(feature = "hs-service")]
1926 HsDirOp::Upload => self.params.hsdir_spread_store,
1927 };
1928
1929 let spread = spread
1930 .get()
1931 .try_into()
1932 .expect("BoundedInt did not enforce bounds!");
1933
1934 // TODO: I may be wrong here but I suspect that this function may
1935 // need refactoring so that it does not look at _all_ of the HsDirRings,
1936 // but only at the ones that correspond to time periods for which
1937 // the HsBlindId is valid. Or I could be mistaken, in which case we should
1938 // have a comment to explain why I am, since the logic is subtle.
1939 // (For clients, there is only one ring.) -nickm
1940 //
1941 // (Actually, there is no need to follow through with the above TODO,
1942 // since this function is deprecated, and not used anywhere but the
1943 // tests.)
1944
1945 let mut hs_dirs = self
1946 .hsdir_rings
1947 .iter_for_op(op)
1948 .cartesian_product(1..=n_replicas) // 1-indexed !
1949 .flat_map({
1950 let mut selected_nodes = HashSet::new();
1951
1952 move |(ring, replica): (&HsDirRing, u8)| {
1953 let hsdir_idx = hsdir_ring::service_hsdir_index(hsid, replica, ring.params());
1954
1955 ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
1956 // According to rend-spec 2.2.3:
1957 // ... If any of those
1958 // nodes have already been selected for a lower-numbered replica of the
1959 // service, any nodes already chosen are disregarded (i.e. skipped over)
1960 // when choosing a replica's hsdir_spread_store nodes.
1961 selected_nodes.insert(*hsdir_idx)
1962 })
1963 .collect::<Vec<_>>()
1964 }
1965 })
1966 .filter_map(|(_hsdir_idx, rs_idx)| {
1967 // This ought not to be None but let's not panic or bail if it is
1968 self.relay_by_rs_idx(*rs_idx)
1969 })
1970 .collect_vec();
1971
1972 match op {
1973 HsDirOp::Download => {
1974 // When `op` is `Download`, the order is random.
1975 hs_dirs.shuffle(rng);
1976 }
1977 #[cfg(feature = "hs-service")]
1978 HsDirOp::Upload => {
1979 // When `op` is `Upload`, the order is not specified.
1980 }
1981 }
1982
1983 hs_dirs
1984 }
1985}
1986
1987impl MdReceiver for NetDir {
1988 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1989 Box::new(self.rsidx_by_missing.keys())
1990 }
1991 fn add_microdesc(&mut self, md: Microdesc) -> bool {
1992 self.add_arc_microdesc(Arc::new(md))
1993 }
1994 fn n_missing(&self) -> usize {
1995 self.rsidx_by_missing.len()
1996 }
1997}
1998
1999impl<'a> UncheckedRelay<'a> {
2000 /// Return an [`UncheckedRelayDetails`](details::UncheckedRelayDetails) for this relay.
2001 ///
2002 /// Callers should generally avoid using this information directly if they can;
2003 /// it's better to use a higher-level function that exposes semantic information
2004 /// rather than these properties.
2005 pub fn low_level_details(&self) -> details::UncheckedRelayDetails<'_> {
2006 details::UncheckedRelayDetails(self)
2007 }
2008
2009 /// Return true if this relay is valid and [usable](NetDir#usable).
2010 ///
2011 /// This function should return `true` for every Relay we expose
2012 /// to the user.
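    ///
    /// # Example
    ///
    /// A minimal sketch, not compiled as a doctest: `netdir` is assumed to be an
    /// existing [`NetDir`].
    ///
    /// ```ignore
    /// // Count the relays listed in the consensus that we can actually use.
    /// let n_usable = netdir.all_relays().filter(|r| r.is_usable()).count();
    /// ```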
2013 pub fn is_usable(&self) -> bool {
2014 // No need to check for 'valid' or 'running': they are implicit.
2015 self.md.is_some() && self.rs.ed25519_id_is_usable()
2016 }
2017 /// If this is [usable](NetDir#usable), return a corresponding Relay object.
2018 pub fn into_relay(self) -> Option<Relay<'a>> {
2019 if self.is_usable() {
2020 Some(Relay {
2021 rs: self.rs,
2022 md: self.md?,
2023 #[cfg(feature = "geoip")]
2024 cc: self.cc,
2025 })
2026 } else {
2027 None
2028 }
2029 }
2030
2031 /// Return true if this relay is a hidden service directory
2032 ///
2033 /// Ie, if it is to be included in the hsdir ring.
2034 #[cfg(feature = "hs-common")]
2035 pub(crate) fn is_hsdir_for_ring(&self) -> bool {
2036 // TODO are there any other flags we should check?
2037 // rend-spec-v3 2.2.3 says just
2038 // "each node listed in the current consensus with the HSDir flag"
2039 // Do we need to check ed25519_id_is_usable ?
2040 // See also https://gitlab.torproject.org/tpo/core/arti/-/issues/504
2041 self.rs.is_flagged_hsdir()
2042 }
2043}
2044
2045impl<'a> Relay<'a> {
2046 /// Return a [`RelayDetails`](details::RelayDetails) for this relay.
2047 ///
2048 /// Callers should generally avoid using this information directly if they can;
2049 /// it's better to use a higher-level function that exposes semantic information
2050 /// rather than these properties.
2051 pub fn low_level_details(&self) -> details::RelayDetails<'_> {
2052 details::RelayDetails(self)
2053 }
2054
2055 /// Return the Ed25519 ID for this relay.
2056 pub fn id(&self) -> &Ed25519Identity {
2057 self.md.ed25519_id()
2058 }
2059 /// Return the RsaIdentity for this relay.
2060 pub fn rsa_id(&self) -> &RsaIdentity {
2061 self.rs.rsa_identity()
2062 }
2063
2064 /// Return a reference to this relay's "router status" entry in
2065 /// the consensus.
2066 ///
2067 /// The router status entry contains information about the relay
2068 /// that the authorities voted on directly. For most use cases,
2069 /// you shouldn't need it.
2070 ///
2071 /// This function is only available if the crate was built with
2072 /// its `experimental-api` feature.
2073 #[cfg(feature = "experimental-api")]
2074 pub fn rs(&self) -> &netstatus::MdRouterStatus {
2075 self.rs
2076 }
2077 /// Return a reference to this relay's "microdescriptor" entry in
2078 /// the consensus.
2079 ///
2080 /// A "microdescriptor" is a synopsis of the information about a relay,
2081 /// used to determine its capabilities and route traffic through it.
2082 /// For most use cases, you shouldn't need it.
2083 ///
2084 /// This function is only available if the crate was built with
2085 /// its `experimental-api` feature.
2086 #[cfg(feature = "experimental-api")]
2087 pub fn md(&self) -> &Microdesc {
2088 self.md
2089 }
2090}
2091
2092/// An error value returned from [`NetDir::by_ids_detailed`].
2093#[cfg(feature = "hs-common")]
2094#[derive(Clone, Debug, thiserror::Error)]
2095#[non_exhaustive]
2096pub enum RelayLookupError {
2097 /// We found a relay whose presence indicates that the provided set of
2098 /// identities is impossible to resolve.
2099 #[error("Provided set of identities is impossible according to consensus.")]
2100 Impossible,
2101}
2102
2103impl<'a> HasAddrs for Relay<'a> {
2104 fn addrs(&self) -> impl Iterator<Item = std::net::SocketAddr> {
2105 self.rs.addrs()
2106 }
2107}
2108#[cfg(feature = "geoip")]
2109#[cfg_attr(docsrs, doc(cfg(feature = "geoip")))]
2110impl<'a> HasCountryCode for Relay<'a> {
2111 fn country_code(&self) -> Option<CountryCode> {
2112 self.cc
2113 }
2114}
2115impl<'a> tor_linkspec::HasRelayIdsLegacy for Relay<'a> {
2116 fn ed_identity(&self) -> &Ed25519Identity {
2117 self.id()
2118 }
2119 fn rsa_identity(&self) -> &RsaIdentity {
2120 self.rsa_id()
2121 }
2122}
2123
2124impl<'a> HasRelayIds for UncheckedRelay<'a> {
2125 fn identity(&self, key_type: RelayIdType) -> Option<RelayIdRef<'_>> {
2126 match key_type {
2127 RelayIdType::Ed25519 if self.rs.ed25519_id_is_usable() => {
2128 self.md.map(|m| m.ed25519_id().into())
2129 }
2130 RelayIdType::Rsa => Some(self.rs.rsa_identity().into()),
2131 _ => None,
2132 }
2133 }
2134}
2135#[cfg(feature = "geoip")]
2136impl<'a> HasCountryCode for UncheckedRelay<'a> {
2137 fn country_code(&self) -> Option<CountryCode> {
2138 self.cc
2139 }
2140}
2141
2142impl<'a> DirectChanMethodsHelper for Relay<'a> {}
2143impl<'a> ChanTarget for Relay<'a> {}
2144
2145impl<'a> tor_linkspec::CircTarget for Relay<'a> {
2146 fn ntor_onion_key(&self) -> &ll::pk::curve25519::PublicKey {
2147 self.md.ntor_key()
2148 }
2149 fn protovers(&self) -> &tor_protover::Protocols {
2150 self.rs.protovers()
2151 }
2152}
2153
2154#[cfg(test)]
2155mod test {
2156 // @@ begin test lint list maintained by maint/add_warning @@
2157 #![allow(clippy::bool_assert_comparison)]
2158 #![allow(clippy::clone_on_copy)]
2159 #![allow(clippy::dbg_macro)]
2160 #![allow(clippy::mixed_attributes_style)]
2161 #![allow(clippy::print_stderr)]
2162 #![allow(clippy::print_stdout)]
2163 #![allow(clippy::single_char_pattern)]
2164 #![allow(clippy::unwrap_used)]
2165 #![allow(clippy::unchecked_duration_subtraction)]
2166 #![allow(clippy::useless_vec)]
2167 #![allow(clippy::needless_pass_by_value)]
2168 //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
2169 #![allow(clippy::cognitive_complexity)]
2170 use super::*;
2171 use crate::testnet::*;
2172 use float_eq::assert_float_eq;
2173 use std::collections::HashSet;
2174 use std::time::Duration;
2175 use tor_basic_utils::test_rng::{self, testing_rng};
2176 use tor_linkspec::{RelayIdType, RelayIds};
2177
2178 #[cfg(feature = "hs-common")]
2179 fn dummy_hs_blind_id() -> HsBlindId {
2180 let hsid = [2, 1, 1, 1].iter().cycle().take(32).cloned().collect_vec();
2181 let hsid = Ed25519Identity::new(hsid[..].try_into().unwrap());
2182 HsBlindId::from(hsid)
2183 }
2184
2185 // Basic functionality for a partial netdir: Add microdescriptors,
2186 // then you have a netdir.
2187 #[test]
2188 fn partial_netdir() {
2189 let (consensus, microdescs) = construct_network().unwrap();
2190 let dir = PartialNetDir::new(consensus, None);
2191
2192 // Check the lifetime
2193 let lifetime = dir.lifetime();
2194 assert_eq!(
2195 lifetime
2196 .valid_until()
2197 .duration_since(lifetime.valid_after())
2198 .unwrap(),
2199 Duration::new(86400, 0)
2200 );
2201
2202 // No microdescriptors, so we don't have enough paths, and can't
2203 // advance.
2204 assert!(!dir.have_enough_paths());
2205 let mut dir = match dir.unwrap_if_sufficient() {
2206 Ok(_) => panic!(),
2207 Err(d) => d,
2208 };
2209
2210 let missing: HashSet<_> = dir.missing_microdescs().collect();
2211 assert_eq!(missing.len(), 40);
2212 assert_eq!(missing.len(), dir.netdir.c_relays().len());
2213 for md in µdescs {
2214 assert!(missing.contains(md.digest()));
2215 }
2216
2217 // Now add all the mds and try again.
2218 for md in microdescs {
2219 let wanted = dir.add_microdesc(md);
2220 assert!(wanted);
2221 }
2222
2223 let missing: HashSet<_> = dir.missing_microdescs().collect();
2224 assert!(missing.is_empty());
2225 assert!(dir.have_enough_paths());
2226 let _complete = match dir.unwrap_if_sufficient() {
2227 Ok(d) => d,
2228 Err(_) => panic!(),
2229 };
2230 }
2231
2232 #[test]
2233 fn override_params() {
2234 let (consensus, _microdescs) = construct_network().unwrap();
2235 let override_p = "bwweightscale=2 doesnotexist=77 circwindow=500"
2236 .parse()
2237 .unwrap();
2238 let dir = PartialNetDir::new(consensus.clone(), Some(&override_p));
2239 let params = &dir.netdir.params;
2240 assert_eq!(params.bw_weight_scale.get(), 2);
2241 assert_eq!(params.circuit_window.get(), 500_i32);
2242
2243 // try again without the override.
2244 let dir = PartialNetDir::new(consensus, None);
2245 let params = &dir.netdir.params;
2246 assert_eq!(params.bw_weight_scale.get(), 1_i32);
2247 assert_eq!(params.circuit_window.get(), 1000_i32);
2248 }
2249
2250 #[test]
2251 fn fill_from_previous() {
2252 let (consensus, microdescs) = construct_network().unwrap();
2253
2254 let mut dir = PartialNetDir::new(consensus.clone(), None);
2255 for md in microdescs.iter().skip(2) {
2256 let wanted = dir.add_microdesc(md.clone());
2257 assert!(wanted);
2258 }
2259 let dir1 = dir.unwrap_if_sufficient().unwrap();
2260 assert_eq!(dir1.missing_microdescs().count(), 2);
2261
2262 let mut dir = PartialNetDir::new(consensus, None);
2263 assert_eq!(dir.missing_microdescs().count(), 40);
2264 dir.fill_from_previous_netdir(Arc::new(dir1));
2265 assert_eq!(dir.missing_microdescs().count(), 2);
2266 }
2267
2268 #[test]
2269 fn path_count() {
2270 let low_threshold = "min_paths_for_circs_pct=64".parse().unwrap();
2271 let high_threshold = "min_paths_for_circs_pct=65".parse().unwrap();
2272
2273 let (consensus, microdescs) = construct_network().unwrap();
2274
2275 let mut dir = PartialNetDir::new(consensus.clone(), Some(&low_threshold));
2276 for (pos, md) in microdescs.iter().enumerate() {
2277 if pos % 7 == 2 {
2278 continue; // skip a few relays.
2279 }
2280 dir.add_microdesc(md.clone());
2281 }
2282 let dir = dir.unwrap_if_sufficient().unwrap();
2283
2284 // We have 40 relays that we know about from the consensus.
2285 assert_eq!(dir.all_relays().count(), 40);
2286
2287 // But only 34 are usable.
2288 assert_eq!(dir.relays().count(), 34);
2289
2290 // For guards: mds 20..=39 correspond to Guard relays.
2291 // Their bandwidth is 2*(1000+2000+...10000) = 110_000.
2292 // We skipped 23, 30, and 37. They have bandwidth
2293 // 4000 + 1000 + 8000 = 13_000. So our fractional bandwidth
2294 // should be (110-13)/110.
2295 let f = dir.frac_for_role(WeightRole::Guard, |u| u.rs.is_flagged_guard());
2296 assert!(((97.0 / 110.0) - f).abs() < 0.000001);
2297
2298 // For exits: mds 10..=19 and 30..=39 correspond to Exit relays.
2299 // We skipped 16, 30, and 37. Per above our fractional bandwidth is
2300 // (110-16)/110.
2301 let f = dir.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit());
2302 assert!(((94.0 / 110.0) - f).abs() < 0.000001);
2303
2304 // For middles: all relays are middles. We skipped 2, 9, 16,
2305 // 23, 30, and 37. Per above our fractional bandwidth is
2306 // (220-33)/220
2307 let f = dir.frac_for_role(WeightRole::Middle, |_| true);
2308 assert!(((187.0 / 220.0) - f).abs() < 0.000001);
2309
2310 // Multiplying those together, we get the fraction of paths we can
2311 // build at ~0.64052066, which is above the threshold we set above for
2312 // MinPathsForCircsPct.
2313 let f = dir.frac_usable_paths();
2314 assert!((f - 0.64052066).abs() < 0.000001);
2315
2316 // But if we try again with a slightly higher threshold...
2317 let mut dir = PartialNetDir::new(consensus, Some(&high_threshold));
2318 for (pos, md) in microdescs.into_iter().enumerate() {
2319 if pos % 7 == 2 {
2320 continue; // skip a few relays.
2321 }
2322 dir.add_microdesc(md);
2323 }
2324 assert!(dir.unwrap_if_sufficient().is_err());
2325 }
2326
2327 /// Return a 3-tuple for use by `test_pick_*()` of an Rng, a number of
2328 /// iterations, and a tolerance.
2329 ///
2330 /// If the Rng is deterministic (the default), we can use a faster setup,
2331 /// with a higher tolerance and fewer iterations. But if you've explicitly
2332 /// opted into randomization (or are replaying a seed from an earlier
2333 /// randomized test), we give you more iterations and a tighter tolerance.
2334 fn testing_rng_with_tolerances() -> (impl rand::Rng, usize, f64) {
2335 // Use a deterministic RNG if none is specified, since this is slow otherwise.
2336 let config = test_rng::Config::from_env().unwrap_or(test_rng::Config::Deterministic);
2337 let (iters, tolerance) = match config {
2338 test_rng::Config::Deterministic => (5000, 0.02),
2339 _ => (50000, 0.01),
2340 };
2341 (config.into_rng(), iters, tolerance)
2342 }
2343
2344 #[test]
2345 fn test_pick() {
2346 let (consensus, microdescs) = construct_network().unwrap();
2347 let mut dir = PartialNetDir::new(consensus, None);
2348 for md in microdescs.into_iter() {
2349 let wanted = dir.add_microdesc(md.clone());
2350 assert!(wanted);
2351 }
2352 let dir = dir.unwrap_if_sufficient().unwrap();
2353
2354 let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2355
2356 let mut picked = [0_isize; 40];
2357 for _ in 0..total {
2358 let r = dir.pick_relay(&mut rng, WeightRole::Middle, |r| {
2359 r.low_level_details().supports_exit_port_ipv4(80)
2360 });
2361 let r = r.unwrap();
2362 let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2363 picked[id_byte as usize] += 1;
2364 }
2365 // non-exits should never get picked.
2366 picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2367 picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2368
2369 let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2370
2371 // We didn't use any non-default weights, so the other relays get
2372 // weighted proportionally to their bandwidth.
2373 assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2374 assert_float_eq!(picked_f[38], (9.0 / 110.0), abs <= tolerance);
2375 assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2376 }
2377
2378 #[test]
2379 fn test_pick_multiple() {
2380 // This is mostly a copy of test_pick, except that it uses
2381 // pick_n_relays to pick several relays at once.
2382
2383 let dir = construct_netdir().unwrap_if_sufficient().unwrap();
2384
2385 let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2386
2387 let mut picked = [0_isize; 40];
2388 for _ in 0..total / 4 {
2389 let relays = dir.pick_n_relays(&mut rng, 4, WeightRole::Middle, |r| {
2390 r.low_level_details().supports_exit_port_ipv4(80)
2391 });
2392 assert_eq!(relays.len(), 4);
2393 for r in relays {
2394 let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2395 picked[id_byte as usize] += 1;
2396 }
2397 }
2398 // non-exits should never get picked.
2399 picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2400 picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2401
2402 let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2403
2404 // We didn't use any non-default weights, so the other relays get
2405 // weighted proportionally to their bandwidth.
2406 assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2407 assert_float_eq!(picked_f[36], (7.0 / 110.0), abs <= tolerance);
2408 assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2409 }
2410
2411 #[test]
2412 fn subnets() {
2413 let cfg = SubnetConfig::default();
2414
2415 fn same_net(cfg: &SubnetConfig, a: &str, b: &str) -> bool {
2416 cfg.addrs_in_same_subnet(&a.parse().unwrap(), &b.parse().unwrap())
2417 }
2418
2419 assert!(same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2420 assert!(!same_net(&cfg, "127.15.3.3", "127.16.9.9"));
2421
2422 assert!(!same_net(&cfg, "127.15.3.3", "127::"));
2423
2424 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2425 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:fffe:91:34::"));
2426
2427 let cfg = SubnetConfig {
2428 subnets_family_v4: 32,
2429 subnets_family_v6: 128,
2430 };
2431 assert!(!same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2432 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2433
2434 assert!(same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2435 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.2"));
2436 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:90:33::"));
2437
2438 let cfg = SubnetConfig {
2439 subnets_family_v4: 33,
2440 subnets_family_v6: 129,
2441 };
2442 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2443 assert!(!same_net(&cfg, "::", "::"));
2444 }
2445
2446 #[test]
2447 fn subnet_union() {
2448 let cfg1 = SubnetConfig {
2449 subnets_family_v4: 16,
2450 subnets_family_v6: 64,
2451 };
2452 let cfg2 = SubnetConfig {
2453 subnets_family_v4: 24,
2454 subnets_family_v6: 32,
2455 };
2456 let a1 = "1.2.3.4".parse().unwrap();
2457 let a2 = "1.2.10.10".parse().unwrap();
2458
2459 let a3 = "ffff:ffff::7".parse().unwrap();
2460 let a4 = "ffff:ffff:1234::8".parse().unwrap();
2461
2462 assert_eq!(cfg1.addrs_in_same_subnet(&a1, &a2), true);
2463 assert_eq!(cfg2.addrs_in_same_subnet(&a1, &a2), false);
2464
2465 assert_eq!(cfg1.addrs_in_same_subnet(&a3, &a4), false);
2466 assert_eq!(cfg2.addrs_in_same_subnet(&a3, &a4), true);
2467
2468 let cfg_u = cfg1.union(&cfg2);
2469 assert_eq!(
2470 cfg_u,
2471 SubnetConfig {
2472 subnets_family_v4: 16,
2473 subnets_family_v6: 32,
2474 }
2475 );
2476 assert_eq!(cfg_u.addrs_in_same_subnet(&a1, &a2), true);
2477 assert_eq!(cfg_u.addrs_in_same_subnet(&a3, &a4), true);
2478
2479 assert_eq!(cfg1.union(&cfg1), cfg1);
2480
2481 assert_eq!(cfg1.union(&SubnetConfig::no_addresses_match()), cfg1);
2482 }
2483
2484 #[test]
2485 fn relay_funcs() {
2486 let (consensus, microdescs) = construct_custom_network(
2487 |pos, nb, _| {
2488 if pos == 15 {
2489 nb.rs.add_or_port("[f0f0::30]:9001".parse().unwrap());
2490 } else if pos == 20 {
2491 nb.rs.add_or_port("[f0f0::3131]:9001".parse().unwrap());
2492 }
2493 },
2494 None,
2495 )
2496 .unwrap();
2497 let subnet_config = SubnetConfig::default();
2498 let all_family_info = FamilyRules::all_family_info();
2499 let mut dir = PartialNetDir::new(consensus, None);
2500 for md in microdescs.into_iter() {
2501 let wanted = dir.add_microdesc(md.clone());
2502 assert!(wanted);
2503 }
2504 let dir = dir.unwrap_if_sufficient().unwrap();
2505
2506 // Pick out a few relays by ID.
2507 let k0 = Ed25519Identity::from([0; 32]);
2508 let k1 = Ed25519Identity::from([1; 32]);
2509 let k2 = Ed25519Identity::from([2; 32]);
2510 let k3 = Ed25519Identity::from([3; 32]);
2511 let k10 = Ed25519Identity::from([10; 32]);
2512 let k15 = Ed25519Identity::from([15; 32]);
2513 let k20 = Ed25519Identity::from([20; 32]);
2514
2515 let r0 = dir.by_id(&k0).unwrap();
2516 let r1 = dir.by_id(&k1).unwrap();
2517 let r2 = dir.by_id(&k2).unwrap();
2518 let r3 = dir.by_id(&k3).unwrap();
2519 let r10 = dir.by_id(&k10).unwrap();
2520 let r15 = dir.by_id(&k15).unwrap();
2521 let r20 = dir.by_id(&k20).unwrap();
2522
2523 assert_eq!(r0.id(), &[0; 32].into());
2524 assert_eq!(r0.rsa_id(), &[0; 20].into());
2525 assert_eq!(r1.id(), &[1; 32].into());
2526 assert_eq!(r1.rsa_id(), &[1; 20].into());
2527
2528 assert!(r0.same_relay_ids(&r0));
2529 assert!(r1.same_relay_ids(&r1));
2530 assert!(!r1.same_relay_ids(&r0));
2531
2532 assert!(r0.low_level_details().is_dir_cache());
2533 assert!(!r1.low_level_details().is_dir_cache());
2534 assert!(r2.low_level_details().is_dir_cache());
2535 assert!(!r3.low_level_details().is_dir_cache());
2536
2537 assert!(!r0.low_level_details().supports_exit_port_ipv4(80));
2538 assert!(!r1.low_level_details().supports_exit_port_ipv4(80));
2539 assert!(!r2.low_level_details().supports_exit_port_ipv4(80));
2540 assert!(!r3.low_level_details().supports_exit_port_ipv4(80));
2541
2542 assert!(!r0.low_level_details().policies_allow_some_port());
2543 assert!(!r1.low_level_details().policies_allow_some_port());
2544 assert!(!r2.low_level_details().policies_allow_some_port());
2545 assert!(!r3.low_level_details().policies_allow_some_port());
2546 assert!(r10.low_level_details().policies_allow_some_port());
2547
2548 assert!(r0.low_level_details().in_same_family(&r0, all_family_info));
2549 assert!(r0.low_level_details().in_same_family(&r1, all_family_info));
2550 assert!(r1.low_level_details().in_same_family(&r0, all_family_info));
2551 assert!(r1.low_level_details().in_same_family(&r1, all_family_info));
2552 assert!(!r0.low_level_details().in_same_family(&r2, all_family_info));
2553 assert!(!r2.low_level_details().in_same_family(&r0, all_family_info));
2554 assert!(r2.low_level_details().in_same_family(&r2, all_family_info));
2555 assert!(r2.low_level_details().in_same_family(&r3, all_family_info));
2556
2557 assert!(r0.low_level_details().in_same_subnet(&r10, &subnet_config));
2558 assert!(r10.low_level_details().in_same_subnet(&r10, &subnet_config));
2559 assert!(r0.low_level_details().in_same_subnet(&r0, &subnet_config));
2560 assert!(r1.low_level_details().in_same_subnet(&r1, &subnet_config));
2561 assert!(!r1.low_level_details().in_same_subnet(&r2, &subnet_config));
2562 assert!(!r2.low_level_details().in_same_subnet(&r3, &subnet_config));
2563
2564 // Make sure IPv6 families work.
2565 let subnet_config = SubnetConfig {
2566 subnets_family_v4: 128,
2567 subnets_family_v6: 96,
2568 };
2569 assert!(r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2570 assert!(!r15.low_level_details().in_same_subnet(&r1, &subnet_config));
2571
2572 // Make sure that subnet configs can be disabled.
2573 let subnet_config = SubnetConfig {
2574 subnets_family_v4: 255,
2575 subnets_family_v6: 255,
2576 };
2577 assert!(!r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2578 }
2579
2580 #[test]
2581 fn test_badexit() {
2582 // make a netdir where relays 10-19 are badexit, and everybody
2583 // exits to 443 on IPv6.
2584 use tor_netdoc::doc::netstatus::RelayFlags;
2585 let netdir = construct_custom_netdir(|pos, nb, _| {
2586 if (10..20).contains(&pos) {
2587 nb.rs.add_flags(RelayFlags::BAD_EXIT);
2588 }
2589 nb.md.parse_ipv6_policy("accept 443").unwrap();
2590 })
2591 .unwrap()
2592 .unwrap_if_sufficient()
2593 .unwrap();
2594
2595 let e12 = netdir.by_id(&Ed25519Identity::from([12; 32])).unwrap();
2596 let e32 = netdir.by_id(&Ed25519Identity::from([32; 32])).unwrap();
2597
2598 assert!(!e12.low_level_details().supports_exit_port_ipv4(80));
2599 assert!(e32.low_level_details().supports_exit_port_ipv4(80));
2600
2601 assert!(!e12.low_level_details().supports_exit_port_ipv6(443));
2602 assert!(e32.low_level_details().supports_exit_port_ipv6(443));
2603 assert!(!e32.low_level_details().supports_exit_port_ipv6(555));
2604
2605 assert!(!e12.low_level_details().policies_allow_some_port());
2606 assert!(e32.low_level_details().policies_allow_some_port());
2607
2608 assert!(!e12.low_level_details().ipv4_policy().allows_some_port());
2609 assert!(!e12.low_level_details().ipv6_policy().allows_some_port());
2610 assert!(e32.low_level_details().ipv4_policy().allows_some_port());
2611 assert!(e32.low_level_details().ipv6_policy().allows_some_port());
2612
2613 assert!(
2614 e12.low_level_details()
2615 .ipv4_declared_policy()
2616 .allows_some_port()
2617 );
2618 assert!(
2619 e12.low_level_details()
2620 .ipv6_declared_policy()
2621 .allows_some_port()
2622 );
2623 }
2624
2625 #[cfg(feature = "experimental-api")]
2626 #[test]
2627 fn test_accessors() {
2628 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2629
2630 let r4 = netdir.by_id(&Ed25519Identity::from([4; 32])).unwrap();
2631 let r16 = netdir.by_id(&Ed25519Identity::from([16; 32])).unwrap();
2632
2633 assert!(!r4.md().ipv4_policy().allows_some_port());
2634 assert!(r16.md().ipv4_policy().allows_some_port());
2635
2636 assert!(!r4.rs().is_flagged_exit());
2637 assert!(r16.rs().is_flagged_exit());
2638 }
2639
2640 #[test]
2641 fn test_by_id() {
2642 // Make a netdir that omits the microdescriptor for relay 13 (0x0D0D0D...).
2643 let netdir = construct_custom_netdir(|pos, nb, _| {
2644 nb.omit_md = pos == 13;
2645 })
2646 .unwrap();
2647
2648 let netdir = netdir.unwrap_if_sufficient().unwrap();
2649
2650 let r = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2651 assert_eq!(r.id().as_bytes(), &[0; 32]);
2652
2653 assert!(netdir.by_id(&Ed25519Identity::from([13; 32])).is_none());
2654
2655 let r = netdir.by_rsa_id(&[12; 20].into()).unwrap();
2656 assert_eq!(r.rsa_id().as_bytes(), &[12; 20]);
2657 assert!(netdir.rsa_id_is_listed(&[12; 20].into()));
2658
2659 assert!(netdir.by_rsa_id(&[13; 20].into()).is_none());
2660
2661 assert!(netdir.by_rsa_id_unchecked(&[99; 20].into()).is_none());
2662 assert!(!netdir.rsa_id_is_listed(&[99; 20].into()));
2663
2664 let r = netdir.by_rsa_id_unchecked(&[13; 20].into()).unwrap();
2665 assert_eq!(r.rs.rsa_identity().as_bytes(), &[13; 20]);
2666 assert!(netdir.rsa_id_is_listed(&[13; 20].into()));
2667
2668 let pair_13_13 = RelayIds::builder()
2669 .ed_identity([13; 32].into())
2670 .rsa_identity([13; 20].into())
2671 .build()
2672 .unwrap();
2673 let pair_14_14 = RelayIds::builder()
2674 .ed_identity([14; 32].into())
2675 .rsa_identity([14; 20].into())
2676 .build()
2677 .unwrap();
2678 let pair_14_99 = RelayIds::builder()
2679 .ed_identity([14; 32].into())
2680 .rsa_identity([99; 20].into())
2681 .build()
2682 .unwrap();
2683
2684 let r = netdir.by_ids(&pair_13_13);
2685 assert!(r.is_none());
2686 let r = netdir.by_ids(&pair_14_14).unwrap();
2687 assert_eq!(r.identity(RelayIdType::Rsa).unwrap().as_bytes(), &[14; 20]);
2688 assert_eq!(
2689 r.identity(RelayIdType::Ed25519).unwrap().as_bytes(),
2690 &[14; 32]
2691 );
2692 let r = netdir.by_ids(&pair_14_99);
2693 assert!(r.is_none());
2694
2695 assert_eq!(
2696 netdir.id_pair_listed(&[13; 32].into(), &[13; 20].into()),
2697 None
2698 );
2699 assert_eq!(
2700 netdir.id_pair_listed(&[15; 32].into(), &[15; 20].into()),
2701 Some(true)
2702 );
2703 assert_eq!(
2704 netdir.id_pair_listed(&[15; 32].into(), &[99; 20].into()),
2705 Some(false)
2706 );
2707 }
2708
2709 #[test]
2710 #[cfg(feature = "hs-common")]
2711 fn test_by_ids_detailed() {
2712 // Make a netdir that omits the microdescriptor for relay 13 (0x0D0D0D...).
2713 let netdir = construct_custom_netdir(|pos, nb, _| {
2714 nb.omit_md = pos == 13;
2715 })
2716 .unwrap();
2717
2718 let netdir = netdir.unwrap_if_sufficient().unwrap();
2719
2720 let id13_13 = RelayIds::builder()
2721 .ed_identity([13; 32].into())
2722 .rsa_identity([13; 20].into())
2723 .build()
2724 .unwrap();
2725 let id15_15 = RelayIds::builder()
2726 .ed_identity([15; 32].into())
2727 .rsa_identity([15; 20].into())
2728 .build()
2729 .unwrap();
2730 let id15_99 = RelayIds::builder()
2731 .ed_identity([15; 32].into())
2732 .rsa_identity([99; 20].into())
2733 .build()
2734 .unwrap();
2735 let id99_15 = RelayIds::builder()
2736 .ed_identity([99; 32].into())
2737 .rsa_identity([15; 20].into())
2738 .build()
2739 .unwrap();
2740 let id99_99 = RelayIds::builder()
2741 .ed_identity([99; 32].into())
2742 .rsa_identity([99; 20].into())
2743 .build()
2744 .unwrap();
2745 let id15_xx = RelayIds::builder()
2746 .ed_identity([15; 32].into())
2747 .build()
2748 .unwrap();
2749 let idxx_15 = RelayIds::builder()
2750 .rsa_identity([15; 20].into())
2751 .build()
2752 .unwrap();
2753
2754 assert!(matches!(netdir.by_ids_detailed(&id13_13), Ok(None)));
2755 assert!(matches!(netdir.by_ids_detailed(&id15_15), Ok(Some(_))));
2756 assert!(matches!(
2757 netdir.by_ids_detailed(&id15_99),
2758 Err(RelayLookupError::Impossible)
2759 ));
2760 assert!(matches!(
2761 netdir.by_ids_detailed(&id99_15),
2762 Err(RelayLookupError::Impossible)
2763 ));
2764 assert!(matches!(netdir.by_ids_detailed(&id99_99), Ok(None)));
2765 assert!(matches!(netdir.by_ids_detailed(&id15_xx), Ok(Some(_))));
2766 assert!(matches!(netdir.by_ids_detailed(&idxx_15), Ok(Some(_))));
2767 }
2768
2769 #[test]
2770 fn weight_type() {
2771 let r0 = RelayWeight(0);
2772 let r100 = RelayWeight(100);
2773 let r200 = RelayWeight(200);
2774 let r300 = RelayWeight(300);
2775 assert_eq!(r100 + r200, r300);
2776 assert_eq!(r100.checked_div(r200), Some(0.5));
2777 assert!(r100.checked_div(r0).is_none());
2778 assert_eq!(r200.ratio(0.5), Some(r100));
2779 assert!(r200.ratio(-1.0).is_none());
2780 }
2781
2782 #[test]
2783 fn weight_accessors() {
2784 // Make a netdir from the standard testing network (all microdescriptors present).
2785 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2786
2787 let g_total = netdir.total_weight(WeightRole::Guard, |r| r.rs.is_flagged_guard());
2788 // This is just the total guard weight, since all our Wxy = 1.
2789 assert_eq!(g_total, RelayWeight(110_000));
2790
2791 let g_total = netdir.total_weight(WeightRole::Guard, |_| false);
2792 assert_eq!(g_total, RelayWeight(0));
2793
2794 let relay = netdir.by_id(&Ed25519Identity::from([35; 32])).unwrap();
2795 assert!(relay.rs.is_flagged_guard());
2796 let w = netdir.relay_weight(&relay, WeightRole::Guard);
2797 assert_eq!(w, RelayWeight(6_000));
2798
2799 let w = netdir
2800 .weight_by_rsa_id(&[33; 20].into(), WeightRole::Guard)
2801 .unwrap();
2802 assert_eq!(w, RelayWeight(4_000));
2803
2804 assert!(
2805 netdir
2806 .weight_by_rsa_id(&[99; 20].into(), WeightRole::Guard)
2807 .is_none()
2808 );
2809 }
2810
2811 #[test]
2812 fn family_list() {
2813 let netdir = construct_custom_netdir(|pos, n, _| {
2814 if pos == 0x0a {
2815 n.md.family(
2816 "$0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B \
2817 $0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C \
2818 $0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D"
2819 .parse()
2820 .unwrap(),
2821 );
2822 } else if pos == 0x0c {
2823 n.md.family("$0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A".parse().unwrap());
2824 }
2825 })
2826 .unwrap()
2827 .unwrap_if_sufficient()
2828 .unwrap();
2829
2830 // In the testing netdir, adjacent members are in the same family by default...
2831 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2832 let family: Vec<_> = netdir.known_family_members(&r0).collect();
2833 assert_eq!(family.len(), 1);
2834 assert_eq!(family[0].id(), &Ed25519Identity::from([1; 32]));
2835
2836 // But we've made this relay claim membership with several others.
2837 let r10 = netdir.by_id(&Ed25519Identity::from([10; 32])).unwrap();
2838 let family: HashSet<_> = netdir.known_family_members(&r10).map(|r| *r.id()).collect();
2839 assert_eq!(family.len(), 2);
2840 assert!(family.contains(&Ed25519Identity::from([11; 32])));
2841 assert!(family.contains(&Ed25519Identity::from([12; 32])));
2842 // Note that 13 doesn't get put in, even though it's listed, since it doesn't claim
2843 // membership with 10.
2844 }
2845 #[test]
2846 #[cfg(feature = "geoip")]
2847 fn relay_has_country_code() {
2848 let src_v6 = r#"
2849 fe80:dead:beef::,fe80:dead:ffff::,US
2850 fe80:feed:eeee::1,fe80:feed:eeee::2,AT
2851 fe80:feed:eeee::2,fe80:feed:ffff::,DE
2852 "#;
2853 let db = GeoipDb::new_from_legacy_format("", src_v6).unwrap();
2854
2855 let netdir = construct_custom_netdir_with_geoip(
2856 |pos, n, _| {
2857 if pos == 0x01 {
2858 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2859 }
2860 if pos == 0x02 {
2861 n.rs.add_or_port("[fe80:feed:eeee::1]:42".parse().unwrap());
2862 n.rs.add_or_port("[fe80:feed:eeee::2]:42".parse().unwrap());
2863 }
2864 if pos == 0x03 {
2865 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2866 n.rs.add_or_port("[fe80:dead:beef::2]:42".parse().unwrap());
2867 }
2868 },
2869 &db,
2870 )
2871 .unwrap()
2872 .unwrap_if_sufficient()
2873 .unwrap();
2874
2875 // No GeoIP data available -> None
2876 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2877 assert_eq!(r0.cc, None);
2878
2879 // Exactly one match -> Some
2880 let r1 = netdir.by_id(&Ed25519Identity::from([1; 32])).unwrap();
2881 assert_eq!(r1.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2882
2883 // Conflicting matches -> None
2884 let r2 = netdir.by_id(&Ed25519Identity::from([2; 32])).unwrap();
2885 assert_eq!(r2.cc, None);
2886
2887 // Multiple agreeing matches -> Some
2888 let r3 = netdir.by_id(&Ed25519Identity::from([3; 32])).unwrap();
2889 assert_eq!(r3.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2890 }
2891
2892 #[test]
2893 #[cfg(feature = "hs-common")]
2894 #[allow(deprecated)]
2895 fn hs_dirs_selection() {
2896 use tor_basic_utils::test_rng::testing_rng;
2897
2898 const HSDIR_SPREAD_STORE: i32 = 6;
2899 const HSDIR_SPREAD_FETCH: i32 = 2;
2900 const PARAMS: [(&str, i32); 2] = [
2901 ("hsdir_spread_store", HSDIR_SPREAD_STORE),
2902 ("hsdir_spread_fetch", HSDIR_SPREAD_FETCH),
2903 ];
2904
2905 let netdir: Arc<NetDir> =
2906 crate::testnet::construct_custom_netdir_with_params(|_, _, _| {}, PARAMS, None)
2907 .unwrap()
2908 .unwrap_if_sufficient()
2909 .unwrap()
2910 .into();
2911 let hsid = dummy_hs_blind_id();
2912
2913 const OP_RELAY_COUNT: &[(HsDirOp, usize)] = &[
2914 // We can't upload to (hsdir_n_replicas * hsdir_spread_store) = 12 relays, because there
2915 // are only 10 relays with the HsDir flag in the consensus.
2916 #[cfg(feature = "hs-service")]
2917 (HsDirOp::Upload, 10),
2918 (HsDirOp::Download, 4),
2919 ];
2920
2921 for (op, relay_count) in OP_RELAY_COUNT {
2922 let relays = netdir.hs_dirs(&hsid, *op, &mut testing_rng());
2923
2924 assert_eq!(relays.len(), *relay_count);
2925
2926 // There should be no duplicates (the filtering function passed to
2927 // HsDirRing::ring_items_at() ensures the relays that are already in use for
2928 // lower-numbered replicas aren't considered a second time for a higher-numbered
2929 // replica).
2930 let unique = relays
2931 .iter()
2932 .map(|relay| relay.ed_identity())
2933 .collect::<HashSet<_>>();
2934 assert_eq!(unique.len(), relays.len());
2935 }
2936
2937 // TODO: come up with a test that checks that HsDirRing::ring_items_at() skips over the
2938 // expected relays.
2939 //
2940 // For example, let's say we have the following hsdir ring:
2941 //
2942 // A - B
2943 // / \
2944 // F C
2945 // \ /
2946 // E - D
2947 //
2948 // Let's also assume that:
2949 //
2950 // * hsdir_spread_store = 3
2951 // * the ordering of the relays on the ring is [A, B, C, D, E, F]
2952 //
2953 // If we use relays [A, B, C] for replica 1, and hs_index(2) = E, then replica 2 _must_ get
2954 // relays [E, F, D]. We should have a test that checks this.
2955 }
2956
2957 #[test]
2958 fn zero_weights() {
2959 // Here we check the behavior of IndexedRandom::{choose_weighted, choose_multiple_weighted}
2960 // in the presence of items whose weight is 0.
2961 //
2962 // We think that the behavior is:
2963 // - An item with weight 0 is never returned.
2964 // - If all items have weight 0, choose_weighted returns an error.
2965 // - If all items have weight 0, choose_multiple_weighted returns an empty list.
2966 // - If we request n items from choose_multiple_weighted,
2967 // but only m<n items have nonzero weight, we return all m of those items.
2968 // - if the request for n items can't be completely satisfied with n items of weight >= 0,
2969 // we get InsufficientNonZero.
2970 let items = vec![1, 2, 3];
2971 let mut rng = testing_rng();
2972
2973 let a = items.choose_weighted(&mut rng, |_| 0);
2974 assert!(matches!(a, Err(WeightError::InsufficientNonZero)));
2975
2976 let x = items.choose_multiple_weighted(&mut rng, 2, |_| 0);
2977 let xs: Vec<_> = x.unwrap().collect();
2978 assert!(xs.is_empty());
2979
2980 let only_one = |n: &i32| if *n == 1 { 1 } else { 0 };
2981 let x = items.choose_multiple_weighted(&mut rng, 2, only_one);
2982 let xs: Vec<_> = x.unwrap().collect();
2983 assert_eq!(&xs[..], &[&1]);
2984
2985 for _ in 0..100 {
2986 let a = items.choose_weighted(&mut rng, only_one);
2987 assert_eq!(a.unwrap(), &1);
2988
2989 let x = items
2990 .choose_multiple_weighted(&mut rng, 1, only_one)
2991 .unwrap()
2992 .collect::<Vec<_>>();
2993 assert_eq!(x, vec![&1]);
2994 }
2995 }
2996
2997 #[test]
2998 fn insufficient_but_nonzero() {
2999 // Here we check IndexedRandom::choose_multiple_weighted when there are no zero values,
3000 // but there are insufficient values.
3001 // (If this behavior changes, we need to change our usage.)
3002
3003 let items = vec![1, 2, 3];
3004 let mut rng = testing_rng();
3005 let mut a = items
3006 .choose_multiple_weighted(&mut rng, 10, |_| 1)
3007 .unwrap()
3008 .copied()
3009 .collect::<Vec<_>>();
3010 a.sort();
3011 assert_eq!(a, items);
3012 }
3013}