1
//! Abstract code to manage a set of tunnels which have underlying circuit(s).
2
//!
3
//! This module implements the real logic for deciding when and how to
4
//! launch tunnels, and for which tunnels to hand out in response to
5
//! which requests.
6
//!
7
//! For testing and abstraction purposes, this module _does not_
8
//! actually know anything about tunnels _per se_.  Instead,
9
//! everything is handled using a set of traits that are internal to this
10
//! crate:
11
//!
12
//!  * [`AbstractTunnel`] is a view of a tunnel.
13
//!  * [`AbstractTunnelBuilder`] knows how to build an [`AbstractTunnel`].
14
//!
15
//! Using these traits, the [`AbstractTunnelMgr`] object manages a set of
16
//! tunnels, launching them as necessary, and keeping track of the
17
//! restrictions on their use.
18

            
19
// TODO:
20
// - Testing
21
//    - Error from prepare_action()
22
//    - Error reported by restrict_mut?
23

            
24
use crate::config::CircuitTiming;
25
use crate::usage::{SupportedTunnelUsage, TargetTunnelUsage};
26
use crate::{DirInfo, Error, PathConfig, Result, timeouts};
27

            
28
use retry_error::RetryError;
29
use tor_async_utils::mpsc_channel_no_memquota;
30
use tor_basic_utils::retry::RetryDelay;
31
use tor_config::MutCfg;
32
use tor_error::{AbsRetryTime, HasRetryTime, debug_report, info_report, internal, warn_report};
33
#[cfg(feature = "vanguards")]
34
use tor_guardmgr::vanguards::VanguardMgr;
35
use tor_linkspec::CircTarget;
36
use tor_proto::circuit::UniqId;
37
use tor_proto::client::circuit::{CircParameters, Path};
38
use tor_rtcompat::{Runtime, SleepProviderExt};
39

            
40
use async_trait::async_trait;
41
use futures::channel::mpsc;
42
use futures::future::{FutureExt, Shared};
43
use futures::stream::{FuturesUnordered, StreamExt};
44
use futures::task::SpawnExt;
45
use oneshot_fused_workaround as oneshot;
46
use std::collections::HashMap;
47
use std::fmt::Debug;
48
use std::hash::Hash;
49
use std::panic::AssertUnwindSafe;
50
use std::sync::{self, Arc, Weak};
51
use std::time::{Duration, Instant};
52
use tracing::{debug, warn};
53
use weak_table::PtrWeakHashSet;
54

            
55
mod streams;
56

            
57
/// Description of how we got a tunnel.
#[non_exhaustive]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) enum TunnelProvenance {
    /// This tunnel was newly launched, or was in progress and finished while
    /// we were waiting.
    NewlyCreated,
    /// This tunnel already existed when we asked for it.
    Preexisting,
}
67

            
68
/// An error returned when trying to restrict a tunnel's permissible usage.
#[derive(Clone, Debug, thiserror::Error)]
#[non_exhaustive]
pub enum RestrictionFailed {
    /// Tried to restrict a specification, but the tunnel didn't support the
    /// requested usage.
    #[error("Specification did not support desired usage")]
    NotSupported,
}
76

            
77
/// Minimal abstract view of a tunnel.
///
/// From this module's point of view, tunnels are simply objects
/// with unique identities, and a possible closed-state.
#[async_trait]
pub(crate) trait AbstractTunnel: Debug {
    /// Type for a unique identifier for tunnels.
    type Id: Clone + Debug + Hash + Eq + Send + Sync;
    /// Return the unique identifier for this tunnel.
    ///
    /// # Requirements
    ///
    /// The values returned by this function are unique for distinct
    /// tunnels.
    fn id(&self) -> Self::Id;

    /// Return true if this tunnel is usable for some purpose.
    ///
    /// Reasons a tunnel might be unusable include being closed.
    fn usable(&self) -> bool;

    /// Return a [`Path`] object describing the only circuit in this tunnel.
    ///
    /// Returns an error if the tunnel has more than one circuit.
    fn single_path(&self) -> tor_proto::Result<Arc<Path>>;

    /// Return the number of hops in this tunnel.
    ///
    /// Returns an error if the circuit is closed.
    ///
    /// NOTE: This function will currently return only the number of hops
    /// _currently_ in the tunnel. If there is an extend operation in progress,
    /// the currently pending hop may or may not be counted, depending on whether
    /// the extend operation finishes before this call is done.
    fn n_hops(&self) -> tor_proto::Result<usize>;

    /// Return true if this tunnel is closed and therefore unusable.
    fn is_closing(&self) -> bool;

    /// Return a process-unique identifier for this tunnel.
    fn unique_id(&self) -> UniqId;

    /// Extend the tunnel via the most appropriate handshake to a new `target` hop.
    async fn extend<T: CircTarget + Sync>(
        &self,
        target: &T,
        params: CircParameters,
    ) -> tor_proto::Result<()>;
}
126

            
127
/// A plan for an [`AbstractTunnelBuilder`] that can maybe be mutated by tests.
///
/// You should implement this trait using all default methods for all code that isn't test code.
pub(crate) trait MockablePlan {
    /// Add a reason string that was passed to `SleepProvider::block_advance()` to this object
    /// so that it knows what to pass to `::release_advance()`.
    fn add_blocked_advance_reason(&mut self, _reason: String) {}
}
135

            
136
/// An object that knows how to build tunnels.
///
/// This creates tunnels in two phases. First, a plan is
/// made for how to build the tunnel. This planning phase should be
/// relatively fast, and must not suspend or block.  Its purpose is to
/// get an early estimate of which operations the tunnel will be able
/// to support when it's done.
///
/// Second, the tunnel is actually built, using the plan as input.
#[async_trait]
pub(crate) trait AbstractTunnelBuilder<R: Runtime>: Send + Sync {
    /// The tunnel type that this builder knows how to build.
    type Tunnel: AbstractTunnel + Send + Sync;
    /// An opaque type describing how a given tunnel will be built.
    /// It may represent some or all of a path — or it may not.
    //
    // TODO: It would be nice to have this parameterized on a lifetime,
    // and have that lifetime depend on the lifetime of the directory.
    // But I don't think that rust can do that.
    //
    // HACK(eta): I don't like the fact that `MockablePlan` is necessary here.
    type Plan: Send + Debug + MockablePlan;

    // TODO: I'd like to have a Dir type here to represent
    // crate::DirInfo, but that would need to be parameterized too,
    // and would make everything complicated.

    /// Form a plan for how to build a new tunnel that supports `usage`.
    ///
    /// Return an opaque Plan object, and a new spec describing what
    /// the tunnel will actually support when it's built.  (For
    /// example, if the input spec requests a tunnel that connects to
    /// port 80, then "planning" the tunnel might involve picking an
    /// exit that supports port 80, and the resulting spec might be
    /// the exit's complete list of supported ports.)
    ///
    /// # Requirements
    ///
    /// The resulting Spec must support `usage`.
    fn plan_tunnel(
        &self,
        usage: &TargetTunnelUsage,
        dir: DirInfo<'_>,
    ) -> Result<(Self::Plan, SupportedTunnelUsage)>;

    /// Construct a tunnel according to a given plan.
    ///
    /// On success, return a spec describing what the tunnel can be used for,
    /// and the tunnel that was just constructed.
    ///
    /// This function should implement some kind of a timeout for
    /// tunnels that are taking too long.
    ///
    /// # Requirements
    ///
    /// The spec that this function returns _must_ support the usage
    /// that was originally passed to `plan_tunnel`.  It _must_ also
    /// contain the spec that was originally returned by
    /// `plan_tunnel`.
    async fn build_tunnel(&self, plan: Self::Plan) -> Result<(SupportedTunnelUsage, Self::Tunnel)>;

    /// Return a "parallelism factor" with which tunnels should be
    /// constructed for a given purpose.
    ///
    /// If this function returns N, then whenever we launch tunnels
    /// for this purpose, then we launch N in parallel.
    ///
    /// The default implementation returns 1.  The value of 0 is
    /// treated as if it were 1.
    fn launch_parallelism(&self, usage: &TargetTunnelUsage) -> usize {
        let _ = usage; // default implementation ignores this.
        1
    }

    /// Return a "parallelism factor" for which tunnels should be
    /// used for a given purpose.
    ///
    /// If this function returns N, then whenever we select among
    /// open tunnels for this purpose, we choose at random from the
    /// best N.
    ///
    /// The default implementation returns 1.  The value of 0 is
    /// treated as if it were 1.
    // TODO: Possibly this doesn't belong in this trait.
    fn select_parallelism(&self, usage: &TargetTunnelUsage) -> usize {
        let _ = usage; // default implementation ignores this.
        1
    }

    /// Return true if we are currently attempting to learn tunnel
    /// timeouts by building testing tunnels.
    fn learning_timeouts(&self) -> bool;

    /// Flush state to the state manager if we own the lock.
    ///
    /// Return `Ok(true)` if we saved, and `Ok(false)` if we didn't hold the lock.
    fn save_state(&self) -> Result<bool>;

    /// Return this builder's [`PathConfig`].
    fn path_config(&self) -> Arc<PathConfig>;

    /// Replace this builder's [`PathConfig`].
    // TODO: This is dead_code because we only call this for the CircuitBuilder specialization of
    // CircMgr, not from the generic version, because this trait doesn't provide guardmgr, which is
    // needed by the [`CircMgr::reconfigure`] function that would be the only caller of this. We
    // should add `guardmgr` to this trait, make [`CircMgr::reconfigure`] generic, and remove this
    // dead_code marking.
    #[allow(dead_code)]
    fn set_path_config(&self, new_config: PathConfig);

    /// Return a reference to this builder's timeout estimator.
    fn estimator(&self) -> &timeouts::Estimator;

    /// Return a reference to this builder's `VanguardMgr`.
    #[cfg(feature = "vanguards")]
    fn vanguardmgr(&self) -> &Arc<VanguardMgr<R>>;

    /// Replace our state with a new owning state, assuming we have
    /// storage permission.
    fn upgrade_to_owned_state(&self) -> Result<()>;

    /// Reload persistent state from disk, if we don't have storage permission.
    fn reload_state(&self) -> Result<()>;

    /// Return a reference to this builder's `GuardMgr`.
    fn guardmgr(&self) -> &tor_guardmgr::GuardMgr<R>;

    /// Reconfigure this builder using the latest set of network parameters.
    ///
    /// (NOTE: for now, this only affects tunnel timeout estimation.)
    fn update_network_parameters(&self, p: &tor_netdir::params::NetParameters);
}
269

            
270
/// Enumeration to track the expiration state of a tunnel.
///
/// A tunnel can either be unused (at which point it should expire if it is
/// _still unused_ by a certain time), or dirty (at which point it should
/// expire after a certain duration).
///
/// All tunnels start out "unused" and become "dirty" when their spec
/// is first restricted -- that is, when they are first handed out to be
/// used for a request.
#[derive(Debug, Clone, PartialEq, Eq)]
enum ExpirationInfo {
    /// The tunnel has never been used.
    Unused {
        /// A time when the tunnel should expire.
        use_before: Instant,
    },
    /// The tunnel has been used (or at least, restricted for use with a
    /// request) at least once.
    Dirty {
        /// The time at which this tunnel's spec was first restricted.
        dirty_since: Instant,
    },
}
293

            
294
impl ExpirationInfo {
295
    /// Return an ExpirationInfo for a newly created tunnel.
296
92
    fn new(use_before: Instant) -> Self {
297
92
        ExpirationInfo::Unused { use_before }
298
92
    }
299

            
300
    /// Mark this ExpirationInfo as dirty, if it is not already dirty.
301
448
    fn mark_dirty(&mut self, now: Instant) {
302
448
        if matches!(self, ExpirationInfo::Unused { .. }) {
303
52
            *self = ExpirationInfo::Dirty { dirty_since: now };
304
396
        }
305
448
    }
306
}
307

            
308
/// An entry for an open tunnel held by an `AbstractTunnelMgr`.
#[derive(Debug, Clone)]
pub(crate) struct OpenEntry<T> {
    /// The supported usage for this tunnel.
    spec: SupportedTunnelUsage,
    /// The tunnel under management.
    tunnel: Arc<T>,
    /// When does this tunnel expire?
    ///
    /// (Note that expired tunnels are removed from the manager,
    /// which does not actually close them until there are no more
    /// references to them.)
    expiration: ExpirationInfo,
}
322

            
323
impl<T: AbstractTunnel> OpenEntry<T> {
    /// Make a new OpenEntry for a given tunnel and spec.
    fn new(spec: SupportedTunnelUsage, tunnel: T, expiration: ExpirationInfo) -> Self {
        OpenEntry {
            spec,
            // `.into()` wraps the tunnel in an `Arc` here.
            tunnel: tunnel.into(),
            expiration,
        }
    }

    /// Return true if the underlying tunnel can be used for `usage`.
    ///
    /// A closed (non-usable) tunnel never supports anything.
    pub(crate) fn supports(&self, usage: &TargetTunnelUsage) -> bool {
        self.tunnel.usable() && self.spec.supports(usage)
    }

    /// Change the underlying tunnel's permissible usage, based on its having
    /// been used for `usage` at time `now`.
    ///
    /// Return an error if the tunnel may not be used for `usage`.
    ///
    /// Note the order here: if restricting the spec fails, the entry is
    /// _not_ marked dirty.
    fn restrict_mut(&mut self, usage: &TargetTunnelUsage, now: Instant) -> Result<()> {
        self.spec.restrict_mut(usage)?;
        self.expiration.mark_dirty(now);
        Ok(())
    }

    /// Find the "best" entry from a slice of OpenEntry for supporting
    /// a given `usage`.
    ///
    /// If `parallelism` is some N greater than 1, we pick randomly
    /// from the best `N` tunnels.
    ///
    /// # Requirements
    ///
    /// Requires that `ents` is nonempty, and that every element of `ents`
    /// supports `spec`.  (Panics via `expect` if `ents` is empty.)
    fn find_best<'a>(
        // we do not mutate `ents`, but to return `&mut Self` we must have a mutable borrow
        ents: &'a mut [&'a mut Self],
        usage: &TargetTunnelUsage,
        parallelism: usize,
    ) -> &'a mut Self {
        let _ = usage; // not yet used.
        use rand::seq::IndexedMutRandom as _;
        // Clamp so that we neither index past the end nor pick from an empty slice.
        let parallelism = parallelism.clamp(1, ents.len());
        // TODO: Actually look over the whole list to see which is better.
        let slice = &mut ents[0..parallelism];
        let mut rng = rand::rng();
        slice.choose_mut(&mut rng).expect("Input list was empty")
    }

    /// Return true if this tunnel has been marked as dirty before
    /// `dirty_cutoff`, or if it is an unused tunnel set to expire before
    /// `unused_cutoff`.
    fn should_expire(&self, unused_cutoff: Instant, dirty_cutoff: Instant) -> bool {
        match self.expiration {
            ExpirationInfo::Unused { use_before } => use_before <= unused_cutoff,
            ExpirationInfo::Dirty { dirty_since } => dirty_since <= dirty_cutoff,
        }
    }
}
383

            
384
/// A result type whose "Ok" value is the Id for a tunnel built by a builder `B`
/// running on runtime `R`.
type PendResult<B, R> = Result<<<B as AbstractTunnelBuilder<R>>::Tunnel as AbstractTunnel>::Id>;
386

            
387
/// An in-progress tunnel request tracked by an `AbstractTunnelMgr`.
///
/// (In addition to tracking tunnels, `AbstractTunnelMgr` tracks
/// _requests_ for tunnels.  The manager uses these entries if it
/// finds that some tunnel created _after_ a request first launched
/// might meet the request's requirements.)
struct PendingRequest<B: AbstractTunnelBuilder<R>, R: Runtime> {
    /// Usage for the operation requested by this request.
    usage: TargetTunnelUsage,
    /// A channel to use for telling this request about tunnels that it
    /// might like.
    notify: mpsc::Sender<PendResult<B, R>>,
}
400

            
401
impl<B: AbstractTunnelBuilder<R>, R: Runtime> PendingRequest<B, R> {
402
    /// Return true if this request would be supported by `spec`.
403
34
    fn supported_by(&self, spec: &SupportedTunnelUsage) -> bool {
404
34
        spec.supports(&self.usage)
405
34
    }
406
}
407

            
408
/// An entry for an under-construction in-progress tunnel tracked by
409
/// an `AbstractTunnelMgr`.
410
#[derive(Debug)]
411
struct PendingEntry<B: AbstractTunnelBuilder<R>, R: Runtime> {
412
    /// Specification that this tunnel will support, if every pending
413
    /// request that is waiting for it is attached to it.
414
    ///
415
    /// This spec becomes more and more restricted as more pending
416
    /// requests are waiting for this tunnel.
417
    ///
418
    /// This spec is contained by circ_spec, and must support the usage
419
    /// of every pending request that's waiting for this tunnel.
420
    tentative_assignment: sync::Mutex<SupportedTunnelUsage>,
421
    /// A shared future for requests to use when waiting for
422
    /// notification of this tunnel's success.
423
    receiver: Shared<oneshot::Receiver<PendResult<B, R>>>,
424
}
425

            
426
impl<B: AbstractTunnelBuilder<R>, R: Runtime> PendingEntry<B, R> {
    /// Make a new PendingEntry that starts out supporting a given
    /// spec.  Return that PendingEntry, along with a Sender to use to
    /// report the result of building this tunnel.
    fn new(spec: &SupportedTunnelUsage) -> (Self, oneshot::Sender<PendResult<B, R>>) {
        let tentative_assignment = sync::Mutex::new(spec.clone());
        let (sender, receiver) = oneshot::channel();
        // `.shared()` lets multiple waiting requests await the same result.
        let receiver = receiver.shared();
        let entry = PendingEntry {
            tentative_assignment,
            receiver,
        };
        (entry, sender)
    }

    /// Return true if this tunnel's current tentative assignment
    /// supports `usage`.
    fn supports(&self, usage: &TargetTunnelUsage) -> bool {
        let assignment = self.tentative_assignment.lock().expect("poisoned lock");
        assignment.supports(usage)
    }

    /// Try to change the tentative assignment of this tunnel by
    /// restricting it for use with `usage`.
    ///
    /// Return an error if the current tentative assignment didn't
    /// support `usage` in the first place.
    //
    // NOTE(review): unlike `supports()` above, which panics on a poisoned
    // lock via `expect`, this method silently skips the restriction and
    // returns `Ok(())` when the lock is poisoned — confirm this asymmetry
    // is intentional.
    fn tentative_restrict_mut(&self, usage: &TargetTunnelUsage) -> Result<()> {
        if let Ok(mut assignment) = self.tentative_assignment.lock() {
            assignment.restrict_mut(usage)?;
        }
        Ok(())
    }

    /// Find the best PendingEntry values from a slice for use with
    /// `usage`.
    ///
    /// # Requirements
    ///
    /// The `ents` slice must not be empty (indexing panics otherwise).
    /// Every element of `ents` must support the given spec.
    fn find_best(ents: &[Arc<Self>], usage: &TargetTunnelUsage) -> Vec<Arc<Self>> {
        // TODO: Actually look over the whole list to see which is better.
        let _ = usage; // currently unused
        vec![Arc::clone(&ents[0])]
    }
}
473

            
474
/// Wrapper type to represent the state between planning to build a
/// tunnel and constructing it.
#[derive(Debug)]
struct TunnelBuildPlan<B: AbstractTunnelBuilder<R>, R: Runtime> {
    /// The Plan object returned by [`AbstractTunnelBuilder::plan_tunnel`].
    plan: B::Plan,
    /// A sender to notify any pending requests when this tunnel is done.
    sender: oneshot::Sender<PendResult<B, R>>,
    /// A strong reference to the PendingEntry for this tunnel build attempt.
    pending: Arc<PendingEntry<B, R>>,
}
485

            
486
/// The inner state of an [`AbstractTunnelMgr`].
struct TunnelList<B: AbstractTunnelBuilder<R>, R: Runtime> {
    /// A map from tunnel ID to [`OpenEntry`] values for all managed
    /// open tunnels.
    ///
    /// A tunnel is added here from [`AbstractTunnelMgr::do_launch`] when we find
    /// that it completes successfully, and has not been cancelled.
    /// When we decide that such a tunnel should no longer be handed out for
    /// any new requests, we "retire" the tunnel by removing it from this map.
    #[allow(clippy::type_complexity)]
    open_tunnels: HashMap<<B::Tunnel as AbstractTunnel>::Id, OpenEntry<B::Tunnel>>,
    /// Weak-set of PendingEntry for tunnels that are being built.
    ///
    /// Because this set only holds weak references, and the only strong
    /// reference to the PendingEntry is held by the task building the tunnel,
    /// this set's members are lazily removed after the tunnel is either built
    /// or fails to build.
    ///
    /// This set is used for two purposes:
    ///
    /// 1. When a tunnel request finds that there is no open tunnel for its
    ///    purposes, it checks here to see if there is a pending tunnel that it
    ///    could wait for.
    /// 2. When a pending tunnel finishes building, it checks here to make sure
    ///    that it has not been cancelled. (Removing an entry from this set marks
    ///    it as cancelled.)
    ///
    /// An entry is added here in [`AbstractTunnelMgr::prepare_action`] when we
    /// decide that a tunnel needs to be launched.
    ///
    /// Later, in [`AbstractTunnelMgr::do_launch`], once the tunnel has finished
    /// (or failed), we remove the entry (by pointer identity).
    /// If we cannot find the entry, we conclude that the request has been
    /// _cancelled_, and so we discard any tunnel that was created.
    pending_tunnels: PtrWeakHashSet<Weak<PendingEntry<B, R>>>,
    /// Weak-set of PendingRequest for requests that are waiting for a
    /// tunnel to be built.
    ///
    /// Because this set only holds weak references, and the only
    /// strong reference to the PendingRequest is held by the task
    /// waiting for the tunnel to be built, this set's members are
    /// lazily removed after the request succeeds or fails.
    pending_requests: PtrWeakHashSet<Weak<PendingRequest<B, R>>>,
}
530

            
531
impl<B: AbstractTunnelBuilder<R>, R: Runtime> TunnelList<B, R> {
532
    /// Make a new empty `CircList`
533
90
    fn new() -> Self {
534
90
        TunnelList {
535
90
            open_tunnels: HashMap::new(),
536
90
            pending_tunnels: PtrWeakHashSet::new(),
537
90
            pending_requests: PtrWeakHashSet::new(),
538
90
        }
539
90
    }
540

            
541
    /// Add `e` to the list of open tunnels.
542
92
    fn add_open(&mut self, e: OpenEntry<B::Tunnel>) {
543
92
        let id = e.tunnel.id();
544
92
        self.open_tunnels.insert(id, e);
545
92
    }
546

            
547
    /// Find all the usable open tunnels that support `usage`.
548
    ///
549
    /// Return None if there are no such tunnels.
550
596
    fn find_open(&mut self, usage: &TargetTunnelUsage) -> Option<Vec<&mut OpenEntry<B::Tunnel>>> {
551
596
        let list = self.open_tunnels.values_mut();
552
596
        let v = SupportedTunnelUsage::find_supported(list, usage);
553
596
        if v.is_empty() { None } else { Some(v) }
554
596
    }
555

            
556
    /// Find an open tunnel by ID.
557
    ///
558
    /// Return None if no such tunnels exists in this list.
559
70
    fn get_open_mut(
560
70
        &mut self,
561
70
        id: &<B::Tunnel as AbstractTunnel>::Id,
562
70
    ) -> Option<&mut OpenEntry<B::Tunnel>> {
563
70
        self.open_tunnels.get_mut(id)
564
70
    }
565

            
566
    /// Extract an open tunnel by ID, removing it from this list.
567
    ///
568
    /// Return None if no such tunnel exists in this list.
569
8
    fn take_open(
570
8
        &mut self,
571
8
        id: &<B::Tunnel as AbstractTunnel>::Id,
572
8
    ) -> Option<OpenEntry<B::Tunnel>> {
573
8
        self.open_tunnels.remove(id)
574
8
    }
575

            
576
    /// Remove tunnels based on expiration times.
577
    ///
578
    /// We remove every unused tunnel that is set to expire by
579
    /// `unused_cutoff`, and every dirty tunnel that has been dirty
580
    /// since before `dirty_cutoff`.
581
4
    fn expire_tunnels(&mut self, unused_cutoff: Instant, dirty_cutoff: Instant) {
582
4
        self.open_tunnels
583
8
            .retain(|_k, v| !v.should_expire(unused_cutoff, dirty_cutoff));
584
4
    }
585

            
586
    /// Remove the tunnel with given `id`, if it is scheduled to
587
    /// expire now, according to the provided expiration times.
588
    fn expire_tunnel(
589
        &mut self,
590
        id: &<B::Tunnel as AbstractTunnel>::Id,
591
        unused_cutoff: Instant,
592
        dirty_cutoff: Instant,
593
    ) {
594
        let should_expire = self
595
            .open_tunnels
596
            .get(id)
597
            .map(|v| v.should_expire(unused_cutoff, dirty_cutoff))
598
            .unwrap_or_else(|| false);
599
        if should_expire {
600
            self.open_tunnels.remove(id);
601
        }
602
    }
603

            
604
    /// Add `pending` to the set of in-progress tunnels.
605
136
    fn add_pending_tunnel(&mut self, pending: Arc<PendingEntry<B, R>>) {
606
136
        self.pending_tunnels.insert(pending);
607
136
    }
608

            
609
    /// Find all pending tunnels that support `usage`.
610
    ///
611
    /// If no such tunnels are currently being built, return None.
612
166
    fn find_pending_tunnels(
613
166
        &self,
614
166
        usage: &TargetTunnelUsage,
615
166
    ) -> Option<Vec<Arc<PendingEntry<B, R>>>> {
616
166
        let result: Vec<_> = self
617
166
            .pending_tunnels
618
166
            .iter()
619
166
            .filter(|p| p.supports(usage))
620
166
            .filter(|p| !matches!(p.receiver.peek(), Some(Err(_))))
621
166
            .collect();
622

            
623
166
        if result.is_empty() {
624
140
            None
625
        } else {
626
26
            Some(result)
627
        }
628
166
    }
629

            
630
    /// Return true if `circ` is still pending.
631
    ///
632
    /// A tunnel will become non-pending when finishes (successfully or not), or when it's
633
    /// removed from this list via `clear_all_tunnels()`.
634
52
    fn tunnel_is_pending(&self, circ: &Arc<PendingEntry<B, R>>) -> bool {
635
52
        self.pending_tunnels.contains(circ)
636
52
    }
637

            
638
    /// Construct and add a new entry to the set of request waiting
639
    /// for a tunnel.
640
    ///
641
    /// Return the request, and a new receiver stream that it should
642
    /// use for notification of possible tunnels to use.
643
150
    fn add_pending_request(&mut self, pending: &Arc<PendingRequest<B, R>>) {
644
150
        self.pending_requests.insert(Arc::clone(pending));
645
150
    }
646

            
647
    /// Return all pending requests that would be satisfied by a tunnel
648
    /// that supports `circ_spec`.
649
32
    fn find_pending_requests(
650
32
        &self,
651
32
        circ_spec: &SupportedTunnelUsage,
652
32
    ) -> Vec<Arc<PendingRequest<B, R>>> {
653
32
        self.pending_requests
654
32
            .iter()
655
34
            .filter(|pend| pend.supported_by(circ_spec))
656
32
            .collect()
657
32
    }
658

            
659
    /// Clear all pending and open tunnels.
660
    ///
661
    /// Calling `clear_all_tunnels` ensures that any request that is answered _after
662
    /// this method runs_ will receive a tunnels that was launched _after this
663
    /// method runs_.
664
    fn clear_all_tunnels(&mut self) {
665
        // Note that removing entries from pending_circs will also cause the
666
        // tunnel tasks to realize that they are cancelled when they
667
        // go to tell anybody about their results.
668
        self.pending_tunnels.clear();
669
        self.open_tunnels.clear();
670
    }
671
}
672

            
673
/// Timing information for tunnels that have been built but never used.
///
/// Currently taken from the network parameters.
struct UnusedTimings {
    /// Minimum lifetime of a tunnel created while learning
    /// tunnel timeouts.
    learning: Duration,
    /// Minimum lifetime of a tunnel created while not learning
    /// tunnel timeouts.
    not_learning: Duration,
}
684

            
685
// This isn't really fallible, given the definitions of the underlying
// types.
#[allow(clippy::fallible_impl_from)]
impl From<&tor_netdir::params::NetParameters> for UnusedTimings {
    /// Derive unused-tunnel lifetimes from the consensus network parameters.
    fn from(v: &tor_netdir::params::NetParameters) -> Self {
        // These try_into() calls can't fail, so unwrap() can't panic.
        #[allow(clippy::unwrap_used)]
        UnusedTimings {
            learning: v
                .unused_client_circ_timeout_while_learning_cbt
                .try_into()
                .unwrap(),
            not_learning: v.unused_client_circ_timeout.try_into().unwrap(),
        }
    }
}
701

            
702
/// Abstract implementation for tunnel management.
///
/// The algorithm provided here is fairly simple. In its simplest form:
///
/// When somebody asks for a tunnel for a given operation: if we find
/// one open already, we return it.  If we find in-progress tunnels
/// that would meet our needs, we wait for one to finish (or for all
/// to fail).  And otherwise, we launch one or more tunnels to meet the
/// request's needs.
///
/// If this process fails, then we retry it, up to a timeout or a
/// numerical limit.
///
/// If a tunnel not previously considered for a given request
/// finishes before the request is satisfied, and if the tunnel would
/// satisfy the request, we try to give that tunnel as an answer to
/// that request even if it was not one of the tunnels that request
/// was waiting for.
pub(crate) struct AbstractTunnelMgr<B: AbstractTunnelBuilder<R>, R: Runtime> {
    /// Builder used to construct tunnels.
    builder: B,
    /// An asynchronous runtime to use for launching tasks and
    /// checking timeouts.
    runtime: R,
    /// A [`TunnelList`] to manage our list of tunnels, requests, and
    /// pending tunnels.
    tunnels: sync::Mutex<TunnelList<B, R>>,

    /// Configured information about when to expire tunnels and requests.
    circuit_timing: MutCfg<CircuitTiming>,

    /// Minimum lifetime of an unused tunnel.
    ///
    /// Derived from the network parameters.
    unused_timing: sync::Mutex<UnusedTimings>,
}
738

            
739
/// An action to take in order to satisfy a request for a tunnel.
///
/// Produced by `prepare_action` and consumed by `take_action`.
enum Action<B: AbstractTunnelBuilder<R>, R: Runtime> {
    /// We found an open tunnel: return immediately.
    Open(Arc<B::Tunnel>),
    /// We found one or more pending tunnels: wait until one succeeds,
    /// or all fail.
    Wait(FuturesUnordered<Shared<oneshot::Receiver<PendResult<B, R>>>>),
    /// We should launch tunnels: here are the instructions for how
    /// to do so.
    Build(Vec<TunnelBuildPlan<B, R>>),
}
750

            
751
impl<B: AbstractTunnelBuilder<R> + 'static, R: Runtime> AbstractTunnelMgr<B, R> {
752
    /// Construct a new AbstractTunnelMgr.
753
82
    pub(crate) fn new(builder: B, runtime: R, circuit_timing: CircuitTiming) -> Self {
754
82
        let circs = sync::Mutex::new(TunnelList::new());
755
82
        let dflt_params = tor_netdir::params::NetParameters::default();
756
82
        let unused_timing = (&dflt_params).into();
757
82
        AbstractTunnelMgr {
758
82
            builder,
759
82
            runtime,
760
82
            tunnels: circs,
761
82
            circuit_timing: circuit_timing.into(),
762
82
            unused_timing: sync::Mutex::new(unused_timing),
763
82
        }
764
82
    }
765

            
766
    /// Reconfigure this manager using the latest set of network parameters.
767
    pub(crate) fn update_network_parameters(&self, p: &tor_netdir::params::NetParameters) {
768
        let mut u = self
769
            .unused_timing
770
            .lock()
771
            .expect("Poisoned lock for unused_timing");
772
        *u = p.into();
773
    }
774

            
775
    /// Return this manager's [`CircuitTiming`].
    ///
    /// The returned `Arc` is a snapshot of the current configuration;
    /// a later call to `set_circuit_timing` does not change it.
    pub(crate) fn circuit_timing(&self) -> Arc<CircuitTiming> {
        self.circuit_timing.get()
    }
779

            
780
    /// Replace this manager's [`CircuitTiming`] with `new_config`.
    //
    // (The previous doc comment was a copy-paste of the getter's;
    // this method sets, not returns, the timing configuration.)
    pub(crate) fn set_circuit_timing(&self, new_config: CircuitTiming) {
        self.circuit_timing.replace(new_config);
    }
784
    /// Return a circuit suitable for use with a given `usage`,
    /// creating that circuit if necessary, and restricting it
    /// under the assumption that it will be used for that spec.
    ///
    /// This is the primary entry point for AbstractTunnelMgr.
    #[allow(clippy::cognitive_complexity)] // TODO #2010: Refactor?
    pub(crate) async fn get_or_launch(
        self: &Arc<Self>,
        usage: &TargetTunnelUsage,
        dir: DirInfo<'_>,
    ) -> Result<(Arc<B::Tunnel>, TunnelProvenance)> {
        /// Largest number of "resets" that we will accept in this attempt.
        ///
        /// A "reset" is an internally generated error that does not represent a
        /// real problem; only a "whoops, got to try again" kind of a situation.
        /// For example, if we reconfigure in the middle of an attempt and need
        /// to re-launch the circuit, that counts as a "reset", since there was
        /// nothing actually _wrong_ with the circuit we were building.
        ///
        /// We accept more resets than we do real failures. However,
        /// we don't accept an unlimited number: we don't want to inadvertently
        /// permit infinite loops here. If we ever bump against this limit, we
        /// should not automatically increase it: we should instead figure out
        /// why it is happening and try to make it not happen.
        const MAX_RESETS: usize = 8;

        let circuit_timing = self.circuit_timing();
        let timeout_at = self.runtime.now() + circuit_timing.request_timeout;
        let max_tries = circuit_timing.request_max_retries;
        // We compute the maximum number of failures by dividing the maximum
        // number of circuits to attempt by the number that will be launched in
        // parallel for each iteration.
        let max_failures = usize::div_ceil(
            max_tries as usize,
            std::cmp::max(1, self.builder.launch_parallelism(usage)),
        );

        let mut retry_schedule = RetryDelay::from_msec(100);
        let mut retry_err = RetryError::<Box<Error>>::in_attempt_to("find or build a tunnel");

        // Counters for the two kinds of problems (see MAX_RESETS above).
        let mut n_failures = 0;
        let mut n_resets = 0;

        // Retry loop: each iteration either returns a tunnel, records an
        // error for the final report, or breaks out on timeout / limits.
        for attempt_num in 1.. {
            // How much time is remaining?
            let remaining = match timeout_at.checked_duration_since(self.runtime.now()) {
                None => {
                    retry_err.push(Error::RequestTimeout);
                    break;
                }
                Some(t) => t,
            };

            let error = match self.prepare_action(usage, dir, true) {
                Ok(action) => {
                    // We successfully found an action: Take that action.
                    let outcome = self
                        .runtime
                        .timeout(remaining, Arc::clone(self).take_action(action, usage))
                        .await;

                    match outcome {
                        Ok(Ok(circ)) => return Ok(circ),
                        Ok(Err(e)) => {
                            debug!("Circuit attempt {} failed.", attempt_num);
                            Error::RequestFailed(e)
                        }
                        Err(_) => {
                            // We ran out of "remaining" time; there is nothing
                            // more to be done.
                            warn!("All tunnel attempts failed due to timeout");
                            retry_err.push(Error::RequestTimeout);
                            break;
                        }
                    }
                }
                Err(e) => {
                    // We couldn't pick the action!
                    debug_report!(
                        &e,
                        "Couldn't pick action for tunnel attempt {}",
                        attempt_num,
                    );
                    e
                }
            };

            // There's been an error.  See how long we wait before we retry.
            let now = self.runtime.now();
            let retry_time =
                error.abs_retry_time(now, || retry_schedule.next_delay(&mut rand::rng()));

            // Resets and failures are counted (and limited) separately.
            let (count, count_limit) = if error.is_internal_reset() {
                (&mut n_resets, MAX_RESETS)
            } else {
                (&mut n_failures, max_failures)
            };
            // Record the error, flattening it if needed.
            match error {
                Error::RequestFailed(e) => retry_err.extend(e),
                e => retry_err.push(e),
            }

            *count += 1;
            // If we have reached our limit of this kind of problem, we're done.
            if *count >= count_limit {
                warn!("Reached circuit build retry limit, exiting...");
                break;
            }

            // Wait, or not, as appropriate.
            match retry_time {
                AbsRetryTime::Immediate => {}
                AbsRetryTime::Never => break,
                AbsRetryTime::At(t) => {
                    // Never sleep past the overall request deadline.
                    let remaining = timeout_at.saturating_duration_since(now);
                    let delay = t.saturating_duration_since(now);
                    self.runtime.sleep(std::cmp::min(delay, remaining)).await;
                }
            }
        }

        warn!("Request failed");
        Err(Error::RequestFailed(retry_err))
    }
909

            
910
    /// Make sure a circuit exists, without actually asking for it.
    ///
    /// Make sure that there is a circuit (built or in-progress) that could be
    /// used for `usage`, and launch one or more circuits in a background task
    /// if there is not.
    // TODO: This should probably take some kind of parallelism parameter.
    #[cfg(test)]
    pub(crate) async fn ensure_tunnel(
        self: &Arc<Self>,
        usage: &TargetTunnelUsage,
        dir: DirInfo<'_>,
    ) -> Result<()> {
        // `restrict_circ == false`: we only want something usable to exist;
        // we are not claiming it for any particular request.
        let action = self.prepare_action(usage, dir, false)?;
        if let Action::Build(plans) = action {
            for plan in plans {
                let self_clone = Arc::clone(self);
                // Dropping the receiver is fine: nobody needs to hear
                // whether this background launch succeeded.
                let _ignore_receiver = self_clone.spawn_launch(usage, plan);
            }
        }

        Ok(())
    }
932

            
933
    /// Choose which action we should take in order to provide a tunnel
    /// for a given `usage`.
    ///
    /// If `restrict_circ` is true, we restrict the spec of any
    /// circ we decide to use to mark that it _is_ being used for
    /// `usage`.
    ///
    /// Preference order: an already-open tunnel, then pending tunnels
    /// to wait on, and only then a plan to build new ones.  The tunnel
    /// list lock is held for the whole decision.
    fn prepare_action(
        &self,
        usage: &TargetTunnelUsage,
        dir: DirInfo<'_>,
        restrict_circ: bool,
    ) -> Result<Action<B, R>> {
        let mut list = self.tunnels.lock().expect("poisoned lock");

        if let Some(mut open) = list.find_open(usage) {
            // We have open tunnels that meet the spec: return the best one.
            let parallelism = self.builder.select_parallelism(usage);
            let best = OpenEntry::find_best(&mut open, usage, parallelism);
            if restrict_circ {
                let now = self.runtime.now();
                best.restrict_mut(usage, now)?;
            }
            // TODO: If we have fewer tunnels here than our select
            // parallelism, perhaps we should launch more?

            return Ok(Action::Open(best.tunnel.clone()));
        }

        if let Some(pending) = list.find_pending_tunnels(usage) {
            // There are pending tunnels that could meet the spec.
            // Restrict them under the assumption that they could all
            // be used for this, and then wait until one is ready (or
            // all have failed)
            let best = PendingEntry::find_best(&pending, usage);
            if restrict_circ {
                for item in &best {
                    // TODO: Do we want to tentatively restrict _all_ of these?
                    // not clear to me.
                    item.tentative_restrict_mut(usage)?;
                }
            }
            let stream = best.iter().map(|item| item.receiver.clone()).collect();
            // TODO: if we have fewer tunnels here than our launch
            // parallelism, we might want to launch more.

            return Ok(Action::Wait(stream));
        }

        // Okay, we need to launch tunnels here.
        let parallelism = std::cmp::max(1, self.builder.launch_parallelism(usage));
        let mut plans = Vec::new();
        let mut last_err = None;
        for _ in 0..parallelism {
            match self.plan_by_usage(dir, usage) {
                Ok((pending, plan)) => {
                    list.add_pending_tunnel(pending);
                    plans.push(plan);
                }
                Err(e) => {
                    debug!("Unable to make a plan for {:?}: {}", usage, e);
                    last_err = Some(e);
                }
            }
        }
        if !plans.is_empty() {
            // Partial success is fine: any failed plans were logged above.
            Ok(Action::Build(plans))
        } else if let Some(last_err) = last_err {
            Err(last_err)
        } else {
            // we didn't even try to plan anything!
            Err(internal!("no plans were built, but no errors were found").into())
        }
    }
    /// Execute an action returned by pick-action, and return the
    /// resulting tunnel or error.
    #[allow(clippy::cognitive_complexity)] // TODO #2010: Refactor
    async fn take_action(
        self: Arc<Self>,
        act: Action<B, R>,
        usage: &TargetTunnelUsage,
    ) -> std::result::Result<(Arc<B::Tunnel>, TunnelProvenance), RetryError<Box<Error>>> {
        /// Store the error `err` into `retry_err`, as appropriate.
        fn record_error(
            retry_err: &mut RetryError<Box<Error>>,
            source: streams::Source,
            building: bool,
            mut err: Error,
        ) {
            if source == streams::Source::Right {
                // We don't care about this error, since it is from neither a tunnel we launched
                // nor one that we're waiting on.
                return;
            }
            if !building {
                // We aren't building our own tunnels, so our errors are
                // secondary reports of other tunnels' failures.
                err = Error::PendingFailed(Box::new(err));
            }
            retry_err.push(err);
        }
        /// Return a string describing what it means, within the context of this
        /// function, to have gotten an answer from `source`.
        fn describe_source(building: bool, source: streams::Source) -> &'static str {
            match (building, source) {
                (_, streams::Source::Right) => "optimistic advice",
                (true, streams::Source::Left) => "tunnel we're building",
                (false, streams::Source::Left) => "pending tunnel",
            }
        }
        // Get or make a stream of futures to wait on.
        let (building, wait_on_stream) = match act {
            Action::Open(c) => {
                // There's already a perfectly good open tunnel; we can return
                // it now.
                return Ok((c, TunnelProvenance::Preexisting));
            }
            Action::Wait(f) => {
                // There is one or more pending tunnel that we're waiting for.
                // If any succeeds, we try to use it.  If they all fail, we
                // fail.
                (false, f)
            }
            Action::Build(plans) => {
                // We're going to launch one or more tunnels in parallel.  We
                // report success if any succeeds, and failure if they all fail.
                let futures = FuturesUnordered::new();
                for plan in plans {
                    let self_clone = Arc::clone(&self);
                    // (This is where we actually launch tunnels.)
                    futures.push(self_clone.spawn_launch(usage, plan));
                }
                (true, futures)
            }
        };
        // Insert ourself into the list of pending requests, and make a
        // stream for us to listen on for notification from pending tunnels
        // other than those we are pending on.
        let (pending_request, additional_stream) = {
            // We don't want this queue to participate in memory quota tracking.
            // There isn't any tunnel yet, so there wouldn't be anything to account it to.
            // If this queue has the oldest data, probably the whole system is badly broken.
            // Tearing down the whole tunnel manager won't help.
            let (send, recv) = mpsc_channel_no_memquota(8);
            let pending = Arc::new(PendingRequest {
                usage: usage.clone(),
                notify: send,
            });

            let mut list = self.tunnels.lock().expect("poisoned lock");
            list.add_pending_request(&pending);

            (pending, recv)
        };
        // We use our "select_biased" stream combiner here to ensure that:
        //   1) Circuits from wait_on_stream (the ones we're pending on) are
        //      preferred.
        //   2) We exit this function when those tunnels are exhausted.
        //   3) We still get notified about other tunnels that might meet our
        //      interests.
        //
        // The events from Left stream are the ones that we explicitly asked for,
        // so we'll treat errors there as real problems.  The events from the
        // Right stream are ones that we got opportunistically told about; it's
        // not a big deal if those fail.
        let mut incoming = streams::select_biased(wait_on_stream, additional_stream.map(Ok));
        let mut retry_error = RetryError::in_attempt_to("wait for tunnels");
        while let Some((src, id)) = incoming.next().await {
            match id {
                Ok(Ok(ref id)) => {
                    // Great, we have a tunnel. See if we can use it!
                    let mut list = self.tunnels.lock().expect("poisoned lock");
                    if let Some(ent) = list.get_open_mut(id) {
                        let now = self.runtime.now();
                        match ent.restrict_mut(usage, now) {
                            Ok(()) => {
                                // Great, this will work.  We drop the
                                // pending request now explicitly to remove
                                // it from the list.
                                drop(pending_request);
                                if matches!(ent.expiration, ExpirationInfo::Unused { .. }) {
                                    // Since this tunnel hasn't been used yet, schedule expiration task after `max_dirtiness` from now.
                                    spawn_expiration_task(
                                        &self.runtime,
                                        Arc::downgrade(&self),
                                        ent.tunnel.id(),
                                        now + self.circuit_timing().max_dirtiness,
                                    );
                                }
                                return Ok((ent.tunnel.clone(), TunnelProvenance::NewlyCreated));
                            }
                            Err(e) => {
                                // In this case, a `UsageMismatched` error just means that we lost the race
                                // to restrict this tunnel.
                                let e = match e {
                                    Error::UsageMismatched(e) => Error::LostUsabilityRace(e),
                                    x => x,
                                };
                                if src == streams::Source::Left {
                                    info_report!(
                                        &e,
                                        "{} suggested we use {:?}, but restrictions failed",
                                        describe_source(building, src),
                                        id,
                                    );
                                } else {
                                    debug_report!(
                                        &e,
                                        "{} suggested we use {:?}, but restrictions failed",
                                        describe_source(building, src),
                                        id,
                                    );
                                }
                                record_error(&mut retry_error, src, building, e);
                                continue;
                            }
                        }
                    }
                }
                Ok(Err(ref e)) => {
                    debug!("{} sent error {:?}", describe_source(building, src), e);
                    record_error(&mut retry_error, src, building, e.clone());
                }
                Err(oneshot::Canceled) => {
                    debug!(
                        "{} went away (Canceled), quitting take_action right away",
                        describe_source(building, src)
                    );
                    record_error(&mut retry_error, src, building, Error::PendingCanceled);
                    return Err(retry_error);
                }
            }
            debug!(
                "While waiting on tunnel: {:?} from {}",
                id,
                describe_source(building, src)
            );
        }
        // Nothing worked.  We drop the pending request now explicitly
        // to remove it from the list.  (We could just let it get dropped
        // implicitly, but that's a bit confusing.)
        drop(pending_request);
        Err(retry_error)
    }
    /// Given a directory and usage, compute the necessary objects to
    /// build a tunnel: A [`PendingEntry`] to keep track of the in-process
    /// tunnel, and a [`TunnelBuildPlan`] that we'll give to the thread
    /// that will build the tunnel.
    ///
    /// The caller should probably add the resulting `PendingEntry` to
    /// `self.circs`.
    ///
    /// This is an internal function that we call when we're pretty sure
    /// we want to build a tunnel.
    #[allow(clippy::type_complexity)]
148
    fn plan_by_usage(
148
        &self,
148
        dir: DirInfo<'_>,
148
        usage: &TargetTunnelUsage,
148
    ) -> Result<(Arc<PendingEntry<B, R>>, TunnelBuildPlan<B, R>)> {
148
        let (plan, bspec) = self.builder.plan_tunnel(usage, dir)?;
140
        let (pending, sender) = PendingEntry::new(&bspec);
140
        let pending = Arc::new(pending);
140
        let plan = TunnelBuildPlan {
140
            plan,
140
            sender,
140
            pending: Arc::clone(&pending),
140
        };
140
        Ok((pending, plan))
148
    }
    /// Launch a managed tunnel for a target usage, without checking
    /// whether one already exists or is pending.
    ///
    /// Return a listener that will be informed when the tunnel is done.
4
    pub(crate) fn launch_by_usage(
4
        self: &Arc<Self>,
4
        usage: &TargetTunnelUsage,
4
        dir: DirInfo<'_>,
4
    ) -> Result<Shared<oneshot::Receiver<PendResult<B, R>>>> {
4
        let (pending, plan) = self.plan_by_usage(dir, usage)?;
4
        self.tunnels
4
            .lock()
4
            .expect("Poisoned lock for tunnel list")
4
            .add_pending_tunnel(pending);
4
        Ok(Arc::clone(self).spawn_launch(usage, plan))
4
    }
    /// Spawn a background task to launch a tunnel, and report its status.
    ///
    /// The `usage` argument is the usage from the original request that made
    /// us build this tunnel.
    fn spawn_launch(
        self: Arc<Self>,
        usage: &TargetTunnelUsage,
        plan: TunnelBuildPlan<B, R>,
    ) -> Shared<oneshot::Receiver<PendResult<B, R>>> {
        let _ = usage; // Currently unused.
        let TunnelBuildPlan {
            mut plan,
            sender,
            pending,
        } = plan;
        let request_loyalty = self.circuit_timing().request_loyalty;
        let wait_on_future = pending.receiver.clone();
        let runtime = self.runtime.clone();
        let runtime_copy = self.runtime.clone();
        // Random id used only to label the block_advance reason below.
        let tid = rand::random::<u64>();
        // We release this block when the tunnel builder task terminates.
        let reason = format!("tunnel builder task {}", tid);
        runtime.block_advance(reason.clone());
        // During tests, the `FakeBuilder` will need to release the block in order to fake a timeout
        // correctly.
        plan.add_blocked_advance_reason(reason);
        runtime
            .spawn(async move {
                let self_clone = Arc::clone(&self);
                // Catch panics so we can report them to the waiting caller
                // before re-raising them in this task.
                let future = AssertUnwindSafe(self_clone.do_launch(plan, pending)).catch_unwind();
                let (new_spec, reply) = match future.await {
                    Ok(x) => x, // Success or regular failure
                    Err(e) => {
                        // Okay, this is a panic.  We have to tell the calling
                        // thread about it, then exit this tunnel builder task.
                        let _ = sender.send(Err(internal!("tunnel build task panicked").into()));
                        std::panic::panic_any(e);
                    }
                };
                // Tell anybody who was listening about it that this
                // tunnel is now usable or failed.
                //
                // (We ignore any errors from `send`: That just means that nobody
                // was waiting for this tunnel.)
                let _ = sender.send(reply.clone());
                if let Some(new_spec) = new_spec {
                    // Wait briefly before we notify opportunistically.  This
                    // delay will give the tunnels that were originally
                    // specifically intended for a request a little more time
                    // to finish, before we offer it this tunnel instead.
                    let sl = runtime_copy.sleep(request_loyalty);
                    runtime_copy.allow_one_advance(request_loyalty);
                    sl.await;
                    // Collect matching pending requests while holding the
                    // lock, but notify them after releasing it.
                    let pending = {
                        let list = self.tunnels.lock().expect("poisoned lock");
                        list.find_pending_requests(&new_spec)
                    };
                    for pending_request in pending {
                        let _ = pending_request.notify.clone().try_send(reply.clone());
                    }
                }
                runtime_copy.release_advance(format!("tunnel builder task {}", tid));
            })
            .expect("Couldn't spawn tunnel-building task");
        wait_on_future
    }
    /// Run in the background to launch a tunnel. Return a 2-tuple of the new
    /// tunnel spec and the outcome that should be sent to the initiator.
    ///
    /// The spec is `Some` only when the tunnel was built *and* registered
    /// as open, so that `spawn_launch` can offer it to other requests.
    async fn do_launch(
        self: Arc<Self>,
        plan: <B as AbstractTunnelBuilder<R>>::Plan,
        pending: Arc<PendingEntry<B, R>>,
    ) -> (Option<SupportedTunnelUsage>, PendResult<B, R>) {
        let outcome = self.builder.build_tunnel(plan).await;
        match outcome {
            Err(e) => (None, Err(e)),
            Ok((new_spec, tunnel)) => {
                let id = tunnel.id();
                // Schedule an expiration for this tunnel in case it never
                // gets used.
                let use_duration = self.pick_use_duration();
                let exp_inst = self.runtime.now() + use_duration;
                let runtime_copy = self.runtime.clone();
                spawn_expiration_task(&runtime_copy, Arc::downgrade(&self), tunnel.id(), exp_inst);
                // I used to call restrict_mut here, but now I'm not so
                // sure. Doing restrict_mut makes sure that this
                // tunnel will be suitable for the request that asked
                // for us in the first place, but that should be
                // ensured anyway by our tracking its tentative
                // assignment.
                //
                // new_spec.restrict_mut(&usage_copy).unwrap();
                let use_before = ExpirationInfo::new(exp_inst);
                let open_ent = OpenEntry::new(new_spec.clone(), tunnel, use_before);
                {
                    let mut list = self.tunnels.lock().expect("poisoned lock");
                    // Finally, before we return this tunnel, we need to make
                    // sure that this pending tunnel is still pending.  (If it
                    // is not pending, then it was cancelled through a call to
                    // `retire_all_tunnels`, and the configuration that we used
                    // to launch it is now sufficiently outdated that we should
                    // no longer give this tunnel to a client.)
                    if list.tunnel_is_pending(&pending) {
                        list.add_open(open_ent);
                        // We drop our reference to 'pending' here:
                        // this should make all the weak references to
                        // the `PendingEntry` become dangling.
                        drop(pending);
                        (Some(new_spec), Ok(id))
                    } else {
                        // This tunnel is no longer pending! It must have been cancelled, probably
                        // by a call to retire_all_tunnels()
                        drop(pending); // ibid
                        (None, Err(Error::CircCanceled))
                    }
                }
            }
        }
    }
    /// Plan and launch a new tunnel to a given target, bypassing our managed
    /// pool of tunnels.
    ///
    /// This method will always return a new tunnel, and never return a tunnel
    /// that this CircMgr gives out for anything else.
    ///
    /// The new tunnel will participate in the guard and timeout apparatus as
    /// appropriate, no retry attempt will be made if the tunnel fails.
    #[cfg(feature = "hs-common")]
    pub(crate) async fn launch_unmanaged(
        &self,
        usage: &TargetTunnelUsage,
        dir: DirInfo<'_>,
    ) -> Result<(SupportedTunnelUsage, B::Tunnel)> {
        // We discard the pending entry: this tunnel is never added to our
        // managed lists, so nothing needs to track its progress.
        let (_, plan) = self.plan_by_usage(dir, usage)?;
        self.builder.build_tunnel(plan.plan).await
    }
    /// Remove the tunnel with a given `id` from this manager.
    ///
    /// After this function is called, that tunnel will no longer be handed
    /// out to any future requests.
    ///
    /// Return None if we have no tunnel with the given ID.
8
    pub(crate) fn take_tunnel(
8
        &self,
8
        id: &<B::Tunnel as AbstractTunnel>::Id,
8
    ) -> Option<Arc<B::Tunnel>> {
8
        let mut list = self.tunnels.lock().expect("poisoned lock");
8
        list.take_open(id).map(|e| e.tunnel)
8
    }
    /// Remove all open and pending tunnels from this manager, to ensure
    /// they can't be given out for any more requests.
    ///
    /// Calling `retire_all_tunnels` ensures that any tunnel request that gets
    /// an answer _after this method runs_ will receive a tunnel that was
    /// launched _after this method runs_.
    ///
    /// We call this method this when our configuration changes in such a way
    /// that we want to make sure that any new (or pending) requests will
    /// receive tunnels that are built using the new configuration.
    //
    // For more information, see documentation on [`CircuitList::open_circs`],
    // [`CircuitList::pending_circs`], and comments in `do_launch`.
    pub(crate) fn retire_all_tunnels(&self) {
        let mut list = self.tunnels.lock().expect("poisoned lock");
        list.clear_all_tunnels();
    }
    /// Expire tunnels according to the rules in `config` and the
    /// current time `now`.
    ///
    /// Expired tunnels will not be automatically closed, but they will
    /// no longer be given out for new tunnels.
4
    pub(crate) fn expire_tunnels(&self, now: Instant) {
4
        let mut list = self.tunnels.lock().expect("poisoned lock");
4
        if let Some(dirty_cutoff) = now.checked_sub(self.circuit_timing().max_dirtiness) {
4
            list.expire_tunnels(now, dirty_cutoff);
4
        }
4
    }
    /// Consider expiring the tunnel with given tunnel `id`,
    /// according to the rules in `config` and the current time `now`.
    pub(crate) fn expire_tunnel(&self, tun_id: &<B::Tunnel as AbstractTunnel>::Id, now: Instant) {
        // Without a computable cutoff, no tunnel can be stale yet.
        let Some(dirty_cutoff) = now.checked_sub(self.circuit_timing().max_dirtiness) else {
            return;
        };
        self.tunnels
            .lock()
            .expect("poisoned lock")
            .expire_tunnel(tun_id, now, dirty_cutoff);
    }
    /// Return the number of open tunnels held by this tunnel manager.
24
    pub(crate) fn n_tunnels(&self) -> usize {
24
        let list = self.tunnels.lock().expect("poisoned lock");
24
        list.open_tunnels.len()
24
    }
    /// Return the number of pending tunnels tracked by this tunnel manager.
    #[cfg(test)]
    pub(crate) fn n_pending_tunnels(&self) -> usize {
        self.tunnels
            .lock()
            .expect("poisoned lock")
            .pending_tunnels
            .len()
    }
    /// Get a reference to this manager's runtime.
    ///
    /// Returns a shared borrow of the runtime supplied at construction.
    pub(crate) fn peek_runtime(&self) -> &R {
        &self.runtime
    }
    /// Get a reference to this manager's builder.
    ///
    /// Returns a shared borrow of the builder supplied at construction.
    pub(crate) fn peek_builder(&self) -> &B {
        &self.builder
    }
    /// Pick a duration by when a new tunnel should expire from now
    /// if it has not yet been used.
    ///
    /// While the builder is still learning timeouts, we use the fixed
    /// `learning` value; otherwise we pick uniformly at random from
    /// `[not_learning, 2 * not_learning]`.
    fn pick_use_duration(&self) -> Duration {
        let timings = self
            .unused_timing
            .lock()
            .expect("Poisoned lock for unused_timing");
        if self.builder.learning_timeouts() {
            timings.learning
        } else {
            // TODO: In Tor, this calculation also depends on
            // stuff related to predicted ports and channel
            // padding.
            use tor_basic_utils::RngExt as _;
            let mut rng = rand::rng();
            rng.gen_range_checked(timings.not_learning..=timings.not_learning * 2)
                .expect("T .. 2x T turned out to be an empty duration range?!")
        }
    }
}
/// Spawn an expiration task that expires a tunnel at given instant.
///
/// If given instant is earlier than now, expire the tunnel immediately.
/// Otherwise, spawn a timer expiration task on given runtime.
///
/// When the timeout occurs, if the tunnel manager is still present,
/// the task will ask the manager to expire the tunnel, if the tunnel
/// is ready to expire.
52
fn spawn_expiration_task<B, R>(
52
    runtime: &R,
52
    circmgr: Weak<AbstractTunnelMgr<B, R>>,
52
    circ_id: <<B as AbstractTunnelBuilder<R>>::Tunnel as AbstractTunnel>::Id,
52
    exp_inst: Instant,
52
) where
52
    R: Runtime,
52
    B: 'static + AbstractTunnelBuilder<R>,
{
52
    let now = runtime.now();
52
    let rt_copy = runtime.clone();
52
    let duration = exp_inst.saturating_duration_since(now);
52
    if duration == Duration::ZERO {
        // Circuit should already expire. Expire it now.
        let cm = if let Some(cm) = Weak::upgrade(&circmgr) {
            cm
        } else {
            // Circuits manager has already been dropped, so are the references it held.
            return;
        };
        cm.expire_tunnel(&circ_id, now);
    } else {
        // Spawn a timer expiration task with given expiration instant.
52
        if let Err(e) = runtime.spawn(async move {
52
            rt_copy.sleep(duration).await;
            let cm = if let Some(cm) = Weak::upgrade(&circmgr) {
                cm
            } else {
                return;
            };
            cm.expire_tunnel(&circ_id, exp_inst);
        }) {
            warn_report!(e, "Unable to launch expiration task");
52
        }
    }
52
}
#[cfg(test)]
mod test {
    // @@ begin test lint list maintained by maint/add_warning @@
    #![allow(clippy::bool_assert_comparison)]
    #![allow(clippy::clone_on_copy)]
    #![allow(clippy::dbg_macro)]
    #![allow(clippy::mixed_attributes_style)]
    #![allow(clippy::print_stderr)]
    #![allow(clippy::print_stdout)]
    #![allow(clippy::single_char_pattern)]
    #![allow(clippy::unwrap_used)]
    #![allow(clippy::unchecked_duration_subtraction)]
    #![allow(clippy::useless_vec)]
    #![allow(clippy::needless_pass_by_value)]
    //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
    use super::*;
    use crate::isolation::test::{IsolationTokenEq, assert_isoleq};
    use crate::mocks::{FakeBuilder, FakeCirc, FakeId, FakeOp};
    use crate::usage::{ExitPolicy, SupportedTunnelUsage};
    use crate::{
        Error, IsolationToken, StreamIsolation, TargetPort, TargetPorts, TargetTunnelUsage,
    };
    use std::sync::LazyLock;
    use tor_dircommon::fallback::FallbackList;
    use tor_guardmgr::TestConfig;
    use tor_llcrypto::pk::ed25519::Ed25519Identity;
    use tor_netdir::testnet;
    use tor_persist::TestingStateMgr;
    use tor_rtcompat::SleepProvider;
    use tor_rtmock::MockRuntime;
    #[allow(deprecated)] // TODO #1885
    use tor_rtmock::MockSleepRuntime;
    static FALLBACKS_EMPTY: LazyLock<FallbackList> = LazyLock::new(|| [].into());
    fn di() -> DirInfo<'static> {
        (&*FALLBACKS_EMPTY).into()
    }
    /// Convert a target usage into the `SupportedTunnelUsage` that a tunnel
    /// built for it would advertise.
    ///
    /// Only `Exit` targets are handled; any other variant is unimplemented.
    fn target_to_spec(target: &TargetTunnelUsage) -> SupportedTunnelUsage {
        let TargetTunnelUsage::Exit {
            ports,
            isolation,
            country_code,
            require_stability,
        } = target
        else {
            unimplemented!()
        };
        SupportedTunnelUsage::Exit {
            policy: ExitPolicy::from_target_ports(&TargetPorts::from(&ports[..])),
            isolation: Some(isolation.clone()),
            country_code: country_code.clone(),
            all_relays_stable: *require_stability,
        }
    }
    impl<U: PartialEq> IsolationTokenEq for OpenEntry<U> {
        /// Two entries are isolation-equal when their specs are isolation-equal
        /// and their tunnel and expiration fields compare equal.
        fn isol_eq(&self, other: &Self) -> bool {
            let specs_match = self.spec.isol_eq(&other.spec);
            let tunnels_match = self.tunnel == other.tunnel;
            let expirations_match = self.expiration == other.expiration;
            specs_match && tunnels_match && expirations_match
        }
    }
    impl<U: PartialEq> IsolationTokenEq for &mut OpenEntry<U> {
        /// Delegating comparison for `&mut` references: same criteria as for
        /// owned `OpenEntry` values.
        fn isol_eq(&self, other: &Self) -> bool {
            let specs_match = self.spec.isol_eq(&other.spec);
            let tunnels_match = self.tunnel == other.tunnel;
            let expirations_match = self.expiration == other.expiration;
            specs_match && tunnels_match && expirations_match
        }
    }
    /// Construct a `FakeBuilder` backed by fresh in-memory testing state and
    /// a default guard configuration.
    fn make_builder<R: Runtime>(runtime: &R) -> FakeBuilder<R> {
        FakeBuilder::new(runtime, TestingStateMgr::new(), &TestConfig::default())
    }
    /// End-to-end smoke test: launch, share, remove, and relaunch tunnels,
    /// then check `launch_by_usage`'s pending-tunnel accounting.
    #[test]
    fn basic_tests() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let builder = make_builder(&rt);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            let webports = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            // Check initialization.
            assert_eq!(mgr.n_tunnels(), 0);
            assert!(mgr.peek_builder().script.lock().unwrap().is_empty());
            // Launch a tunnel ; make sure we get it.
            let c1 = rt.wait_for(mgr.get_or_launch(&webports, di())).await;
            let c1 = c1.unwrap().0;
            assert_eq!(mgr.n_tunnels(), 1);
            // Make sure we get the one we already made if we ask for it.
            // (Port 80 is a subset of {80, 443}, so c1 satisfies it.)
            let port80 = TargetTunnelUsage::new_from_ipv4_ports(&[80]);
            let c2 = mgr.get_or_launch(&port80, di()).await;
            let c2 = c2.unwrap().0;
            assert!(FakeCirc::eq(&c1, &c2));
            assert_eq!(mgr.n_tunnels(), 1);
            // Now try launching two tunnels "at once" to make sure that our
            // pending-tunnel code works.
            let dnsport = TargetTunnelUsage::new_from_ipv4_ports(&[53]);
            let dnsport_restrict = TargetTunnelUsage::Exit {
                ports: vec![TargetPort::ipv4(53)],
                isolation: StreamIsolation::builder().build().unwrap(),
                country_code: None,
                require_stability: false,
            };
            // Both requests are compatible, so they should share one pending
            // tunnel rather than launching two.
            let (c3, c4) = rt
                .wait_for(futures::future::join(
                    mgr.get_or_launch(&dnsport, di()),
                    mgr.get_or_launch(&dnsport_restrict, di()),
                ))
                .await;
            let c3 = c3.unwrap().0;
            let c4 = c4.unwrap().0;
            assert!(!FakeCirc::eq(&c1, &c3));
            assert!(FakeCirc::eq(&c3, &c4));
            assert_eq!(c3.id(), c4.id());
            assert_eq!(mgr.n_tunnels(), 2);
            // Now we're going to remove c3 from consideration.  It's the
            // same as c4, so removing c4 will give us None.
            let c3_taken = mgr.take_tunnel(&c3.id()).unwrap();
            let now_its_gone = mgr.take_tunnel(&c4.id());
            assert!(FakeCirc::eq(&c3_taken, &c3));
            assert!(now_its_gone.is_none());
            assert_eq!(mgr.n_tunnels(), 1);
            // Having removed them, let's launch another dnsport and make
            // sure we get a different tunnel.
            let c5 = rt.wait_for(mgr.get_or_launch(&dnsport, di())).await;
            let c5 = c5.unwrap().0;
            assert!(!FakeCirc::eq(&c3, &c5));
            assert!(!FakeCirc::eq(&c4, &c5));
            assert_eq!(mgr.n_tunnels(), 2);
            // Now try launch_by_usage.
            // It should add exactly one pending tunnel without waiting for it.
            let prev = mgr.n_pending_tunnels();
            assert!(mgr.launch_by_usage(&dnsport, di()).is_ok());
            assert_eq!(mgr.n_pending_tunnels(), prev + 1);
            // TODO: Actually make sure that launch_by_usage launched
            // the right thing.
        });
    }
    /// A request whose only build attempts are one failure followed by a
    /// hang must ultimately be reported as a failed request.
    #[test]
    fn request_timeout() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let usage = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            // Program the builder: fail once, then time out completely.
            let builder = make_builder(&rt);
            builder.set(&usage, vec![FakeOp::Fail, FakeOp::Timeout]);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            let outcome = mgr
                .peek_runtime()
                .wait_for(mgr.get_or_launch(&usage, di()))
                .await;
            assert!(matches!(outcome, Err(Error::RequestFailed(_))));
        });
    }
    /// Exercise the wait-for-next-action path: a build attempt that almost --
    /// but not quite -- outlasts the request window, followed by a planning
    /// failure, must still produce an overall request failure.
    #[test]
    fn request_timeout2() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            // Now try a more complicated case: we'll try to get things so
            // that we wait for a little over our predicted time because
            // of our wait-for-next-action logic.
            let ports = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            let builder = make_builder(&rt);
            builder.set(
                &ports,
                vec![
                    // NOTE(review): 25 ms short of 60 s -- presumably just under
                    // the request's overall deadline; confirm against the
                    // timeout configuration if this test gets touched.
                    FakeOp::Delay(Duration::from_millis(60_000 - 25)),
                    FakeOp::NoPlan,
                ],
            );
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            let c1 = mgr
                .peek_runtime()
                .wait_for(mgr.get_or_launch(&ports, di()))
                .await;
            assert!(matches!(c1, Err(Error::RequestFailed(_))));
        });
    }
    /// If every planning attempt fails, the request must be reported as a
    /// failure rather than retrying forever.
    #[test]
    fn request_unplannable() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let usage = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            // Make planning fail far more times than the manager will retry.
            let builder = make_builder(&rt);
            builder.set(&usage, vec![FakeOp::NoPlan; 2000]);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            let outcome = rt.wait_for(mgr.get_or_launch(&usage, di())).await;
            assert!(matches!(outcome, Err(Error::RequestFailed(_))));
        });
    }
    /// If every build attempt fails outright, the retry limit must kick in
    /// and the request must be reported as a failure.
    #[test]
    fn request_fails_too_much() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let usage = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            // 1000 consecutive failures, well above the retry limit.
            let builder = make_builder(&rt);
            builder.set(&usage, vec![FakeOp::Fail; 1000]);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            let outcome = rt.wait_for(mgr.get_or_launch(&usage, di())).await;
            assert!(matches!(outcome, Err(Error::RequestFailed(_))));
        });
    }
    /// A build that returns a tunnel whose spec does not match the request
    /// must be discarded, and the request retried until it succeeds.
    #[test]
    fn request_wrong_spec() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let usage = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            // The first build yields a tunnel that only supports port 22.
            // (A tunnel builder should never actually _do_ that, but it's
            // something we code for.)
            let builder = make_builder(&rt);
            let bad_spec = target_to_spec(&TargetTunnelUsage::new_from_ipv4_ports(&[22]));
            builder.set(&usage, vec![FakeOp::WrongSpec(bad_spec)]);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            let outcome = rt.wait_for(mgr.get_or_launch(&usage, di())).await;
            assert!(outcome.is_ok());
        });
    }
    /// Two failures followed by a success: both concurrent requests should
    /// end up sharing the tunnel from the eventual successful attempt.
    #[test]
    fn request_retried() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let ports = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            // This will fail twice, and then succeed. The result will be
            // a success.
            let builder = make_builder(&rt);
            builder.set(&ports, vec![FakeOp::Fail, FakeOp::Fail]);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            // This test doesn't exercise any timeout behaviour.
            rt.block_advance("test doesn't require advancing");
            // Issue two identical requests concurrently; the pending-tunnel
            // machinery should hand both of them the same tunnel.
            let (c1, c2) = rt
                .wait_for(futures::future::join(
                    mgr.get_or_launch(&ports, di()),
                    mgr.get_or_launch(&ports, di()),
                ))
                .await;
            let c1 = c1.unwrap().0;
            let c2 = c2.unwrap().0;
            assert!(FakeCirc::eq(&c1, &c2));
        });
    }
    /// Check stream isolation: two isolated requests must never share a
    /// tunnel with each other, while two non-isolated requests may share
    /// one -- regardless of the order the four requests arrive in.
    #[test]
    fn isolated() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let builder = make_builder(&rt);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            // Set our isolation so that iso1 and iso2 can't share a tunnel,
            // but no_iso can share a tunnel with either.
            let iso1 = TargetTunnelUsage::Exit {
                ports: vec![TargetPort::ipv4(443)],
                isolation: StreamIsolation::builder()
                    .owner_token(IsolationToken::new())
                    .build()
                    .unwrap(),
                country_code: None,
                require_stability: false,
            };
            let iso2 = TargetTunnelUsage::Exit {
                ports: vec![TargetPort::ipv4(443)],
                isolation: StreamIsolation::builder()
                    .owner_token(IsolationToken::new())
                    .build()
                    .unwrap(),
                country_code: None,
                require_stability: false,
            };
            let no_iso1 = TargetTunnelUsage::new_from_ipv4_ports(&[443]);
            let no_iso2 = no_iso1.clone();
            // We're going to try launching these tunnels in 24 different
            // orders, to make sure that the outcome is correct each time.
            use itertools::Itertools;
            let timeouts: Vec<_> = [0_u64, 2, 4, 6]
                .iter()
                .map(|d| Duration::from_millis(*d))
                .collect();
            for delays in timeouts.iter().permutations(4) {
                let d1 = delays[0];
                let d2 = delays[1];
                let d3 = delays[2];
                // BUGFIX: this previously read `delays[2]`, duplicating d3 and
                // never exercising the fourth delay of each permutation.
                let d4 = delays[3];
                let (c_iso1, c_iso2, c_no_iso1, c_no_iso2) = rt
                    .wait_for(futures::future::join4(
                        async {
                            rt.sleep(*d1).await;
                            mgr.get_or_launch(&iso1, di()).await
                        },
                        async {
                            rt.sleep(*d2).await;
                            mgr.get_or_launch(&iso2, di()).await
                        },
                        async {
                            rt.sleep(*d3).await;
                            mgr.get_or_launch(&no_iso1, di()).await
                        },
                        async {
                            rt.sleep(*d4).await;
                            mgr.get_or_launch(&no_iso2, di()).await
                        },
                    ))
                    .await;
                let c_iso1 = c_iso1.unwrap().0;
                let c_iso2 = c_iso2.unwrap().0;
                let c_no_iso1 = c_no_iso1.unwrap().0;
                let c_no_iso2 = c_no_iso2.unwrap().0;
                // Isolated tunnels are distinct from everything else...
                assert!(!FakeCirc::eq(&c_iso1, &c_iso2));
                assert!(!FakeCirc::eq(&c_iso1, &c_no_iso1));
                assert!(!FakeCirc::eq(&c_iso1, &c_no_iso2));
                assert!(!FakeCirc::eq(&c_iso2, &c_no_iso1));
                assert!(!FakeCirc::eq(&c_iso2, &c_no_iso2));
                // ...but the two non-isolated requests may share one tunnel.
                assert!(FakeCirc::eq(&c_no_iso1, &c_no_iso2));
            }
        });
    }
    /// A stalled request should be opportunistically satisfied by a tunnel
    /// launched for a later, wider request.
    #[test]
    fn opportunistic() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            // The first request will time out completely, but we're
            // making a second request after we launch it.  That
            // request should succeed, and notify the first request.
            let ports1 = TargetTunnelUsage::new_from_ipv4_ports(&[80]);
            let ports2 = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            let builder = make_builder(&rt);
            builder.set(&ports1, vec![FakeOp::Timeout]);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            // Note that ports2 will be wider than ports1, so the second
            // request will have to launch a new tunnel.
            let (c1, c2) = rt
                .wait_for(futures::future::join(
                    mgr.get_or_launch(&ports1, di()),
                    async {
                        // Delay the second request so the first has launched.
                        rt.sleep(Duration::from_millis(100)).await;
                        mgr.get_or_launch(&ports2, di()).await
                    },
                ))
                .await;
            // Both requests succeed and share the second request's tunnel.
            if let (Ok((c1, _)), Ok((c2, _))) = (c1, c2) {
                assert!(FakeCirc::eq(&c1, &c2));
            } else {
                panic!();
            };
        });
    }
    /// Pre-building with `ensure_tunnel` should let subsequent narrower
    /// requests share the pre-built tunnel instead of launching their own.
    #[test]
    fn prebuild() {
        MockRuntime::test_with_various(|rt| async move {
            // This time we're going to use ensure_tunnel() to make
            // sure that a tunnel gets built, and then launch two
            // other tunnels that will use it.
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let builder = make_builder(&rt);
            let mgr = Arc::new(AbstractTunnelMgr::new(
                builder,
                rt.clone(),
                CircuitTiming::default(),
            ));
            // ports2 and ports3 are each subsets of ports1.
            let ports1 = TargetTunnelUsage::new_from_ipv4_ports(&[80, 443]);
            let ports2 = TargetTunnelUsage::new_from_ipv4_ports(&[80]);
            let ports3 = TargetTunnelUsage::new_from_ipv4_ports(&[443]);
            let (ok, c1, c2) = rt
                .wait_for(futures::future::join3(
                    mgr.ensure_tunnel(&ports1, di()),
                    async {
                        rt.sleep(Duration::from_millis(10)).await;
                        mgr.get_or_launch(&ports2, di()).await
                    },
                    async {
                        rt.sleep(Duration::from_millis(50)).await;
                        mgr.get_or_launch(&ports3, di()).await
                    },
                ))
                .await;
            assert!(ok.is_ok());
            let c1 = c1.unwrap().0;
            let c2 = c2.unwrap().0;
            // If we had launched these separately, they wouldn't share
            // a tunnel.
            assert!(FakeCirc::eq(&c1, &c2));
        });
    }
    /// Check dirtiness-based expiration: a used ("dirty") tunnel past
    /// `max_dirtiness` is expired, while an unused one is kept.
    #[test]
    fn expiration() {
        MockRuntime::test_with_various(|rt| async move {
            use crate::config::CircuitTimingBuilder;
            // Now let's make some tunnels -- one dirty, one clean, and
            // make sure that one expires and one doesn't.
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let builder = make_builder(&rt);
            let circuit_timing = CircuitTimingBuilder::default()
                .max_dirtiness(Duration::from_secs(15))
                .build()
                .unwrap();
            let mgr = Arc::new(AbstractTunnelMgr::new(builder, rt.clone(), circuit_timing));
            let imap = TargetTunnelUsage::new_from_ipv4_ports(&[993]);
            let pop = TargetTunnelUsage::new_from_ipv4_ports(&[995]);
            // ensure_tunnel() leaves the imap tunnel clean; get_or_launch()
            // marks the pop tunnel as dirty.
            let (ok, pop1) = rt
                .wait_for(futures::future::join(
                    mgr.ensure_tunnel(&imap, di()),
                    mgr.get_or_launch(&pop, di()),
                ))
                .await;
            assert!(ok.is_ok());
            let pop1 = pop1.unwrap().0;
            // Advance well past max_dirtiness (15 s) in two steps.
            rt.advance(Duration::from_secs(30)).await;
            rt.advance(Duration::from_secs(15)).await;
            // This marks the imap tunnel dirty only now, after the advances.
            let imap1 = rt.wait_for(mgr.get_or_launch(&imap, di())).await.unwrap().0;
            // This should expire the pop tunnel, since it came from
            // get_or_launch() [which marks the tunnel as being
            // used].  It should not expire the imap tunnel, since
            // it was not dirty until 15 seconds after the cutoff.
            let now = rt.now();
            mgr.expire_tunnels(now);
            // pop must now be a fresh tunnel; imap must be the one we kept.
            let (pop2, imap2) = rt
                .wait_for(futures::future::join(
                    mgr.get_or_launch(&pop, di()),
                    mgr.get_or_launch(&imap, di()),
                ))
                .await;
            let pop2 = pop2.unwrap().0;
            let imap2 = imap2.unwrap().0;
            assert!(!FakeCirc::eq(&pop2, &pop1));
            assert!(FakeCirc::eq(&imap2, &imap1));
        });
    }
    /// Returns three exit policies; one that permits nothing, one that permits ports 80
    /// and 443 only, and one that permits all ports.
    fn get_exit_policies() -> (ExitPolicy, ExitPolicy, ExitPolicy) {
        // FIXME(eta): the below is copypasta; would be nice to have a better way of
        //             constructing ExitPolicy objects for testing maybe
        let network = testnet::construct_netdir().unwrap_if_sufficient().unwrap();
        // In the test network, nodes with ID 0x0a through 0x13 and 0x1e
        // through 0x27 are exits.  Odd-numbered ones allow only ports 80
        // and 443; even-numbered ones allow all ports.  0x05 is not an exit.
        let policy_of = |raw_id: [u8; 32]| {
            let id: Ed25519Identity = raw_id.into();
            let relay = network.by_id(&id).unwrap();
            ExitPolicy::from_relay(&relay)
        };
        (
            policy_of([0x05; 32]), // permits nothing
            policy_of([0x11; 32]), // web ports only
            policy_of([0x20; 32]), // all ports
        )
    }
    /// Unit-test `SupportedTunnelUsage::find_supported` against exit and
    /// preemptive target usages, over entries with three different policies.
    #[test]
    fn test_find_supported() {
        let (ep_none, ep_web, ep_full) = get_exit_policies();
        let fake_circ = FakeCirc { id: FakeId::next() };
        // A far-future expiration so no entry is filtered out as stale.
        let expiration = ExpirationInfo::Unused {
            use_before: Instant::now() + Duration::from_secs(60 * 60),
        };
        let mut entry_none = OpenEntry::new(
            SupportedTunnelUsage::Exit {
                policy: ep_none,
                isolation: None,
                country_code: None,
                all_relays_stable: true,
            },
            fake_circ.clone(),
            expiration.clone(),
        );
        let mut entry_none_c = entry_none.clone();
        let mut entry_web = OpenEntry::new(
            SupportedTunnelUsage::Exit {
                policy: ep_web,
                isolation: None,
                country_code: None,
                all_relays_stable: true,
            },
            fake_circ.clone(),
            expiration.clone(),
        );
        let mut entry_web_c = entry_web.clone();
        let mut entry_full = OpenEntry::new(
            SupportedTunnelUsage::Exit {
                policy: ep_full,
                isolation: None,
                country_code: None,
                all_relays_stable: true,
            },
            fake_circ,
            expiration,
        );
        let mut entry_full_c = entry_full.clone();
        let usage_web = TargetTunnelUsage::new_from_ipv4_ports(&[80]);
        let empty: Vec<&mut OpenEntry<FakeCirc>> = vec![];
        // A no-exit entry can never support a web request.
        assert_isoleq!(
            SupportedTunnelUsage::find_supported(vec![&mut entry_none].into_iter(), &usage_web),
            empty
        );
        // HACK(eta): We have to faff around with clones and such because
        //            `abstract_spec_find_supported` has a silly signature that involves `&mut`
        //            refs, which we can't have more than one of.
        assert_isoleq!(
            SupportedTunnelUsage::find_supported(
                vec![&mut entry_none, &mut entry_web].into_iter(),
                &usage_web,
            ),
            vec![&mut entry_web_c]
        );
        assert_isoleq!(
            SupportedTunnelUsage::find_supported(
                vec![&mut entry_none, &mut entry_web, &mut entry_full].into_iter(),
                &usage_web,
            ),
            vec![&mut entry_web_c, &mut entry_full_c]
        );
        // Test preemptive tunnel usage:
        let usage_preemptive_web = TargetTunnelUsage::Preemptive {
            port: Some(TargetPort::ipv4(80)),
            circs: 2,
            require_stability: false,
        };
        let usage_preemptive_dns = TargetTunnelUsage::Preemptive {
            port: None,
            circs: 2,
            require_stability: false,
        };
        // shouldn't return anything unless there are >=2 tunnels
        assert_isoleq!(
            SupportedTunnelUsage::find_supported(
                vec![&mut entry_none].into_iter(),
                &usage_preemptive_web
            ),
            empty
        );
        assert_isoleq!(
            SupportedTunnelUsage::find_supported(
                vec![&mut entry_none].into_iter(),
                &usage_preemptive_dns
            ),
            empty
        );
        // Two tunnels, but only one supports port 80: still nothing.
        assert_isoleq!(
            SupportedTunnelUsage::find_supported(
                vec![&mut entry_none, &mut entry_web].into_iter(),
                &usage_preemptive_web
            ),
            empty
        );
        // A port-less preemptive usage is satisfied by any two tunnels.
        assert_isoleq!(
            SupportedTunnelUsage::find_supported(
                vec![&mut entry_none, &mut entry_web].into_iter(),
                &usage_preemptive_dns
            ),
            vec![&mut entry_none_c, &mut entry_web_c]
        );
        assert_isoleq!(
            SupportedTunnelUsage::find_supported(
                vec![&mut entry_none, &mut entry_web, &mut entry_full].into_iter(),
                &usage_preemptive_web
            ),
            vec![&mut entry_web_c, &mut entry_full_c]
        );
    }
    /// A preemptive target with `circs: N` should only be considered
    /// satisfied once the list holds N matching open tunnels.
    #[test]
    fn test_circlist_preemptive_target_circs() {
        MockRuntime::test_with_various(|rt| async move {
            #[allow(deprecated)] // TODO #1885
            let rt = MockSleepRuntime::new(rt);
            let netdir = testnet::construct_netdir().unwrap_if_sufficient().unwrap();
            let dirinfo = DirInfo::Directory(&netdir);
            let builder = make_builder(&rt);
            for circs in [2, 8].iter() {
                let mut circlist = TunnelList::<FakeBuilder<MockRuntime>, MockRuntime>::new();
                let preemptive_target = TargetTunnelUsage::Preemptive {
                    port: Some(TargetPort::ipv4(80)),
                    circs: *circs,
                    require_stability: false,
                };
                // Add tunnels one at a time; until we reach `circs` of them,
                // the preemptive target must not be reported as satisfied.
                for _ in 0..*circs {
                    assert!(circlist.find_open(&preemptive_target).is_none());
                    let usage = TargetTunnelUsage::new_from_ipv4_ports(&[80]);
                    let (plan, _) = builder.plan_tunnel(&usage, dirinfo).unwrap();
                    let (spec, circ) = rt.wait_for(builder.build_tunnel(plan)).await.unwrap();
                    let entry = OpenEntry::new(
                        spec,
                        circ,
                        ExpirationInfo::new(rt.now() + Duration::from_secs(60)),
                    );
                    circlist.add_open(entry);
                }
                // With `circs` open tunnels present, the target is satisfied.
                assert!(circlist.find_open(&preemptive_target).is_some());
            }
        });
    }
}