//! Publish and maintain onion service descriptors
//!
//! See the [`reactor`] module-level documentation for more details.

mod backoff;
mod descriptor;
mod reactor;
mod reupload_timer;

use crate::config::restricted_discovery::RestrictedDiscoveryKeys;
use crate::internal_prelude::*;
use crate::pow::PowManager;

use backoff::{BackoffError, BackoffSchedule, RetriableError, Runner};
use descriptor::{DescriptorStatus, VersionedDescriptor, build_sign};
use reactor::Reactor;
use reactor::read_blind_id_keypair;
use reupload_timer::ReuploadTimer;

use tor_config_path::CfgPathResolver;

pub use reactor::UploadError;
pub(crate) use reactor::{Mockable, OVERALL_UPLOAD_TIMEOUT, Real};

/// A handle for the HsDir publisher for an onion service.
///
/// This handle represents a set of tasks that identify the HsDirs for each
/// relevant time period, construct descriptors, publish them, and keep them
/// up-to-date.
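///
/// A sketch of the expected call sequence (illustrative only; in practice the
/// surrounding onion-service code constructs the channels, key manager, and
/// proof-of-work manager passed in here):
///
/// ```ignore
/// let publisher = Publisher::new(
///     runtime, nickname, dir_provider, mockable, ipt_watcher, config_rx,
///     status_tx, keymgr, path_resolver, pow_manager, update_from_pow_manager_rx,
/// );
/// // `launch` consumes the handle and spawns the reactor task.
/// publisher.launch()?;
/// ```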
#[must_use = "If you don't call launch() on the publisher, it won't publish any descriptors."]
pub(crate) struct Publisher<R: Runtime, M: Mockable> {
    /// The runtime.
    runtime: R,
    /// The service for which we're publishing descriptors.
    nickname: HsNickname,
    /// A source for new network directories that we use to determine
    /// our HsDirs.
    dir_provider: Arc<dyn NetDirProvider>,
    /// Mockable state.
    ///
    /// This is used for launching circuits and for obtaining random number generators.
    mockable: M,
    /// The onion service config.
    config: Arc<OnionServiceConfig>,
    /// A channel for receiving IPT change notifications.
    ipt_watcher: IptsPublisherView,
    /// A channel for receiving onion service config change notifications.
    config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
    /// The key manager.
    keymgr: Arc<KeyMgr>,
    /// A sender for updating the status of the onion service.
    status_tx: PublisherStatusSender,
    /// Path resolver for configuration files.
    path_resolver: Arc<CfgPathResolver>,
    /// Proof-of-work state.
    pow_manager: Arc<PowManager<R>>,
    /// Queue on which we receive messages from the [`PowManager`] telling us that a seed has
    /// rotated and thus we need to republish the descriptor for a particular time period.
    update_from_pow_manager_rx: mpsc::Receiver<TimePeriod>,
}

impl<R: Runtime, M: Mockable> Publisher<R, M> {
    /// Create a new publisher.
    ///
    /// When it launches, it will not yet know any keys or introduction points,
    /// and will therefore not upload any descriptors.
    ///
    /// The publisher won't start publishing until you call [`Publisher::launch`].
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        runtime: R,
        nickname: HsNickname,
        dir_provider: Arc<dyn NetDirProvider>,
        mockable: impl Into<M>,
        ipt_watcher: IptsPublisherView,
        config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
        status_tx: PublisherStatusSender,
        keymgr: Arc<KeyMgr>,
        path_resolver: Arc<CfgPathResolver>,
        pow_manager: Arc<PowManager<R>>,
        update_from_pow_manager_rx: mpsc::Receiver<TimePeriod>,
    ) -> Self {
        let config = config_rx.borrow().clone();
        Self {
            runtime,
            nickname,
            dir_provider,
            mockable: mockable.into(),
            config,
            ipt_watcher,
            config_rx,
            status_tx,
            keymgr,
            path_resolver,
            pow_manager,
            update_from_pow_manager_rx,
        }
    }

    /// Launch the publisher reactor.
    pub(crate) fn launch(self) -> Result<(), StartupError> {
        let Publisher {
            runtime,
            nickname,
            dir_provider,
            mockable,
            config,
            ipt_watcher,
            config_rx,
            status_tx,
            keymgr,
            path_resolver,
            pow_manager,
            update_from_pow_manager_rx: publisher_update_rx,
        } = self;

        let reactor = Reactor::new(
            runtime.clone(),
            nickname,
            dir_provider,
            mockable,
            &config,
            ipt_watcher,
            config_rx,
            status_tx,
            keymgr,
            path_resolver,
            pow_manager,
            publisher_update_rx,
        );

        runtime
            .spawn(async move {
                match reactor.run().await {
                    Ok(()) => debug!("the publisher reactor has shut down"),
                    Err(e) => warn_report!(e, "the publisher reactor has shut down"),
                }
            })
            .map_err(|e| StartupError::Spawn {
                spawning: "publisher reactor task",
                cause: e.into(),
            })?;

        Ok(())
    }
}

#[cfg(test)]
mod test {
    // @@ begin test lint list maintained by maint/add_warning @@
    #![allow(clippy::bool_assert_comparison)]
    #![allow(clippy::clone_on_copy)]
    #![allow(clippy::dbg_macro)]
    #![allow(clippy::mixed_attributes_style)]
    #![allow(clippy::print_stderr)]
    #![allow(clippy::print_stdout)]
    #![allow(clippy::single_char_pattern)]
    #![allow(clippy::unwrap_used)]
    #![allow(clippy::unchecked_duration_subtraction)]
    #![allow(clippy::useless_vec)]
    #![allow(clippy::needless_pass_by_value)]
    //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
    use super::*;

    use std::collections::HashMap;
    use std::io;
    use std::path::Path;
    use std::pin::Pin;
    use std::sync::Mutex;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::task::{Context, Poll};
    use std::time::Duration;

    use async_trait::async_trait;
    use fs_mistrust::Mistrust;
    use futures::{AsyncRead, AsyncWrite};
    use tempfile::{TempDir, tempdir};
    use test_temp_dir::test_temp_dir;

    use tor_basic_utils::test_rng::{TestingRng, testing_rng};

    use tor_hscrypto::pk::{HsBlindId, HsDescSigningKeypair, HsId, HsIdKey, HsIdKeypair};
    use tor_key_forge::ToEncodableKey;
    use tor_keymgr::{ArtiNativeKeystore, KeyMgrBuilder, KeySpecifier};
    use tor_llcrypto::pk::{ed25519, rsa};
    use tor_netdir::testprovider::TestNetDirProvider;
    use tor_netdir::{NetDir, testnet};
    use tor_netdoc::doc::hsdesc::test_data;
    use tor_rtcompat::ToplevelBlockOn;
    use tor_rtmock::MockRuntime;

    use crate::HsNickname;
    use crate::config::OnionServiceConfigBuilder;
    use crate::ipt_set::{IptInSet, IptSet, ipts_channel};
    use crate::pow::NewPowManager;
    use crate::publish::reactor::MockableDirTunnel;
    use crate::status::{OnionServiceStatus, StatusSender};
    use crate::test::create_storage_handles;
    use crate::{
        BlindIdKeypairSpecifier, BlindIdPublicKeySpecifier, DescSigningKeypairSpecifier,
        HsIdKeypairSpecifier, HsIdPublicKeySpecifier,
    };

    /// The nickname of the test service.
    const TEST_SVC_NICKNAME: &str = "test-svc";

    /// The HTTP response the HSDir returns if everything went well.
    const OK_RESPONSE: &str = "HTTP/1.1 200 OK\r\n\r\n";

    /// The HTTP response the HSDir returns if something went wrong.
    const ERR_RESPONSE: &str = "HTTP/1.1 500 UH_OH\r\n\r\n";

    /// The error doesn't matter (we return a dummy io::Error from poll_read).
    ///
    /// NOTE: ideally, this would be an io::Result, but io::Error isn't Clone (the tests need to
    /// clone the iterator over these Results for each HSDir).
    type PollReadResult<T> = Result<T, ()>;

    /// A trait for our poll_read response iterator.
    trait PollReadIter:
        Iterator<Item = PollReadResult<String>> + Send + Sync + Clone + Unpin + 'static
    {
    }

    impl<I> PollReadIter for I where
        I: Iterator<Item = PollReadResult<String>> + Send + Sync + Clone + Unpin + 'static
    {
    }
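
    // A sketch of how the tests below model an HSDir's behavior: for example,
    // a "fail once, then succeed" HSDir can be expressed as
    //
    //     let responses = vec![Err(()), Ok(OK_RESPONSE.to_string())].into_iter();
    //
    // This iterator satisfies `PollReadIter`, because `std::vec::IntoIter` is
    // `Send + Sync + Clone + Unpin + 'static` whenever its items are.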

    #[derive(Clone, Debug, Default)]
    struct MockReactorState<I: PollReadIter> {
        /// The number of `POST /tor/hs/3/publish` requests sent by the reactor.
        publish_count: Arc<AtomicUsize>,
        /// The values returned by `DataStream::poll_read` when uploading to an HSDir.
        ///
        /// The values represent the HTTP response (or lack thereof) each HSDir sends upon
        /// receiving a POST request for uploading a descriptor.
        ///
        /// Note: this field is only used for populating responses_for_hsdir. Each time
        /// get_or_launch_hs_dir is called for a new CircTarget, this iterator is cloned and
        /// added to the responses_for_hsdir entry corresponding to the new CircTarget (HSDir).
        poll_read_responses: I,
        /// The responses that will be returned by each test HSDir (identified by its RsaIdentity).
        ///
        /// Used for testing whether the reactor correctly retries on failure.
        responses_for_hsdir: Arc<Mutex<HashMap<rsa::RsaIdentity, I>>>,
    }

    #[async_trait]
    impl<I: PollReadIter> Mockable for MockReactorState<I> {
        type Rng = TestingRng;
        type Tunnel = MockClientCirc<I>;

        fn thread_rng(&self) -> Self::Rng {
            testing_rng()
        }

        async fn get_or_launch_hs_dir<T>(
            &self,
            _netdir: &tor_netdir::NetDir,
            target: T,
        ) -> Result<Self::Tunnel, tor_circmgr::Error>
        where
            T: tor_linkspec::CircTarget + Send + Sync,
        {
            // Look up (or initialize) the poll_read response iterator for this relay.
            let id = target.rsa_identity().unwrap();
            let mut map = self.responses_for_hsdir.lock().unwrap();
            let poll_read_responses = map
                .entry(*id)
                .or_insert_with(|| self.poll_read_responses.clone());

            Ok(MockClientCirc {
                publish_count: Arc::clone(&self.publish_count),
                poll_read_responses: poll_read_responses.clone(),
            })
        }

        fn estimate_upload_timeout(&self) -> Duration {
            // Chosen arbitrarily for testing.
            Duration::from_secs(30)
        }
    }

    #[derive(Debug, Clone)]
    struct MockClientCirc<I: PollReadIter> {
        /// The number of `POST /tor/hs/3/publish` requests sent by the reactor.
        publish_count: Arc<AtomicUsize>,
        /// The values to return from `poll_read`.
        ///
        /// Used for testing whether the reactor correctly retries on failure.
        poll_read_responses: I,
    }

    #[async_trait]
    impl<I: PollReadIter> MockableDirTunnel for MockClientCirc<I> {
        type DataStream = MockDataStream<I>;

        async fn begin_dir_stream(&self) -> Result<Self::DataStream, tor_circmgr::Error> {
            Ok(MockDataStream {
                publish_count: Arc::clone(&self.publish_count),
                // TODO: this will need to change when we start reusing circuits (currently,
                // we only ever create one data stream per circuit).
                poll_read_responses: self.poll_read_responses.clone(),
            })
        }

        fn source_info(&self) -> tor_proto::Result<Option<tor_dirclient::SourceInfo>> {
            Ok(None)
        }
    }

    #[derive(Debug)]
    struct MockDataStream<I: PollReadIter> {
        /// The number of `POST /tor/hs/3/publish` requests sent by the reactor.
        publish_count: Arc<AtomicUsize>,
        /// The values to return from `poll_read`.
        ///
        /// Used for testing whether the reactor correctly retries on failure.
        poll_read_responses: I,
    }

    impl<I: PollReadIter> AsyncRead for MockDataStream<I> {
        fn poll_read(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &mut [u8],
        ) -> Poll<io::Result<usize>> {
            match self.as_mut().poll_read_responses.next() {
                Some(res) => {
                    match res {
                        Ok(res) => {
                            buf[..res.len()].copy_from_slice(res.as_bytes());

                            Poll::Ready(Ok(res.len()))
                        }
                        Err(()) => {
                            // Return an error. This should cause the reactor to reattempt the
                            // upload.
                            Poll::Ready(Err(io::Error::other("test error")))
                        }
                    }
                }
                None => Poll::Ready(Ok(0)),
            }
        }
    }

    impl<I: PollReadIter> AsyncWrite for MockDataStream<I> {
        fn poll_write(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            let request = std::str::from_utf8(buf).unwrap();

            assert!(request.starts_with("POST /tor/hs/3/publish HTTP/1.0\r\n"));
            let _prev = self.publish_count.fetch_add(1, Ordering::SeqCst);

            Poll::Ready(Ok(request.len()))
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }

        fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }

    /// Insert the specified key into the keystore.
    fn insert_svc_key<K>(key: K, keymgr: &KeyMgr, svc_key_spec: &dyn KeySpecifier)
    where
        K: ToEncodableKey,
    {
        keymgr
            .insert(
                key,
                svc_key_spec,
                tor_keymgr::KeystoreSelector::Primary,
                true,
            )
            .unwrap();
    }

    /// Create a new `KeyMgr`, provisioning its keystore with the necessary keys.
    fn init_keymgr(
        keystore_dir: &TempDir,
        nickname: &HsNickname,
        netdir: &NetDir,
    ) -> (HsId, HsBlindId, Arc<KeyMgr>) {
        let period = netdir.hs_time_period();

        let mut rng = testing_rng();
        let keypair = ed25519::Keypair::generate(&mut rng);
        let id_pub = HsIdKey::from(keypair.verifying_key());
        let id_keypair = HsIdKeypair::from(ed25519::ExpandedKeypair::from(&keypair));

        let (hs_blind_id_key, hs_blind_id_kp, _subcredential) =
            id_keypair.compute_blinded_key(period).unwrap();

        let keystore = ArtiNativeKeystore::from_path_and_mistrust(
            keystore_dir,
            &Mistrust::new_dangerously_trust_everyone(),
        )
        .unwrap();

        // Provision the keystore with the necessary keys:
        let keymgr = KeyMgrBuilder::default()
            .primary_store(Box::new(keystore))
            .build()
            .unwrap();

        insert_svc_key(
            id_keypair,
            &keymgr,
            &HsIdKeypairSpecifier::new(nickname.clone()),
        );

        insert_svc_key(
            id_pub.clone(),
            &keymgr,
            &HsIdPublicKeySpecifier::new(nickname.clone()),
        );

        insert_svc_key(
            hs_blind_id_kp,
            &keymgr,
            &BlindIdKeypairSpecifier::new(nickname.clone(), period),
        );

        insert_svc_key(
            hs_blind_id_key.clone(),
            &keymgr,
            &BlindIdPublicKeySpecifier::new(nickname.clone(), period),
        );

        insert_svc_key(
            HsDescSigningKeypair::from(ed25519::Keypair::generate(&mut rng)),
            &keymgr,
            &DescSigningKeypairSpecifier::new(nickname.clone(), period),
        );

        let hs_id = id_pub.into();
        (hs_id, hs_blind_id_key.into(), keymgr.into())
    }

    fn build_test_config(nickname: HsNickname) -> OnionServiceConfig {
        OnionServiceConfigBuilder::default()
            .nickname(nickname)
            .rate_limit_at_intro(None)
            .build()
            .unwrap()
    }

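    /// Run a single publisher test end to end: launch a [`Publisher`] driven by
    /// the given mocked HSDir responses, fire `reactor_event`, and assert on
    /// the number of (re)uploads and on the reported publisher status.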
    #[allow(clippy::too_many_arguments)]
    fn run_test<I: PollReadIter>(
        runtime: MockRuntime,
        nickname: HsNickname,
        keymgr: Arc<KeyMgr>,
        pv: IptsPublisherView,
        config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
        status_tx: StatusSender,
        netdir: NetDir,
        reactor_event: impl FnOnce(),
        poll_read_responses: I,
        expected_upload_count: usize,
        republish_count: usize,
        expect_errors: bool,
    ) {
        runtime.clone().block_on(async move {
            let netdir_provider: Arc<dyn NetDirProvider> =
                Arc::new(TestNetDirProvider::from(netdir));
            let publish_count = Default::default();
            let circpool = MockReactorState {
                publish_count: Arc::clone(&publish_count),
                poll_read_responses,
                responses_for_hsdir: Arc::new(Mutex::new(Default::default())),
            };

            let temp_dir = test_temp_dir!();
            let state_dir = temp_dir.subdir_untracked("state_dir");
            let mistrust = fs_mistrust::Mistrust::new_dangerously_trust_everyone();
            let state_dir = StateDirectory::new(state_dir, &mistrust).unwrap();
            let state_handle = state_dir.acquire_instance(&nickname).unwrap();
            let pow_nonce_dir = state_handle.raw_subdir("pow_nonces").unwrap();
            let pow_manager_storage_handle = state_handle.storage_handle("pow_manager").unwrap();

            let NewPowManager {
                pow_manager,
                rend_req_tx: _,
                rend_req_rx: _,
                publisher_update_rx: update_from_pow_manager_rx,
            } = PowManager::new(
                runtime.clone(),
                nickname.clone(),
                pow_nonce_dir,
                keymgr.clone(),
                pow_manager_storage_handle,
                netdir_provider.clone(),
                status_tx.clone().into(),
                config_rx.clone(),
            )
            .unwrap();
            let mut status_rx = status_tx.subscribe();
            let publisher: Publisher<MockRuntime, MockReactorState<_>> = Publisher::new(
                runtime.clone(),
                nickname,
                netdir_provider,
                circpool,
                pv,
                config_rx,
                status_tx.into(),
                keymgr,
                Arc::new(CfgPathResolver::default()),
                pow_manager,
                update_from_pow_manager_rx,
            );

            publisher.launch().unwrap();
            runtime.progress_until_stalled().await;
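            // The initial status is `Shutdown`, because the `StatusSender` our
            // caller passes in starts out as `OnionServiceStatus::new_shutdown()`.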
            let status = status_rx.next().await.unwrap().publisher_status();
            assert_eq!(State::Shutdown, status.state());
            assert!(status.current_problem().is_none());

            // Check that we haven't published anything yet.
            assert_eq!(publish_count.load(Ordering::SeqCst), 0);

            reactor_event();

            runtime.progress_until_stalled().await;

            // We need to manually advance the time, because some of our tests check that the
            // failed uploads are retried, and there's a sleep() between the retries
            // (see BackoffSchedule::next_delay).
            runtime.advance_by(Duration::from_secs(1)).await;
            runtime.progress_until_stalled().await;

            let initial_publish_count = publish_count.load(Ordering::SeqCst);
            assert_eq!(initial_publish_count, expected_upload_count);

            let status = status_rx.next().await.unwrap().publisher_status();
            if expect_errors {
                // The upload results aren't ready yet.
                assert_eq!(State::Bootstrapping, status.state());
            } else {
                // The test network doesn't have an SRV for the previous TP,
                // so we are "unreachable".
                assert_eq!(State::DegradedUnreachable, status.state());
            }
            assert!(status.current_problem().is_none());

            if republish_count > 0 {
                /// The longest interval after which the reactor is expected to
                /// republish the descriptor.
                const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 120);

                // Wait until the reactor triggers the necessary number of reuploads.
                runtime
                    .advance_by(MAX_TIMEOUT * (republish_count as u32))
                    .await;
                runtime.progress_until_stalled().await;

                let min_upload_count = expected_upload_count * republish_count;
                // There will be twice as many reuploads if the publisher happens
                // to reupload every hour (as opposed to every 2h).
                let max_upload_count = 2 * min_upload_count;
                let publish_count_now = publish_count.load(Ordering::SeqCst);
                // This is the total number of reuploads (i.e. the number of times
                // we published the descriptor to an HsDir).
                let actual_reupload_count = publish_count_now - initial_publish_count;
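
                // For example (hypothetical numbers): with 4 HSDirs, a multiplier
                // of 1, and republish_count = 4, we advance the clock by
                // 4 * 2h = 8h, and accept anything from 16 reuploads (one round
                // per HSDir every 2h) up to 32 (one round per HSDir every hour).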
                assert!((min_upload_count..=max_upload_count).contains(&actual_reupload_count));
            }
        });
    }

    /// Test that the publisher publishes the descriptor when the IPTs change.
    ///
    /// The `poll_read_responses` are returned by each HSDir, in order, in response to each POST
    /// request received from the publisher.
    ///
    /// The `multiplier` is the factor by which to multiply the number of HSDirs to
    /// obtain the total expected number of uploads (this works because the test "HSDirs" all
    /// behave the same, so the number of uploads is the number of HSDirs multiplied by the number
    /// of retries).
    fn publish_after_ipt_change<I: PollReadIter>(
        temp_dir: &Path,
        poll_read_responses: I,
        multiplier: usize,
        republish_count: usize,
        expect_errors: bool,
    ) {
        let runtime = MockRuntime::new();
        let nickname = HsNickname::try_from(TEST_SVC_NICKNAME.to_string()).unwrap();
        let config = build_test_config(nickname.clone());
        let (_config_tx, config_rx) = watch::channel_with(Arc::new(config));

        let (mut mv, pv) = ipts_channel(&runtime, create_storage_handles(temp_dir).1).unwrap();
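        // Simulate the IPT manager establishing introduction points: lift the
        // intro points out of a test descriptor and hand them to the publisher
        // through the IPT channel.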
        let update_ipts = || {
            let ipts: Vec<IptInSet> = test_data::test_parsed_hsdesc()
                .unwrap()
                .intro_points()
                .iter()
                .enumerate()
                .map(|(i, ipt)| IptInSet {
                    ipt: ipt.clone(),
                    lid: [i.try_into().unwrap(); 32].into(),
                })
                .collect();

            mv.borrow_for_update(runtime.clone()).ipts = Some(IptSet {
                ipts,
                lifetime: Duration::from_secs(20),
            });
        };

        let netdir = testnet::construct_netdir().unwrap_if_sufficient().unwrap();
        let keystore_dir = tempdir().unwrap();

        let (_hsid, blind_id, keymgr) = init_keymgr(&keystore_dir, &nickname, &netdir);

        let hsdir_count = netdir
            .hs_dirs_upload(blind_id, netdir.hs_time_period())
            .unwrap()
            .collect::<Vec<_>>()
            .len();

        assert!(hsdir_count > 0);

        // If any of the uploads fail, they will be retried. Note that the upload failure will
        // affect _each_ hsdir, so the expected number of uploads is a multiple of hsdir_count.
        let expected_upload_count = hsdir_count * multiplier;
        let status_tx = StatusSender::new(OnionServiceStatus::new_shutdown());

        run_test(
            runtime.clone(),
            nickname,
            keymgr,
            pv,
            config_rx,
            status_tx,
            netdir,
            update_ipts,
            poll_read_responses,
            expected_upload_count,
            republish_count,
            expect_errors,
        );
    }

    #[test]
    fn publish_after_ipt_change_no_errors() {
        // The HSDirs always respond with 200 OK, so we expect to publish hsdir_count times.
        let poll_reads = [Ok(OK_RESPONSE.into())].into_iter();

        test_temp_dir!().used_by(|dir| publish_after_ipt_change(dir, poll_reads, 1, 0, false));
    }

    #[test]
    fn publish_after_ipt_change_with_errors() {
        let err_responses = vec![
            // The HSDir closed the connection without sending a response.
            Err(()),
            // The HSDir responded with an internal server error.
            Ok(ERR_RESPONSE.to_string()),
        ];

        for error_res in err_responses.into_iter() {
            let poll_reads = vec![
                // Each HSDir first responds with an error, which causes the publisher to retry the
                // upload. The HSDir then responds with "200 OK".
                //
                // We expect to publish hsdir_count * 2 times (for each HSDir, the first upload
                // attempt fails, but the second succeeds).
                error_res,
                Ok(OK_RESPONSE.to_string()),
            ]
            .into_iter();

            test_temp_dir!().used_by(|dir| publish_after_ipt_change(dir, poll_reads, 2, 0, true));
        }
    }

    #[test]
    fn reupload_after_publishing() {
        let poll_reads = [Ok(OK_RESPONSE.into())].into_iter();
        // Test that 4 reuploads happen after the initial upload.
        const REUPLOAD_COUNT: usize = 4;

        test_temp_dir!()
            .used_by(|dir| publish_after_ipt_change(dir, poll_reads, 1, REUPLOAD_COUNT, false));
    }

    // TODO (#1120): test that the descriptor is republished when the config changes

    // TODO (#1120): test that the descriptor is reuploaded only to the HSDirs that need it (i.e. the
    // ones for which it's dirty)

    // TODO (#1120): test that rate-limiting works correctly

    // TODO (#1120): test that the uploaded descriptor contains the expected values

    // TODO (#1120): test that the publisher stops publishing if the IPT manager sets the IPTs to
    // `None`.
}