mod backoff;
mod descriptor;
mod reactor;
mod reupload_timer;

use crate::config::restricted_discovery::RestrictedDiscoveryKeys;
use crate::internal_prelude::*;
use crate::pow::PowManager;

use backoff::{BackoffError, BackoffSchedule, RetriableError, Runner};
use descriptor::{build_sign, DescriptorStatus, VersionedDescriptor};
use reactor::read_blind_id_keypair;
use reactor::Reactor;
use reupload_timer::ReuploadTimer;

use tor_config_path::CfgPathResolver;

pub use reactor::UploadError;
pub(crate) use reactor::{Mockable, Real, OVERALL_UPLOAD_TIMEOUT};

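/// A handle for the descriptor publisher of an onion service.
///
/// Calling [`launch`](Publisher::launch) spawns a background task (the publisher
/// [`Reactor`]) that builds the service's descriptors and uploads them to the
/// relevant HsDirs, keeping them up to date as the configuration, the
/// introduction points, or the network directory change.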
#[must_use = "If you don't call launch() on the publisher, it won't publish any descriptors."]
pub(crate) struct Publisher<R: Runtime, M: Mockable> {
    /// The runtime, used for spawning the reactor task.
    runtime: R,
    /// The nickname of the onion service this publisher serves.
    nickname: HsNickname,
    /// The source of network directories.
    dir_provider: Arc<dyn NetDirProvider>,
    /// Mockable state, used for launching circuits to the HsDirs (mocked in the tests below).
    mockable: M,
    /// The onion service configuration the publisher was created with.
    config: Arc<OnionServiceConfig>,
    /// The publisher's view of the introduction point set.
    ipt_watcher: IptsPublisherView,
    /// A channel for receiving onion service configuration updates.
    config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
    /// The key manager, used for looking up the service's keys.
    keymgr: Arc<KeyMgr>,
    /// A sender for reporting the status of the publisher reactor.
    status_tx: PublisherStatusSender,
    /// The resolver used for expanding configuration paths.
    path_resolver: Arc<CfgPathResolver>,
    /// The proof-of-work manager for this service.
    pow_manager: Arc<PowManager<R>>,
    /// A channel for receiving, from the proof-of-work manager, the time periods
    /// whose descriptors need republishing.
    update_from_pow_manager_rx: mpsc::Receiver<TimePeriod>,
}

impl<R: Runtime, M: Mockable> Publisher<R, M> {
    #[allow(clippy::too_many_arguments)]
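    /// Create a new publisher.
    ///
    /// The publisher is inert until [`launch`](Publisher::launch) is called.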
    pub(crate) fn new(
        runtime: R,
        nickname: HsNickname,
        dir_provider: Arc<dyn NetDirProvider>,
        mockable: impl Into<M>,
        ipt_watcher: IptsPublisherView,
        config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
        status_tx: PublisherStatusSender,
        keymgr: Arc<KeyMgr>,
        path_resolver: Arc<CfgPathResolver>,
        pow_manager: Arc<PowManager<R>>,
        update_from_pow_manager_rx: mpsc::Receiver<TimePeriod>,
    ) -> Self {
        let config = config_rx.borrow().clone();
        Self {
            runtime,
            nickname,
            dir_provider,
            mockable: mockable.into(),
            config,
            ipt_watcher,
            config_rx,
            status_tx,
            keymgr,
            path_resolver,
            pow_manager,
            update_from_pow_manager_rx,
        }
    }

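    /// Launch the publisher reactor.
    ///
    /// This spawns a task on the runtime that runs until the reactor shuts down,
    /// uploading descriptors to the HsDirs as needed.
    ///
    /// Illustrative usage (a sketch; see `run_test` in the test module below for a
    /// complete setup):
    ///
    /// ```ignore
    /// let publisher = Publisher::new(
    ///     runtime, nickname, dir_provider, circ_pool, ipt_watcher, config_rx,
    ///     status_tx, keymgr, path_resolver, pow_manager, publisher_update_rx,
    /// );
    /// publisher.launch()?;
    /// ```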
    pub(crate) fn launch(self) -> Result<(), StartupError> {
        let Publisher {
            runtime,
            nickname,
            dir_provider,
            mockable,
            config,
            ipt_watcher,
            config_rx,
            status_tx,
            keymgr,
            path_resolver,
            pow_manager,
            update_from_pow_manager_rx: publisher_update_rx,
        } = self;

        let reactor = Reactor::new(
            runtime.clone(),
            nickname,
            dir_provider,
            mockable,
            &config,
            ipt_watcher,
            config_rx,
            status_tx,
            keymgr,
            path_resolver,
            pow_manager,
            publisher_update_rx,
        );

        runtime
            .spawn(async move {
                match reactor.run().await {
                    Ok(()) => debug!("the publisher reactor has shut down"),
                    Err(e) => warn_report!(e, "the publisher reactor has shut down"),
                }
            })
            .map_err(|e| StartupError::Spawn {
                spawning: "publisher reactor task",
                cause: e.into(),
            })?;

        Ok(())
    }
}

#[cfg(all(test, not(feature = "hs-pow-full")))]
mod test {
    #![allow(clippy::bool_assert_comparison)]
    #![allow(clippy::clone_on_copy)]
    #![allow(clippy::dbg_macro)]
    #![allow(clippy::mixed_attributes_style)]
    #![allow(clippy::print_stderr)]
    #![allow(clippy::print_stdout)]
    #![allow(clippy::single_char_pattern)]
    #![allow(clippy::unwrap_used)]
    #![allow(clippy::unchecked_duration_subtraction)]
    #![allow(clippy::useless_vec)]
    #![allow(clippy::needless_pass_by_value)]
    use super::*;

    use std::collections::HashMap;
    use std::io;
    use std::path::Path;
    use std::pin::Pin;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Mutex;
    use std::task::{Context, Poll};
    use std::time::Duration;

    use async_trait::async_trait;
    use fs_mistrust::Mistrust;
    use futures::{AsyncRead, AsyncWrite};
    use tempfile::{tempdir, TempDir};
    use test_temp_dir::test_temp_dir;

    use tor_basic_utils::test_rng::{testing_rng, TestingRng};
    use tor_circmgr::hspool::HsCircKind;
    use tor_hscrypto::pk::{HsBlindId, HsDescSigningKeypair, HsId, HsIdKey, HsIdKeypair};
    use tor_key_forge::ToEncodableKey;
    use tor_keymgr::{ArtiNativeKeystore, KeyMgrBuilder, KeySpecifier};
    use tor_llcrypto::pk::{ed25519, rsa};
    use tor_netdir::testprovider::TestNetDirProvider;
    use tor_netdir::{testnet, NetDir};
    use tor_netdoc::doc::hsdesc::test_data;
    use tor_rtcompat::ToplevelBlockOn;
    use tor_rtmock::MockRuntime;

    use crate::config::OnionServiceConfigBuilder;
    use crate::ipt_set::{ipts_channel, IptInSet, IptSet};
    use crate::pow::NewPowManager;
    use crate::publish::reactor::MockableClientCirc;
    use crate::status::{OnionServiceStatus, StatusSender};
    use crate::test::create_storage_handles;
    use crate::HsNickname;
    use crate::{
        BlindIdKeypairSpecifier, BlindIdPublicKeySpecifier, DescSigningKeypairSpecifier,
        HsIdKeypairSpecifier, HsIdPublicKeySpecifier,
    };

    /// The nickname of the test service.
    const TEST_SVC_NICKNAME: &str = "test-svc";

    /// The HTTP response the mock HsDirs return when an upload succeeds.
    const OK_RESPONSE: &str = "HTTP/1.1 200 OK\r\n\r\n";

    /// The HTTP response the mock HsDirs return when an upload fails.
    const ERR_RESPONSE: &str = "HTTP/1.1 500 UH_OH\r\n\r\n";

    /// The outcome of a single mocked `poll_read`: either a canned response, or an error.
    type PollReadResult<T> = Result<T, ()>;

    /// A trait for an iterator that yields the responses a mock data stream should return.
    trait PollReadIter:
        Iterator<Item = PollReadResult<String>> + Send + Sync + Clone + Unpin + 'static
    {
    }

    impl<I> PollReadIter for I where
        I: Iterator<Item = PollReadResult<String>> + Send + Sync + Clone + Unpin + 'static
    {
    }

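    /// Mock for the circuit-pool side of the publisher ([`Mockable`]).
    ///
    /// Instead of building real circuits, it hands out [`MockClientCirc`]s that
    /// replay canned HTTP responses, and counts how many uploads were attempted.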
    #[derive(Clone, Debug, Default)]
    struct MockReactorState<I: PollReadIter> {
        /// The number of `POST /tor/hs/3/publish` requests sent so far.
        publish_count: Arc<AtomicUsize>,
        /// The responses to return from `poll_read` for an HsDir we haven't seen before.
        poll_read_responses: I,
        /// The response iterator in use for each HsDir, keyed by its RSA identity.
        responses_for_hsdir: Arc<Mutex<HashMap<rsa::RsaIdentity, I>>>,
    }

    #[async_trait]
    impl<I: PollReadIter> Mockable for MockReactorState<I> {
        type Rng = TestingRng;
        type ClientCirc = MockClientCirc<I>;

        fn thread_rng(&self) -> Self::Rng {
            testing_rng()
        }

        async fn get_or_launch_specific<T>(
            &self,
            _netdir: &tor_netdir::NetDir,
            kind: HsCircKind,
            target: T,
        ) -> Result<Arc<Self::ClientCirc>, tor_circmgr::Error>
        where
            T: tor_linkspec::CircTarget + Send + Sync,
        {
            assert_eq!(kind, HsCircKind::SvcHsDir);

            // Pick the canned responses to use for this HsDir (falling back to the
            // default set if we haven't contacted this HsDir before).
            let id = target.rsa_identity().unwrap();
            let mut map = self.responses_for_hsdir.lock().unwrap();
            let poll_read_responses = map
                .entry(*id)
                .or_insert_with(|| self.poll_read_responses.clone());

            Ok(MockClientCirc {
                publish_count: Arc::clone(&self.publish_count),
                poll_read_responses: poll_read_responses.clone(),
            }
            .into())
        }

        fn estimate_upload_timeout(&self) -> Duration {
            Duration::from_secs(30)
        }
    }

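    /// A mock client circuit to a single HsDir ([`MockableClientCirc`]).
    ///
    /// Its directory streams replay the canned `poll_read_responses`.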
    #[derive(Debug, Clone)]
    struct MockClientCirc<I: PollReadIter> {
        /// The number of `POST /tor/hs/3/publish` requests sent so far.
        publish_count: Arc<AtomicUsize>,
        /// The responses to return from `poll_read`.
        poll_read_responses: I,
    }

    #[async_trait]
    impl<I: PollReadIter> MockableClientCirc for MockClientCirc<I> {
        type DataStream = MockDataStream<I>;

        async fn begin_dir_stream(self: Arc<Self>) -> Result<Self::DataStream, tor_proto::Error> {
            Ok(MockDataStream {
                publish_count: Arc::clone(&self.publish_count),
                poll_read_responses: self.poll_read_responses.clone(),
            })
        }
    }

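    /// A mock directory stream over which descriptors are "uploaded".
    ///
    /// Writes are checked for a well-formed `POST /tor/hs/3/publish` request and
    /// counted; reads yield the next canned response.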
    #[derive(Debug)]
    struct MockDataStream<I: PollReadIter> {
        /// The number of `POST /tor/hs/3/publish` requests sent so far.
        publish_count: Arc<AtomicUsize>,
        /// The responses to return from `poll_read`.
        poll_read_responses: I,
    }

    impl<I: PollReadIter> AsyncRead for MockDataStream<I> {
        fn poll_read(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &mut [u8],
        ) -> Poll<io::Result<usize>> {
            match self.as_mut().poll_read_responses.next() {
                Some(res) => {
                    match res {
                        Ok(res) => {
                            buf[..res.len()].copy_from_slice(res.as_bytes());

                            Poll::Ready(Ok(res.len()))
                        }
                        Err(()) => {
                            Poll::Ready(Err(io::Error::other("test error")))
                        }
                    }
                }
                // Out of canned responses: report end-of-stream.
                None => Poll::Ready(Ok(0)),
            }
        }
    }

    impl<I: PollReadIter> AsyncWrite for MockDataStream<I> {
        fn poll_write(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            let request = std::str::from_utf8(buf).unwrap();

            // Every write should be a descriptor upload; count it.
            assert!(request.starts_with("POST /tor/hs/3/publish HTTP/1.0\r\n"));
            let _prev = self.publish_count.fetch_add(1, Ordering::SeqCst);

            Poll::Ready(Ok(request.len()))
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }

        fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }

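    /// Insert the given service key into the primary keystore.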
    fn insert_svc_key<K>(key: K, keymgr: &KeyMgr, svc_key_spec: &dyn KeySpecifier)
    where
        K: ToEncodableKey,
    {
        keymgr
            .insert(
                key,
                svc_key_spec,
                tor_keymgr::KeystoreSelector::Primary,
                true,
            )
            .unwrap();
    }

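    /// Create a key manager for the test service, providing it with freshly
    /// generated identity, blinded-identity, and descriptor-signing keys for the
    /// current time period.
    ///
    /// Returns the service's `HsId`, its blinded ID, and the key manager itself.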
    fn init_keymgr(
        keystore_dir: &TempDir,
        nickname: &HsNickname,
        netdir: &NetDir,
    ) -> (HsId, HsBlindId, Arc<KeyMgr>) {
        let period = netdir.hs_time_period();

        // Generate a fresh identity key for the service.
        let mut rng = testing_rng();
        let keypair = ed25519::Keypair::generate(&mut rng);
        let id_pub = HsIdKey::from(keypair.verifying_key());
        let id_keypair = HsIdKeypair::from(ed25519::ExpandedKeypair::from(&keypair));

        // Derive the blinded identity key for the current time period.
        let (hs_blind_id_key, hs_blind_id_kp, _subcredential) =
            id_keypair.compute_blinded_key(period).unwrap();

        let keystore = ArtiNativeKeystore::from_path_and_mistrust(
            keystore_dir,
            &Mistrust::new_dangerously_trust_everyone(),
        )
        .unwrap();

        let keymgr = KeyMgrBuilder::default()
            .primary_store(Box::new(keystore))
            .build()
            .unwrap();

        insert_svc_key(
            id_keypair,
            &keymgr,
            &HsIdKeypairSpecifier::new(nickname.clone()),
        );

        insert_svc_key(
            id_pub.clone(),
            &keymgr,
            &HsIdPublicKeySpecifier::new(nickname.clone()),
        );

        insert_svc_key(
            hs_blind_id_kp,
            &keymgr,
            &BlindIdKeypairSpecifier::new(nickname.clone(), period),
        );

        insert_svc_key(
            hs_blind_id_key.clone(),
            &keymgr,
            &BlindIdPublicKeySpecifier::new(nickname.clone(), period),
        );

        insert_svc_key(
            HsDescSigningKeypair::from(ed25519::Keypair::generate(&mut rng)),
            &keymgr,
            &DescSigningKeypairSpecifier::new(nickname.clone(), period),
        );

        let hs_id = id_pub.into();
        (hs_id, hs_blind_id_key.into(), keymgr.into())
    }

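    /// Build an `OnionServiceConfig` for the test service, with introduction-point
    /// rate limiting disabled.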
    fn build_test_config(nickname: HsNickname) -> OnionServiceConfig {
        OnionServiceConfigBuilder::default()
            .nickname(nickname)
            .rate_limit_at_intro(None)
            .build()
            .unwrap()
    }

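    /// Run a single publisher test case.
    ///
    /// This spins up a [`Publisher`] backed by mock circuits, triggers
    /// `reactor_event`, and then checks that the number of descriptor uploads and
    /// the reported publisher status match what the caller expects. If
    /// `republish_count` is nonzero, it also advances the mock clock far enough to
    /// trigger that many rounds of periodic reuploads.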
    #[allow(clippy::too_many_arguments)]
    fn run_test<I: PollReadIter>(
        runtime: MockRuntime,
        nickname: HsNickname,
        keymgr: Arc<KeyMgr>,
        pv: IptsPublisherView,
        config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
        status_tx: PublisherStatusSender,
        netdir: NetDir,
        reactor_event: impl FnOnce(),
        poll_read_responses: I,
        expected_upload_count: usize,
        republish_count: usize,
        expect_errors: bool,
    ) {
        runtime.clone().block_on(async move {
            let netdir_provider: Arc<dyn NetDirProvider> =
                Arc::new(TestNetDirProvider::from(netdir));
            let publish_count = Default::default();
            let circpool = MockReactorState {
                publish_count: Arc::clone(&publish_count),
                poll_read_responses,
                responses_for_hsdir: Arc::new(Mutex::new(Default::default())),
            };

            let temp_dir = test_temp_dir!();
            let state_dir = temp_dir.subdir_untracked("state_dir");
            let mistrust = fs_mistrust::Mistrust::new_dangerously_trust_everyone();
            let state_dir = StateDirectory::new(state_dir, &mistrust).unwrap();
            let state_handle = state_dir.acquire_instance(&nickname).unwrap();
            let pow_nonce_dir = state_handle.raw_subdir("pow_nonces").unwrap();
            let pow_manager_storage_handle = state_handle.storage_handle("pow_manager").unwrap();

            let NewPowManager {
                pow_manager,
                rend_req_tx: _,
                rend_req_rx: _,
                publisher_update_rx: update_from_pow_manager_rx,
            } = PowManager::new(
                runtime.clone(),
                nickname.clone(),
                pow_nonce_dir,
                keymgr.clone(),
                pow_manager_storage_handle,
            )
            .unwrap();
            let mut status_rx = status_tx.subscribe();
            let publisher: Publisher<MockRuntime, MockReactorState<_>> = Publisher::new(
                runtime.clone(),
                nickname,
                netdir_provider,
                circpool,
                pv,
                config_rx,
                status_tx,
                keymgr,
                Arc::new(CfgPathResolver::default()),
                pow_manager,
                update_from_pow_manager_rx,
            );

            publisher.launch().unwrap();
            runtime.progress_until_stalled().await;
            let status = status_rx.next().await.unwrap().publisher_status();
            assert_eq!(State::Shutdown, status.state());
            assert!(status.current_problem().is_none());

            // Nothing should be published until an event (here, an introduction
            // point change) wakes up the reactor.
            assert_eq!(publish_count.load(Ordering::SeqCst), 0);

            reactor_event();

            runtime.progress_until_stalled().await;

            // Advance the mock clock a little so that any scheduled upload tasks can run.
            runtime.advance_by(Duration::from_secs(1)).await;
            runtime.progress_until_stalled().await;

            let initial_publish_count = publish_count.load(Ordering::SeqCst);
            assert_eq!(initial_publish_count, expected_upload_count);

            let status = status_rx.next().await.unwrap().publisher_status();
            if expect_errors {
                // Some of the uploads failed, so the publisher is still bootstrapping.
                assert_eq!(State::Bootstrapping, status.state());
            } else {
                // All of the uploads succeeded.
                assert_eq!(State::DegradedUnreachable, status.state());
            }
            assert!(status.current_problem().is_none());

            if republish_count > 0 {
                // The longest time between two consecutive reuploads.
                const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 120);

                runtime
                    .advance_by(MAX_TIMEOUT * (republish_count as u32))
                    .await;
                runtime.progress_until_stalled().await;

                // We advanced the clock by republish_count * MAX_TIMEOUT, so we expect
                // at least `republish_count` rounds of reuploads, and at most twice that many.
                let min_upload_count = expected_upload_count * republish_count;
                let max_upload_count = 2 * min_upload_count;
                let publish_count_now = publish_count.load(Ordering::SeqCst);
                let actual_reupload_count = publish_count_now - initial_publish_count;

                assert!((min_upload_count..=max_upload_count).contains(&actual_reupload_count));
            }
        });
    }

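    /// Test that the publisher uploads a descriptor to all the relevant HsDirs
    /// once the introduction points are updated.
    ///
    /// `poll_read_responses` provides the canned HsDir responses; the test expects
    /// `hsdir_count * multiplier` uploads in total, followed by `republish_count`
    /// rounds of periodic reuploads. If `expect_errors` is true, the publisher is
    /// expected to still be bootstrapping after the initial round of uploads.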
    fn publish_after_ipt_change<I: PollReadIter>(
        temp_dir: &Path,
        poll_read_responses: I,
        multiplier: usize,
        republish_count: usize,
        expect_errors: bool,
    ) {
        let runtime = MockRuntime::new();
        let nickname = HsNickname::try_from(TEST_SVC_NICKNAME.to_string()).unwrap();
        let config = build_test_config(nickname.clone());
        let (_config_tx, config_rx) = watch::channel_with(Arc::new(config));

        let (mut mv, pv) = ipts_channel(&runtime, create_storage_handles(temp_dir).1).unwrap();
        let update_ipts = || {
            let ipts: Vec<IptInSet> = test_data::test_parsed_hsdesc()
                .unwrap()
                .intro_points()
                .iter()
                .enumerate()
                .map(|(i, ipt)| IptInSet {
                    ipt: ipt.clone(),
                    lid: [i.try_into().unwrap(); 32].into(),
                })
                .collect();

            mv.borrow_for_update(runtime.clone()).ipts = Some(IptSet {
                ipts,
                lifetime: Duration::from_secs(20),
            });
        };

        let netdir = testnet::construct_netdir().unwrap_if_sufficient().unwrap();
        let keystore_dir = tempdir().unwrap();

        let (_hsid, blind_id, keymgr) = init_keymgr(&keystore_dir, &nickname, &netdir);

        let hsdir_count = netdir
            .hs_dirs_upload(blind_id, netdir.hs_time_period())
            .unwrap()
            .collect::<Vec<_>>()
            .len();

        assert!(hsdir_count > 0);

        // Each HsDir gets `multiplier` upload attempts (e.g. a failed attempt plus a retry).
        let expected_upload_count = hsdir_count * multiplier;
        let status_tx = StatusSender::new(OnionServiceStatus::new_shutdown()).into();

        run_test(
            runtime.clone(),
            nickname,
            keymgr,
            pv,
            config_rx,
            status_tx,
            netdir,
            update_ipts,
            poll_read_responses,
            expected_upload_count,
            republish_count,
            expect_errors,
        );
    }

    #[test]
    fn publish_after_ipt_change_no_errors() {
        // Every HsDir responds with 200 OK.
        let poll_reads = [Ok(OK_RESPONSE.into())].into_iter();

        test_temp_dir!().used_by(|dir| publish_after_ipt_change(dir, poll_reads, 1, 0, false));
    }

    #[test]
    fn publish_after_ipt_change_with_errors() {
        let err_responses = vec![
            // A stream error.
            Err(()),
            // An HTTP error response from the HsDir.
            Ok(ERR_RESPONSE.to_string()),
        ];

        for error_res in err_responses.into_iter() {
            let poll_reads = vec![
                // The first response the HsDir sends is an error...
                error_res,
                // ...followed by a success response.
                Ok(OK_RESPONSE.to_string()),
            ]
            .into_iter();

            test_temp_dir!().used_by(|dir| publish_after_ipt_change(dir, poll_reads, 2, 0, true));
        }
    }

    #[test]
    fn reupload_after_publishing() {
        let poll_reads = [Ok(OK_RESPONSE.into())].into_iter();
        // The number of reupload rounds to wait for.
        const REUPLOAD_COUNT: usize = 4;

        test_temp_dir!()
            .used_by(|dir| publish_after_ipt_change(dir, poll_reads, 1, REUPLOAD_COUNT, false));
    }
}