Tor  0.4.8.0-alpha-dev
cpuworker.c
/* Copyright (c) 2003-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file cpuworker.c
 * \brief Uses the workqueue/threadpool code to farm CPU-intensive activities
 * out to worker threads.
 *
 * The multithreading backend for this module is in workqueue.c; this module
 * specializes workqueue.c.
 *
 * Right now, we use this infrastructure
 * <ul><li>for processing onionskins in onion.c
 * <li>for compressing consensuses in consdiffmgr.c,
 * <li>and for calculating diffs and compressing them in consdiffmgr.c.
 * </ul>
 **/
#include "core/or/or.h"
#include "core/or/channel.h"
#include "core/or/circuitlist.h"
#include "core/or/connection_or.h"
#include "app/config/config.h"
#include "core/mainloop/cpuworker.h"
#include "core/crypto/onion_crypto.h"
#include "core/or/congestion_control_common.h"
#include "core/or/congestion_control_flow.h"
#include "core/or/onion.h"
#include "feature/nodelist/networkstatus.h"
#include "feature/relay/circuitbuild_relay.h"
#include "feature/relay/onion_queue.h"
#include "feature/stats/rephist.h"
#include "feature/relay/router.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "lib/crypt_ops/crypto_util.h"
#include "lib/evloop/workqueue.h"

#include "core/or/or_circuit_st.h"

static void queue_pending_tasks(void);

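/** State kept by each worker thread: the onion keys it should use for
 *  handshakes, plus a generation counter that is bumped on every key
 *  rotation. */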
typedef struct worker_state_t {
  int generation;
  server_onion_keys_t *onion_keys;
} worker_state_t;

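/** Constructor for per-thread worker state; handed to threadpool_new() so
 *  that each worker thread starts with its own copy of the onion keys. */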
static void *
worker_state_new(void *arg)
{
  worker_state_t *ws;
  (void)arg;
  ws = tor_malloc_zero(sizeof(worker_state_t));
  ws->onion_keys = server_onion_keys_new();
  return ws;
}

#define worker_state_free(ws) \
  FREE_AND_NULL(worker_state_t, worker_state_free_, (ws))

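/** Release all storage held in <b>ws</b>. */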
static void
worker_state_free_(worker_state_t *ws)
{
  if (!ws)
    return;
  server_onion_keys_free(ws->onion_keys);
  tor_free(ws);
}

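/** Wrapper for worker_state_free_() matching the void-pointer free-function
 *  signature that the threadpool expects. */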
static void
worker_state_free_void(void *arg)
{
  worker_state_free_(arg);
}

static replyqueue_t *replyqueue = NULL;
static threadpool_t *threadpool = NULL;

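/** How many jobs are queued on the threadpool or still awaiting a reply,
 *  and the cap above which new onionskins wait in the onion queue instead
 *  (see queue_pending_tasks() below). */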
static uint32_t total_pending_tasks = 0;
static uint32_t max_pending_tasks = 128;

/** Return the consensus parameter max pending tasks per CPU. */
static uint32_t
get_max_pending_tasks_per_cpu(const networkstatus_t *ns)
{
/* Total voodoo. Can we make this more sensible? Maybe, that is why we made it
 * a consensus parameter so our future self can figure out this magic. */
#define MAX_PENDING_TASKS_PER_CPU_DEFAULT 64
#define MAX_PENDING_TASKS_PER_CPU_MIN 1
#define MAX_PENDING_TASKS_PER_CPU_MAX INT32_MAX

  return networkstatus_get_param(ns, "max_pending_tasks_per_cpu",
                                 MAX_PENDING_TASKS_PER_CPU_DEFAULT,
                                 MAX_PENDING_TASKS_PER_CPU_MIN,
                                 MAX_PENDING_TASKS_PER_CPU_MAX);
}

/** Set the maximum number of pending tasks, scaled by the number of CPU
 * workers. This uses the consensus parameter for the per-CPU limit; the
 * <b>ns</b> parameter may be NULL, as when no consensus is available at the
 * time this value is set. */
static void
set_max_pending_tasks(const networkstatus_t *ns)
{
  max_pending_tasks =
    get_max_pending_tasks_per_cpu(ns) * get_num_cpus(get_options());
}

/** Called when the consensus has changed. */
void
cpuworker_consensus_has_changed(const networkstatus_t *ns)
{
  tor_assert(ns);
  set_max_pending_tasks(ns);
}

/** Initialize the cpuworker subsystem. It is OK to call this more than once
 * during Tor's lifetime.
 */
void
cpu_init(void)
{
  if (!replyqueue) {
    replyqueue = replyqueue_new(0);
  }
  if (!threadpool) {
    /*
      In our threadpool implementation, half the threads are permissive and
      half are strict (when it comes to running lower-priority tasks). So we
      always make sure we have at least two threads, so that there will be at
      least one thread of each kind.
    */
    const int n_threads = MAX(get_num_cpus(get_options()), 2);
    threadpool = threadpool_new(n_threads,
                                replyqueue,
                                worker_state_new,
                                worker_state_free_void,
                                NULL);

    int r = threadpool_register_reply_event(threadpool, NULL);

    tor_assert(r == 0);
  }

  set_max_pending_tasks(NULL);
}

/** Return the number of threads configured for our CPU worker. */
unsigned int
cpuworker_get_n_threads(void)
{
  if (!threadpool) {
    return 0;
  }
  return threadpool_get_n_threads(threadpool);
}

/** Magic numbers to make sure our cpuworker_requests don't grow any
 * mis-framing bugs. */
#define CPUWORKER_REQUEST_MAGIC 0xda4afeed
#define CPUWORKER_REPLY_MAGIC 0x5eedf00d

/** A request sent to a cpuworker. */
typedef struct cpuworker_request_t {
  /** Magic number; must be CPUWORKER_REQUEST_MAGIC. */
  uint32_t magic;

  /** Flag: Are we timing this request? */
  unsigned timed : 1;
  /** If we're timing this request, when was it sent to the cpuworker? */
  struct timeval started_at;

  /** A create cell for the cpuworker to process. */
  create_cell_t create_cell;

  /**
   * A copy of this relay's consensus params that are relevant to
   * the circuit, for use in negotiation. */
  circuit_params_t circ_ns_params;

  /* Turn the above into a tagged union if needed. */
} cpuworker_request_t;

/** A reply sent by a cpuworker. */
typedef struct cpuworker_reply_t {
  /** Magic number; must be CPUWORKER_REPLY_MAGIC. */
  uint32_t magic;

  /** True iff we got a successful request. */
  uint8_t success;

  /** Are we timing this request? */
  unsigned int timed : 1;
  /** What handshake type was the request? (Used for timing) */
  uint16_t handshake_type;
  /** When did we send the request to the cpuworker? */
  struct timeval started_at;
  /** Once the cpuworker received the request, how many microseconds did it
   * take? (This shouldn't overflow; 4 billion microseconds is over an hour,
   * and we'll never have an onion handshake that takes so long.) */
  uint32_t n_usec;

  /** Output of processing a create cell
   *
   * @{
   */
  /** The created cell to send back. */
  created_cell_t created_cell;
  /** The keys to use on this circuit. */
  uint8_t keys[CPATH_KEY_MATERIAL_LEN];
  /** Input to use for authenticating introduce1 cells. */
  uint8_t rend_auth_material[DIGEST_LEN];
  /** Negotiated circuit parameters. */
  circuit_params_t circ_params;
  /** @} */
} cpuworker_reply_t;

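/** A unit of work for a cpuworker: the circuit it concerns, plus a union
 *  that holds the request on the way to the worker thread and the reply on
 *  the way back. */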
typedef struct cpuworker_job_u_t {
  or_circuit_t *circ;
  union {
    cpuworker_request_t request;
    cpuworker_reply_t reply;
  } u;
} cpuworker_job_t;

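/** Worker-thread function run via threadpool_queue_update(): replace this
 *  thread's onion keys with the fresh ones carried in <b>work_</b>. */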
static workqueue_reply_t
update_state_threadfn(void *state_, void *work_)
{
  worker_state_t *state = state_;
  worker_state_t *update = work_;
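  /* Adopt the new keys, and clear the pointer in <b>update</b> so that
   * worker_state_free() below doesn't free the keys we just took. */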
  server_onion_keys_free(state->onion_keys);
  state->onion_keys = update->onion_keys;
  update->onion_keys = NULL;
  worker_state_free(update);
  ++state->generation;
  return WQ_RPL_REPLY;
}

/** Called when the onion key has changed: queue a state update so that every
 * CPU worker rebuilds its state from the new keys.
 */
void
cpuworkers_rotate_keyinfo(void)
{
  if (!threadpool) {
    /* If we're a client, then we won't have cpuworkers, and we won't need
     * to tell them to rotate their state.
     */
    return;
  }
  if (threadpool_queue_update(threadpool,
                              worker_state_new,
                              update_state_threadfn,
                              worker_state_free_void,
                              NULL)) {
    log_warn(LD_OR, "Failed to queue key update for worker threads.");
  }
}

/** Indexed by handshake type: how many onionskins have we processed and
 * counted of that type? */
static uint64_t onionskins_n_processed[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to the onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent in cpuworkers
 * processing that kind of onionskin? */
static uint64_t onionskins_usec_internal[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent waiting for
 * cpuworkers to give us answers for that kind of onionskin?
 */
static uint64_t onionskins_usec_roundtrip[MAX_ONION_HANDSHAKE_TYPE+1];

/** If any onionskin takes longer than this, we clip them to this
 * time. (microseconds) */
#define MAX_BELIEVABLE_ONIONSKIN_DELAY (2*1000*1000)

/** Return true iff we'd like to measure a handshake of type
 * <b>onionskin_type</b>. Call only from the main thread. */
static int
should_time_request(uint16_t onionskin_type)
{
  /* If we've never heard of this type, we shouldn't even be here. */
  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE)
    return 0;
  /* Measure the first N handshakes of each type, to ensure we have a
   * sample */
  if (onionskins_n_processed[onionskin_type] < 4096)
    return 1;

  /* Otherwise, measure with P=1/128. We avoid doing this for every
   * handshake, since the measurement itself can take a little time. */
  return crypto_fast_rng_one_in_n(get_thread_fast_rng(), 128);
}

/** Return an estimate of how many microseconds we will need for a single
 * cpuworker to process <b>n_requests</b> onionskins of type
 * <b>onionskin_type</b>. */
uint64_t
estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
{
  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
    return 1000 * (uint64_t)n_requests;
  if (PREDICT_UNLIKELY(onionskins_n_processed[onionskin_type] < 100)) {
    /* Until we have 100 data points, just assume everything takes 1 msec. */
    return 1000 * (uint64_t)n_requests;
  } else {
    /* This can't overflow: we'll never have more than 500000 onionskins
     * measured in onionskins_usec_internal, and they won't take anything near
     * 1 sec each, and we won't have anything like 1 million queued
     * onionskins. But that's 5e5 * 1e6 * 1e6, which is still less than
     * UINT64_MAX. */
    return (onionskins_usec_internal[onionskin_type] * n_requests) /
      onionskins_n_processed[onionskin_type];
  }
}

/** Compute the absolute and relative overhead of using the cpuworker
 * framework for onionskins of type <b>onionskin_type</b>. Return 0 on
 * success, and -1 if we have no data to compute the overhead from. */
static int
get_overhead_for_onionskins(uint32_t *usec_out, double *frac_out,
                            uint16_t onionskin_type)
{
  uint64_t overhead;

  *usec_out = 0;
  *frac_out = 0.0;

  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
    return -1;
  if (onionskins_n_processed[onionskin_type] == 0 ||
      onionskins_usec_internal[onionskin_type] == 0 ||
      onionskins_usec_roundtrip[onionskin_type] == 0)
    return -1;

  overhead = onionskins_usec_roundtrip[onionskin_type] -
    onionskins_usec_internal[onionskin_type];

  *usec_out = (uint32_t)(overhead / onionskins_n_processed[onionskin_type]);
  *frac_out = ((double)overhead) / onionskins_usec_internal[onionskin_type];

  return 0;
}

/** If we've measured overhead for onionskins of type <b>onionskin_type</b>,
 * log it. */
void
cpuworker_log_onionskin_overhead(int severity, int onionskin_type,
                                 const char *onionskin_type_name)
{
  uint32_t overhead;
  double relative_overhead;
  int r;

  r = get_overhead_for_onionskins(&overhead, &relative_overhead,
                                  onionskin_type);
  if (!overhead || r<0)
    return;

  log_fn(severity, LD_OR,
         "%s onionskins have averaged %u usec overhead (%.2f%%) in "
         "cpuworker code.",
         onionskin_type_name, (unsigned)overhead, relative_overhead*100);
}

/** Handle a reply from the worker threads. */
static void
cpuworker_onion_handshake_replyfn(void *work_)
{
  cpuworker_job_t *job = work_;
  cpuworker_reply_t rpl;
  or_circuit_t *circ = NULL;

  tor_assert(total_pending_tasks > 0);
  --total_pending_tasks;

  /* Could avoid this, but doesn't matter. */
  memcpy(&rpl, &job->u.reply, sizeof(rpl));

  tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);

  if (rpl.timed && rpl.success &&
      rpl.handshake_type <= MAX_ONION_HANDSHAKE_TYPE) {
    /* Time how long this request took. The handshake_type check should be
       needless, but let's leave it in to be safe. */
    struct timeval tv_end, tv_diff;
    int64_t usec_roundtrip;
    tor_gettimeofday(&tv_end);
    timersub(&tv_end, &rpl.started_at, &tv_diff);
    usec_roundtrip = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
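    /* Only count plausible samples: a negative value means the clock
     * jumped, and anything above MAX_BELIEVABLE_ONIONSKIN_DELAY is
     * discarded rather than trusted. */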
    if (usec_roundtrip >= 0 &&
        usec_roundtrip < MAX_BELIEVABLE_ONIONSKIN_DELAY) {
      ++onionskins_n_processed[rpl.handshake_type];
      onionskins_usec_internal[rpl.handshake_type] += rpl.n_usec;
      onionskins_usec_roundtrip[rpl.handshake_type] += usec_roundtrip;
      if (onionskins_n_processed[rpl.handshake_type] >= 500000) {
        /* Scale down every 500000 handshakes. On a busy server, that's
         * less impressive than it sounds. */
        onionskins_n_processed[rpl.handshake_type] /= 2;
        onionskins_usec_internal[rpl.handshake_type] /= 2;
        onionskins_usec_roundtrip[rpl.handshake_type] /= 2;
      }
    }
  }

  circ = job->circ;

  log_debug(LD_OR,
            "Unpacking cpuworker reply %p, circ=%p, success=%d",
            job, circ, rpl.success);

  if (circ->base_.magic == DEAD_CIRCUIT_MAGIC) {
    /* The circuit was supposed to get freed while the reply was
     * pending. Instead, it got left for us to free so that we wouldn't freak
     * out when the job->circ field wound up pointing to nothing. */
    log_debug(LD_OR, "Circuit died while reply was pending. Freeing memory.");
    circ->base_.magic = 0;
    tor_free(circ);
    goto done_processing;
  }

  circ->workqueue_entry = NULL;

  if (TO_CIRCUIT(circ)->marked_for_close) {
    /* We already marked this circuit; we can't call it open. */
    log_debug(LD_OR,"circuit is already marked.");
    goto done_processing;
  }

  if (rpl.success == 0) {
    log_debug(LD_OR,
              "decoding onionskin failed. "
              "(Old key or bad software.) Closing.");
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
    goto done_processing;
  }

  /* If the client asked for congestion control, and our consensus parameter
   * allowed it to negotiate as enabled, allocate a congestion control obj. */
  if (rpl.circ_params.cc_enabled) {
    if (get_options()->SbwsExit) {
      TO_CIRCUIT(circ)->ccontrol = congestion_control_new(&rpl.circ_params,
                                                          CC_PATH_SBWS);
    } else {
      TO_CIRCUIT(circ)->ccontrol = congestion_control_new(&rpl.circ_params,
                                                          CC_PATH_EXIT);
    }
  }

  if (onionskin_answer(circ,
                       &rpl.created_cell,
                       (const char*)rpl.keys, sizeof(rpl.keys),
                       rpl.rend_auth_material) < 0) {
    log_warn(LD_OR,"onionskin_answer failed. Closing.");
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_INTERNAL);
    goto done_processing;
  }

  log_debug(LD_OR,"onionskin_answer succeeded. Yay.");

 done_processing:
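  /* Scrub the key material out of the reply and the job before freeing. */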
  memwipe(&rpl, 0, sizeof(rpl));
  memwipe(job, 0, sizeof(*job));
  tor_free(job);
  queue_pending_tasks();
}

/** Implementation function for onion handshake requests. */
static workqueue_reply_t
cpuworker_onion_handshake_threadfn(void *state_, void *work_)
{
  worker_state_t *state = state_;
  cpuworker_job_t *job = work_;

  /* variables for onion processing */
  server_onion_keys_t *onion_keys = state->onion_keys;
  cpuworker_request_t req;
  cpuworker_reply_t rpl;

  memcpy(&req, &job->u.request, sizeof(req));

  tor_assert(req.magic == CPUWORKER_REQUEST_MAGIC);
  memset(&rpl, 0, sizeof(rpl));

  const create_cell_t *cc = &req.create_cell;
  created_cell_t *cell_out = &rpl.created_cell;
  struct timeval tv_start = {0,0}, tv_end;
  int n;
  rpl.timed = req.timed;
  rpl.started_at = req.started_at;
  rpl.handshake_type = cc->handshake_type;
  if (req.timed)
    tor_gettimeofday(&tv_start);
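  /* Run the server side of the circuit-extension handshake: decode the
   * client's onionskin using our onion keys, derive the circuit key
   * material, and write the CREATED-cell payload into cell_out. */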
  n = onion_skin_server_handshake(cc->handshake_type,
                                  cc->onionskin, cc->handshake_len,
                                  onion_keys,
                                  &req.circ_ns_params,
                                  cell_out->reply,
                                  sizeof(cell_out->reply),
                                  rpl.keys, CPATH_KEY_MATERIAL_LEN,
                                  rpl.rend_auth_material,
                                  &rpl.circ_params);
  if (n < 0) {
    /* failure */
    log_debug(LD_OR,"onion_skin_server_handshake failed.");
    memset(&rpl, 0, sizeof(rpl));
    rpl.success = 0;
  } else {
    /* success */
    log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
    cell_out->handshake_len = n;
    switch (cc->cell_type) {
    case CELL_CREATE:
      cell_out->cell_type = CELL_CREATED; break;
    case CELL_CREATE2:
      cell_out->cell_type = CELL_CREATED2; break;
    case CELL_CREATE_FAST:
      cell_out->cell_type = CELL_CREATED_FAST; break;
    default:
      tor_assert(0);
      return WQ_RPL_SHUTDOWN;
    }
    rpl.success = 1;
  }

  rpl.magic = CPUWORKER_REPLY_MAGIC;
  if (req.timed) {
    struct timeval tv_diff;
    int64_t usec;
    tor_gettimeofday(&tv_end);
    timersub(&tv_end, &tv_start, &tv_diff);
    usec = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
    if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
      rpl.n_usec = (uint32_t)MAX_BELIEVABLE_ONIONSKIN_DELAY;
    else
      rpl.n_usec = (uint32_t) usec;
  }

  memcpy(&job->u.reply, &rpl, sizeof(rpl));

  memwipe(&req, 0, sizeof(req));
  memwipe(&rpl, 0, sizeof(rpl));
  return WQ_RPL_REPLY;
}

/** Take pending tasks from the queue and assign them to cpuworkers. */
static void
queue_pending_tasks(void)
{
  or_circuit_t *circ;
  create_cell_t *onionskin = NULL;

  while (total_pending_tasks < max_pending_tasks) {
    circ = onion_next_task(&onionskin);

    if (!circ)
      return;

    if (assign_onionskin_to_cpuworker(circ, onionskin) < 0)
      log_info(LD_OR,"assign_to_cpuworker failed. Ignoring.");
  }
}

/** Queue <b>fn</b> to run with argument <b>arg</b> on the cpuworker
 * threadpool at <b>priority</b>, and have <b>reply_fn</b> run in the main
 * thread once it finishes. Return a handle that can be used to cancel the
 * work, or NULL on failure. */
MOCK_IMPL(workqueue_entry_t *,
cpuworker_queue_work,(workqueue_priority_t priority,
                      workqueue_reply_t (*fn)(void *, void *),
                      void (*reply_fn)(void *),
                      void *arg))
{
  tor_assert(threadpool);

  return threadpool_queue_work_priority(threadpool,
                                        priority,
                                        fn,
                                        reply_fn,
                                        arg);
}
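
/* Illustrative (hypothetical) caller: myjob_threadfn and myjob_replyfn are
 * not part of this file; they only show the expected shape of the two
 * callbacks.
 *
 *   static workqueue_reply_t
 *   myjob_threadfn(void *state, void *arg)
 *   {
 *     // Runs in a worker thread; do the CPU-heavy work on arg here.
 *     return WQ_RPL_REPLY;
 *   }
 *
 *   static void
 *   myjob_replyfn(void *arg)
 *   {
 *     // Runs back in the main thread once the job finishes.
 *     tor_free(arg);
 *   }
 *
 *   cpuworker_queue_work(WQ_PRI_LOW, myjob_threadfn, myjob_replyfn, arg);
 */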

/** Try to tell a cpuworker to perform the public key operations necessary to
 * respond to <b>onionskin</b> for the circuit <b>circ</b>.
 *
 * Return 0 if we successfully assign the task, or -1 on failure.
 */
int
assign_onionskin_to_cpuworker(or_circuit_t *circ,
                              create_cell_t *onionskin)
{
  workqueue_entry_t *queue_entry;
  cpuworker_job_t *job;
  cpuworker_request_t req;
  int should_time;

  tor_assert(threadpool);

  if (!circ->p_chan) {
    log_info(LD_OR,"circ->p_chan gone. Failing circ.");
    tor_free(onionskin);
    return -1;
  }

  if (total_pending_tasks >= max_pending_tasks) {
    log_debug(LD_OR,"No idle cpuworkers. Queuing.");
    if (onion_pending_add(circ, onionskin) < 0) {
      tor_free(onionskin);
      return -1;
    }
    return 0;
  }

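  /* Record this handshake in our statistics, but leave out handshakes that
   * arrive over client connections. */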
  if (!channel_is_client(circ->p_chan))
    rep_hist_note_circuit_handshake_assigned(onionskin->handshake_type);

  should_time = should_time_request(onionskin->handshake_type);
  memset(&req, 0, sizeof(req));
  req.magic = CPUWORKER_REQUEST_MAGIC;
  req.timed = should_time;

  memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));

  tor_free(onionskin);

  if (should_time)
    tor_gettimeofday(&req.started_at);

  /* Copy the current cached consensus params relevant to
   * circuit negotiation into the CPU worker context */
  req.circ_ns_params.cc_enabled = congestion_control_enabled();
  req.circ_ns_params.sendme_inc_cells = congestion_control_sendme_inc();

  job = tor_malloc_zero(sizeof(cpuworker_job_t));
  job->circ = circ;
  memcpy(&job->u.request, &req, sizeof(req));
  memwipe(&req, 0, sizeof(req));

  ++total_pending_tasks;
  queue_entry = threadpool_queue_work_priority(threadpool,
                                               WQ_PRI_HIGH,
                                               cpuworker_onion_handshake_threadfn,
                                               cpuworker_onion_handshake_replyfn,
                                               job);
  if (!queue_entry) {
    log_warn(LD_BUG, "Couldn't queue work on threadpool");
    tor_free(job);
    return -1;
  }

  log_debug(LD_OR, "Queued task %p (qe=%p, circ=%p)",
            job, queue_entry, job->circ);

  circ->workqueue_entry = queue_entry;

  return 0;
}

/** If <b>circ</b> has a pending handshake that hasn't been processed yet,
 * remove it from the worker queue. */
void
cpuworker_cancel_circ_handshake(or_circuit_t *circ)
{
  cpuworker_job_t *job;
  if (circ->workqueue_entry == NULL)
    return;

  job = workqueue_entry_cancel(circ->workqueue_entry);
  if (job) {
    /* It successfully cancelled. */
    memwipe(job, 0xe0, sizeof(*job));
    tor_free(job);
    tor_assert(total_pending_tasks > 0);
    --total_pending_tasks;
    /* if (!job), this is done in cpuworker_onion_handshake_replyfn. */
    circ->workqueue_entry = NULL;
  }
}