Tor 0.4.9.2-alpha-dev
cpuworker.c
Go to the documentation of this file.
1/* Copyright (c) 2003-2004, Roger Dingledine.
2 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
3 * Copyright (c) 2007-2024, The Tor Project, Inc. */
4/* See LICENSE for licensing information */
5
6/**
7 * \file cpuworker.c
8 * \brief Uses the workqueue/threadpool code to farm CPU-intensive activities
9 * out to subprocesses.
10 *
11 * The multithreading backend for this module is in workqueue.c; this module
12 * specializes workqueue.c.
13 *
14 * Right now, we use this infrastructure
15 * <ul><li>for processing onionskins in onion.c
16 * <li>for compressing consensuses in consdiffmgr.c,
17 * <li>for calculating diffs and compressing them in consdiffmgr.c.
18 * <li>and for solving onion service PoW challenges in pow.c.
19 * </ul>
20 **/
21#include "core/or/or.h"
22#include "core/or/channel.h"
23#include "core/or/circuitlist.h"
27#include "app/config/config.h"
31#include "core/or/onion.h"
39
40#include "core/or/or_circuit_st.h"
41
42static void queue_pending_tasks(void);
43
44typedef struct worker_state_t {
45 int generation;
46 server_onion_keys_t *onion_keys;
48
49static void *
50worker_state_new(void *arg)
51{
53 (void)arg;
54 ws = tor_malloc_zero(sizeof(worker_state_t));
55 ws->onion_keys = server_onion_keys_new();
56 return ws;
57}
58
59#define worker_state_free(ws) \
60 FREE_AND_NULL(worker_state_t, worker_state_free_, (ws))
61
62static void
63worker_state_free_(worker_state_t *ws)
64{
65 if (!ws)
66 return;
67 server_onion_keys_free(ws->onion_keys);
68 tor_free(ws);
69}
70
/** void* adapter around worker_state_free_(), matching the destructor
 * signature the threadpool expects. */
static void
worker_state_free_void(void *state)
{
  worker_state_free_(state);
}
76
/** The threadpool we farm CPU-heavy work out to; NULL until
 * cpuworker_init() has run. */
static threadpool_t *threadpool = NULL;

/** Number of tasks currently handed to the threadpool and not yet
 * completed or cancelled. */
static uint32_t total_pending_tasks = 0;
/** Ceiling on total_pending_tasks; once reached, new onionskins are parked
 * in the onion queue instead of being dispatched. Recomputed from the
 * consensus by set_max_pending_tasks(). */
static uint32_t max_pending_tasks = 128;
81
82/** Return the consensus parameter max pending tasks per CPU. */
83static uint32_t
85{
86/* Total voodoo. Can we make this more sensible? Maybe, that is why we made it
87 * a consensus parameter so our future self can figure out this magic. */
88#define MAX_PENDING_TASKS_PER_CPU_DEFAULT 64
89#define MAX_PENDING_TASKS_PER_CPU_MIN 1
90#define MAX_PENDING_TASKS_PER_CPU_MAX INT32_MAX
91
92 return networkstatus_get_param(ns, "max_pending_tasks_per_cpu",
93 MAX_PENDING_TASKS_PER_CPU_DEFAULT,
94 MAX_PENDING_TASKS_PER_CPU_MIN,
95 MAX_PENDING_TASKS_PER_CPU_MAX);
96}
97
98/** Set the max pending tasks per CPU worker. This uses the consensus to check
99 * for the allowed number per CPU. The ns parameter can be NULL as in that no
100 * consensus is available at the time of setting this value. */
101static void
103{
104 max_pending_tasks =
106}
107
108/** Called when the consensus has changed. */
109void
111{
112 tor_assert(ns);
114}
115
116/** Initialize the cpuworker subsystem. */
117int
119{
120 /*
121 In our threadpool implementation, half the threads are permissive and
122 half are strict (when it comes to running lower-priority tasks). So we
123 always make sure we have at least two threads, so that there will be at
124 least one thread of each kind.
125 */
126 const int n_threads = MAX(get_num_cpus(get_options()), 2);
127 threadpool = threadpool_new(n_threads,
129 worker_state_new,
130 worker_state_free_void,
131 NULL);
132
133 if (!threadpool) {
134 log_err(LD_GENERAL, "Can't create worker thread pool");
135 return -1;
136 }
137
138 int r = threadpool_register_reply_event(threadpool, NULL);
139
140 tor_assert(r == 0);
141
143
144 return 0;
145}
146
147/** Free all resources allocated by cpuworker. */
148void
150{
151 threadpool_free(threadpool);
152}
153
154/** Return the number of threads configured for our CPU worker. */
155unsigned int
157{
158 if (!threadpool) {
159 return 0;
160 }
161 return threadpool_get_n_threads(threadpool);
162}
163
164/** Magic numbers to make sure our cpuworker_requests don't grow any
165 * mis-framing bugs. */
166#define CPUWORKER_REQUEST_MAGIC 0xda4afeed
167#define CPUWORKER_REPLY_MAGIC 0x5eedf00d
168
169/** A request sent to a cpuworker. */
170typedef struct cpuworker_request_t {
171 /** Magic number; must be CPUWORKER_REQUEST_MAGIC. */
172 uint32_t magic;
173
174 /** Flag: Are we timing this request? */
175 unsigned timed : 1;
176 /** If we're timing this request, when was it sent to the cpuworker? */
178
179 /** A create cell for the cpuworker to process. */
181
182 /**
183 * A copy of this relay's consensus params that are relevant to
184 * the circuit, for use in negotiation. */
186
187 /* Turn the above into a tagged union if needed. */
189
190/** A reply sent by a cpuworker. */
191typedef struct cpuworker_reply_t {
192 /** Magic number; must be CPUWORKER_REPLY_MAGIC. */
193 uint32_t magic;
194
195 /** True iff we got a successful request. */
196 uint8_t success;
197
198 /** Are we timing this request? */
199 unsigned int timed : 1;
200 /** What handshake type was the request? (Used for timing) */
202 /** When did we send the request to the cpuworker? */
204 /** Once the cpuworker received the request, how many microseconds did it
205 * take? (This shouldn't overflow; 4 billion micoseconds is over an hour,
206 * and we'll never have an onion handshake that takes so long.) */
207 uint32_t n_usec;
208
209 /** Output of processing a create cell
210 *
211 * @{
212 */
213 /** The created cell to send back. */
215 /** The keys to use on this circuit. */
216 uint8_t keys[CPATH_KEY_MATERIAL_LEN];
217 /** Input to use for authenticating introduce1 cells. */
219 /** Negotiated circuit parameters. */
222
223typedef struct cpuworker_job_u_t {
224 or_circuit_t *circ;
225 union {
226 cpuworker_request_t request;
227 cpuworker_reply_t reply;
228 } u;
230
232update_state_threadfn(void *state_, void *work_)
233{
234 worker_state_t *state = state_;
235 worker_state_t *update = work_;
236 server_onion_keys_free(state->onion_keys);
237 state->onion_keys = update->onion_keys;
238 update->onion_keys = NULL;
239 worker_state_free(update);
240 ++state->generation;
241 return WQ_RPL_REPLY;
242}
243
244/** Called when the onion key has changed so update all CPU worker(s) with
245 * new function pointers with which a new state will be generated.
246 */
247void
249{
250 if (!threadpool) {
251 /* If we're a client, then we won't have cpuworkers, and we won't need
252 * to tell them to rotate their state.
253 */
254 return;
255 }
256 if (threadpool_queue_update(threadpool,
257 worker_state_new,
258 update_state_threadfn,
259 worker_state_free_void,
260 NULL)) {
261 log_warn(LD_OR, "Failed to queue key update for worker threads.");
262 }
263}
264
/* Timing statistics for onionskin handshakes. Only requests selected by
 * should_time_request() are counted; the replyfn periodically halves all
 * three counters so the running averages track recent behavior. */

/** Indexed by handshake type: how many onionskins have we processed and
 * counted of that type? */
static uint64_t onionskins_n_processed[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to the onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent in cpuworkers
 * processing that kind of onionskin? */
static uint64_t onionskins_usec_internal[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent waiting for
 * cpuworkers to give us answers for that kind of onionskin?
 */
static uint64_t onionskins_usec_roundtrip[MAX_ONION_HANDSHAKE_TYPE+1];

/** If any onionskin takes longer than this, we clip them to this
 * time. (microseconds) */
#define MAX_BELIEVABLE_ONIONSKIN_DELAY (2*1000*1000)
281
282/** Return true iff we'd like to measure a handshake of type
283 * <b>onionskin_type</b>. Call only from the main thread. */
284static int
285should_time_request(uint16_t onionskin_type)
286{
287 /* If we've never heard of this type, we shouldn't even be here. */
288 if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE)
289 return 0;
290 /* Measure the first N handshakes of each type, to ensure we have a
291 * sample */
292 if (onionskins_n_processed[onionskin_type] < 4096)
293 return 1;
294
295 /** Otherwise, measure with P=1/128. We avoid doing this for every
296 * handshake, since the measurement itself can take a little time. */
298}
299
300/** Return an estimate of how many microseconds we will need for a single
301 * cpuworker to process <b>n_requests</b> onionskins of type
302 * <b>onionskin_type</b>. */
303uint64_t
304estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
305{
306 if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
307 return 1000 * (uint64_t)n_requests;
308 if (PREDICT_UNLIKELY(onionskins_n_processed[onionskin_type] < 100)) {
309 /* Until we have 100 data points, just assume everything takes 1 msec. */
310 return 1000 * (uint64_t)n_requests;
311 } else {
312 /* This can't overflow: we'll never have more than 500000 onionskins
313 * measured in onionskin_usec_internal, and they won't take anything near
314 * 1 sec each, and we won't have anything like 1 million queued
315 * onionskins. But that's 5e5 * 1e6 * 1e6, which is still less than
316 * UINT64_MAX. */
317 return (onionskins_usec_internal[onionskin_type] * n_requests) /
318 onionskins_n_processed[onionskin_type];
319 }
320}
321
322/** Compute the absolute and relative overhead of using the cpuworker
323 * framework for onionskins of type <b>onionskin_type</b>.*/
324static int
325get_overhead_for_onionskins(uint32_t *usec_out, double *frac_out,
326 uint16_t onionskin_type)
327{
328 uint64_t overhead;
329
330 *usec_out = 0;
331 *frac_out = 0.0;
332
333 if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
334 return -1;
335 if (onionskins_n_processed[onionskin_type] == 0 ||
336 onionskins_usec_internal[onionskin_type] == 0 ||
337 onionskins_usec_roundtrip[onionskin_type] == 0)
338 return -1;
339
340 overhead = onionskins_usec_roundtrip[onionskin_type] -
341 onionskins_usec_internal[onionskin_type];
342
343 *usec_out = (uint32_t)(overhead / onionskins_n_processed[onionskin_type]);
344 *frac_out = ((double)overhead) / onionskins_usec_internal[onionskin_type];
345
346 return 0;
347}
348
349/** If we've measured overhead for onionskins of type <b>onionskin_type</b>,
350 * log it. */
351void
352cpuworker_log_onionskin_overhead(int severity, int onionskin_type,
353 const char *onionskin_type_name)
354{
355 uint32_t overhead;
356 double relative_overhead;
357 int r;
358
359 r = get_overhead_for_onionskins(&overhead, &relative_overhead,
360 onionskin_type);
361 if (!overhead || r<0)
362 return;
363
364 log_fn(severity, LD_OR,
365 "%s onionskins have averaged %u usec overhead (%.2f%%) in "
366 "cpuworker code ",
367 onionskin_type_name, (unsigned)overhead, relative_overhead*100);
368}
369
370/** Handle a reply from the worker threads. */
371static void
373{
374 cpuworker_job_t *job = work_;
376 or_circuit_t *circ = NULL;
377
378 tor_assert(total_pending_tasks > 0);
379 --total_pending_tasks;
380
381 /* Could avoid this, but doesn't matter. */
382 memcpy(&rpl, &job->u.reply, sizeof(rpl));
383
384 tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);
385
386 if (rpl.timed && rpl.success &&
387 rpl.handshake_type <= MAX_ONION_HANDSHAKE_TYPE) {
388 /* Time how long this request took. The handshake_type check should be
389 needless, but let's leave it in to be safe. */
390 struct timeval tv_end, tv_diff;
391 int64_t usec_roundtrip;
392 tor_gettimeofday(&tv_end);
393 timersub(&tv_end, &rpl.started_at, &tv_diff);
394 usec_roundtrip = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
395 if (usec_roundtrip >= 0 &&
396 usec_roundtrip < MAX_BELIEVABLE_ONIONSKIN_DELAY) {
399 onionskins_usec_roundtrip[rpl.handshake_type] += usec_roundtrip;
400 if (onionskins_n_processed[rpl.handshake_type] >= 500000) {
401 /* Scale down every 500000 handshakes. On a busy server, that's
402 * less impressive than it sounds. */
406 }
407 }
408 }
409
410 circ = job->circ;
411
412 log_debug(LD_OR,
413 "Unpacking cpuworker reply %p, circ=%p, success=%d",
414 job, circ, rpl.success);
415
416 if (circ->base_.magic == DEAD_CIRCUIT_MAGIC) {
417 /* The circuit was supposed to get freed while the reply was
418 * pending. Instead, it got left for us to free so that we wouldn't freak
419 * out when the job->circ field wound up pointing to nothing. */
420 log_debug(LD_OR, "Circuit died while reply was pending. Freeing memory.");
421 circ->base_.magic = 0;
422 tor_free(circ);
423 goto done_processing;
424 }
425
426 circ->workqueue_entry = NULL;
427
428 if (TO_CIRCUIT(circ)->marked_for_close) {
429 /* We already marked this circuit; we can't call it open. */
430 log_debug(LD_OR,"circuit is already marked.");
431 goto done_processing;
432 }
433
434 if (rpl.success == 0) {
435 log_debug(LD_OR,
436 "decoding onionskin failed. "
437 "(Old key or bad software.) Closing.");
438 circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
439 goto done_processing;
440 }
441
442 /* If the client asked for congestion control, if our consensus parameter
443 * allowed it to negotiate as enabled, allocate a congestion control obj. */
444 if (rpl.circ_params.cc_enabled) {
445 if (get_options()->SbwsExit) {
446 TO_CIRCUIT(circ)->ccontrol = congestion_control_new(&rpl.circ_params,
447 CC_PATH_SBWS);
448 } else {
449 TO_CIRCUIT(circ)->ccontrol = congestion_control_new(&rpl.circ_params,
450 CC_PATH_EXIT);
451 }
452 }
453
454 if (onionskin_answer(circ,
455 &rpl.created_cell,
456 (const char*)rpl.keys, sizeof(rpl.keys),
457 rpl.rend_auth_material) < 0) {
458 log_warn(LD_OR,"onionskin_answer failed. Closing.");
459 circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_INTERNAL);
460 goto done_processing;
461 }
462
463 log_debug(LD_OR,"onionskin_answer succeeded. Yay.");
464
465 done_processing:
466 memwipe(&rpl, 0, sizeof(rpl));
467 memwipe(job, 0, sizeof(*job));
468 tor_free(job);
470}
471
472/** Implementation function for onion handshake requests. */
474cpuworker_onion_handshake_threadfn(void *state_, void *work_)
475{
476 worker_state_t *state = state_;
477 cpuworker_job_t *job = work_;
478
479 /* variables for onion processing */
480 server_onion_keys_t *onion_keys = state->onion_keys;
483
484 memcpy(&req, &job->u.request, sizeof(req));
485
487 memset(&rpl, 0, sizeof(rpl));
488
489 const create_cell_t *cc = &req.create_cell;
490 created_cell_t *cell_out = &rpl.created_cell;
491 struct timeval tv_start = {0,0}, tv_end;
492 int n;
493 rpl.timed = req.timed;
494 rpl.started_at = req.started_at;
496 if (req.timed)
497 tor_gettimeofday(&tv_start);
499 cc->onionskin, cc->handshake_len,
500 onion_keys,
501 &req.circ_ns_params,
502 cell_out->reply,
503 sizeof(cell_out->reply),
504 rpl.keys, CPATH_KEY_MATERIAL_LEN,
506 &rpl.circ_params);
507 if (n < 0) {
508 /* failure */
509 log_debug(LD_OR,"onion_skin_server_handshake failed.");
510 memset(&rpl, 0, sizeof(rpl));
511 rpl.success = 0;
512 } else {
513 /* success */
514 log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
515 cell_out->handshake_len = n;
516 switch (cc->cell_type) {
517 case CELL_CREATE:
518 cell_out->cell_type = CELL_CREATED; break;
519 case CELL_CREATE2:
520 cell_out->cell_type = CELL_CREATED2; break;
521 case CELL_CREATE_FAST:
522 cell_out->cell_type = CELL_CREATED_FAST; break;
523 default:
524 tor_assert(0);
525 return WQ_RPL_SHUTDOWN;
526 }
527 rpl.success = 1;
528 }
529
530 rpl.magic = CPUWORKER_REPLY_MAGIC;
531 if (req.timed) {
532 struct timeval tv_diff;
533 int64_t usec;
534 tor_gettimeofday(&tv_end);
535 timersub(&tv_end, &tv_start, &tv_diff);
536 usec = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
537 if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
539 else
540 rpl.n_usec = (uint32_t) usec;
541 }
542
543 memcpy(&job->u.reply, &rpl, sizeof(rpl));
544
545 memwipe(&req, 0, sizeof(req));
546 memwipe(&rpl, 0, sizeof(req));
547 return WQ_RPL_REPLY;
548}
549
550/** Take pending tasks from the queue and assign them to cpuworkers. */
551static void
553{
554 or_circuit_t *circ;
555 create_cell_t *onionskin = NULL;
556
557 while (total_pending_tasks < max_pending_tasks) {
558 circ = onion_next_task(&onionskin);
559
560 if (!circ)
561 return;
562
563 if (assign_onionskin_to_cpuworker(circ, onionskin) < 0)
564 log_info(LD_OR,"assign_to_cpuworker failed. Ignoring.");
565 }
566}
567
568/** DOCDOC */
571 workqueue_reply_t (*fn)(void *, void *),
572 void (*reply_fn)(void *),
573 void *arg))
574{
575 tor_assert(threadpool);
576
577 return threadpool_queue_work_priority(threadpool,
578 priority,
579 fn,
580 reply_fn,
581 arg);
582}
583
584/** Try to tell a cpuworker to perform the public key operations necessary to
585 * respond to <b>onionskin</b> for the circuit <b>circ</b>.
586 *
587 * Return 0 if we successfully assign the task, or -1 on failure.
588 */
589int
591 create_cell_t *onionskin)
592{
593 workqueue_entry_t *queue_entry;
594 cpuworker_job_t *job;
596 int should_time;
597
598 tor_assert(threadpool);
599
600 if (!circ->p_chan) {
601 log_info(LD_OR,"circ->p_chan gone. Failing circ.");
602 tor_free(onionskin);
603 return -1;
604 }
605
606 if (total_pending_tasks >= max_pending_tasks) {
607 log_debug(LD_OR,"No idle cpuworkers. Queuing.");
608 if (onion_pending_add(circ, onionskin) < 0) {
609 tor_free(onionskin);
610 return -1;
611 }
612 return 0;
613 }
614
615 if (!channel_is_client(circ->p_chan))
617
618 should_time = should_time_request(onionskin->handshake_type);
619 memset(&req, 0, sizeof(req));
621 req.timed = should_time;
622
623 memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));
624
625 tor_free(onionskin);
626
627 if (should_time)
629
630 /* Copy the current cached consensus params relevant to
631 * circuit negotiation into the CPU worker context */
634
635 job = tor_malloc_zero(sizeof(cpuworker_job_t));
636 job->circ = circ;
637 memcpy(&job->u.request, &req, sizeof(req));
638 memwipe(&req, 0, sizeof(req));
639
640 ++total_pending_tasks;
641 queue_entry = threadpool_queue_work_priority(threadpool,
642 WQ_PRI_HIGH,
645 job);
646 if (!queue_entry) {
647 log_warn(LD_BUG, "Couldn't queue work on threadpool");
648 tor_free(job);
649 return -1;
650 }
651
652 log_debug(LD_OR, "Queued task %p (qe=%p, circ=%p)",
653 job, queue_entry, job->circ);
654
655 circ->workqueue_entry = queue_entry;
656
657 return 0;
658}
659
660/** If <b>circ</b> has a pending handshake that hasn't been processed yet,
661 * remove it from the worker queue. */
662void
664{
665 cpuworker_job_t *job;
666 if (circ->workqueue_entry == NULL)
667 return;
668
670 if (job) {
671 /* It successfully cancelled. */
672 memwipe(job, 0xe0, sizeof(*job));
673 tor_free(job);
674 tor_assert(total_pending_tasks > 0);
675 --total_pending_tasks;
676 /* if (!job), this is done in cpuworker_onion_handshake_replyfn. */
677 circ->workqueue_entry = NULL;
678 }
679}
int channel_is_client(const channel_t *chan)
Definition: channel.c:2918
Header file for channel.c.
#define DEAD_CIRCUIT_MAGIC
Definition: circuit_st.h:37
int onionskin_answer(struct or_circuit_t *circ, const created_cell_t *created_cell, const char *keys, size_t keys_len, const uint8_t *rend_circ_nonce)
Header for feature/relay/circuitbuild_relay.c.
Header file for circuitlist.c.
#define MAX(a, b)
Definition: cmp.h:22
int get_num_cpus(const or_options_t *options)
Definition: config.c:7112
const or_options_t * get_options(void)
Definition: config.c:947
Header file for config.c.
congestion_control_t * congestion_control_new(const circuit_params_t *params, cc_path_t path)
bool congestion_control_enabled(void)
Public APIs for congestion control.
static uint8_t congestion_control_sendme_inc(void)
APIs for stream flow control on congestion controlled circuits.
Header file for connection_or.c.
#define CPUWORKER_REQUEST_MAGIC
Definition: cpuworker.c:166
static void queue_pending_tasks(void)
Definition: cpuworker.c:552
void cpuworker_cancel_circ_handshake(or_circuit_t *circ)
Definition: cpuworker.c:663
#define MAX_BELIEVABLE_ONIONSKIN_DELAY
Definition: cpuworker.c:280
static uint64_t onionskins_usec_roundtrip[MAX_ONION_HANDSHAKE_TYPE+1]
Definition: cpuworker.c:276
static uint32_t get_max_pending_tasks_per_cpu(const networkstatus_t *ns)
Definition: cpuworker.c:84
int assign_onionskin_to_cpuworker(or_circuit_t *circ, create_cell_t *onionskin)
Definition: cpuworker.c:590
static void set_max_pending_tasks(const networkstatus_t *ns)
Definition: cpuworker.c:102
static uint64_t onionskins_n_processed[MAX_ONION_HANDSHAKE_TYPE+1]
Definition: cpuworker.c:267
workqueue_entry_t * cpuworker_queue_work(workqueue_priority_t priority, workqueue_reply_t(*fn)(void *, void *), void(*reply_fn)(void *), void *arg)
Definition: cpuworker.c:573
void cpuworker_free_all(void)
Definition: cpuworker.c:149
static int get_overhead_for_onionskins(uint32_t *usec_out, double *frac_out, uint16_t onionskin_type)
Definition: cpuworker.c:325
int cpuworker_init(void)
Definition: cpuworker.c:118
unsigned int cpuworker_get_n_threads(void)
Definition: cpuworker.c:156
void cpuworker_consensus_has_changed(const networkstatus_t *ns)
Definition: cpuworker.c:110
void cpuworker_log_onionskin_overhead(int severity, int onionskin_type, const char *onionskin_type_name)
Definition: cpuworker.c:352
static workqueue_reply_t cpuworker_onion_handshake_threadfn(void *state_, void *work_)
Definition: cpuworker.c:474
void cpuworkers_rotate_keyinfo(void)
Definition: cpuworker.c:248
static uint64_t onionskins_usec_internal[MAX_ONION_HANDSHAKE_TYPE+1]
Definition: cpuworker.c:271
static void cpuworker_onion_handshake_replyfn(void *work_)
Definition: cpuworker.c:372
uint64_t estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
Definition: cpuworker.c:304
static int should_time_request(uint16_t onionskin_type)
Definition: cpuworker.c:285
Header file for cpuworker.c.
Common functions for using (pseudo-)random number generators.
#define crypto_fast_rng_one_in_n(rng, n)
Definition: crypto_rand.h:80
crypto_fast_rng_t * get_thread_fast_rng(void)
void memwipe(void *mem, uint8_t byte, size_t sz)
Definition: crypto_util.c:55
Common functions for cryptographic routines.
#define DIGEST_LEN
Definition: digest_sizes.h:20
#define log_fn(severity, domain, args,...)
Definition: log.h:283
#define LD_OR
Definition: log.h:92
#define LD_BUG
Definition: log.h:86
#define LD_GENERAL
Definition: log.h:62
#define tor_free(p)
Definition: malloc.h:56
int32_t networkstatus_get_param(const networkstatus_t *ns, const char *param_name, int32_t default_val, int32_t min_val, int32_t max_val)
Header file for networkstatus.c.
Header file for onion.c.
int onion_skin_server_handshake(int type, const uint8_t *onion_skin, size_t onionskin_len, const server_onion_keys_t *keys, const circuit_params_t *our_ns_params, uint8_t *reply_out, size_t reply_out_maxlen, uint8_t *keys_out, size_t keys_out_len, uint8_t *rend_nonce_out, circuit_params_t *params_out)
Definition: onion_crypto.c:262
server_onion_keys_t * server_onion_keys_new(void)
Definition: onion_crypto.c:64
Header file for onion_crypto.c.
int onion_pending_add(or_circuit_t *circ, create_cell_t *onionskin)
Definition: onion_queue.c:177
or_circuit_t * onion_next_task(create_cell_t **onionskin_out)
Definition: onion_queue.c:265
Header file for onion_queue.c.
Master header file for Tor-specific functionality.
#define TO_CIRCUIT(x)
Definition: or.h:850
void rep_hist_note_circuit_handshake_assigned(uint16_t type)
Definition: rephist.c:2384
Header file for rephist.c.
Header file for router.c.
uint8_t sendme_inc_cells
Definition: onion_crypto.h:36
uint32_t magic
Definition: circuit_st.h:63
struct timeval started_at
Definition: cpuworker.c:203
created_cell_t created_cell
Definition: cpuworker.c:214
unsigned int timed
Definition: cpuworker.c:199
uint32_t magic
Definition: cpuworker.c:193
uint8_t keys[CPATH_KEY_MATERIAL_LEN]
Definition: cpuworker.c:216
uint8_t rend_auth_material[DIGEST_LEN]
Definition: cpuworker.c:218
uint16_t handshake_type
Definition: cpuworker.c:201
circuit_params_t circ_params
Definition: cpuworker.c:220
uint32_t n_usec
Definition: cpuworker.c:207
struct timeval started_at
Definition: cpuworker.c:177
create_cell_t create_cell
Definition: cpuworker.c:180
circuit_params_t circ_ns_params
Definition: cpuworker.c:185
uint16_t handshake_len
Definition: onion.h:30
uint16_t handshake_type
Definition: onion.h:28
uint8_t onionskin[CELL_PAYLOAD_SIZE - 4]
Definition: onion.h:32
uint8_t cell_type
Definition: onion.h:26
uint16_t handshake_len
Definition: onion.h:40
uint8_t reply[CELL_PAYLOAD_SIZE - 2]
Definition: onion.h:42
uint8_t cell_type
Definition: onion.h:38
channel_t * p_chan
Definition: or_circuit_st.h:37
struct workqueue_entry_t * workqueue_entry
Definition: or_circuit_st.h:30
Definition: workqueue.c:105
#define MOCK_IMPL(rv, funcname, arglist)
Definition: testsupport.h:133
#define timersub(tv1, tv2, tvout)
Definition: timeval.h:61
void tor_gettimeofday(struct timeval *timeval)
#define tor_assert(expr)
Definition: util_bug.h:103
void * workqueue_entry_cancel(workqueue_entry_t *ent)
Definition: workqueue.c:207
workqueue_entry_t * threadpool_queue_work_priority(threadpool_t *pool, workqueue_priority_t prio, workqueue_reply_t(*fn)(void *, void *), void(*reply_fn)(void *), void *arg)
Definition: workqueue.c:446
replyqueue_t * replyqueue_new(uint32_t alertsocks_flags)
Definition: workqueue.c:771
int threadpool_register_reply_event(threadpool_t *tp, void(*cb)(threadpool_t *tp))
Definition: workqueue.c:828
int threadpool_queue_update(threadpool_t *pool, void *(*dup_fn)(void *), workqueue_reply_t(*fn)(void *, void *), void(*free_fn)(void *), void *arg)
Definition: workqueue.c:497
threadpool_t * threadpool_new(int n_threads, replyqueue_t *replyqueue, void *(*new_thread_state_fn)(void *), void(*free_thread_state_fn)(void *), void *arg)
Definition: workqueue.c:670
unsigned int threadpool_get_n_threads(threadpool_t *tp)
Definition: workqueue.c:883
Header for workqueue.c.
workqueue_reply_t
Definition: workqueue.h:24
@ WQ_RPL_SHUTDOWN
Definition: workqueue.h:27
workqueue_priority_t
Definition: workqueue.h:31