Tor 0.4.9.2-alpha-dev
All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
workqueue.c
Go to the documentation of this file.
1
2/* Copyright (c) 2013-2024, The Tor Project, Inc. */
3/* See LICENSE for licensing information */
4
5/**
6 * \file workqueue.c
7 *
8 * \brief Implements worker threads, queues of work for them, and mechanisms
9 * for them to send answers back to the main thread.
10 *
11 * The main structure here is a threadpool_t : it manages a set of worker
12 * threads, a queue of pending work, and a reply queue. Every piece of work
13 * is a workqueue_entry_t, containing data to process and a function to
14 * process it with.
15 *
16 * The main thread informs the worker threads of pending work by using a
17 * condition variable. The workers inform the main process of completed work
18 * by using an alert_sockets_t object, as implemented in net/alertsock.c.
19 *
20 * The main thread can also queue an "update" that will be handled by all the
21 * workers. This is useful for updating state that all the workers share.
22 *
23 * In Tor today, there is currently only one thread pool, managed
24 * in cpuworker.c and handling a variety of types of work, from the original
25 * "onion skin" circuit handshakes, to consensus diff computation, to
26 * client-side onion service PoW generation.
27 */
28
29#include "orconfig.h"
32
34#include "lib/intmath/weakrng.h"
35#include "lib/log/ratelim.h"
36#include "lib/log/log.h"
37#include "lib/log/util_bug.h"
38#include "lib/net/alertsock.h"
39#include "lib/net/socket.h"
40#include "lib/thread/threads.h"
41
42#include "ext/tor_queue.h"
43#include <event2/event.h>
44#include <string.h>
45
46#define WORKQUEUE_PRIORITY_FIRST WQ_PRI_HIGH
47#define WORKQUEUE_PRIORITY_LAST WQ_PRI_LOW
48#define WORKQUEUE_N_PRIORITIES (((int) WORKQUEUE_PRIORITY_LAST)+1)
49
50TOR_TAILQ_HEAD(work_tailq_t, workqueue_entry_t);
51typedef struct work_tailq_t work_tailq_t;
52
54 /** An array of pointers to workerthread_t: one for each running worker
55 * thread. */
57
58 /** Condition variable that we wait on when we have no work, and which
59 * gets signaled when our queue becomes nonempty. */
61 /** Queues of pending work that we have to do. The queue with priority
62 * <b>p</b> is work[p]. */
63 work_tailq_t work[WORKQUEUE_N_PRIORITIES];
64
65 /** The current 'update generation' of the threadpool. Any thread that is
66 * at an earlier generation needs to run the update function. */
67 unsigned generation;
68
69 /** Function that should be run for updates on each thread. */
70 workqueue_reply_t (*update_fn)(void *, void *);
71 /** Function to free update arguments if they can't be run. */
72 void (*free_update_arg_fn)(void *);
73 /** Array of n_threads update arguments. */
75 /** Event to notice when another thread has sent a reply. */
76 struct event *reply_event;
77 void (*reply_cb)(threadpool_t *);
78
79 /** Number of elements in threads. */
81 /** Number of elements to be created in threads. */
83 /** Mutex to protect all the above fields. */
85
86 /** A reply queue to use when constructing new threads. */
88
89 /** Functions used to allocate and free thread state. */
90 void *(*new_thread_state_fn)(void*);
91 void (*free_thread_state_fn)(void*);
92 void *new_thread_state_arg;
93
94 /** Used for signalling the worker threads to exit. */
95 int exit;
96 /** Mutex for controlling worker threads' startup and exit. */
98};
99
100/** Used to put a workqueue_priority_t value into a bitfield. */
101#define workqueue_priority_bitfield_t ENUM_BF(workqueue_priority_t)
102/** Number of bits needed to hold all legal values of workqueue_priority_t */
103#define WORKQUEUE_PRIORITY_BITS 2
104
106 /** The next workqueue_entry_t that's pending on the same thread or
107 * reply queue. */
108 TOR_TAILQ_ENTRY(workqueue_entry_t) next_work;
109 /** The threadpool to which this workqueue_entry_t was assigned. This field
110 * is set when the workqueue_entry_t is created, and won't be cleared until
111 * after it's handled in the main thread. */
112 struct threadpool_t *on_pool;
113 /** True iff this entry is waiting for a worker to start processing it. */
114 uint8_t pending;
115 /** Priority of this entry. */
117 /** Function to run in the worker thread. */
118 workqueue_reply_t (*fn)(void *state, void *arg);
119 /** Function to run while processing the reply queue. */
120 void (*reply_fn)(void *arg);
121 /** Argument for the above functions. */
122 void *arg;
123};
124
126 /** Mutex to protect the answers field */
128 /** Doubly-linked list of answers that the reply queue needs to handle. */
129 TOR_TAILQ_HEAD(, workqueue_entry_t) answers;
130
131 /** Mechanism to wake up the main thread when it is receiving answers. */
132 alert_sockets_t alert;
133};
134
135/** A worker thread represents a single thread in a thread pool. */
136typedef struct workerthread_t {
 137 /** Which thread is this? In range 0..in_pool->n_threads-1 */
138 int index;
139 /** The pool this thread is a part of. */
141 /** User-supplied state field that we pass to the worker functions of each
142 * work item. */
143 void *state;
144 /** Reply queue to which we pass our results. */
146 /** The current update generation of this thread */
147 unsigned generation;
148 /** One over the probability of taking work from a lower-priority queue. */
151
152static void queue_reply(replyqueue_t *queue, workqueue_entry_t *work);
153static void workerthread_free_(workerthread_t *thread);
154#define workerthread_free(thread) \
155 FREE_AND_NULL(workerthread_t, workerthread_free_, (thread))
156static void replyqueue_free_(replyqueue_t *queue);
157#define replyqueue_free(queue) \
158 FREE_AND_NULL(replyqueue_t, replyqueue_free_, (queue))
159
160/** Allocate and return a new workqueue_entry_t, set up to run the function
161 * <b>fn</b> in the worker thread, and <b>reply_fn</b> in the main
162 * thread. See threadpool_queue_work() for full documentation. */
163static workqueue_entry_t *
165 void (*reply_fn)(void*),
166 void *arg)
167{
168 workqueue_entry_t *ent = tor_malloc_zero(sizeof(workqueue_entry_t));
169 ent->fn = fn;
170 ent->reply_fn = reply_fn;
171 ent->arg = arg;
172 ent->priority = WQ_PRI_HIGH;
173 return ent;
174}
175
176#define workqueue_entry_free(ent) \
177 FREE_AND_NULL(workqueue_entry_t, workqueue_entry_free_, (ent))
178
179/**
180 * Release all storage held in <b>ent</b>. Call only when <b>ent</b> is not on
181 * any queue.
182 */
183static void
185{
186 if (!ent)
187 return;
188 memset(ent, 0xf0, sizeof(*ent));
189 tor_free(ent);
190}
191
192/**
193 * Cancel a workqueue_entry_t that has been returned from
194 * threadpool_queue_work.
195 *
196 * You must not call this function on any work whose reply function has been
197 * executed in the main thread; that will cause undefined behavior (probably,
198 * a crash).
199 *
200 * If the work is cancelled, this function return the argument passed to the
201 * work function. It is the caller's responsibility to free this storage.
202 *
203 * This function will have no effect if the worker thread has already executed
204 * or begun to execute the work item. In that case, it will return NULL.
205 */
206void *
208{
209 int cancelled = 0;
210 void *result = NULL;
211 tor_mutex_acquire(&ent->on_pool->lock);
212 workqueue_priority_t prio = ent->priority;
213 if (ent->pending) {
214 TOR_TAILQ_REMOVE(&ent->on_pool->work[prio], ent, next_work);
215 cancelled = 1;
216 result = ent->arg;
217 }
218 tor_mutex_release(&ent->on_pool->lock);
219
220 if (cancelled) {
221 workqueue_entry_free(ent);
222 }
223 return result;
224}
225
226/** Return true iff the thread's pool has queued work pending, or the
227 * thread needs to run a pending update (its generation is behind the
228 * pool's). The caller must hold the pool's lock. */
229static int
231{
232 unsigned i;
233 for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
234 if (!TOR_TAILQ_EMPTY(&thread->in_pool->work[i]))
235 return 1;
236 }
237 return thread->generation != thread->in_pool->generation;
238}
239
 240/** Extract the next workqueue_entry_t from the thread's pool, removing
241 * it from the relevant queues and marking it as non-pending.
242 *
243 * The caller must hold the lock. */
244static workqueue_entry_t *
246{
247 threadpool_t *pool = thread->in_pool;
248 work_tailq_t *queue = NULL, *this_queue;
249 unsigned i;
250 for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
251 this_queue = &pool->work[i];
252 if (!TOR_TAILQ_EMPTY(this_queue)) {
253 queue = this_queue;
255 thread->lower_priority_chance)) {
256 /* Usually we'll just break now, so that we can get out of the loop
257 * and use the queue where we found work. But with a small
258 * probability, we'll keep looking for lower priority work, so that
259 * we don't ignore our low-priority queues entirely. */
260 break;
261 }
262 }
263 }
264
265 if (queue == NULL)
266 return NULL;
267
268 workqueue_entry_t *work = TOR_TAILQ_FIRST(queue);
269 TOR_TAILQ_REMOVE(queue, work, next_work);
270 work->pending = 0;
271 return work;
272}
273
274/**
275 * Main function for the worker thread.
276 */
277static void
278worker_thread_main(void *thread_)
279{
280 static int n_worker_threads_running = 0;
281 workerthread_t *thread = thread_;
282 threadpool_t *pool = thread->in_pool;
284 workqueue_reply_t result;
285
287 log_debug(LD_GENERAL, "Worker thread %u/%u has started [TID: %lu].",
288 n_worker_threads_running + 1, pool->n_threads_max,
290
291 if (++n_worker_threads_running == pool->n_threads_max)
293
295
296 /* Wait until all worker threads have started.
297 * pool->lock must be prelocked here. */
298 tor_mutex_acquire(&pool->lock);
299
300 log_debug(LD_GENERAL, "Worker thread has entered the work loop [TID: %lu].",
302
303 while (1) {
304 /* Exit thread when signaled to exit */
305 if (pool->exit)
306 goto exit;
307
308 /* lock must be held at this point. */
309 while (worker_thread_has_work(thread)) {
310 /* lock must be held at this point. */
311 if (thread->in_pool->generation != thread->generation) {
312 void *arg = thread->in_pool->update_args[thread->index];
313 thread->in_pool->update_args[thread->index] = NULL;
314 workqueue_reply_t (*update_fn)(void*,void*) =
315 thread->in_pool->update_fn;
316 thread->generation = thread->in_pool->generation;
317 tor_mutex_release(&pool->lock);
318
319 workqueue_reply_t r = update_fn(thread->state, arg);
320
321 tor_mutex_acquire(&pool->lock);
322
323 /* We may need to exit the thread. */
324 if (r != WQ_RPL_REPLY)
325 goto exit;
326
327 continue;
328 }
330 if (BUG(work == NULL))
331 break;
332 tor_mutex_release(&pool->lock);
333
334 /* We run the work function without holding the thread lock. This
335 * is the main thread's first opportunity to give us more work. */
336 result = work->fn(thread->state, work->arg);
337
338 /* Queue the reply for the main thread. */
339 queue_reply(thread->reply_queue, work);
340
341 tor_mutex_acquire(&pool->lock);
342
343 /* We may need to exit the thread. */
344 if (result != WQ_RPL_REPLY)
345 goto exit;
346 }
347 /* At this point the lock is held, and there is no work in this thread's
348 * queue. */
349
350 /* TODO: support an idle-function */
351
352 /* Okay. Now, wait till somebody has work for us. */
353 if (tor_cond_wait(&pool->condition, &pool->lock, NULL) < 0) {
354 log_warn(LD_GENERAL, "Fail tor_cond_wait.");
355 }
356 }
357
358exit:
359 /* At this point pool->lock must be held */
360
361 log_debug(LD_GENERAL, "Worker thread %u/%u has exited [TID: %lu].",
362 pool->n_threads_max - n_worker_threads_running + 1,
364
365 if (--n_worker_threads_running == 0)
 366 /* Let the main thread know that the last worker thread has exited. */
368
369 tor_mutex_release(&pool->lock);
370}
371
372/** Put a reply on the reply queue. The reply must not currently be on
373 * any thread's work queue. */
374static void
376{
377 int was_empty;
378 tor_mutex_acquire(&queue->lock);
379 was_empty = TOR_TAILQ_EMPTY(&queue->answers);
380 TOR_TAILQ_INSERT_TAIL(&queue->answers, work, next_work);
381 tor_mutex_release(&queue->lock);
382
383 if (was_empty) {
384 if (queue->alert.alert_fn(queue->alert.write_fd) < 0) {
385 /* XXXX complain! */
386 }
387 }
388}
389
390/** Allocate and start a new worker thread to use state object <b>state</b>,
391 * and send responses to <b>replyqueue</b>. */
392static workerthread_t *
393workerthread_new(int32_t lower_priority_chance,
394 void *state, threadpool_t *pool, replyqueue_t *replyqueue)
395{
396 workerthread_t *thr = tor_malloc_zero(sizeof(workerthread_t));
397 thr->state = state;
398 thr->reply_queue = replyqueue;
399 thr->in_pool = pool;
400 thr->lower_priority_chance = lower_priority_chance;
401
402 if (spawn_func(worker_thread_main, thr) < 0) {
403 //LCOV_EXCL_START
405 log_err(LD_GENERAL, "Can't launch worker thread.");
406 workerthread_free(thr);
407 return NULL;
408 //LCOV_EXCL_STOP
409 }
410
411 return thr;
412}
413
414/**
415 * Free up the resources allocated by a worker thread.
416 */
417static void
419{
420 tor_free(thread);
421}
422
423/**
424 * Queue an item of work for a thread in a thread pool. The function
425 * <b>fn</b> will be run in a worker thread, and will receive as arguments the
426 * thread's state object, and the provided object <b>arg</b>. It must return
427 * one of WQ_RPL_REPLY, WQ_RPL_ERROR, or WQ_RPL_SHUTDOWN.
428 *
429 * Regardless of its return value, the function <b>reply_fn</b> will later be
430 * run in the main thread when it invokes replyqueue_process(), and will
431 * receive as its argument the same <b>arg</b> object. It's the reply
432 * function's responsibility to free the work object.
433 *
434 * On success, return a workqueue_entry_t object that can be passed to
435 * workqueue_entry_cancel(). On failure, return NULL. (Failure is not
436 * currently possible, but callers should check anyway.)
437 *
438 * Items are executed in a loose priority order -- each thread will usually
 439 * take from the queued work with the highest priority, but will occasionally
440 * visit lower-priority queues to keep them from starving completely.
441 *
442 * Note that because of priorities and thread behavior, work items may not
443 * be executed strictly in order.
444 */
448 workqueue_reply_t (*fn)(void *, void *),
449 void (*reply_fn)(void *),
450 void *arg)
451{
452 tor_assert(((int)prio) >= WORKQUEUE_PRIORITY_FIRST &&
453 ((int)prio) <= WORKQUEUE_PRIORITY_LAST);
454
455 workqueue_entry_t *ent = workqueue_entry_new(fn, reply_fn, arg);
456 ent->on_pool = pool;
457 ent->pending = 1;
458 ent->priority = prio;
459
460 tor_mutex_acquire(&pool->lock);
461
462 TOR_TAILQ_INSERT_TAIL(&pool->work[prio], ent, next_work);
463
465
466 tor_mutex_release(&pool->lock);
467
468 return ent;
469}
470
471/** As threadpool_queue_work_priority(), but assumes WQ_PRI_HIGH */
474 workqueue_reply_t (*fn)(void *, void *),
475 void (*reply_fn)(void *),
476 void *arg)
477{
478 return threadpool_queue_work_priority(pool, WQ_PRI_HIGH, fn, reply_fn, arg);
479}
480
481/**
482 * Queue a copy of a work item for every thread in a pool. This can be used,
483 * for example, to tell the threads to update some parameter in their states.
484 *
485 * Arguments are as for <b>threadpool_queue_work</b>, except that the
486 * <b>arg</b> value is passed to <b>dup_fn</b> once per each thread to
487 * make a copy of it.
488 *
489 * UPDATE FUNCTIONS MUST BE IDEMPOTENT. We do not guarantee that every update
490 * will be run. If a new update is scheduled before the old update finishes
491 * running, then the new will replace the old in any threads that haven't run
492 * it yet.
493 *
494 * Return 0 on success, -1 on failure.
495 */
496int
498 void *(*dup_fn)(void *),
499 workqueue_reply_t (*fn)(void *, void *),
500 void (*free_fn)(void *),
501 void *arg)
502{
503 int i, n_threads;
504 void (*old_args_free_fn)(void *arg);
505 void **old_args;
506 void **new_args;
507
508 tor_mutex_acquire(&pool->lock);
509 n_threads = pool->n_threads;
510 old_args = pool->update_args;
511 old_args_free_fn = pool->free_update_arg_fn;
512
513 new_args = tor_calloc(n_threads, sizeof(void*));
514 for (i = 0; i < n_threads; ++i) {
515 if (dup_fn)
516 new_args[i] = dup_fn(arg);
517 else
518 new_args[i] = arg;
519 }
520
521 pool->update_args = new_args;
522 pool->free_update_arg_fn = free_fn;
523 pool->update_fn = fn;
524 ++pool->generation;
525
527
528 tor_mutex_release(&pool->lock);
529
530 if (old_args) {
531 for (i = 0; i < n_threads; ++i) {
532 if (old_args[i] && old_args_free_fn)
533 old_args_free_fn(old_args[i]);
534 }
535 tor_free(old_args);
536 }
537
538 return 0;
539}
540
541/** Don't have more than this many threads per pool. */
542#define MAX_THREADS 1024
543
544/** For half of our threads, choose lower priority queues with probability
545 * 1/N for each of these values. Both are chosen somewhat arbitrarily. If
546 * CHANCE_PERMISSIVE is too low, then we have a risk of low-priority tasks
547 * stalling forever. If it's too high, we have a risk of low-priority tasks
548 * grabbing half of the threads. */
549#define CHANCE_PERMISSIVE 37
550#define CHANCE_STRICT INT32_MAX
551
552/** Launch threads until we have <b>n</b>. */
553static int
555{
556 if (BUG(n < 0))
557 return -1; // LCOV_EXCL_LINE
558 if (n > MAX_THREADS)
559 n = MAX_THREADS;
560
562 tor_mutex_acquire(&pool->lock);
563
564 if (pool->n_threads < n)
565 pool->threads = tor_reallocarray(pool->threads,
566 sizeof(workerthread_t*), n);
567
568 int status = 0;
569 pool->n_threads_max = n;
570 log_debug(LD_GENERAL, "Starting worker threads...");
571
572 while (pool->n_threads < n) {
573 /* For half of our threads, we'll choose lower priorities permissively;
574 * for the other half, we'll stick more strictly to higher priorities.
575 * This keeps slow low-priority tasks from taking over completely. */
576 int32_t chance = (pool->n_threads & 1) ? CHANCE_STRICT : CHANCE_PERMISSIVE;
577
578 void *state = pool->new_thread_state_fn(pool->new_thread_state_arg);
579 workerthread_t *thr = workerthread_new(chance,
580 state, pool, pool->reply_queue);
581
582 if (!thr) {
583 //LCOV_EXCL_START
585 pool->free_thread_state_fn(state);
586 status = -1;
587 goto check_status;
588 //LCOV_EXCL_STOP
589 }
590 thr->index = pool->n_threads;
591 pool->threads[pool->n_threads++] = thr;
592 }
593
594 struct timeval tv = {.tv_sec = 30, .tv_usec = 0};
595
 596 /* Wait for the last launched thread to confirm to us that it has
 597 * started. Wait at most 30 seconds. */
598 status = tor_cond_wait(&pool->condition, &pool->control_lock, &tv);
599
600check_status:
601 switch (status) {
602 case 0:
603 log_debug(LD_GENERAL, "Starting worker threads finished.");
604 break;
605 case -1:
606 log_warn(LD_GENERAL, "Failed to confirm worker threads' start up.");
607 break;
608 case 1:
609 log_warn(LD_GENERAL, "Failed to confirm worker threads' "
610 "start up after timeout.");
611 FALLTHROUGH;
612 default:
613 status = -1;
614 }
615
616 log_debug(LD_GENERAL, "Signaled the worker threads to enter the work loop.");
617
618 /* If we had an error, let the worker threads (if any) exit directly. */
619 if (status != 0) {
620 pool->exit = 1;
621 log_debug(LD_GENERAL, "Signaled the worker threads to exit...");
622 }
623
624 /* Let worker threads enter the work loop. */
625 tor_mutex_release(&pool->lock);
626
627 /* pool->control_lock stays locked. This is required for the main thread
628 * to wait for the worker threads to exit on shutdown. */
629
630 return status;
631}
632
633/** Stop all worker threads */
634static void
636{
637 tor_mutex_acquire(&pool->lock);
638
639 if (pool->exit == 0) {
640 /* Signal the worker threads to exit */
641 pool->exit = 1;
642 /* If worker threads are waiting for work, let them continue to exit */
644
645 log_debug(LD_GENERAL, "Signaled worker threads to exit. "
646 "Waiting for them to exit...");
647 }
648
649 tor_mutex_release(&pool->lock);
650
651 /* Wait until all worker threads have exited.
652 * pool->control_lock must be prelocked here. */
654 /* Unlock required, else main thread hangs on mutex uninit. */
656
657 /* If this message appears in the log before all threads have confirmed
658 * their exit, then pool->control_lock wasn't prelocked for some reason. */
659 log_debug(LD_GENERAL, "All worker threads have exited.");
660}
661
662/**
663 * Construct a new thread pool with <b>n</b> worker threads, configured to
664 * send their output to <b>replyqueue</b>. The threads' states will be
665 * constructed with the <b>new_thread_state_fn</b> call, receiving <b>arg</b>
666 * as its argument. When the threads close, they will call
667 * <b>free_thread_state_fn</b> on their states.
668 */
670threadpool_new(int n_threads,
671 replyqueue_t *replyqueue,
672 void *(*new_thread_state_fn)(void*),
673 void (*free_thread_state_fn)(void*),
674 void *arg)
675{
676 threadpool_t *pool;
677 pool = tor_malloc_zero(sizeof(threadpool_t));
679 tor_cond_init(&pool->condition);
681 pool->exit = 0;
682
683 unsigned i;
684 for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
685 TOR_TAILQ_INIT(&pool->work[i]);
686 }
687
688 pool->new_thread_state_fn = new_thread_state_fn;
689 pool->new_thread_state_arg = arg;
690 pool->free_thread_state_fn = free_thread_state_fn;
691 pool->reply_queue = replyqueue;
692
693 if (threadpool_start_threads(pool, n_threads) < 0) {
694 //LCOV_EXCL_START
696 threadpool_free(pool);
697 return NULL;
698 //LCOV_EXCL_STOP
699 }
700
701 return pool;
702}
703
704/**
705 * Free up the resources allocated by worker threads, worker thread pool, ...
706 */
707void
709{
710 if (!pool)
711 return;
712
714
715 log_debug(LD_GENERAL, "Beginning to clean up...");
716
718 tor_mutex_uninit(&pool->lock);
720
721 if (pool->threads) {
722 for (int i = 0; i != pool->n_threads; ++i)
723 workerthread_free(pool->threads[i]);
724
725 tor_free(pool->threads);
726 }
727
728 if (pool->update_args) {
729 if (!pool->free_update_arg_fn)
730 log_warn(LD_GENERAL, "Freeing pool->update_args not possible. "
731 "pool->free_update_arg_fn is not set.");
732 else
733 pool->free_update_arg_fn(pool->update_args);
734 }
735
736 if (pool->reply_event) {
737 if (tor_event_del(pool->reply_event) == -1)
738 log_warn(LD_GENERAL, "libevent error: deleting reply event failed.");
739 else
740 tor_event_free(pool->reply_event);
741 }
742
743 if (pool->reply_queue)
744 replyqueue_free(pool->reply_queue);
745
746 if (pool->new_thread_state_arg) {
747 if (!pool->free_thread_state_fn)
748 log_warn(LD_GENERAL, "Freeing pool->new_thread_state_arg not possible. "
749 "pool->free_thread_state_fn is not set.");
750 else
751 pool->free_thread_state_fn(pool->new_thread_state_arg);
752 }
753
754 tor_free(pool);
755
756 log_debug(LD_GENERAL, "Cleanup finished.");
757}
758
759/** Return the reply queue associated with a given thread pool. */
762{
763 return tp->reply_queue;
764}
765
766/** Allocate a new reply queue. Reply queues are used to pass results from
767 * worker threads to the main thread. Since the main thread is running an
768 * IO-centric event loop, it needs to get woken up with means other than a
769 * condition variable. */
771replyqueue_new(uint32_t alertsocks_flags)
772{
773 replyqueue_t *rq;
774
775 rq = tor_malloc_zero(sizeof(replyqueue_t));
776 if (alert_sockets_create(&rq->alert, alertsocks_flags) < 0) {
777 //LCOV_EXCL_START
778 replyqueue_free(rq);
779 return NULL;
780 //LCOV_EXCL_STOP
781 }
782
783 tor_mutex_init(&rq->lock);
784 TOR_TAILQ_INIT(&rq->answers);
785
786 return rq;
787}
788
789/**
790 * Free up the resources allocated by a reply queue.
791 */
792static void
794{
795 if (!queue)
796 return;
797
798 workqueue_entry_t *work;
799
800 while (!TOR_TAILQ_EMPTY(&queue->answers)) {
801 work = TOR_TAILQ_FIRST(&queue->answers);
802 TOR_TAILQ_REMOVE(&queue->answers, work, next_work);
803 workqueue_entry_free(work);
804 }
805
806 tor_free(queue);
807}
808
809/** Internal: Run from the libevent mainloop when there is work to handle in
810 * the reply queue handler. */
811static void
812reply_event_cb(evutil_socket_t sock, short events, void *arg)
813{
814 threadpool_t *tp = arg;
815 (void) sock;
816 (void) events;
818 if (tp->reply_cb)
819 tp->reply_cb(tp);
820}
821
822/** Register the threadpool <b>tp</b>'s reply queue with Tor's global
823 * libevent mainloop. If <b>cb</b> is provided, it is run after
824 * each time there is work to process from the reply queue. Return 0 on
825 * success, -1 on failure.
826 */
827int
829 void (*cb)(threadpool_t *tp))
830{
831 struct event_base *base = tor_libevent_get_base();
832
833 if (tp->reply_event) {
834 tor_event_free(tp->reply_event);
835 }
836 tp->reply_event = tor_event_new(base,
837 tp->reply_queue->alert.read_fd,
838 EV_READ|EV_PERSIST,
840 tp);
842 tp->reply_cb = cb;
843 return event_add(tp->reply_event, NULL);
844}
845
846/**
847 * Process all pending replies on a reply queue. The main thread should call
848 * this function every time the socket returned by replyqueue_get_socket() is
849 * readable.
850 */
851void
853{
854 int r = queue->alert.drain_fn(queue->alert.read_fd);
855 if (r < 0) {
856 //LCOV_EXCL_START
857 static ratelim_t warn_limit = RATELIM_INIT(7200);
858 log_fn_ratelim(&warn_limit, LOG_WARN, LD_GENERAL,
859 "Failure from drain_fd: %s",
860 tor_socket_strerror(-r));
861 //LCOV_EXCL_STOP
862 }
863
864 tor_mutex_acquire(&queue->lock);
865 while (!TOR_TAILQ_EMPTY(&queue->answers)) {
866 /* lock must be held at this point.*/
867 workqueue_entry_t *work = TOR_TAILQ_FIRST(&queue->answers);
868 TOR_TAILQ_REMOVE(&queue->answers, work, next_work);
869 tor_mutex_release(&queue->lock);
870 work->on_pool = NULL;
871
872 work->reply_fn(work->arg);
873 workqueue_entry_free(work);
874
875 tor_mutex_acquire(&queue->lock);
876 }
877
878 tor_mutex_release(&queue->lock);
879}
880
881/** Return the number of threads configured for the given pool. */
882unsigned int
884{
885 tor_assert(tp);
886 return tp->n_threads;
887}
int alert_sockets_create(alert_sockets_t *socks_out, uint32_t flags)
Definition: alertsock.c:191
Header for alertsock.c.
struct event_base * tor_libevent_get_base(void)
Header for compat_libevent.c.
void tor_mutex_init_nonrecursive(tor_mutex_t *m)
void tor_mutex_release(tor_mutex_t *m)
void tor_mutex_init(tor_mutex_t *m)
void tor_mutex_acquire(tor_mutex_t *m)
void tor_mutex_uninit(tor_mutex_t *m)
void tor_cond_signal_all(tor_cond_t *cond)
int tor_cond_init(tor_cond_t *cond)
int spawn_func(void(*func)(void *), void *data)
void tor_cond_uninit(tor_cond_t *cond)
void tor_cond_signal_one(tor_cond_t *cond)
int tor_cond_wait(tor_cond_t *cond, tor_mutex_t *mutex, const struct timeval *tv)
unsigned long tor_get_thread_id(void)
Common functions for using (pseudo-)random number generators.
#define crypto_fast_rng_one_in_n(rng, n)
Definition: crypto_rand.h:80
crypto_fast_rng_t * get_thread_fast_rng(void)
Headers for log.c.
#define log_fn_ratelim(ratelim, severity, domain, args,...)
Definition: log.h:288
#define LD_GENERAL
Definition: log.h:62
#define LOG_WARN
Definition: log.h:53
#define tor_free(p)
Definition: malloc.h:56
Summarize similar messages that would otherwise flood the logs.
Header for socket.c.
tor_mutex_t lock
Definition: workqueue.c:127
void *(* new_thread_state_fn)(void *)
Definition: workqueue.c:90
int n_threads_max
Definition: workqueue.c:82
int n_threads
Definition: workqueue.c:80
work_tailq_t work[WORKQUEUE_N_PRIORITIES]
Definition: workqueue.c:63
tor_mutex_t control_lock
Definition: workqueue.c:97
unsigned generation
Definition: workqueue.c:67
void(* free_update_arg_fn)(void *)
Definition: workqueue.c:72
tor_mutex_t lock
Definition: workqueue.c:84
struct event * reply_event
Definition: workqueue.c:76
tor_cond_t condition
Definition: workqueue.c:60
workqueue_reply_t(* update_fn)(void *, void *)
Definition: workqueue.c:70
struct workerthread_t ** threads
Definition: workqueue.c:56
replyqueue_t * reply_queue
Definition: workqueue.c:87
void ** update_args
Definition: workqueue.c:74
int32_t lower_priority_chance
Definition: workqueue.c:149
void * state
Definition: workqueue.c:143
struct threadpool_t * in_pool
Definition: workqueue.c:140
unsigned generation
Definition: workqueue.c:147
replyqueue_t * reply_queue
Definition: workqueue.c:145
Definition: workqueue.c:105
Header for threads.c.
Macros to manage assertions, fatal and non-fatal.
#define tor_assert_nonfatal_unreached()
Definition: util_bug.h:177
#define tor_assert(expr)
Definition: util_bug.h:103
Header for weakrng.c.
#define workqueue_priority_bitfield_t
Definition: workqueue.c:101
void threadpool_free_(threadpool_t *pool)
Definition: workqueue.c:708
static int worker_thread_has_work(workerthread_t *thread)
Definition: workqueue.c:230
static void threadpool_stop_threads(threadpool_t *pool)
Definition: workqueue.c:635
replyqueue_t * threadpool_get_replyqueue(threadpool_t *tp)
Definition: workqueue.c:761
static int threadpool_start_threads(threadpool_t *pool, int n)
Definition: workqueue.c:554
static void workqueue_entry_free_(workqueue_entry_t *ent)
Definition: workqueue.c:184
void replyqueue_process(replyqueue_t *queue)
Definition: workqueue.c:852
void * workqueue_entry_cancel(workqueue_entry_t *ent)
Definition: workqueue.c:207
static workqueue_entry_t * worker_thread_extract_next_work(workerthread_t *thread)
Definition: workqueue.c:245
static void worker_thread_main(void *thread_)
Definition: workqueue.c:278
static void reply_event_cb(evutil_socket_t sock, short events, void *arg)
Definition: workqueue.c:812
static void replyqueue_free_(replyqueue_t *queue)
Definition: workqueue.c:793
workqueue_entry_t * threadpool_queue_work_priority(threadpool_t *pool, workqueue_priority_t prio, workqueue_reply_t(*fn)(void *, void *), void(*reply_fn)(void *), void *arg)
Definition: workqueue.c:446
workqueue_entry_t * threadpool_queue_work(threadpool_t *pool, workqueue_reply_t(*fn)(void *, void *), void(*reply_fn)(void *), void *arg)
Definition: workqueue.c:473
#define MAX_THREADS
Definition: workqueue.c:542
replyqueue_t * replyqueue_new(uint32_t alertsocks_flags)
Definition: workqueue.c:771
int threadpool_register_reply_event(threadpool_t *tp, void(*cb)(threadpool_t *tp))
Definition: workqueue.c:828
int threadpool_queue_update(threadpool_t *pool, void *(*dup_fn)(void *), workqueue_reply_t(*fn)(void *, void *), void(*free_fn)(void *), void *arg)
Definition: workqueue.c:497
static void queue_reply(replyqueue_t *queue, workqueue_entry_t *work)
Definition: workqueue.c:375
#define CHANCE_PERMISSIVE
Definition: workqueue.c:549
threadpool_t * threadpool_new(int n_threads, replyqueue_t *replyqueue, void *(*new_thread_state_fn)(void *), void(*free_thread_state_fn)(void *), void *arg)
Definition: workqueue.c:670
static workerthread_t * workerthread_new(int32_t lower_priority_chance, void *state, threadpool_t *pool, replyqueue_t *replyqueue)
Definition: workqueue.c:393
unsigned int threadpool_get_n_threads(threadpool_t *tp)
Definition: workqueue.c:883
static workqueue_entry_t * workqueue_entry_new(workqueue_reply_t(*fn)(void *, void *), void(*reply_fn)(void *), void *arg)
Definition: workqueue.c:164
#define WORKQUEUE_PRIORITY_BITS
Definition: workqueue.c:103
static void workerthread_free_(workerthread_t *thread)
Definition: workqueue.c:418
Header for workqueue.c.
workqueue_reply_t
Definition: workqueue.h:24
workqueue_priority_t
Definition: workqueue.h:31