tor_memquota/mtracker.rs
1//! Memory quota tracker, core and low-level API
2//!
3//! # Example
4//!
5//! ```cfg(feature = "memquota")
6//! use std::{collections::VecDeque, sync::{Arc, Mutex}};
7//! use tor_rtcompat::{CoarseInstant, CoarseTimeProvider, PreferredRuntime};
8//! use tor_memquota::{mtracker, MemoryQuotaTracker, MemoryReclaimedError, EnabledToken};
9//! use void::{ResultVoidExt, Void};
10//!
11//! #[derive(Debug)]
12//! struct TrackingQueue(Mutex<Result<Inner, MemoryReclaimedError>>);
13//! #[derive(Debug)]
14//! struct Inner {
15//! partn: mtracker::Participation,
16//! data: VecDeque<(Box<[u8]>, CoarseInstant)>,
17//! }
18//!
19//! impl TrackingQueue {
20//! fn push(&self, now: CoarseInstant, bytes: Box<[u8]>) -> Result<(), MemoryReclaimedError> {
21//! let mut inner = self.0.lock().unwrap();
22//! let inner = inner.as_mut().map_err(|e| e.clone())?;
23//! inner.partn.claim(bytes.len())?;
24//! inner.data.push_back((bytes, now));
25//! Ok(())
26//! }
27//! }
28//!
29//! impl mtracker::IsParticipant for TrackingQueue {
30//! fn get_oldest(&self, _: EnabledToken) -> Option<CoarseInstant> {
31//! let inner = self.0.lock().unwrap();
32//! Some(inner.as_ref().ok()?.data.front()?.1)
33//! }
34//! fn reclaim(self: Arc<Self>, _: EnabledToken) -> mtracker::ReclaimFuture {
35//! let mut inner = self.0.lock().unwrap();
36//! *inner = Err(MemoryReclaimedError::new());
37//! Box::pin(async { mtracker::Reclaimed::Collapsing })
38//! }
39//! }
40//!
41//! let runtime = PreferredRuntime::create().unwrap();
42//! let config = tor_memquota::Config::builder().max(1024*1024*1024).build().unwrap();
43#![cfg_attr(
44 feature = "memquota",
45 doc = "let trk = MemoryQuotaTracker::new(&runtime, config).unwrap();"
46)]
47#![cfg_attr(
48 not(feature = "memquota"),
49 doc = "let trk = MemoryQuotaTracker::new_noop();"
50)]
51//!
52//! let account = trk.new_account(None).unwrap();
53//!
54//! let queue: Arc<TrackingQueue> = account.register_participant_with(
55//! runtime.now_coarse(),
56//! |partn| {
57//! Ok::<_, Void>((Arc::new(TrackingQueue(Mutex::new(Ok(Inner {
58//! partn,
59//! data: VecDeque::new(),
60//! })))), ()))
61//! },
62//! ).unwrap().void_unwrap().0;
63//!
64//! queue.push(runtime.now_coarse(), Box::new([0; 24])).unwrap();
65//! ```
66//
67// For key internal documentation about the data structure, see the doc comment for
68// `struct State` (down in the middle of the file).
69
70#![forbid(unsafe_code)] // if you remove this, enable (or write) miri tests (git grep miri)
71
72use crate::internal_prelude::*;
73
74use IfEnabled::*;
75
76mod bookkeeping;
77mod reclaim;
78mod total_qty_notifier;
79
80#[cfg(all(test, feature = "memquota", not(miri) /* coarsetime */))]
81pub(crate) mod test;
82
83use bookkeeping::{BookkeepableQty, ClaimedQty, ParticipQty, TotalQty};
84use total_qty_notifier::TotalQtyNotifier;
85
86/// Maximum amount we'll "cache" locally in a [`Participation`]
87///
88/// ie maximum value of `Participation.cache`.
89//
90// TODO is this a good amount? should it be configurable?
91pub(crate) const MAX_CACHE: Qty = Qty(16384);
92
93/// Target cache size when we seem to be claiming
94const TARGET_CACHE_CLAIMING: Qty = Qty(MAX_CACHE.as_usize() * 3 / 4);
95/// Target cache size when we seem to be releasing
96#[allow(clippy::identity_op)] // consistency
97const TARGET_CACHE_RELEASING: Qty = Qty(MAX_CACHE.as_usize() * 1 / 4);
98
99//---------- public data types ----------
100
101/// Memory data tracker
102///
103/// Instance of the memory quota system.
104///
105/// Usually found as `Arc<MemoryQuotaTracker>`.
106#[derive(Debug)]
107pub struct MemoryQuotaTracker {
108 /// The actual tracker state etc.
109 state: IfEnabled<Mutex<State>>,
110}
111
112/// Handle onto an Account
113///
114/// An `Account` is a handle. All clones refer to the same underlying conceptual Account.
115///
116/// `Account`s are created using [`MemoryQuotaTracker::new_account`].
117///
118/// # Use in Arti
119///
120/// In Arti, we usually use a newtype around `Account`, rather than a bare `Account`.
121/// See `tor_proto::memquota`.
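///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest); it assumes a `tracker`
/// (an `Arc<MemoryQuotaTracker>`) set up as in the crate-level example:
///
/// ```rust,ignore
/// // Make a top-level Account
/// let account = tracker.new_account(None)?;
///
/// // Accounts form a hierarchy; child Accounts are usually made like this
/// let child = account.new_child()?;
///
/// // All clones refer to the same underlying Account
/// let account_2 = account.clone();
/// ```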
122#[derive(Educe)]
123#[educe(Debug)]
124pub struct Account(IfEnabled<AccountInner>);
125
126/// Contents of an enabled [`Account`]
127#[derive(Educe)]
128#[educe(Debug)]
129pub struct AccountInner {
130 /// The account ID
131 aid: refcount::Ref<AId>,
132
133 /// The underlying tracker
134 #[educe(Debug(ignore))]
135 tracker: Arc<MemoryQuotaTracker>,
136}
137
138/// Weak handle onto an Account
139///
140/// Like [`Account`], but doesn't keep the account alive.
141/// Must be upgraded before use.
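///
/// # Example
///
/// A minimal sketch (not compiled as a doctest), assuming an existing enabled `account`:
///
/// ```rust,ignore
/// let weak: WeakAccount = account.downgrade();
/// // ... later, when we actually need it:
/// let account = weak.upgrade()?; // fails if the Account (or the tracker) is gone
/// ```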
142//
// Doesn't count for ARecord.refcount
144//
145// We can't lift out Arc, so that the caller sees `Arc<Account>`,
146// because an Account is Arc<MemoryQuotaTracker> plus AId,
147// not Arc of something account-specific.
148#[derive(Clone, Educe)]
149#[educe(Debug)]
150pub struct WeakAccount(IfEnabled<WeakAccountInner>);
151
152/// Contents of an enabled [`WeakAccount`]
153#[derive(Clone, Educe)]
154#[educe(Debug)]
155pub struct WeakAccountInner {
156 /// The account ID
157 aid: AId,
158
159 /// The underlying tracker
160 #[educe(Debug(ignore))]
161 tracker: Weak<MemoryQuotaTracker>,
162}
163
164/// Handle onto a participant's participation in a tracker
165///
166/// `Participation` is a handle. All clones are for use by the same conceptual Participant.
167/// It doesn't keep the underlying Account alive.
168///
169/// `Participation`s are created by registering new participants,
170/// for example using [`Account::register_participant`].
171///
172/// Variables of this type are often named `partn`.
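///
/// # Example
///
/// An abbreviated sketch (not compiled as a doctest) of claiming and releasing,
/// assuming a mutable `partn: Participation` obtained from one of the registration methods:
///
/// ```rust,ignore
/// let buf = vec![0_u8; 4096];
/// partn.claim(buf.len())?;   // may fail, eg if the tracker has torn us down
/// // ... keep the memory for a while ...
/// partn.release(buf.len());  // infallible
/// ```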
173#[derive(Debug)]
174pub struct Participation(IfEnabled<ParticipationInner>);
175
176/// Contents of an enabled [`Participation`]
177#[derive(Debug)]
178pub struct ParticipationInner {
179 /// Participant id
180 pid: refcount::Ref<PId>,
181
182 /// Account id
183 aid: AId,
184
185 /// The underlying tracker
186 tracker: Weak<MemoryQuotaTracker>,
187
188 /// Quota we have preemptively claimed for use by this Account
189 ///
190 /// Has been added to `PRecord.used`,
191 /// but not yet returned by `Participation::claim`.
192 ///
193 /// This cache field arranges that most of the time we don't have to hammer a
194 /// single cache line.
195 ///
196 /// The value here is bounded by a configured limit.
197 ///
198 /// Invariants on memory accounting:
199 ///
200 /// * `Participation.cache < configured limit`
201 /// * `PRecord.used = Participation.cache + Σ Participation::claim - Σ P'n::release`
202 /// except if `PRecord` has been deleted
203 /// (ie when we aren't tracking any more and think the Participant is `Collapsing`).
204 /// * `Σ PRecord.used = State.total_used`
205 ///
206 /// Enforcement of these invariants is partially assured by
207 /// types in [`bookkeeping`].
208 cache: ClaimedQty,
209}
210
211/// Participants provide an impl of the hooks in this trait
212///
213/// Trait implemented by client of the memtrack API.
214///
215/// # Panic handling, "unwind safety"
216///
217/// If these methods panic, the memory tracker will tear down its records of the
218/// participant, preventing future allocations.
219///
220/// But, it's not guaranteed that these methods on `IsParticipant` won't be called again,
221/// even if they have already panicked on a previous occasion.
222/// Thus the implementations might see "broken invariants"
223/// as discussed in the docs for `std::panic::UnwindSafe`.
224///
225/// Nevertheless we don't make `RefUnwindSafe` a supertrait of `IsParticipant`.
226/// That would force the caller to mark *all* their methods unwind-safe,
227/// which is unreasonable (and probably undesirable).
228///
229/// Variables which are `IsParticipant` are often named `particip`.
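///
/// # Example
///
/// A condensed version of the crate-level example (not compiled as a doctest).
/// `oldest_item_timestamp` and `discard_all_data` are hypothetical helpers standing in
/// for the queue bookkeeping shown in full in the crate-level example:
///
/// ```rust,ignore
/// impl IsParticipant for TrackingQueue {
///     fn get_oldest(&self, _: EnabledToken) -> Option<CoarseInstant> {
///         // Age of the oldest data we hold, or None if we hold nothing
///         self.oldest_item_timestamp() // hypothetical helper
///     }
///     fn reclaim(self: Arc<Self>, _: EnabledToken) -> ReclaimFuture {
///         // Free everything we hold, then report that we are collapsing
///         self.discard_all_data(); // hypothetical helper
///         Box::pin(async { Reclaimed::Collapsing })
///     }
/// }
/// ```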
230pub trait IsParticipant: Debug + Send + Sync + 'static {
231 /// Return the age of the oldest data held by this Participant
232 ///
233 /// `None` means this Participant holds no data.
234 ///
235 /// # Performance and reentrancy
236 ///
237 /// This function runs with the `MemoryQuotaTracker`'s internal global lock held.
238 /// Therefore:
239 ///
240 /// * It must be fast.
241 /// * it *must not* call back into methods from [`tracker`](crate::mtracker).
242 /// * It *must not* even `Clone` or `Drop` a [`MemoryQuotaTracker`],
243 /// [`Account`], or [`Participation`].
244 fn get_oldest(&self, _: EnabledToken) -> Option<CoarseInstant>;
245
246 /// Start memory reclamation
247 ///
248 /// The Participant should start to free all of its memory,
249 /// and then return `Reclaimed::Collapsing`.
250 //
251 // In the future:
252 //
253 // Should free *at least* all memory at least as old as discard_...
254 //
255 // v1 of the actual implementation might not have `discard_everything_as_old_as`
256 // and `but_can_stop_discarding_...`,
257 // and might therefore only support Reclaimed::Collapsing
258 fn reclaim(
259 self: Arc<Self>,
260 _: EnabledToken,
261 // Future:
262 // discard_everything_as_old_as_this: RoughTime,
263 // but_can_stop_discarding_after_freeing_this_much: Qty,
264 ) -> ReclaimFuture;
265}
266
267/// Future returned by the [`IsParticipant::reclaim`] reclamation request
268pub type ReclaimFuture = Pin<Box<dyn Future<Output = Reclaimed> + Send + Sync>>;
269
270/// Outcome of [`IsParticipant::reclaim`]
271#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
272#[non_exhaustive]
273pub enum Reclaimed {
274 /// Participant is responding to reclamation by collapsing completely.
275 ///
276 /// All memory will be freed and `release`'d soon (if it hasn't been already).
277 /// `MemoryQuotaTracker` should forget the Participant and all memory it used, right away.
278 ///
279 /// Currently this is the only supported behaviour.
280 Collapsing,
281 // Future:
282 // /// Participant has now reclaimed some memory as instructed
283 // ///
284 // /// If this is not sufficient, tracker must call reclaim() again.
285 // /// (We may not want to implement Partial right away but the API
286 // /// ought to support it so let's think about it now, even if we don't implement it.)
287 // Partial,
288}
289
290//---------- principal data structure ----------
291
292slotmap_careful::new_key_type! {
293 /// Identifies an Account
294 ///
295 /// After an account is torn down, the `AId` becomes invalid
296 /// and attempts to use it will give an error.
297 ///
298 /// The same `AId` won't be reused for a later Account.
299 struct AId;
300
301 /// Identifies a Participant within an Account
302 ///
    /// Ie, a PId is scoped within the context of an account.
304 ///
305 /// As with `AId`, a `PId` is invalid after the
306 /// participation is torn down, and is not reused.
307 struct PId;
308}
309
310/// Memory tracker inner, including mutable state
311///
312/// # Module internal documentation
313///
314/// ## Data structure
315///
316/// * [`MemoryQuotaTracker`] contains mutex-protected `State`.
317/// * The `State` contains a [`SlotMap`] of account records [`ARecord`].
318/// * Each `ARecord` contains a `SlotMap` of participant records [`PRecord`].
319///
320/// The handles [`Account`], [`WeakAccount`], and [`Participation`],
321/// each contain a reference (`Arc`/`Weak`) to the `MemoryQuotaTracker`,
322/// and the necessary slotmap keys.
323///
324/// The `ARecord` and `PRecord` each contain a reference count,
325/// which is used to clean up when all the handles are gone.
326///
327/// The slotmap keys which count for the reference count (ie, strong references)
328/// are stored as [`refcount::Ref`],
329/// which helps assure correct reference counting.
330/// (Bare ids [`AId`] and [`PId`] are weak references.)
331///
332/// ## Data structure lookup
333///
334/// Given a reference to the tracker, and some ids, the macro `find_in_tracker!`
335/// is used to obtain mutable references to the `ARecord` and (if applicable) `PRecord`.
336///
337/// ## Bookkeeping
338///
339/// We use separate types for quantities of memory in various "states",
340/// rather than working with raw quantities.
341///
342/// The types, and the legitimate transactions, are in `bookkeeping`.
343///
344/// ## Reentrancy (esp. `Drop` and `Clone`)
345///
346/// When the handle structs are dropped or cloned, they must manipulate the refcount(s).
347/// So they must take the lock.
348/// Therefore, an `Account` and `Participation` may not be dropped with the lock held!
349///
350/// Internally, this is actually fairly straightforward:
351/// we take handles by reference, and constructors only make them at the last moment on return,
352/// so our internal code here, in this module, doesn't have owned handles.
353///
354/// We also need to worry about reentrantly reentering the tracker code, from user code.
355/// The user supplies a `dyn IsParticipant`.
356/// The principal methods are from [`IsParticipant`],
357/// for which we handle reentrancy in the docs.
358/// But we also implicitly invoke its `Drop` impl, which might in turn drop stuff of ours,
359/// such as [`Account`]s and [`Participation`]s, whose `Drop` impls need to take our lock.
360/// To make sure this isn't done reentrantly, we have a special newtype around it,
361/// and defer some of our drops during reclaim.
362/// That's in `drop_reentrancy` and `tracker::reclaim::deferred_drop`.
363///
364/// The `Debug` impl isn't of concern, since we don't call it ourselves.
365/// And we don't rely on it being `Clone`, since it's in an `Arc`.
366///
367/// ## Drop bombs
368///
369/// With `#[cfg(test)]`, several of our types have "drop bombs":
370/// they cause a panic if dropped inappropriately.
371/// This is intended to detect bad code paths during testing.
372#[derive(Debug, Deref, DerefMut)]
373struct State {
374 /// Global parts of state
375 ///
376 /// Broken out to allow passing both
377 /// `&mut Global` and `&mut ARecord`/`&mut PRecord`
378 /// to some function(s).
379 #[deref]
380 #[deref_mut]
381 global: Global,
382
383 /// Accounts
384 accounts: SlotMap<AId, ARecord>,
385}
386
387/// Global parts of `State`
388#[derive(Debug)]
389struct Global {
390 /// Total memory used
391 ///
392 /// Wrapper type for ensuring we wake up the reclamation task
393 total_used: TotalQtyNotifier,
394
395 /// Configuration
396 config: ConfigInner,
397
398 /// Make this type uninhabited if memory tracking is compiled out
399 #[allow(dead_code)]
400 enabled: EnabledToken,
401}
402
403/// Account record, within `State.accounts`
404#[derive(Debug)]
405#[must_use = "don't just drop, call auto_release"]
406struct ARecord {
407 /// Number of clones of `Account`; to know when to tear down the account
408 refcount: refcount::Count<AId>,
409
410 /// Child accounts
411 children: Vec<AId>,
412
413 /// Participants linked to this Account
414 ps: SlotMap<PId, PRecord>,
415
    /// Make this type uninhabited if memory tracking is compiled out
417 #[allow(dead_code)]
418 enabled: EnabledToken,
419}
420
421/// Participant record, within `ARecord.ps`
422#[derive(Debug)]
423#[must_use = "don't just drop, call auto_release"]
424struct PRecord {
425 /// Number of clones of `Participation`; to know when to tear down the participant
426 refcount: refcount::Count<PId>,
427
428 /// Memory usage of this participant
429 ///
430 /// Not 100% accurate, can lag, and be (boundedly) an overestimate
431 used: ParticipQty,
432
433 /// The hooks provided by the Participant
434 particip: drop_reentrancy::ProtectedWeak<dyn IsParticipant>,
435
436 /// Make this type uninhabited if memory tracking is compiled out
437 #[allow(dead_code)]
438 enabled: EnabledToken,
439}
440
441//#################### IMPLEMENTATION ####################
442
443/// Given a `&Weak<MemoryQuotaTracker>`, find an account and maybe participant
444///
445/// ### Usage templates
446///
447/// ```rust,ignore
448/// find_in_tracker! {
449/// enabled;
450/// weak_tracker => + tracker, state;
451/// aid => arecord;
452/// [ pid => precord; ]
453/// [ ?Error | ?None ]
454/// };
455///
456/// find_in_tracker! {
457/// enabled;
458/// strong_tracker => state;
459/// .. // as above
460/// };
461/// ```
462///
/// ### Input expressions (value arguments to the macro)
464///
465/// * `weak_tracker: &Weak<MemoryQuotaTracker>` (or equivalent)
466/// * `strong_tracker: &MemoryQuotaTracker` (or equivalent)
467/// * `enabled: EnabledToken` (or equivalent)
468/// * `aid: AId`
469/// * `pid: PId`
470///
471/// ### Generated bindings (identifier arguments to the macro)
472///
473/// * `tracker: Arc<MemoryQuotaTracker>`
474/// * `state: &mut State` (borrowed from a `MutexGuard<State>` borrowed from `tracker`)
475/// * `arecord: &mut ARecord` (mut borrowed from `state.accounts`)
476/// * `precord: &mut PRecord` (mut borrowed from `arecord.ps`)
477///
478/// There is no access to the `MutexGuard` itself.
479/// For control of the mutex release point, place `find_in_tracker!` in an enclosing block.
480///
481/// ### Error handling
482///
483/// If the tracker, account, or participant, can't be found,
484/// the macro returns early from the enclosing scope (using `?`).
485///
486/// If `Error` is specified, applies `?` to `Err(Error::...)`.
/// If `None` is specified, just returns `None` (by applying `?` to `None`).
488//
489// This has to be a macro because it makes a self-referential set of bindings.
490// Input syntax is a bit janky because macro_rules is so bad.
491// For an internal macro with ~9 call sites it's not worth making a big parsing contraption.
492macro_rules! find_in_tracker { {
493 // This `+` is needed because otherwise it's LL1-ambiguous and macro_rules can't cope
494 $enabled:expr;
495 $tracker_input:expr => $( + $tracker:ident, )? $state:ident;
496 $aid:expr => $arecord:ident;
497 $( $pid:expr => $precord:ident; )?
498 // Either `Error` or None, to be passed to `find_in_tracker_eh!($eh ...: ...)`
499 // (We need this to be an un-repeated un-optional binding, because
500 // it is used within some other $( ... )?, and macro_rules gets confused.)
501 ? $eh:tt
502} => {
503 let tracker = &$tracker_input;
504 $(
505 let $tracker: Arc<MemoryQuotaTracker> = find_in_tracker_eh!(
506 $eh Error::TrackerShutdown;
507 tracker.upgrade()
508 );
509 let tracker = &$tracker;
510 )?
511 let _: &EnabledToken = &$enabled;
512 let state = find_in_tracker_eh!(
513 $eh Error::Bug(internal!("noop MemoryQuotaTracker found via enabled datastructure"));
514 tracker.state.as_enabled()
515 );
516 let mut state: MutexGuard<State> = find_in_tracker_eh!(
517 $eh Error::TrackerCorrupted;
518 state.lock().ok()
519 );
520 let $state: &mut State = &mut *state;
521 let aid: AId = $aid;
522 let $arecord: &mut ARecord = find_in_tracker_eh!(
523 $eh Error::AccountClosed;
524 $state.accounts.get_mut(aid)
525 );
526 $(
527 let pid: PId = $pid;
528 let $precord: &mut PRecord = find_in_tracker_eh!(
529 $eh Error::ParticipantShutdown;
530 $arecord.ps.get_mut(pid)
531 );
532 )?
533} }
534/// Error handling helper for `find_in_tracker`
535macro_rules! find_in_tracker_eh {
536 { None $variant:expr; $result:expr } => { $result? };
537 { Error $variant:expr; $result:expr } => { $result.ok_or_else(|| $variant)? };
538}
539
540//========== impls on public types, including public methods and trait impls ==========
541
542//---------- MemoryQuotaTracker ----------
543
544impl MemoryQuotaTracker {
    /// Set up a new `MemoryQuotaTracker`
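    ///
    /// # Example
    ///
    /// Illustrative only (not compiled as a doctest); mirrors the crate-level example:
    ///
    /// ```rust,ignore
    /// let runtime = PreferredRuntime::create()?;
    /// let config = tor_memquota::Config::builder()
    ///     .max(1024 * 1024 * 1024)
    ///     .build()?;
    /// let tracker = MemoryQuotaTracker::new(&runtime, config)?;
    /// ```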
546 pub fn new<R: Spawn>(runtime: &R, config: Config) -> Result<Arc<Self>, StartupError> {
547 let Enabled(config, enabled) = config.0 else {
548 return Ok(MemoryQuotaTracker::new_noop());
549 };
550
551 let (reclaim_tx, reclaim_rx) =
552 mpsc_channel_no_memquota(0 /* plus num_senders, ie 1 */);
553 let total_used = TotalQtyNotifier::new_zero(reclaim_tx);
554 let ConfigInner { max, low_water } = config; // for logging
555
556 let global = Global {
557 total_used,
558 config,
559 enabled,
560 };
561 let accounts = SlotMap::default();
562 let state = Enabled(Mutex::new(State { global, accounts }), enabled);
563 let tracker = Arc::new(MemoryQuotaTracker { state });
564
565 // We don't provide a separate `launch_background_tasks`, because this task doesn't
566 // wake up periodically, or, indeed, do anything until the tracker is used.
567
568 let for_task = Arc::downgrade(&tracker);
569 runtime.spawn(reclaim::task(for_task, reclaim_rx, enabled))?;
570
571 info!(%max, %low_water, "memory quota tracking initialised");
572
573 Ok(tracker)
574 }
575
576 /// Reconfigure
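    ///
    /// # Example
    ///
    /// A sketch (not compiled as a doctest), assuming an existing `tracker`
    /// and a freshly built configuration:
    ///
    /// ```rust,ignore
    /// use tor_config::Reconfigure;
    ///
    /// let new_config = tor_memquota::Config::builder()
    ///     .max(512 * 1024 * 1024)
    ///     .build()?;
    /// tracker.reconfigure(new_config, Reconfigure::AllOrNothing)?;
    /// ```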
577 pub fn reconfigure(
578 &self,
579 new_config: Config,
580 how: tor_config::Reconfigure,
581 ) -> Result<(), ReconfigureError> {
582 use tor_config::Reconfigure;
583
584 let state = self.lock().map_err(into_internal!(
585 "cannot reconfigure corrupted memquota tracker"
586 ))?;
587
588 let (state, new_config) = match (state, new_config.0) {
589 (Noop, Noop) => return Ok(()),
590 (Noop, Enabled(..)) => return how.cannot_change(
591 // TODO #1577 (3) this isn't the `field` wanted by `cannot_change`
592 "tor-memquota max (`system.memory.max`) cannot be set: cannot enable memory quota tracking, when disabled at program start"
593 ),
594 (Enabled(state, _enabled), new_config) => {
595 let new_config = new_config.into_enabled().unwrap_or(
596 // If the new configuration is "Noop", set the limit values to MAX
597 // so we will never think we want to reclaim.
598 // We don't replace ourselves with a Noop or something,
599 // in case the user wants to re-enable tracking.
600 ConfigInner {
601 max: Qty::MAX,
602 low_water: Qty::MAX,
603 },
604 );
605
606 (state, new_config)
607 },
608 };
609
610 // Bind state mutably only if we're supposed to actually be modifying anything
611 let mut state = match how {
612 Reconfigure::CheckAllOrNothing => return Ok(()),
613 Reconfigure::AllOrNothing | Reconfigure::WarnOnFailures => state,
614 _ => Err(internal!("Reconfigure variant unknown! {how:?}"))?, // TODO #1577 (1)
615 };
616
617 let global = &mut state.global;
618 global.config = new_config;
619
620 // If the new limit is lower, we might need to start reclaiming:
621 global.total_used.maybe_wakeup(&global.config);
622
623 // If the new low_water is higher, we might need to *stop* reclaiming.
624 // We don't have a way to abort an ongoing reclaim request,
625 // but the usage vs low_water will be rechecked before we reclaim
626 // from another Participant, which will be sufficient.
627
628 Ok(())
629 }
630
631 /// Returns an estimate of the total memory use
632 ///
633 /// The returned value is:
634 ///
635 /// * [Approximate.](../index.html#is-approximate)
636 /// * A snapshot as of the current moment (and there is no way to await changes)
637 /// * Always `usize::MAX` for a no-op tracker
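    ///
    /// Example (illustrative, not compiled as a doctest):
    ///
    /// ```rust,ignore
    /// let used = tracker.used_current_approx()?;
    /// println!("memquota: approximately {used} bytes currently tracked");
    /// ```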
638 pub fn used_current_approx(&self) -> Result<usize, TrackerCorrupted> {
639 let Enabled(state, _enabled) = self.lock()? else {
640 return Ok(usize::MAX);
641 };
642 Ok(*state.total_used.as_raw())
643 }
644
645 /// Make a new `Account`
646 ///
647 /// To actually record memory usage, a Participant must be added.
648 ///
649 /// At most call sites, take an `Account` rather than a `MemoryQuotaTracker`,
650 /// and use [`Account::new_child()`].
651 /// That improves the ability to manage the hierarchy of Participants.
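    ///
    /// # Example
    ///
    /// A sketch (not compiled as a doctest), assuming `tracker: Arc<MemoryQuotaTracker>`:
    ///
    /// ```rust,ignore
    /// let toplevel = tracker.new_account(None)?;
    /// let child = tracker.new_account(Some(&toplevel))?; // same as toplevel.new_child()
    /// ```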
652 //
653 // Right now, parent can't be changed after construction of an Account,
654 // so circular accounts are impossible.
655 // But, we might choose to support that in the future.
656 // Circular parent relationships might need just a little care
657 // in the reclamation loop (to avoid infinitely looping),
658 // but aren't inherently unsupportable.
659 #[allow(clippy::redundant_closure_call)] // We have IEFEs for good reasons
660 pub fn new_account(self: &Arc<Self>, parent: Option<&Account>) -> crate::Result<Account> {
661 let Enabled(mut state, enabled) = self.lock()? else {
662 return Ok(Account(Noop));
663 };
664
665 let parent_aid_good = parent
666 .map(|parent| {
667 // Find and check the requested parent's Accountid
668
669 let Enabled(parent, _enabled) = &parent.0 else {
670 return Err(
671 internal!("used no-op Account as parent for enabled new_account").into(),
672 );
673 };
674
675 let parent_aid = *parent.aid;
676 let parent_arecord = state
677 .accounts
678 .get_mut(parent_aid)
679 .ok_or(Error::AccountClosed)?;
680
681 // Can we insert the new child without reallocating?
682 if !parent_arecord.children.spare_capacity_mut().is_empty() {
683 return Ok(parent_aid);
684 }
685
686 // No. Well, let's do some garbage collection.
687 // (Otherwise .children might grow without bound as accounts come and go)
688 //
689 // We would like to scan the accounts array while mutating this account.
690 // Instead, steal the children array temporarily and put the filtered one back.
691 // Must be infallible!
692 //
693 // The next line can't be in the closure (confuses borrowck)
694 let mut parent_children = mem::take(&mut parent_arecord.children);
695 (|| {
696 parent_children.retain(|child_aid| state.accounts.contains_key(*child_aid));
697
698 // Put the filtered list back, so sanity is restored.
699 state
700 .accounts
701 .get_mut(parent_aid)
702 .expect("parent vanished!")
703 .children = parent_children;
704 })();
705
706 Ok::<_, Error>(parent_aid)
707 })
708 .transpose()?;
709
710 // We have resolved the parent AId and prepared to add the new account to its list of
711 // children. We still hold the lock, so nothing can have changed.
712
        // commitment - the infallible IEFE ensures we don't do only half of it
714 Ok((|| {
715 let aid = refcount::slotmap_insert(&mut state.accounts, |refcount| ARecord {
716 refcount,
717 children: vec![],
718 ps: SlotMap::default(),
719 enabled,
720 });
721
722 if let Some(parent_aid_good) = parent_aid_good {
723 state
724 .accounts
725 .get_mut(parent_aid_good)
726 .expect("parent vanished!")
727 .children
728 .push(*aid);
729 }
730
731 let tracker = self.clone();
732 let inner = AccountInner { aid, tracker };
733 Account(Enabled(inner, enabled)) // don't make this fallible, see above.
734 })())
735 }
736
737 /// Obtain a new `MemoryQuotaTracker` that doesn't track anything and never reclaims
738 pub fn new_noop() -> Arc<MemoryQuotaTracker> {
739 Arc::new(MemoryQuotaTracker { state: Noop })
740 }
741
742 /// Obtain the lock on the state
743 fn lock(&self) -> Result<IfEnabled<MutexGuard<State>>, TrackerCorrupted> {
744 let Enabled(state, enabled) = &self.state else {
745 return Ok(Noop);
746 };
747 Ok(Enabled(state.lock()?, *enabled))
748 }
749}
750
751//---------- Account ----------
752
753impl Account {
754 /// Register a new Participant
755 ///
756 /// Returns the [`Participation`], which can be used to record memory allocations.
757 ///
758 /// Often, your implementation of [`IsParticipant`] wants to contain the [`Participation`].
759 /// If so, use [`register_participant_with`](Account::register_participant_with) instead.
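    ///
    /// # Example
    ///
    /// A sketch (not compiled as a doctest); `queue` is an `Arc` of some type implementing
    /// [`IsParticipant`], such as the `TrackingQueue` in the crate-level example:
    ///
    /// ```rust,ignore
    /// let mut partn = account.register_participant(
    ///     Arc::downgrade(&queue) as Weak<dyn IsParticipant>,
    /// )?;
    /// partn.claim(1024)?;
    /// ```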
760 pub fn register_participant(
761 &self,
762 particip: Weak<dyn IsParticipant>,
763 ) -> Result<Participation, Error> {
764 let Enabled(self_, enabled) = &self.0 else {
765 return Ok(Participation(Noop));
766 };
767
768 let aid = *self_.aid;
769 find_in_tracker! {
770 enabled;
771 self_.tracker => state;
772 aid => arecord;
773 ?Error
774 }
775
776 let (pid, cache) = refcount::slotmap_try_insert(&mut arecord.ps, |refcount| {
777 let mut precord = PRecord {
778 refcount,
779 used: ParticipQty::ZERO,
780 particip: drop_reentrancy::ProtectedWeak::new(particip),
781 enabled: *enabled,
782 };
783 let cache =
784 state
785 .global
786 .total_used
787 .claim(&mut precord, MAX_CACHE, &state.global.config)?;
788 Ok::<_, Error>((precord, cache))
789 })?;
790
791 let tracker = Arc::downgrade(&self_.tracker);
792 let inner = ParticipationInner {
793 tracker,
794 pid,
795 aid,
796 cache,
797 };
798 Ok(Participation(Enabled(inner, *enabled)))
799 }
800
801 /// Set the callbacks for a Participant (identified by its weak ids)
802 fn set_participant_callbacks(
803 &self,
804 aid: AId,
805 pid: PId,
806 particip: drop_reentrancy::ProtectedWeak<dyn IsParticipant>,
807 ) -> Result<(), Error> {
808 let Enabled(self_, enabled) = &self.0 else {
809 return Ok(());
810 };
811 find_in_tracker! {
812 enabled;
813 self_.tracker => state;
814 aid => arecord;
815 pid => precord;
816 ?Error
817 }
818 precord.particip = particip;
819 Ok(())
820 }
821
822 /// Register a new Participant using a constructor
823 ///
824 /// Passes `constructor` a [`Participation`] for the nascent Participant.
825 /// Returns the `P: IsParticipant` provided by the constructor.
826 ///
827 /// For use when your `impl `[`IsParticipant`] wants to own the `Participation`.
828 ///
829 /// # Re-entrancy guarantees
830 ///
831 /// The `Participation` *may* be used by `constructor` for claiming memory use,
832 /// even during construction.
833 /// `constructor` may also clone the `Participation`, etc.
834 ///
835 /// Reclamation callbacks (via the `P as IsParticipant` impl) cannot occur
836 /// until `constructor` returns.
837 ///
838 /// # Error handling
839 ///
840 /// Failures can occur before `constructor` is called,
841 /// or be detected afterwards.
842 /// If a failure is detected after `constructor` returns,
843 /// the `Arc<P>` from `constructor` will be dropped
844 /// (resulting in `P` being dropped, unless `constructor` kept another clone of it).
845 ///
846 /// `constructor` may also fail (throwing a different error type, `E`),
847 /// in which case `register_participant_with` returns `Ok(Err(E))`.
848 ///
849 /// On successful setup of the Participant, returns `Ok(Ok(Arc<P>))`.
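    ///
    /// # Example
    ///
    /// A sketch (not compiled as a doctest) of the nested `Result`.
    /// `TrackingQueue::new` is a hypothetical constructor that stores the `Participation`,
    /// standing in for the full construction shown in the crate-level example;
    /// its error type is [`Void`](void::Void) because it cannot fail:
    ///
    /// ```rust,ignore
    /// let (queue, ()) = account
    ///     .register_participant_with(runtime.now_coarse(), |partn| {
    ///         Ok::<_, Void>((Arc::new(TrackingQueue::new(partn)), ()))
    ///     })? // outer `?`: error from the tracker itself
    ///     .void_unwrap(); // constructor's error type is Void, so this cannot fail
    /// ```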
850 pub fn register_participant_with<P: IsParticipant, X, E>(
851 &self,
852 now: CoarseInstant,
853 constructor: impl FnOnce(Participation) -> Result<(Arc<P>, X), E>,
854 ) -> Result<Result<(Arc<P>, X), E>, Error> {
855 let Enabled(_self, _enabled) = &self.0 else {
856 return Ok(constructor(Participation(Noop)));
857 };
858
859 use std::sync::atomic::{AtomicBool, Ordering};
860
861 /// Temporary participant, which stands in during construction
862 #[derive(Debug)]
863 struct TemporaryParticipant {
864 /// The age, which is right now. We hope this is all fast!
865 now: CoarseInstant,
866 /// Did someone call reclaim() ?
867 collapsing: AtomicBool,
868 }
869
870 impl IsParticipant for TemporaryParticipant {
871 fn get_oldest(&self, _: EnabledToken) -> Option<CoarseInstant> {
872 Some(self.now)
873 }
874 fn reclaim(self: Arc<Self>, _: EnabledToken) -> ReclaimFuture {
875 self.collapsing.store(true, Ordering::Release);
876 Box::pin(async { Reclaimed::Collapsing })
877 }
878 }
879
880 let temp_particip = Arc::new(TemporaryParticipant {
881 now,
882 collapsing: false.into(),
883 });
884
885 let partn = self.register_participant(Arc::downgrade(&temp_particip) as _)?;
886 let partn_ = partn
887 .0
888 .as_enabled()
889 .ok_or_else(|| internal!("Enabled Account gave Noop Participant"))?;
890 let aid = partn_.aid;
891 let pid_weak = *partn_.pid;
892
893 // We don't hold the state lock here. register_participant took it and released it.
894 // This is important, because the constructor might call claim!
895 // (And, also, we don't want the constructor panicking to poison the whole tracker.)
896 // But it means there can be quite a lot of concurrent excitement,
897 // including, theoretically, a possible reclaim.
898 let (particip, xdata) = match constructor(partn) {
899 Ok(y) => y,
900 Err(e) => return Ok(Err(e)),
901 };
902 let particip = drop_reentrancy::ProtectedArc::new(particip);
903
        // IEFE prevents us from accidentally dropping `particip` until we mean to
905 let r = (|| {
906 let weak = {
907 let weak = particip.downgrade();
908
909 // Trait cast, from Weak<P> to Weak<dyn IsParticipant>.
910 // We can only do this for a primitive, so we must unprotect
                // the Weak, convert it, and protect it again.
912 drop_reentrancy::ProtectedWeak::new(weak.unprotect() as _)
913 };
914 self.set_participant_callbacks(aid, pid_weak, weak)?;
915
916 if temp_particip.collapsing.load(Ordering::Acquire) {
917 return Err(Error::ParticipantShutdown);
918 }
919 Ok(())
920 })();
921
922 let particip = particip.promise_dropping_is_ok();
923 r?;
924 Ok(Ok((particip, xdata)))
925 }
926
927 /// Obtain a new `Account` which is a child of this one
928 ///
929 /// Equivalent to
930 /// [`MemoryQuotaTracker.new_account`](MemoryQuotaTracker::new_account)`(Some(..))`
931 pub fn new_child(&self) -> crate::Result<Self> {
932 let Enabled(self_, _enabled) = &self.0 else {
933 return Ok(Account::new_noop());
934 };
935 self_.tracker.new_account(Some(self))
936 }
937
938 /// Obtains a handle for the `MemoryQuotaTracker`
939 pub fn tracker(&self) -> Arc<MemoryQuotaTracker> {
940 let Enabled(self_, _enabled) = &self.0 else {
941 return MemoryQuotaTracker::new_noop();
942 };
943 self_.tracker.clone()
944 }
945
946 /// Downgrade to a weak handle for the same Account
947 pub fn downgrade(&self) -> WeakAccount {
948 let Enabled(self_, enabled) = &self.0 else {
949 return WeakAccount(Noop);
950 };
951 let inner = WeakAccountInner {
952 aid: *self_.aid,
953 tracker: Arc::downgrade(&self_.tracker),
954 };
955 WeakAccount(Enabled(inner, *enabled))
956 }
957
958 /// Obtain a new `Account` that does nothing and has no associated tracker
959 ///
960 /// All methods on this succeed, but they don't do anything.
961 pub fn new_noop() -> Self {
962 Account(IfEnabled::Noop)
963 }
964}
965
966impl Clone for Account {
967 fn clone(&self) -> Account {
968 let Enabled(self_, enabled) = &self.0 else {
969 return Account(Noop);
970 };
971 let tracker = self_.tracker.clone();
972 let aid = (|| {
973 let aid = *self_.aid;
974 find_in_tracker! {
975 enabled;
976 tracker => state;
977 aid => arecord;
978 ?None
979 }
980 let aid = refcount::Ref::new(aid, &mut arecord.refcount).ok()?;
981 // commitment point
982 Some(aid)
983 })()
984 .unwrap_or_else(|| {
985 // Either the account has been closed, or our refcount overflowed.
986 // Return a busted `Account`, which always fails when we try to use it.
987 //
988 // If the problem was a refcount overflow, we're technically violating the
989 // documented behaviour, since the returned `Account` isn't equivalent
990 // to the original. We could instead choose to tear down the Account;
991 // that would be legal; but it's a lot of code to marginally change the
992 // behaviour for a very unlikely situation.
993 refcount::Ref::null()
994 });
995 let inner = AccountInner { aid, tracker };
996 Account(Enabled(inner, *enabled))
997 }
998}
999
1000impl Drop for Account {
1001 fn drop(&mut self) {
1002 let Enabled(self_, enabled) = &mut self.0 else {
1003 return;
1004 };
1005 (|| {
1006 find_in_tracker! {
1007 enabled;
1008 self_.tracker => state;
1009 *self_.aid => arecord;
1010 ?None
1011 }
1012 if let Some(refcount::Garbage(mut removed)) =
1013 slotmap_dec_ref!(&mut state.accounts, self_.aid.take(), &mut arecord.refcount)
1014 {
1015 // This account is gone. Automatically release everything.
1016 removed.auto_release(state);
1017 }
1018 Some(())
1019 })()
1020 .unwrap_or_else(|| {
1021 // Account has been torn down. Dispose of the strong ref.
1022 // (This has no effect except in cfg(test), when it defuses the drop bombs)
1023 self_.aid.take().dispose_container_destroyed();
1024 });
1025 }
1026}
1027
1028//---------- WeakAccount ----------
1029
1030impl WeakAccount {
1031 /// Upgrade to an `Account`, if the account still exists
1032 ///
1033 /// No-op `WeakAccounts` can always be upgraded.
1034 pub fn upgrade(&self) -> crate::Result<Account> {
1035 let Enabled(self_, enabled) = &self.0 else {
1036 return Ok(Account(Noop));
1037 };
1038 let aid = self_.aid;
1039 // (we must use a block, and can't use find_in_tracker's upgrade, because borrowck)
1040 let tracker = self_.tracker.upgrade().ok_or(Error::TrackerShutdown)?;
1041 let aid = {
1042 find_in_tracker! {
1043 enabled;
1044 tracker => state;
1045 aid => arecord;
1046 ?Error
1047 }
1048 refcount::Ref::new(aid, &mut arecord.refcount)?
1049 // commitment point
1050 };
1051 let inner = AccountInner { aid, tracker };
1052 Ok(Account(Enabled(inner, *enabled)))
1053 }
1054
1055 /// Obtains a handle onto the `MemoryQuotaTracker`
1056 ///
1057 /// The returned handle is itself weak, and needs to be upgraded before use.
1058 ///
1059 /// If the `Account` was made a no-op `MemoryQuotaTracker`
1060 /// (ie, one from [`MemoryQuotaTracker::new_noop`])
1061 /// the returned value is always `Weak`.
1062 pub fn tracker(&self) -> Weak<MemoryQuotaTracker> {
1063 let Enabled(self_, _enabled) = &self.0 else {
1064 return Weak::default();
1065 };
1066 self_.tracker.clone()
1067 }
1068
1069 /// Creates a new dangling, dummy, `WeakAccount`
1070 ///
1071 /// This can be used as a standin where a value of type `WeakAccount` is needed.
1072 /// The returned value cannot be upgraded to an `Account`,
1073 /// so cannot be used to claim memory or find a `MemoryQuotaTracker`.
1074 ///
1075 /// (If memory quota tracking is disabled at compile time,
1076 /// the returned value *can* be upgraded, to a no-op `Account`.)
1077 pub fn new_dangling() -> Self {
1078 let Some(enabled) = EnabledToken::new_if_compiled_in() else {
1079 return WeakAccount(Noop);
1080 };
1081
1082 let inner = WeakAccountInner {
1083 aid: AId::default(),
1084 tracker: Weak::default(),
1085 };
1086 WeakAccount(Enabled(inner, enabled))
1087 }
1088}
1089
1090//---------- Participation ----------
1091
1092impl Participation {
1093 /// Record that some memory has been (or will be) allocated
1094 pub fn claim(&mut self, want: usize) -> crate::Result<()> {
1095 self.claim_qty(Qty(want))
1096 }
1097
1098 /// Record that some memory has been (or will be) allocated (using `Qty`)
1099 pub(crate) fn claim_qty(&mut self, want: Qty) -> crate::Result<()> {
1100 self.claim_qty_inner(want)
1101 .inspect_err(|e| trace_report!(e, "claim {}", want))
1102 }
1103
1104 /// Record that some memory has been (or will be) allocated - core implementation
1105 ///
    /// Caller must handle trace logging.
1107 fn claim_qty_inner(&mut self, want: Qty) -> crate::Result<()> {
1108 let Enabled(self_, enabled) = &mut self.0 else {
1109 return Ok(());
1110 };
1111
1112 // In debug builds, check that the Account is still live, to detect lifetime trouble
1113 // (we repeat this later, which is OK in a debug build)
1114 #[cfg(debug_assertions)]
1115 {
1116 find_in_tracker! {
1117 enabled;
1118 self_.tracker => + tracker, state;
1119 self_.aid => _arecord;
1120 *self_.pid => _precord;
1121 ?Error
1122 };
1123 }
1124
1125 if let Some(got) = self_.cache.split_off(want) {
1126 return got.claim_return_to_participant();
1127 }
1128
1129 find_in_tracker! {
1130 enabled;
1131 self_.tracker => + tracker, state;
1132 self_.aid => arecord;
1133 *self_.pid => precord;
1134 ?Error
1135 };
1136
1137 let mut claim = |want| -> Result<ClaimedQty, _> {
1138 state
1139 .global
1140 .total_used
1141 .claim(precord, want, &state.global.config)
1142 };
1143 let got = claim(want)?;
1144
1145 if want <= TARGET_CACHE_CLAIMING {
1146 // While we're here, fill the cache to TARGET_CACHE_CLAIMING.
            // Cannot underflow: cache < want (since we failed at `got` earlier)
            // and we've just checked want <= TARGET_CACHE_CLAIMING.
1149 let want_more_cache = TARGET_CACHE_CLAIMING
1150 .checked_sub(*self_.cache.as_raw())
1151 .expect("but cache < want");
1152 let want_more_cache = Qty(want_more_cache);
1153 if let Ok(add_cache) = claim(want_more_cache) {
1154 // On error, just don't do this; presumably the error will show up later
1155 // (we mustn't early exit here, because we've got the claim in our hand).
1156 self_.cache.merge_into(add_cache);
1157 }
1158 }
1159 got.claim_return_to_participant()
1160 }
1161
1162 /// Record that some memory has been (or will be) freed by a participant
1163 pub fn release(&mut self, have: usize) // infallible
1164 {
1165 self.release_qty(Qty(have));
1166 }
1167
1168 /// Record that some memory has been (or will be) freed by a participant (using `Qty`)
1169 pub(crate) fn release_qty(&mut self, have: Qty) // infallible
1170 {
1171 let Enabled(self_, enabled) = &mut self.0 else {
1172 return;
1173 };
1174
1175 let have = ClaimedQty::release_got_from_participant(have);
1176 self_.cache.merge_into(have);
1177 if self_.cache > MAX_CACHE {
1178 match (|| {
1179 find_in_tracker! {
1180 enabled;
1181 self_.tracker => + tracker, state;
1182 self_.aid => arecord;
1183 *self_.pid => precord;
1184 ?None
1185 }
1186 let return_from_cache = self_
1187 .cache
1188 .as_raw()
1189 .checked_sub(*TARGET_CACHE_RELEASING)
1190 .expect("TARGET_CACHE_RELEASING > MAX_CACHE ?!");
1191 let return_from_cache = Qty(return_from_cache);
1192 let from_cache = self_
1193 .cache
1194 .split_off(return_from_cache)
1195 .expect("impossible");
1196 state.global.total_used.release(precord, from_cache);
1197 Some(())
1198 })() {
1199 Some(()) => {} // we've given our cache back to the tracker
1200 None => {
1201 // account (or whole tracker!) is gone
1202 // throw away the cache so that we don't take this path again for a bit
1203 self_.cache.take().dispose_participant_destroyed();
1204 }
1205 }
1206 }
1207 }
1208
1209 /// Obtain a handle onto the account
1210 ///
1211 /// The returned handle is weak, and needs to be upgraded before use,
1212 /// since a [`Participation`] doesn't keep its Account alive.
1213 ///
1214 /// The returned `WeakAccount` is equivalent to
1215 /// all the other account handles for the same account.
1216 pub fn account(&self) -> WeakAccount {
1217 let Enabled(self_, enabled) = &self.0 else {
1218 return WeakAccount(Noop);
1219 };
1220
1221 let inner = WeakAccountInner {
1222 aid: self_.aid,
1223 tracker: self_.tracker.clone(),
1224 };
1225 WeakAccount(Enabled(inner, *enabled))
1226 }
1227
1228 /// Destroy this participant
1229 ///
1230 /// Treat as freed all the memory allocated via this `Participation` and its clones.
1231 /// After this, other clones of this `Participation` are no longer usable:
    /// attempts to use them will give errors
    /// (although they can still be used to get at the `Account`, if it still exists).
1234 ///
1235 /// The actual memory should be freed promptly.
1236 ///
1237 /// (It is not necessary to call this function in order to get the memory tracker
1238 /// to free its handle onto the `IsParticipant`,
1239 /// because the memory quota system holds only a [`Weak`] reference.)
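    ///
    /// Example (illustrative, not compiled as a doctest):
    ///
    /// ```rust,ignore
    /// // Treat everything claimed via `partn` (and its clones) as released:
    /// partn.destroy_participant();
    /// ```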
1240 pub fn destroy_participant(mut self) {
1241 let Enabled(self_, enabled) = &mut self.0 else {
1242 return;
1243 };
1244 (|| {
1245 find_in_tracker! {
1246 enabled;
1247 self_.tracker => + tracker, state;
1248 self_.aid => arecord;
1249 ?None
1250 };
1251 if let Some(mut removed) =
1252 refcount::slotmap_remove_early(&mut arecord.ps, self_.pid.take())
1253 {
1254 removed.auto_release(&mut state.global);
1255 }
1256 Some(())
1257 })();
1258 // self will be dropped now, but we have already cleared it out.
1259 }
1260
1261 /// Creates a new dangling, dummy, `Participation`
1262 ///
1263 /// This can be used as a standin where a value of type `Participation` is needed.
1264 /// The returned value cannot be used to claim memory,
1265 /// or find an `Account` or `MemoryQuotaTracker`.
1266 pub fn new_dangling() -> Self {
1267 let Some(enabled) = EnabledToken::new_if_compiled_in() else {
1268 return Participation(Noop);
1269 };
1270
1271 let inner = ParticipationInner {
1272 pid: refcount::Ref::default(),
1273 aid: AId::default(),
1274 tracker: Weak::default(),
1275 cache: ClaimedQty::ZERO,
1276 };
1277 Participation(Enabled(inner, enabled))
1278 }
1279}
1280
1281impl Clone for Participation {
1282 fn clone(&self) -> Participation {
1283 let Enabled(self_, enabled) = &self.0 else {
1284 return Participation(Noop);
1285 };
1286 let aid = self_.aid;
1287 let cache = ClaimedQty::ZERO;
1288 let tracker: Weak<_> = self_.tracker.clone();
1289 let pid = (|| {
1290 let pid = *self_.pid;
1291 find_in_tracker! {
1292 enabled;
1293 self_.tracker => + tracker_strong, state;
1294 aid => _arecord;
1295 pid => precord;
1296 ?None
1297 }
1298 let pid = refcount::Ref::new(pid, &mut precord.refcount).ok()?;
1299 // commitment point
1300 Some(pid)
1301 })()
1302 .unwrap_or_else(|| {
1303 // The account has been closed, the participant torn down, or the refcount
            // overflowed. We return a busted `Participation`.
1305 //
1306 // We *haven't* incremented the refcount, so we mustn't return pid as a strong
            // reference. We aren't supposed to count towards PRecord.refcount, so we *can*
1308 // return the weak reference aid. (`refcount` type-fu assures this is correct.)
1309 //
1310 // If the problem was refcount overflow, we're technically violating the
1311 // documented behaviour. This is OK; see comment in `<Account as Clone>::clone`.
1312 refcount::Ref::null()
1313 });
1314 let inner = ParticipationInner {
1315 aid,
1316 pid,
1317 cache,
1318 tracker,
1319 };
1320 Participation(Enabled(inner, *enabled))
1321 }
1322}
1323
1324impl Drop for Participation {
1325 fn drop(&mut self) {
1326 let Enabled(self_, enabled) = &mut self.0 else {
1327 return;
1328 };
1329 (|| {
1330 find_in_tracker! {
1331 enabled;
1332 self_.tracker => + tracker_strong, state;
1333 self_.aid => arecord;
1334 *self_.pid => precord;
1335 ?None
1336 }
1337 // release the cached claim
1338 let from_cache = self_.cache.take();
1339 state.global.total_used.release(precord, from_cache);
1340
1341 if let Some(refcount::Garbage(mut removed)) =
1342 slotmap_dec_ref!(&mut arecord.ps, self_.pid.take(), &mut precord.refcount)
1343 {
1344 // We might not have called `release` on everything, so we do that here.
1345 removed.auto_release(&mut state.global);
1346 }
1347 Some(())
1348 })()
1349 .unwrap_or_else(|| {
1350 // Account or Participation or tracker destroyed.
1351 // (This has no effect except in cfg(test), when it defuses the drop bombs)
1352 self_.pid.take().dispose_container_destroyed();
1353 self_.cache.take().dispose_participant_destroyed();
1354 });
1355 }
1356}
1357
1358//========== impls on internal types ==========
1359
1360impl State {
1361 /// Obtain all of the descendants of `parent_aid` according to the Child relation
1362 ///
1363 /// The returned `HashSet` includes `parent_aid`, its children,
1364 /// their children, and so on.
1365 ///
1366 /// Used in the reclamation algorithm in [`reclaim`].
1367 fn get_aid_and_children_recursively(&self, parent_aid: AId) -> HashSet<AId> {
1368 let mut out = HashSet::<AId>::new();
1369 let mut queue: Vec<AId> = vec![parent_aid];
1370 while let Some(aid) = queue.pop() {
1371 let Some(arecord) = self.accounts.get(aid) else {
1372 // shouldn't happen but no need to panic
1373 continue;
1374 };
1375 if out.insert(aid) {
1376 queue.extend(arecord.children.iter().cloned());
1377 }
1378 }
1379 out
1380 }
1381}
1382
1383impl ARecord {
1384 /// Release all memory that this account's participants claimed
1385 fn auto_release(&mut self, global: &mut Global) {
1386 for (_pid, mut precord) in self.ps.drain() {
1387 precord.auto_release(global);
1388 }
1389 }
1390}
1391
1392impl PRecord {
1393 /// Release all memory that this participant claimed
1394 fn auto_release(&mut self, global: &mut Global) {
1395 let for_teardown = self.used.for_participant_teardown();
1396 global.total_used.release(self, for_teardown);
1397 }
1398}