Project import generated by Copybara.
GitOrigin-RevId: f70580d4e9be3db690b9b6bdfb8e253a496b0ecd
Change-Id: I88739121e946b84d3268a4fb62731070c76b309d
diff --git a/nearby/presence/handle_map/Cargo.toml b/nearby/presence/handle_map/Cargo.toml
new file mode 100644
index 0000000..8f00bf7
--- /dev/null
+++ b/nearby/presence/handle_map/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "handle_map"
+version.workspace = true
+edition.workspace = true
+publish.workspace = true
+
+[dependencies]
+hashbrown.workspace = true
+lock_api.workspace = true
+portable-atomic.workspace = true
+spin.workspace = true
+crypto_provider.workspace = true
+
+[dev-dependencies]
+criterion.workspace = true
+
+[[bench]]
+name = "benches"
+harness = false
diff --git a/nearby/presence/handle_map/benches/benches.rs b/nearby/presence/handle_map/benches/benches.rs
new file mode 100644
index 0000000..f1ee427
--- /dev/null
+++ b/nearby/presence/handle_map/benches/benches.rs
@@ -0,0 +1,330 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use handle_map::*;
+use std::sync::Arc;
+use std::thread;
+use std::time::{Duration, Instant};
+
+// For the sake of these benchmarks, we just want
+// to be able to allocate an almost-arbitrary number of handles
+// if needed by the particular benchmark.
+const MAX_ACTIVE_HANDLES: u32 = u32::MAX - 1;
+
+// The default number of threads+shards to use
+// in multi-threaded benchmarks, chosen
+// to represent a typical multi-threaded use-case.
+const DEFAULT_SHARDS_AND_THREADS: u8 = 8;
+
+/// We use `u8` as the wrapped object type to deliberately ignore
+/// costs related to accessing the underlying data - we just care about
+/// benchmarking handle-map operations.
+type BenchHandleMap = HandleMap<u8>;
+
+fn build_handle_map(num_shards: u8) -> BenchHandleMap {
+ let dimensions = HandleMapDimensions { num_shards, max_active_handles: MAX_ACTIVE_HANDLES };
+ HandleMap::with_dimensions(dimensions)
+}
+
+/// Benchmark for repeated reads on a single thread
+fn single_threaded_read_benchmark(c: &mut Criterion) {
+ // The number of shards should not matter much here.
+ let handle_map = build_handle_map(8);
+ // Pre-populate the map with a single handle.
+ let handle = handle_map.allocate(|| 0xFF).unwrap();
+ let handle_map_ref = &handle_map;
+ // Perform repeated reads
+ c.bench_function("single-threaded reads", |b| {
+ b.iter(|| {
+ let guard = handle_map_ref.get(black_box(handle)).unwrap();
+ black_box(*guard)
+ })
+ });
+}
+
+/// Benchmark for repeated writes on a single thread
+fn single_threaded_write_benchmark(c: &mut Criterion) {
+ // The number of shards should not matter much here.
+ let handle_map = build_handle_map(8);
+ // Pre-populate the map with a single handle.
+ let handle = handle_map.allocate(|| 0xFF).unwrap();
+ let handle_map_ref = &handle_map;
+ // Perform repeated writes
+ c.bench_function("single-threaded writes", |b| {
+ b.iter(|| {
+ let mut guard = handle_map_ref.get_mut(black_box(handle)).unwrap();
+ *guard *= black_box(0);
+ })
+ });
+}
+
+/// Benchmark for repeated allocations on a single thread
+#[allow(unused_must_use)]
+fn single_threaded_allocate_benchmark(c: &mut Criterion) {
+ // The number of shards here is chosen to be quasi-reasonable.
+ // Realistically, performance will fluctuate somewhat with this
+ // due to the difference in the number of internal hash-map collisions,
+ // but ultimately we only care about the asymptotic behavior.
+ let handle_map = build_handle_map(8);
+ let handle_map_ref = &handle_map;
+ // Perform repeated allocations
+ c.bench_function("single-threaded allocations", |b| {
+ b.iter(|| {
+ handle_map_ref.allocate(|| black_box(0xEE));
+ })
+ });
+}
+
+/// Benchmark for repeated allocation->deallocation on a single thread
+/// starting from an empty map.
+#[allow(unused_must_use)]
+fn single_threaded_allocate_deallocate_near_empty_benchmark(c: &mut Criterion) {
+ // The number of shards should not matter much here.
+ let handle_map = build_handle_map(8);
+ let handle_map_ref = &handle_map;
+ // Perform repeated allocation/deallocation pairs
+ c.bench_function("single-threaded allocate/deallocate pairs (empty init state)", |b| {
+ b.iter(|| {
+ let handle = handle_map_ref.allocate(|| black_box(0xDD)).unwrap();
+ handle_map_ref.deallocate(handle);
+ })
+ });
+}
+
+/// Benchmark for repeated allocation->deallocation starting
+/// from a map with a thousand elements per shard.
+#[allow(unused_must_use)]
+fn single_threaded_allocate_deallocate_one_thousand_benchmark(c: &mut Criterion) {
+ let handle_map = build_handle_map(8);
+ for _ in 0..(8 * 1000) {
+ handle_map.allocate(|| black_box(0xFF)).unwrap();
+ }
+ let handle_map_ref = &handle_map;
+
+ // Perform repeated allocation/deallocation pairs
+ c.bench_function(
+ "single-threaded allocate/deallocate pairs (init one thousand elements per shard)",
+ |b| {
+ b.iter(|| {
+ let handle = handle_map_ref.allocate(|| black_box(0xDD)).unwrap();
+ handle_map_ref.deallocate(handle);
+ })
+ },
+ );
+}
+
+/// Benchmark for repeated allocation->deallocation starting
+/// from a map with a million elements per shard.
+#[allow(unused_must_use)]
+fn single_threaded_allocate_deallocate_one_million_benchmark(c: &mut Criterion) {
+ let handle_map = build_handle_map(8);
+ for _ in 0..(8 * 1000000) {
+ handle_map.allocate(|| black_box(0xFF)).unwrap();
+ }
+ let handle_map_ref = &handle_map;
+
+ // Perform repeated allocation/deallocation pairs
+ c.bench_function(
+ "single-threaded allocate/deallocate pairs (init one million elements per shard)",
+ |b| {
+ b.iter(|| {
+ let handle = handle_map_ref.allocate(|| black_box(0xDD)).unwrap();
+ handle_map_ref.deallocate(handle);
+ })
+ },
+ );
+}
+
+/// Helper function for creating a multi-threaded benchmark
+/// with the given number of threads. Untimed initialization of state
+/// should occur prior to this call. This function repeatedly
+/// executes the benchmark code on all threads.
+fn bench_multithreaded<F>(name: &str, num_threads: u8, benchable_fn_ref: Arc<F>, c: &mut Criterion)
+where
+ F: Fn() + Send + Sync + 'static,
+{
+ c.bench_function(name, move |b| {
+ b.iter_custom(|iters| {
+ // The returned results from the threads will be their timings
+ let mut join_handles = Vec::new();
+ for _ in 0..num_threads {
+ let benchable_fn_ref_clone = benchable_fn_ref.clone();
+ let join_handle = thread::spawn(move || {
+ let start = Instant::now();
+ for _ in 0..iters {
+ benchable_fn_ref_clone();
+ }
+ start.elapsed()
+ });
+ join_handles.push(join_handle);
+ }
+ // Threads spawned; now collect the results.
+ let mut total_duration = Duration::from_nanos(0);
+ for join_handle in join_handles {
+ let thread_duration = join_handle.join().unwrap();
+ total_duration += thread_duration;
+ }
+ total_duration /= num_threads as u32;
+ total_duration
+ })
+ });
+}
+
+/// Defines a number of reads/writes to perform (relative
+/// to other operations performed in a given test).
+#[derive(Debug, Clone, Copy)]
+struct ReadWriteCount {
+ num_reads: usize,
+ num_writes: usize,
+}
+
+const READS_ONLY: ReadWriteCount = ReadWriteCount { num_reads: 4, num_writes: 0 };
+
+const READ_LEANING: ReadWriteCount = ReadWriteCount { num_reads: 3, num_writes: 1 };
+
+const BALANCED: ReadWriteCount = ReadWriteCount { num_reads: 2, num_writes: 2 };
+
+const WRITE_LEANING: ReadWriteCount = ReadWriteCount { num_reads: 1, num_writes: 3 };
+
+const WRITES_ONLY: ReadWriteCount = ReadWriteCount { num_reads: 0, num_writes: 4 };
+
+const READ_WRITE_COUNTS: [ReadWriteCount; 5] =
+ [READS_ONLY, READ_LEANING, BALANCED, WRITE_LEANING, WRITES_ONLY];
+
+/// Benchmarks a repeated allocate/[X writes]/[Y reads]/deallocate workflow across
+/// the default number of threads and shards.
+fn lifecycle_read_and_write_multithreaded(per_object_rw_count: ReadWriteCount, c: &mut Criterion) {
+ let name = format!(
+ "lifecycle_read_and_write_multithreaded(reads/object: {}, writes/object:{})",
+ per_object_rw_count.num_reads, per_object_rw_count.num_writes
+ );
+ // We need an Arc to be able to pass this off to `bench_multithreaded`.
+ let handle_map = Arc::new(build_handle_map(DEFAULT_SHARDS_AND_THREADS));
+ bench_multithreaded(
+ &name,
+ DEFAULT_SHARDS_AND_THREADS,
+ Arc::new(move || {
+ let handle = handle_map.allocate(|| black_box(0xFF)).unwrap();
+ for _ in 0..per_object_rw_count.num_writes {
+ let mut write_guard = handle_map.get_mut(handle).unwrap();
+ *write_guard *= black_box(1);
+ }
+ for _ in 0..per_object_rw_count.num_reads {
+ let read_guard = handle_map.get(handle).unwrap();
+ black_box(*read_guard);
+ }
+ handle_map.deallocate(handle).unwrap();
+ }),
+ c,
+ );
+}
+
+fn lifecycle_benchmarks(c: &mut Criterion) {
+ for read_write_count in READ_WRITE_COUNTS {
+ // The called benchmark uses DEFAULT_SHARDS_AND_THREADS shards to roughly match the number of active threads.
+ lifecycle_read_and_write_multithreaded(read_write_count, c);
+ }
+}
+
+/// Benchmarks repeated cycles of accessing the given number of distinct objects, each
+/// with the given number of reads and writes across the given number of threads.
+fn read_and_write_contention_multithreaded(
+ num_threads: u8,
+ num_distinct_objects: u32,
+ per_object_rw_count: ReadWriteCount,
+ c: &mut Criterion,
+) {
+ let name = format!("read_and_write_contention_multithreaded(threads: {}, objects: {}, reads/object: {}, writes/object:{})",
+ num_threads, num_distinct_objects, per_object_rw_count.num_reads, per_object_rw_count.num_writes);
+ // Default to 8 shards; the exact count ultimately doesn't matter much since we only ever access a few shards.
+ // The map is wrapped in an `Arc` so the closure handed to `bench_multithreaded` can be `'static`.
+ let handle_map = Arc::new(build_handle_map(8));
+ let mut handles = Vec::new();
+ for i in 0..num_distinct_objects {
+ let handle = handle_map.allocate(|| i as u8).unwrap();
+ handles.push(handle);
+ }
+
+ bench_multithreaded(
+ &name,
+ num_threads,
+ Arc::new(move || {
+ // Deliberately performing all writes first, with
+ // the goal of getting staggered timings from
+ // all of the threads for subsequent iterations.
+ // We also perform reads and writes across all objects
+ // in batches to get something resembling uniform access.
+ for _ in 0..per_object_rw_count.num_writes {
+ for handle in handles.iter() {
+ let mut write_guard = handle_map.get_mut(*handle).unwrap();
+ *write_guard *= black_box(1);
+ }
+ }
+ for _ in 0..per_object_rw_count.num_reads {
+ for handle in handles.iter() {
+ let read_guard = handle_map.get(*handle).unwrap();
+ black_box(*read_guard);
+ }
+ }
+ }),
+ c,
+ );
+}
+
+fn read_write_contention_benchmarks(c: &mut Criterion) {
+ for num_threads in [2, 4, 8].iter() {
+ for num_distinct_objects in [1u32, 2u32, 4u32].iter() {
+ if num_distinct_objects >= &(*num_threads as u32) {
+ continue;
+ }
+ for read_write_count in READ_WRITE_COUNTS {
+ read_and_write_contention_multithreaded(
+ *num_threads,
+ *num_distinct_objects,
+ read_write_count,
+ c,
+ );
+ }
+ }
+ }
+}
+
+/// Benchmarks allocation (starting from empty) for the default number of shards and threads
+fn allocator_contention_benchmark(c: &mut Criterion) {
+ let name = "allocator_contention_multithreaded";
+ // We need an Arc to pass this off to `bench_multithreaded`
+ let handle_map = Arc::new(build_handle_map(DEFAULT_SHARDS_AND_THREADS));
+ bench_multithreaded(
+ name,
+ DEFAULT_SHARDS_AND_THREADS,
+ Arc::new(move || {
+ handle_map.allocate(|| black_box(0xFF)).unwrap();
+ }),
+ c,
+ );
+}
+
+criterion_group!(
+ benches,
+ single_threaded_read_benchmark,
+ single_threaded_write_benchmark,
+ single_threaded_allocate_benchmark,
+ single_threaded_allocate_deallocate_near_empty_benchmark,
+ single_threaded_allocate_deallocate_one_thousand_benchmark,
+ single_threaded_allocate_deallocate_one_million_benchmark,
+ lifecycle_benchmarks,
+ read_write_contention_benchmarks,
+ allocator_contention_benchmark,
+);
+criterion_main!(benches);
diff --git a/nearby/presence/handle_map/src/lib.rs b/nearby/presence/handle_map/src/lib.rs
new file mode 100644
index 0000000..745aec4
--- /dev/null
+++ b/nearby/presence/handle_map/src/lib.rs
@@ -0,0 +1,608 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//! A thread-safe implementation of a map for managing object handles,
+//! a safer alternative to raw pointers for FFI interop.
+#![cfg_attr(not(test), no_std)]
+#![forbid(unsafe_code)]
+#![deny(missing_docs)]
+extern crate alloc;
+extern crate core;
+
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::ops::{Deref, DerefMut};
+use core::sync::atomic::Ordering;
+use hashbrown::hash_map::EntryRef;
+use portable_atomic::{AtomicU32, AtomicU64};
+use spin::lock_api::*;
+
+#[cfg(test)]
+mod tests;
+
+/// A RAII read lock guard for an object in a [`HandleMap`]
+/// pointed-to by a given [`Handle`]. When this struct is
+/// dropped, the underlying read lock on the associated
+/// shard will be dropped.
+pub struct ObjectReadGuard<'a, T> {
+ guard: lock_api::MappedRwLockReadGuard<'a, spin::RwLock<()>, T>,
+}
+
+impl<'a, T> Deref for ObjectReadGuard<'a, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ self.guard.deref()
+ }
+}
+
+/// A RAII read-write lock guard for an object in a [`HandleMap`]
+/// pointed-to by a given [`Handle`]. When this struct is
+/// dropped, the underlying read-write lock on the associated
+/// shard will be dropped.
+pub struct ObjectReadWriteGuard<'a, T> {
+ guard: lock_api::MappedRwLockWriteGuard<'a, spin::RwLock<()>, T>,
+}
+
+impl<'a, T> Deref for ObjectReadWriteGuard<'a, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ self.guard.deref()
+ }
+}
+
+impl<'a, T> DerefMut for ObjectReadWriteGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.guard.deref_mut()
+ }
+}
+
+/// FFI-transmissible structure expressing the dimensions
+/// (max # of allocatable slots, number of shards) of a handle-map
+/// to be used upon initialization.
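+///
+/// # Example
+///
+/// A minimal sketch of filling in dimensions (the values here are
+/// purely illustrative, not recommendations):
+///
+/// ```
+/// use handle_map::HandleMapDimensions;
+///
+/// let dimensions = HandleMapDimensions { num_shards: 8, max_active_handles: 1024 };
+/// assert_eq!(8, dimensions.num_shards);
+/// ```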
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct HandleMapDimensions {
+ /// The number of shards which are employed
+ /// by the associated handle-map.
+ pub num_shards: u8,
+ /// The maximum number of active handles which may be
+ /// stored within the associated handle-map.
+ pub max_active_handles: u32,
+}
+
+/// Trait for marker structs containing information about a
+/// given [`SingletonHandleMap`], including the underlying
+/// object type and the dimensions of the map.
+/// Used primarily to enforce type-level constraints
+/// on `SingletonHandleMap`s, including
+/// ensuring that they are truly singletons.
+pub trait SingletonHandleMapInfo {
+ /// The underlying kind of object pointed-to by handles
+ /// derived from the associated handle-map.
+ type Object: Send + Sync;
+
+ /// Gets the dimensions of the corresponding handle-map.
+ fn dimensions(&self) -> HandleMapDimensions;
+
+ /// Gets a static reference to the global handle-map
+ fn get_handle_map() -> &'static HandleMap<Self::Object>;
+}
+
+/// An individual handle to be given out by a [`HandleMap`].
+/// This representation is untyped, and just a wrapper
+/// around a handle-id, in contrast to implementors of `HandleLike`.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Handle {
+ handle_id: u64,
+}
+
+impl From<&Handle> for Handle {
+ fn from(handle: &Handle) -> Self {
+ *handle
+ }
+}
+
+impl Handle {
+ /// Constructs a handle wrapping the given ID.
+ ///
+ /// No validity checks are done on the wrapped ID
+ /// to ensure that the given ID is active in
+ /// any specific handle-map, and the type
+ /// of the handle is not represented.
+ ///
+ /// As a result, this method is only useful for
+ /// allowing access to handles from an FFI layer.
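+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of round-tripping a raw ID (the value is arbitrary):
+ ///
+ /// ```
+ /// use handle_map::Handle;
+ ///
+ /// let handle = Handle::from_id(42);
+ /// assert_eq!(42, handle.get_id());
+ /// ```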
+ pub fn from_id(handle_id: u64) -> Self {
+ Self { handle_id }
+ }
+ /// Gets the ID for this handle.
+ ///
+ /// Since the underlying handle is untyped,
+ /// this method is only suitable for
+ /// transmitting handles across an FFI layer.
+ pub fn get_id(&self) -> u64 {
+ self.handle_id
+ }
+ /// Derives the shard index from the handle id
+ fn get_shard_index(&self, num_shards: u8) -> usize {
+ (self.handle_id % (num_shards as u64)) as usize
+ }
+}
+
+/// Error raised when attempting to allocate into a full handle-map.
+#[derive(Debug)]
+pub struct HandleMapFullError;
+
+/// Internal error enum for failed allocations into a given shard.
+enum ShardAllocationError<T, F: FnOnce() -> T> {
+ /// Error for when the entry for the handle is occupied,
+ /// in which case we spit out the object-provider to try again
+ /// with a new handle-id.
+ EntryOccupied(F),
+ /// Error for when we would exceed the maximum number of allocations.
+ ExceedsAllocationLimit,
+}
+
+/// Error raised when the entry for a given [`Handle`] doesn't exist.
+#[derive(Debug)]
+pub struct HandleNotPresentError;
+
+/// Wrapper around a `HandleMap` which is specifically tailored for maintaining
+/// a global singleton `'static` reference to a handle-map.
+/// The wrapper allows the singleton to be constructed in a `const` context
+/// and guards subsequent accesses so that the underlying map is always
+/// initialized before it is used.
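+///
+/// # Example
+///
+/// A sketch of a hand-rolled singleton; in practice the `declare_handle_map!`
+/// macro generates equivalent code, and the dimensions below are arbitrary:
+///
+/// ```
+/// use handle_map::{
+///     HandleMap, HandleMapDimensions, SingletonHandleMap, SingletonHandleMapInfo,
+/// };
+///
+/// struct StringMapInfo;
+///
+/// static STRING_MAP: SingletonHandleMap<StringMapInfo> =
+///     SingletonHandleMap::with_info(StringMapInfo);
+///
+/// impl SingletonHandleMapInfo for StringMapInfo {
+///     type Object = String;
+///     fn dimensions(&self) -> HandleMapDimensions {
+///         HandleMapDimensions { num_shards: 4, max_active_handles: 32 }
+///     }
+///     fn get_handle_map() -> &'static HandleMap<String> {
+///         STRING_MAP.get()
+///     }
+/// }
+///
+/// let handle = STRING_MAP.get().allocate(|| "hi".to_string()).unwrap();
+/// assert_eq!("hi", *STRING_MAP.get().get(handle).unwrap());
+/// ```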
+pub struct SingletonHandleMap<I: SingletonHandleMapInfo> {
+ info: I,
+ wrapped: spin::once::Once<HandleMap<I::Object>, spin::relax::Spin>,
+}
+
+impl<I: SingletonHandleMapInfo> SingletonHandleMap<I> {
+ /// Constructs a new global handle-map using the given
+ /// singleton handle-map info. This method is callable
+ /// from a `const`-context to allow it to be used to initialize
+ /// `static` variables.
+ pub const fn with_info(info: I) -> Self {
+ Self { info, wrapped: spin::once::Once::new() }
+ }
+ /// Initialize the handle-map if it's not already initialized,
+ /// and return a reference to the result.
+ pub fn get(&self) -> &HandleMap<I::Object> {
+ self.wrapped.call_once(|| {
+ let dimensions = self.info.dimensions();
+ HandleMap::with_dimensions(dimensions)
+ })
+ }
+}
+
+/// A thread-safe mapping from "handles" (like pointers, but safer)
+/// to underlying structures, supporting allocations, reads, writes,
+/// and deallocations of objects behind handles.
+pub struct HandleMap<T: Send + Sync> {
+ /// The dimensions of this handle-map
+ dimensions: HandleMapDimensions,
+
+ /// The individually-lockable "shards" of the handle-map,
+ /// among which the keys will be roughly uniformly-distributed.
+ handle_map_shards: Box<[HandleMapShard<T>]>,
+
+ /// An atomically-incrementing counter which tracks the
+ /// next handle ID which allocations will attempt to use.
+ new_handle_id_counter: AtomicU64,
+
+ /// An atomic integer roughly tracking the number of
+ /// currently-outstanding allocated entries in this
+ /// handle-map among all [`HandleMapShard`]s.
+ outstanding_allocations_counter: AtomicU32,
+}
+
+impl<T: Send + Sync> HandleMap<T> {
+ /// Creates a new handle-map with the given `HandleMapDimensions`.
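+ ///
+ /// # Example
+ ///
+ /// A minimal construction sketch (the shard count and handle limit are arbitrary):
+ ///
+ /// ```
+ /// use handle_map::{HandleMap, HandleMapDimensions};
+ ///
+ /// let dimensions = HandleMapDimensions { num_shards: 4, max_active_handles: 100 };
+ /// let map: HandleMap<u8> = HandleMap::with_dimensions(dimensions);
+ /// assert!(map.allocate(|| 0xAB).is_ok());
+ /// ```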
+ pub fn with_dimensions(dimensions: HandleMapDimensions) -> Self {
+ let mut handle_map_shards = Vec::with_capacity(dimensions.num_shards as usize);
+ for _ in 0..dimensions.num_shards {
+ handle_map_shards.push(HandleMapShard::default());
+ }
+ let handle_map_shards = handle_map_shards.into_boxed_slice();
+ Self {
+ dimensions,
+ handle_map_shards,
+ new_handle_id_counter: AtomicU64::new(0),
+ outstanding_allocations_counter: AtomicU32::new(0),
+ }
+ }
+}
+
+impl<T: Send + Sync> HandleMap<T> {
+ /// Allocates a new object within the given handle-map, returning
+ /// a handle to the location it was stored at. This operation
+ /// may fail if attempting to allocate over the `dimensions.max_active_handles`
+ /// limit imposed on the handle-map, in which case this method
+ /// will return a `HandleMapFullError`.
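+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of hitting the allocation limit (the dimensions here are
+ /// deliberately tiny and purely illustrative):
+ ///
+ /// ```
+ /// use handle_map::{HandleMap, HandleMapDimensions};
+ ///
+ /// let map: HandleMap<u32> = HandleMap::with_dimensions(
+ ///     HandleMapDimensions { num_shards: 4, max_active_handles: 1 });
+ /// let handle = map.allocate(|| 7).expect("first allocation fits");
+ /// // The map is now at its one-handle limit, so the next allocation fails.
+ /// assert!(map.allocate(|| 8).is_err());
+ /// assert_eq!(7, *map.get(handle).unwrap());
+ /// ```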
+ pub fn allocate(
+ &self,
+ initial_value_provider: impl FnOnce() -> T,
+ ) -> Result<Handle, HandleMapFullError> {
+ let mut initial_value_provider = initial_value_provider;
+ loop {
+ // Increment the new-handle-ID counter using relaxed memory ordering,
+ // since the only invariant that we want to enforce is that concurrently-running
+ // threads always get distinct new handle-ids.
+ let new_handle_id = self.new_handle_id_counter.fetch_add(1, Ordering::Relaxed);
+ let new_handle = Handle::from_id(new_handle_id);
+ let shard_index = new_handle.get_shard_index(self.dimensions.num_shards);
+
+ // Now, check the shard to see if we can actually allocate into it.
+ let shard_allocate_result = self.handle_map_shards[shard_index].try_allocate(
+ new_handle,
+ initial_value_provider,
+ &self.outstanding_allocations_counter,
+ self.dimensions.max_active_handles,
+ );
+ match shard_allocate_result {
+ Ok(_) => {
+ return Ok(new_handle);
+ }
+ Err(ShardAllocationError::ExceedsAllocationLimit) => {
+ return Err(HandleMapFullError);
+ }
+ Err(ShardAllocationError::EntryOccupied(thrown_back_provider)) => {
+ // We need to do the whole thing again with a new ID
+ initial_value_provider = thrown_back_provider;
+ }
+ }
+ }
+ }
+
+ /// Gets a read-only reference to an object within the given handle-map,
+ /// if the given handle is present. Otherwise, returns [`HandleNotPresentError`].
+ pub fn get(&self, handle: Handle) -> Result<ObjectReadGuard<T>, HandleNotPresentError> {
+ let shard_index = handle.get_shard_index(self.dimensions.num_shards);
+ self.handle_map_shards[shard_index].get(handle)
+ }
+
+ /// Gets a read+write reference to an object within the given handle-map,
+ /// if the given handle is present. Otherwise, returns [`HandleNotPresentError`].
+ pub fn get_mut(
+ &self,
+ handle: Handle,
+ ) -> Result<ObjectReadWriteGuard<T>, HandleNotPresentError> {
+ let shard_index = handle.get_shard_index(self.dimensions.num_shards);
+ self.handle_map_shards[shard_index].get_mut(handle)
+ }
+
+ /// Removes the object pointed to by the given handle in
+ /// the handle-map, returning the removed object if it
+ /// exists. Otherwise, returns [`HandleNotPresentError`].
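+ ///
+ /// # Example
+ ///
+ /// An illustrative allocate/write/read/deallocate round trip (dimensions are arbitrary):
+ ///
+ /// ```
+ /// use handle_map::{HandleMap, HandleMapDimensions};
+ ///
+ /// let map: HandleMap<String> = HandleMap::with_dimensions(
+ ///     HandleMapDimensions { num_shards: 4, max_active_handles: 16 });
+ /// let handle = map.allocate(|| "Hello".to_string()).unwrap();
+ /// map.get_mut(handle).unwrap().push_str(" World");
+ /// assert_eq!("Hello World", *map.get(handle).unwrap());
+ /// // Deallocation hands back the stored value; the handle is stale afterwards.
+ /// assert_eq!("Hello World", map.deallocate(handle).unwrap());
+ /// assert!(map.get(handle).is_err());
+ /// ```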
+ pub fn deallocate(&self, handle: Handle) -> Result<T, HandleNotPresentError> {
+ let shard_index = handle.get_shard_index(self.dimensions.num_shards);
+ self.handle_map_shards[shard_index]
+ .deallocate(handle, &self.outstanding_allocations_counter)
+ }
+
+ /// Gets the actual number of elements stored in the entire map.
+ /// Only suitable for single-threaded sections of tests.
+ #[cfg(test)]
+ pub(crate) fn len(&self) -> usize {
+ self.handle_map_shards.iter().map(|s| s.len()).sum()
+ }
+
+ /// Sets the new-handle-id counter to the given value.
+ /// Only suitable for tests.
+ #[cfg(test)]
+ pub(crate) fn set_new_handle_id_counter(&mut self, value: u64) {
+ self.new_handle_id_counter = AtomicU64::new(value);
+ }
+}
+
+// Bunch o' type aliases to make talking about them much easier in the shard code.
+type ShardMapType<T> = hashbrown::HashMap<Handle, T>;
+type ShardReadWriteLock<T> = RwLock<ShardMapType<T>>;
+type ShardReadGuard<'a, T> = RwLockReadGuard<'a, ShardMapType<T>>;
+type ShardUpgradableReadGuard<'a, T> = RwLockUpgradableReadGuard<'a, ShardMapType<T>>;
+type ShardReadWriteGuard<'a, T> = RwLockWriteGuard<'a, ShardMapType<T>>;
+
+/// An individual handle-map shard, which is ultimately
+/// just a hash-map behind a lock.
+pub(crate) struct HandleMapShard<T: Send + Sync> {
+ data: RwLock<ShardMapType<T>>,
+}
+
+impl<T: Send + Sync> Default for HandleMapShard<T> {
+ fn default() -> Self {
+ Self { data: RwLock::new(hashbrown::HashMap::new()) }
+ }
+}
+
+impl<T: Send + Sync> HandleMapShard<T> {
+ fn get(&self, handle: Handle) -> Result<ObjectReadGuard<T>, HandleNotPresentError> {
+ let map_read_guard = ShardReadWriteLock::<T>::read(&self.data);
+ let read_only_map_ref = map_read_guard.deref();
+ if read_only_map_ref.contains_key(&handle) {
+ let object_read_guard = ShardReadGuard::<T>::map(map_read_guard, move |map_ref| {
+ // We know that the entry exists, since we've locked the
+ // shard and already checked that it exists prior to
+ // handing out this new, mapped read-lock.
+ map_ref.get(&handle).unwrap()
+ });
+ Ok(ObjectReadGuard { guard: object_read_guard })
+ } else {
+ // Auto-drop the read guard, and return an error
+ Err(HandleNotPresentError)
+ }
+ }
+ /// Gets a read-write guard on the entire shard map if an entry for the given
+ /// handle exists, but if not, yield [`HandleNotPresentError`].
+ fn get_read_write_guard_if_entry_exists(
+ &self,
+ handle: Handle,
+ ) -> Result<ShardReadWriteGuard<T>, HandleNotPresentError> {
+ // Start with an upgradable read lock and then upgrade to a write lock.
+ // By doing this, we prevent new readers from entering (see `spin` documentation)
+ let map_upgradable_read_guard = ShardReadWriteLock::<T>::upgradable_read(&self.data);
+ let read_only_map_ref = map_upgradable_read_guard.deref();
+ if read_only_map_ref.contains_key(&handle) {
+ // If we know that the entry exists, and we're currently
+ // holding a read-lock, we know that we're safe to request
+ // an upgrade to a write lock, since only one write or
+ // upgradable read lock can be outstanding at any one time.
+ let map_read_write_guard =
+ ShardUpgradableReadGuard::<T>::upgrade(map_upgradable_read_guard);
+ Ok(map_read_write_guard)
+ } else {
+ // Auto-drop the read guard, we don't need to allow a write.
+ Err(HandleNotPresentError)
+ }
+ }
+
+ fn get_mut(&self, handle: Handle) -> Result<ObjectReadWriteGuard<T>, HandleNotPresentError> {
+ let map_read_write_guard = self.get_read_write_guard_if_entry_exists(handle)?;
+ // Expose only the pointed-to object with a mapped read-write guard
+ let object_read_write_guard =
+ ShardReadWriteGuard::<T>::map(map_read_write_guard, move |map_ref| {
+ // Already checked that the entry exists while holding the lock
+ map_ref.get_mut(&handle).unwrap()
+ });
+ Ok(ObjectReadWriteGuard { guard: object_read_write_guard })
+ }
+ fn deallocate(
+ &self,
+ handle: Handle,
+ outstanding_allocations_counter: &AtomicU32,
+ ) -> Result<T, HandleNotPresentError> {
+ let mut map_read_write_guard = self.get_read_write_guard_if_entry_exists(handle)?;
+ // We don't need to worry about double-decrements, since the above call
+ // got us an upgradable read guard for our read, which means it's the only
+ // outstanding upgradeable guard on the shard. See `spin` documentation.
+ // Remove the pointed-to object from the map, and return it,
+ // releasing the lock when the guard goes out of scope.
+ let removed_object = map_read_write_guard.deref_mut().remove(&handle).unwrap();
+ // Decrement the allocations counter. Release ordering because we want
+ // to ensure that clearing the map entry never gets re-ordered to after when
+ // this counter gets decremented.
+ outstanding_allocations_counter.sub(1, Ordering::Release);
+ Ok(removed_object)
+ }
+
+ fn try_allocate<F>(
+ &self,
+ handle: Handle,
+ object_provider: F,
+ outstanding_allocations_counter: &AtomicU32,
+ max_active_handles: u32,
+ ) -> Result<(), ShardAllocationError<T, F>>
+ where
+ F: FnOnce() -> T,
+ {
+ // Use an upgradeable read guard -> write guard to provide greater fairness
+ // toward writers (see `spin` documentation)
+ let map_upgradable_read_guard = ShardReadWriteLock::<T>::upgradable_read(&self.data);
+ let mut map_read_write_guard =
+ ShardUpgradableReadGuard::<T>::upgrade(map_upgradable_read_guard);
+ match map_read_write_guard.entry_ref(&handle) {
+ EntryRef::Occupied(_) => {
+ // We've already allocated for that handle-id, so yield
+ // the object provider back to the caller.
+ Err(ShardAllocationError::EntryOccupied(object_provider))
+ }
+ EntryRef::Vacant(vacant_entry) => {
+ // An entry is open, but we haven't yet checked the allocations count.
+ // Try to increment the total allocations count atomically.
+ // Use acquire ordering on a successful bump, because we don't want
+ // to invoke the allocation closure before we have a guaranteed slot.
+ // On the other hand, upon failure, we don't care about ordering
+ // of surrounding operations, and so we use a relaxed ordering there.
+ let allocation_count_bump_result = outstanding_allocations_counter.fetch_update(
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ |old_total_allocations| {
+ if old_total_allocations >= max_active_handles {
+ None
+ } else {
+ Some(old_total_allocations + 1)
+ }
+ },
+ );
+ match allocation_count_bump_result {
+ Ok(_) => {
+ // We're good to actually allocate
+ let object = object_provider();
+ vacant_entry.insert(object);
+ Ok(())
+ }
+ Err(_) => {
+ // The allocation would cause us to exceed the allowed allocations,
+ // so release all locks and error.
+ Err(ShardAllocationError::ExceedsAllocationLimit)
+ }
+ }
+ }
+ }
+ }
+ /// Gets the actual number of elements stored in this shard.
+ /// Only suitable for single-threaded sections of tests.
+ #[cfg(test)]
+ fn len(&self) -> usize {
+ let guard = ShardReadWriteLock::<T>::read(&self.data);
+ guard.deref().len()
+ }
+}
+
+/// Externally-facing trait for things which behave like handle-map handles
+/// with a globally-defined handle-map for the type.
+pub trait HandleLike: Sized {
+ /// The underlying object type pointed-to by this handle
+ type Object: Send + Sync;
+
+ /// Tries to allocate a new handle using the given provider
+ /// to construct the underlying stored object as a new
+ /// entry into the global handle table for this type.
+ fn allocate(
+ initial_value_provider: impl FnOnce() -> Self::Object,
+ ) -> Result<Self, HandleMapFullError>;
+
+ /// Gets a RAII read-guard on the contents behind this handle.
+ fn get(&self) -> Result<ObjectReadGuard<Self::Object>, HandleNotPresentError>;
+
+ /// Gets a RAII read-write guard on the contents behind this handle.
+ fn get_mut(&self) -> Result<ObjectReadWriteGuard<Self::Object>, HandleNotPresentError>;
+
+ /// Deallocates the contents behind this handle.
+ fn deallocate(self) -> Result<Self::Object, HandleNotPresentError>;
+}
+
+#[macro_export]
+/// `declare_handle_map! { handle_module_name, handle_type_name, wrapped_type,
+/// map_dimension_provider }`
+///
+/// Declares a new public module with name `handle_module_name` which includes a new type
+/// `handle_type_name` which is `#[repr(C)]` and represents FFI-accessible handles
+/// to values of type `wrapped_type`.
+///
+/// Internal to the generated module, a new static `SingletonHandleMap` is created, where the
+/// maximum number of active handles and the number of shards are given by
+/// the dimensions returned by evaluation of the `map_dimension_provider` expression.
+///
+/// Note: `map_dimension_provider` will be evaluated within the defined module's scope,
+/// so you will likely need to use `super` to refer to definitions in the enclosing scope.
+///
+/// # Example
+/// The following code defines an FFI-safe type `StringHandle` which references
+/// the `String` data-type, and uses it to define a (contrived)
+/// function `sample` which will print "Hello World".
+///
+/// ```
+/// mod sample {
+/// use core::ops::Deref;
+/// use handle_map::{declare_handle_map, HandleMapDimensions, HandleLike};
+///
+/// fn get_string_handle_map_dimensions() -> HandleMapDimensions {
+/// HandleMapDimensions {
+/// num_shards: 8,
+/// max_active_handles: 100,
+/// }
+/// }
+///
+/// declare_handle_map! { string_handle, StringHandle, String,
+/// super::get_string_handle_map_dimensions() }
+///
+/// use string_handle::StringHandle;
+///
+/// fn sample() {
+/// // Note: this method could panic if there are
+/// // more than 99 outstanding handles.
+///
+/// // Allocate a new string-handle pointing to the string "Hello"
+/// let handle = StringHandle::allocate(|| { "Hello".to_string() }).unwrap();
+/// {
+/// // Obtain a write-guard on the contents of our handle
+/// let mut handle_write_guard = handle.get_mut().unwrap();
+/// handle_write_guard.push_str(" World");
+/// // Write guard is auto-dropped at the end of this block.
+/// }
+/// {
+/// // Obtain a read-guard on the contents of our handle.
+/// // Note that we had to ensure that the write-guard was
+/// // dropped prior to doing this, or else execution
+/// // could potentially hang.
+/// let handle_read_guard = handle.get().unwrap();
+/// println!("{}", handle_read_guard.deref());
+/// }
+/// // Clean up the data behind the created handle
+/// handle.deallocate().unwrap();
+/// }
+/// }
+/// ```
+macro_rules! declare_handle_map {
+ ($handle_module_name:ident, $handle_type_name:ident, $wrapped_type:ty,
+ $map_dimension_provider:expr) => {
+ #[doc = concat!("Macro-generated (via `handle_map::declare_handle_map!`) module",
+ " which defines the `", stringify!($handle_module_name), "::",
+ stringify!($handle_type_name), "` FFI-transmissible handle type",
+ " which references values of type `", stringify!($wrapped_type), "`.")]
+ pub mod $handle_module_name {
+ use $crate::SingletonHandleMapInfo;
+
+ struct SingletonInfo;
+
+ static GLOBAL_HANDLE_MAP: $crate::SingletonHandleMap<SingletonInfo> =
+ $crate::SingletonHandleMap::with_info(SingletonInfo);
+
+ impl $crate::SingletonHandleMapInfo for SingletonInfo {
+ type Object = $wrapped_type;
+
+ fn dimensions(&self) -> $crate::HandleMapDimensions {
+ $map_dimension_provider
+ }
+ fn get_handle_map() -> &'static $crate::HandleMap<$wrapped_type> {
+ GLOBAL_HANDLE_MAP.get()
+ }
+ }
+
+ #[doc = concat!("A `#[repr(C)]` handle to a value of type `",
+ stringify!($wrapped_type), "`.")]
+ #[repr(C)]
+ #[derive(Clone, Copy, PartialEq, Eq)]
+ pub struct $handle_type_name {
+ handle_id: u64,
+ }
+
+ impl $handle_type_name {
+ fn get_as_handle(&self) -> $crate::Handle {
+ $crate::Handle::from_id(self.handle_id)
+ }
+ }
+ impl $crate::HandleLike for $handle_type_name {
+ type Object = $wrapped_type;
+ fn allocate(initial_value_provider: impl FnOnce() -> $wrapped_type,
+ ) -> Result<Self, $crate::HandleMapFullError> {
+ SingletonInfo::get_handle_map().allocate(initial_value_provider)
+ .map(|derived_handle| Self {
+ handle_id: derived_handle.get_id(),
+ })
+ }
+ fn get(&self) -> Result<$crate::ObjectReadGuard<$wrapped_type>, $crate::HandleNotPresentError> {
+ SingletonInfo::get_handle_map().get(self.get_as_handle())
+ }
+ fn get_mut(&self) -> Result<$crate::ObjectReadWriteGuard<$wrapped_type>, $crate::HandleNotPresentError> {
+ SingletonInfo::get_handle_map().get_mut(self.get_as_handle())
+ }
+ fn deallocate(self) -> Result<$wrapped_type, $crate::HandleNotPresentError> {
+ SingletonInfo::get_handle_map().deallocate(self.get_as_handle())
+ }
+ }
+ }
+ }
+}
diff --git a/nearby/presence/handle_map/src/tests.rs b/nearby/presence/handle_map/src/tests.rs
new file mode 100644
index 0000000..d97657f
--- /dev/null
+++ b/nearby/presence/handle_map/src/tests.rs
@@ -0,0 +1,288 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use crate::*;
+
+use hashbrown::HashSet;
+use std::sync::Arc;
+use std::thread;
+
+// Maximum number of active handles used across all tests
+// Chosen to be divisible by the number of active threads
+const MAX_ACTIVE_HANDLES: u32 = NUM_ACTIVE_THREADS * MAX_ACTIVE_HANDLES_PER_THREAD;
+
+const MAX_ACTIVE_HANDLES_PER_THREAD: u32 = 16384;
+
+// Deliberately picking a low number of shards so that we
+// are more likely to discover conflicts between threads.
+const NUM_SHARDS: u8 = 4;
+
+// Deliberately picking a number of threads higher than the shard count.
+const NUM_ACTIVE_THREADS: u32 = 8;
+
+const DEFAULT_DIMENSIONS: HandleMapDimensions =
+ HandleMapDimensions { num_shards: NUM_SHARDS, max_active_handles: MAX_ACTIVE_HANDLES };
+
+fn build_handle_map<T: Send + Sync>() -> HandleMap<T> {
+ HandleMap::with_dimensions(DEFAULT_DIMENSIONS)
+}
+
+/// Performs the same testing function for each thread
+fn test_for_each_thread<F>(test_function_ref: Arc<F>, num_repetitions_per_thread: usize)
+where
+ F: Fn() + Send + Sync + 'static,
+{
+ let mut join_handles = Vec::new();
+ for _ in 0..NUM_ACTIVE_THREADS {
+ let test_function_clone = test_function_ref.clone();
+ let join_handle = thread::spawn(move || {
+ for _ in 0..num_repetitions_per_thread {
+ test_function_clone();
+ }
+ });
+ join_handles.push(join_handle);
+ }
+ for join_handle in join_handles {
+ join_handle.join().unwrap()
+ }
+}
+
+/// Tests the consistency of reads from the same handle across
+/// multiple threads.
+#[test]
+fn test_read_consistency_same_address() {
+ let num_repetitions_per_thread = 10000;
+ let handle_map = build_handle_map::<String>();
+ let handle = handle_map.allocate(|| "hello".to_string()).expect("Allocation shouldn't fail");
+ let test_fn = Arc::new(move || {
+ let value_ref = handle_map.get(handle).expect("Getting shouldn't fail");
+ assert_eq!("hello", value_ref.deref());
+ });
+ test_for_each_thread(test_fn, num_repetitions_per_thread);
+}
+
+/// Tests overloading the table with allocations to ensure
+/// that when all is said and done, we still haven't exceeded
+/// the allocations limit.
+#[test]
+#[allow(unused_must_use)]
+fn test_overload_with_allocations() {
+ let num_repetitions_per_thread = 2 * MAX_ACTIVE_HANDLES_PER_THREAD as usize;
+ let handle_map = build_handle_map::<u8>();
+
+ let handle_map_function_ref = Arc::new(handle_map);
+ let handle_map_post_function_ref = handle_map_function_ref.clone();
+
+ let test_fn = Arc::new(move || {
+ handle_map_function_ref.allocate(|| 0xFF);
+ });
+ test_for_each_thread(test_fn, num_repetitions_per_thread);
+
+ let actual_num_active_handles = handle_map_post_function_ref.len();
+ assert_eq!(MAX_ACTIVE_HANDLES as usize, actual_num_active_handles);
+}
+
+/// Tests deallocations and allocations near the allocation limit.
+#[test]
+#[allow(unused_must_use)]
+fn test_overload_allocations_deallocations() {
+ let num_repetitions_per_thread = 10000;
+
+ //Pre-fill the map so that there's only one available entry.
+ let handle_map = build_handle_map::<u8>();
+ for i in 0..(MAX_ACTIVE_HANDLES - 1) {
+ handle_map.allocate(|| (i % 256) as u8);
+ }
+
+ let handle_map_function_ref = Arc::new(handle_map);
+ let handle_map_post_function_ref = handle_map_function_ref.clone();
+
+ let test_fn = Arc::new(move || {
+ let allocation_result = handle_map_function_ref.allocate(|| 0xFF);
+ if let Ok(handle) = allocation_result {
+ handle_map_function_ref.deallocate(handle).unwrap();
+ }
+ });
+ test_for_each_thread(test_fn, num_repetitions_per_thread);
+
+ // No matter what happened above, we should have the same number
+ // of handles as when we started, because every successful allocation
+ // should have been paired with a successful deallocation.
+ let actual_num_active_handles = handle_map_post_function_ref.len();
+ assert_eq!((MAX_ACTIVE_HANDLES - 1) as usize, actual_num_active_handles);
+
+ // Verify that we still have space for one more entry after all that.
+ handle_map_post_function_ref.allocate(|| 0xEE).unwrap();
+}
+
+/// Tests the progress of allocate/read/write/read/deallocate
+/// to independent handles across multiple threads.
+#[test]
+fn test_full_lifecycle_independent_handles() {
+ let num_repetitions_per_thread = 10000;
+ let handle_map = build_handle_map::<String>();
+ let test_fn = Arc::new(move || {
+ let handle =
+ handle_map.allocate(|| "Hello".to_string()).expect("Allocation shouldn't fail");
+ {
+ let value_ref = handle_map.get(handle).expect("Getting the value shouldn't fail");
+ assert_eq!("Hello", &*value_ref);
+ };
+ {
+ let mut value_mut_ref =
+ handle_map.get_mut(handle).expect("Mutating the value shouldn't fail");
+ value_mut_ref.deref_mut().push_str(" World!");
+ };
+ {
+ let value_ref = handle_map
+ .get(handle)
+ .expect("Getting the value after modification shouldn't fail");
+ assert_eq!("Hello World!", &*value_ref);
+ };
+ let removed = handle_map.deallocate(handle).expect("Deallocation shouldn't fail");
+ assert_eq!("Hello World!", removed);
+ });
+ test_for_each_thread(test_fn, num_repetitions_per_thread);
+}
+
+/// Tests the consistency of reads+writes to the same handle,
+/// where threads modify and read different parts of an
+/// underlying structure.
+#[test]
+fn test_consistency_of_same_handle_multithreaded_modifications() {
+ let num_repetitions_per_thread = 10000;
+ let handle_map = Arc::new(build_handle_map::<(String, String)>());
+ let handle = handle_map
+ .allocate(|| ("A".to_string(), "B".to_string()))
+ .expect("Allocation shouldn't fail");
+
+ let handle_map_second_ref = handle_map.clone();
+
+ let join_handle_a = thread::spawn(move || {
+ for i in 1..num_repetitions_per_thread {
+ {
+ let value_ref =
+ handle_map.get(handle).expect("Getting the value from thread A shouldn't fail");
+ let value = &value_ref.0;
+ assert_eq!(i, value.len());
+ }
+ {
+ let mut value_mut_ref = handle_map
+ .get_mut(handle)
+ .expect("Mutating the value from thread A shouldn't fail");
+ value_mut_ref.0.push('A');
+ }
+ }
+ });
+
+ let join_handle_b = thread::spawn(move || {
+ for i in 1..num_repetitions_per_thread {
+ {
+ let value_ref = handle_map_second_ref
+ .get(handle)
+ .expect("Getting the value from thread B shouldn't fail");
+ let value = &value_ref.1;
+ assert_eq!(i, value.len());
+ }
+ {
+ let mut value_mut_ref = handle_map_second_ref
+ .get_mut(handle)
+ .expect("Mutating the value from thread B shouldn't fail");
+ value_mut_ref.1.push('B');
+ }
+ }
+ });
+
+ join_handle_a.join().unwrap();
+ join_handle_b.join().unwrap();
+}
+
+/// Multi-threaded test to ensure that when attempting
+/// to allocate over handle IDs which are already allocated,
+/// all threads eventually get distinct, unused handle IDs
+/// for their own allocations.
+#[test]
+fn test_non_overwriting_old_handles() {
+ let mut all_handles: HashSet<Handle> = HashSet::new();
+ let num_repetitions_per_thread = 100;
+ let mut handle_map = build_handle_map::<u8>();
+ for _ in 0..(num_repetitions_per_thread * NUM_ACTIVE_THREADS) {
+ let handle = handle_map.allocate(|| 0xFF).expect("Initial allocations shouldn't fail");
+ all_handles.insert(handle);
+ }
+ // Reset the new-handle-id counter
+ handle_map.set_new_handle_id_counter(0);
+
+ let handle_map = Arc::new(handle_map);
+
+ let mut thread_handles: Vec<thread::JoinHandle<Vec<Handle>>> = Vec::new();
+ for _ in 0..NUM_ACTIVE_THREADS {
+ let handle_map_reference = handle_map.clone();
+ let thread_handle = thread::spawn(move || {
+ let mut handles = Vec::new();
+ for i in 0..num_repetitions_per_thread {
+ let handle = handle_map_reference
+ .allocate(move || (i % 256) as u8)
+ .expect("No allocation should fail");
+ handles.push(handle);
+ }
+ handles
+ });
+ thread_handles.push(thread_handle);
+ }
+ for thread_handle in thread_handles {
+ let handles: Vec<Handle> = thread_handle.join().expect("Individual threads shouldn't fail");
+ for handle in handles {
+ let was_distinct = all_handles.insert(handle);
+ assert!(was_distinct);
+ }
+ }
+}
+
+#[test]
+fn test_id_wraparound() {
+ let mut handle_map = build_handle_map::<u8>();
+ handle_map.set_new_handle_id_counter(u64::MAX);
+ handle_map.allocate(|| 0xAB).expect("Counter wrap-around allocation should not fail");
+ handle_map.allocate(|| 0xCD).expect("Post-counter-wrap-around allocation should not fail");
+}
+
+#[test]
+fn test_deallocate_unallocated_handle() {
+ let handle_map = build_handle_map::<usize>();
+ let handle = handle_map.allocate(|| 2).expect("Allocation shouldn't fail");
+ let deallocated = handle_map.deallocate(handle).expect("Deallocation shouldn't fail");
+ assert_eq!(2, deallocated);
+ let double_deallocate_result = handle_map.deallocate(handle);
+ assert!(double_deallocate_result.is_err());
+}
+
+#[test]
+fn test_get_unallocated_handle() {
+ let handle_map = build_handle_map::<u8>();
+ let handle = handle_map.allocate(|| 0xFE).expect("Allocation shouldn't fail");
+ let deallocated = handle_map.deallocate(handle).expect("Deallocation shouldn't fail");
+ assert_eq!(0xFE, deallocated);
+ let read_result = handle_map.get(handle);
+ assert!(read_result.is_err());
+}
+
+#[test]
+fn test_get_mut_unallocated_handle() {
+ let handle_map = build_handle_map::<(usize, usize, usize)>();
+ let handle = handle_map.allocate(|| (1, 2, 3)).expect("Allocation shouldn't fail");
+ let deallocated = handle_map.deallocate(handle).expect("Deallocation shouldn't fail");
+ assert_eq!((1, 2, 3), deallocated);
+ let get_mut_result = handle_map.get_mut(handle);
+ assert!(get_mut_result.is_err());
+}