Skip to content

Commit 49ca138

Browse files
authored
Force callers to handle allocation failure in Slab (#12450)
* Add `Vec::drain` method * Force callers to handle allocation failure in `Slab`
1 parent 342710a commit 49ca138

7 files changed

Lines changed: 74 additions & 39 deletions

File tree

crates/core/src/slab.rs

Lines changed: 34 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -14,14 +14,15 @@
1414
//! # Example
1515
//!
1616
//! ```
17+
//! # fn foo() -> wasmtime_internal_core::error::Result<()> {
1718
//! use wasmtime_internal_core::slab::Slab;
1819
//!
1920
//! let mut slab = Slab::new();
2021
//!
2122
//! // Insert some values into the slab.
22-
//! let rza = slab.alloc("Robert Fitzgerald Diggs");
23-
//! let gza = slab.alloc("Gary Grice");
24-
//! let bill = slab.alloc("Bill Gates");
23+
//! let rza = slab.alloc("Robert Fitzgerald Diggs")?;
24+
//! let gza = slab.alloc("Gary Grice")?;
25+
//! let bill = slab.alloc("Bill Gates")?;
2526
//!
2627
//! // Allocated elements can be accessed infallibly via indexing (and missing and
2728
//! // deallocated entries will panic).
@@ -39,7 +40,10 @@
3940
//! slab.dealloc(bill);
4041
//!
4142
//! // Allocate a new entry.
42-
//! let bill = slab.alloc("Bill Murray");
43+
//! let bill = slab.alloc("Bill Murray")?;
44+
//! # Ok(())
45+
//! # }
46+
//! # foo().unwrap();
4347
//! ```
4448
//!
4549
//! # Using `Id`s with the Wrong `Slab`
@@ -78,6 +82,8 @@
7882
//! the following:
7983
//!
8084
//! ```rust
85+
//! use wasmtime_internal_core::error::OutOfMemory;
86+
//!
8187
//! pub struct GenerationalId {
8288
//! id: wasmtime_internal_core::slab::Id,
8389
//! generation: u32,
@@ -94,10 +100,10 @@
94100
//! }
95101
//!
96102
//! impl<T> GenerationalSlab<T> {
97-
//! pub fn alloc(&mut self, value: T) -> GenerationalId {
103+
//! pub fn alloc(&mut self, value: T) -> Result<GenerationalId, OutOfMemory> {
98104
//! let generation = self.generation;
99-
//! let id = self.slab.alloc(GenerationalEntry { value, generation });
100-
//! GenerationalId { id, generation }
105+
//! let id = self.slab.alloc(GenerationalEntry { value, generation })?;
106+
//! Ok(GenerationalId { id, generation })
101107
//! }
102108
//!
103109
//! pub fn get(&self, id: GenerationalId) -> Option<&T> {
@@ -127,9 +133,10 @@
127133
//! }
128134
//! ```
129135
136+
use crate::alloc::Vec;
137+
use crate::error::OutOfMemory;
130138
use core::fmt;
131139
use core::num::NonZeroU32;
132-
use std_alloc::vec::Vec;
133140

134141
/// An identifier for an allocated value inside a `slab`.
135142
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@@ -255,10 +262,10 @@ impl<T> Slab<T> {
255262
/// Construct a new, empty slab, pre-reserving space for at least `capacity`
256263
/// elements.
257264
#[inline]
258-
pub fn with_capacity(capacity: usize) -> Self {
265+
pub fn with_capacity(capacity: usize) -> Result<Self, OutOfMemory> {
259266
let mut slab = Self::new();
260-
slab.reserve(capacity);
261-
slab
267+
slab.reserve(capacity)?;
268+
Ok(slab)
262269
}
263270

264271
/// Ensure that there is space for at least `additional` elements in this
@@ -267,29 +274,31 @@ impl<T> Slab<T> {
267274
/// # Panics
268275
///
269276
/// Panics if the new capacity exceeds `Self::MAX_CAPACITY`.
270-
pub fn reserve(&mut self, additional: usize) {
277+
pub fn reserve(&mut self, additional: usize) -> Result<(), OutOfMemory> {
271278
let cap = self.capacity();
272279
let len = self.len();
273280
assert!(cap >= len);
274281
if cap - len >= additional {
275282
// Already have `additional` capacity available.
276-
return;
283+
return Ok(());
277284
}
278285

279-
self.entries.reserve(additional);
286+
self.entries.reserve(additional)?;
280287

281288
// Maintain the invariant that `i <= MAX_CAPACITY` for all indices `i`
282289
// in `self.entries`.
283290
assert!(self.entries.capacity() <= Self::MAX_CAPACITY);
291+
292+
Ok(())
284293
}
285294

286-
fn double_capacity(&mut self) {
295+
fn double_capacity(&mut self) -> Result<(), OutOfMemory> {
287296
// Double our capacity to amortize the cost of resizing. But make sure
288297
// we add some amount of minimum additional capacity, since doubling
289298
// zero capacity isn't useful.
290299
const MIN_CAPACITY: usize = 16;
291300
let additional = core::cmp::max(self.entries.capacity(), MIN_CAPACITY);
292-
self.reserve(additional);
301+
self.reserve(additional)
293302
}
294303

295304
/// What is the capacity of this slab? That is, how many entries can it
@@ -336,7 +345,9 @@ impl<T> Slab<T> {
336345
self.free.take().or_else(|| {
337346
if self.entries.len() < self.entries.capacity() {
338347
let index = EntryIndex::new(self.entries.len());
339-
self.entries.push(Entry::Free { next_free: None });
348+
self.entries
349+
.push(Entry::Free { next_free: None })
350+
.expect("have capacity");
340351
Some(index)
341352
} else {
342353
None
@@ -352,9 +363,9 @@ impl<T> Slab<T> {
352363
/// Panics if allocating this value requires reallocating the underlying
353364
/// storage, and the new capacity exceeds `Slab::MAX_CAPACITY`.
354365
#[inline]
355-
pub fn alloc(&mut self, value: T) -> Id {
366+
pub fn alloc(&mut self, value: T) -> Result<Id, OutOfMemory> {
356367
self.try_alloc(value)
357-
.unwrap_or_else(|value| self.alloc_slow(value))
368+
.or_else(|value| self.alloc_slow(value))
358369
}
359370

360371
/// Get the `Id` that will be returned for the next allocation in this slab.
@@ -366,12 +377,13 @@ impl<T> Slab<T> {
366377

367378
#[inline(never)]
368379
#[cold]
369-
fn alloc_slow(&mut self, value: T) -> Id {
380+
fn alloc_slow(&mut self, value: T) -> Result<Id, OutOfMemory> {
370381
// Reserve additional capacity, since we didn't have space for the
371382
// allocation.
372-
self.double_capacity();
383+
self.double_capacity()?;
373384
// After which the allocation will succeed.
374-
self.try_alloc(value).ok().unwrap()
385+
let id = self.try_alloc(value).ok().unwrap();
386+
Ok(id)
375387
}
376388

377389
/// Get a shared borrow of the value associated with `id`.

crates/wasmtime/src/runtime/gc/enabled/rooting.rs

Lines changed: 9 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -164,6 +164,7 @@
164164
use crate::runtime::vm::{GcRootsList, GcStore, VMGcRef};
165165
use crate::{
166166
AsContext, AsContextMut, GcRef, Result, RootedGcRef,
167+
error::OutOfMemory,
167168
store::{AsStoreOpaque, AutoAssertNoGc, StoreId, StoreOpaque},
168169
};
169170
use crate::{ValRaw, prelude::*};
@@ -1047,7 +1048,7 @@ impl<T: GcRef> Rooted<T> {
10471048
pub(crate) fn _to_owned_rooted(&self, store: &mut StoreOpaque) -> Result<OwnedRooted<T>> {
10481049
let mut store = AutoAssertNoGc::new(store);
10491050
let gc_ref = self.try_clone_gc_ref(&mut store)?;
1050-
Ok(OwnedRooted::new(&mut store, gc_ref))
1051+
Ok(OwnedRooted::new(&mut store, gc_ref)?)
10511052
}
10521053

10531054
/// Are these two `Rooted<T>`s the same GC root?
@@ -1627,7 +1628,10 @@ where
16271628
/// `gc_ref` should be a GC reference pointing to an instance of the GC type
16281629
/// that `T` represents. Failure to uphold this invariant is memory safe but
16291630
/// will result in general incorrectness such as panics and wrong results.
1630-
pub(crate) fn new(store: &mut AutoAssertNoGc<'_>, gc_ref: VMGcRef) -> Self {
1631+
pub(crate) fn new(
1632+
store: &mut AutoAssertNoGc<'_>,
1633+
gc_ref: VMGcRef,
1634+
) -> Result<Self, OutOfMemory> {
16311635
// We always have the opportunity to trim and unregister stale
16321636
// owned roots whenever we have a mut borrow to the store. We
16331637
// take the opportunity to do so here to avoid tying growth of
@@ -1639,20 +1643,20 @@ where
16391643
store.trim_gc_liveness_flags(false);
16401644

16411645
let roots = store.gc_roots_mut();
1642-
let id = roots.owned_rooted.alloc(gc_ref);
1646+
let id = roots.owned_rooted.alloc(gc_ref)?;
16431647
let liveness_flag = Arc::new(());
16441648
roots
16451649
.liveness_flags
16461650
.push((Arc::downgrade(&liveness_flag), id));
1647-
OwnedRooted {
1651+
Ok(OwnedRooted {
16481652
inner: GcRootIndex {
16491653
store_id: store.id(),
16501654
generation: 0,
16511655
index: PackedIndex::new_owned(id),
16521656
},
16531657
liveness_flag,
16541658
_phantom: marker::PhantomData,
1655-
}
1659+
})
16561660
}
16571661

16581662
#[inline]

crates/wasmtime/src/runtime/store.rs

Lines changed: 11 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -2780,13 +2780,18 @@ at https://bytecodealliance.org/security.
27802780
/// Get an owned rooted reference to the pending exception,
27812781
/// without taking it off the store.
27822782
#[cfg(all(feature = "gc", feature = "debug"))]
2783-
pub(crate) fn pending_exception_owned_rooted(&mut self) -> Option<OwnedRooted<ExnRef>> {
2783+
pub(crate) fn pending_exception_owned_rooted(
2784+
&mut self,
2785+
) -> Result<Option<OwnedRooted<ExnRef>>, crate::error::OutOfMemory> {
27842786
let mut nogc = AutoAssertNoGc::new(self);
2785-
nogc.pending_exception.take().map(|vmexnref| {
2786-
let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
2787-
nogc.pending_exception = Some(cloned.into_exnref_unchecked());
2788-
OwnedRooted::new(&mut nogc, vmexnref.into())
2789-
})
2787+
nogc.pending_exception
2788+
.take()
2789+
.map(|vmexnref| {
2790+
let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
2791+
nogc.pending_exception = Some(cloned.into_exnref_unchecked());
2792+
OwnedRooted::new(&mut nogc, vmexnref.into())
2793+
})
2794+
.transpose()
27902795
}
27912796

27922797
#[cfg(feature = "gc")]

crates/wasmtime/src/runtime/type_registry.rs

Lines changed: 10 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -738,6 +738,13 @@ impl TypeRegistryInner {
738738

739739
log::trace!("hash-consing map miss: making new registration");
740740

741+
// Reserve capacity before making any side effects (like incrementing
742+
// other rec groups ref counts). We do not want to panic or
743+
// `?`-propagate an error when we have applied partial side effects.
744+
//
745+
// TODO(#12069): handle allocation failure here.
746+
self.types.reserve(non_canon_types.len()).unwrap();
747+
741748
// Inter-group edges: increment the referenced group's ref
742749
// count, because these other rec groups shouldn't be dropped
743750
// while this rec group is still alive.
@@ -760,7 +767,9 @@ impl TypeRegistryInner {
760767
let shared_type_indices: Box<[_]> = non_canon_types
761768
.iter()
762769
.map(|(module_index, ty)| {
763-
let engine_index = slab_id_to_shared_type_index(self.types.alloc(None));
770+
let engine_index = slab_id_to_shared_type_index(
771+
self.types.alloc(None).expect("reserved capacity"),
772+
);
764773
log::trace!(
765774
"reserved {engine_index:?} for {module_index:?} = non-canonical {ty:?}"
766775
);

crates/wasmtime/src/runtime/vm/gc/func_ref.rs

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -50,10 +50,10 @@ impl FuncRefTable {
5050
/// The given `func_ref` must point to a valid `VMFuncRef` and must remain
5151
/// valid for the duration of this table's lifetime.
5252
pub unsafe fn intern(&mut self, func_ref: Option<SendSyncPtr<VMFuncRef>>) -> FuncRefTableId {
53-
*self
54-
.interned
55-
.entry(func_ref)
56-
.or_insert_with(|| FuncRefTableId(self.slab.alloc(func_ref)))
53+
*self.interned.entry(func_ref).or_insert_with(|| {
54+
// TODO(#12069): handle allocation failure here
55+
FuncRefTableId(self.slab.alloc(func_ref).unwrap())
56+
})
5757
}
5858

5959
/// Get the `VMFuncRef` associated with the given ID.

crates/wasmtime/src/runtime/vm/gc/host_data.rs

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -39,7 +39,8 @@ fn deref_box_mut<T: ?Sized>(b: &mut Box<T>) -> &mut T {
3939
impl ExternRefHostDataTable {
4040
/// Allocate a new `externref` host data value.
4141
pub fn alloc(&mut self, value: Box<dyn Any + Send + Sync>) -> ExternRefHostDataId {
42-
let id = self.slab.alloc(value);
42+
// TODO(#12069): handle allocation failure here
43+
let id = self.slab.alloc(value).unwrap();
4344
let id = ExternRefHostDataId(id);
4445
log::trace!("allocated new externref host data: {id:?}");
4546
id

crates/wasmtime/src/runtime/vm/traphandlers.rs

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -837,6 +837,8 @@ impl CallThreadState {
837837
let exn = store
838838
.as_store_opaque()
839839
.pending_exception_owned_rooted()
840+
// TODO(#12069): handle allocation failure here
841+
.unwrap()
840842
.expect("exception should be set when we are throwing");
841843
store.block_on_debug_handler(crate::DebugEvent::CaughtExceptionThrown(exn))
842844
}
@@ -848,6 +850,8 @@ impl CallThreadState {
848850
let exn = store
849851
.as_store_opaque()
850852
.pending_exception_owned_rooted()
853+
// TODO(#12069): handle allocation failure here
854+
.unwrap()
851855
.expect("exception should be set when we are throwing");
852856
store.block_on_debug_handler(crate::DebugEvent::UncaughtExceptionThrown(
853857
exn.clone(),

0 commit comments

Comments (0)