
Implement `Atomics` builtin (#3394)

* Implement `Atomics` builtin

* Apply review
José Julián Espina committed 1 year ago via GitHub
parent commit 04ed7bee70
32 changed files (lines changed in parentheses):

  1. Cargo.lock (62)
  2. Cargo.toml (2)
  3. boa_engine/Cargo.toml (11)
  4. boa_engine/src/builtins/array_buffer/shared.rs (7)
  5. boa_engine/src/builtins/array_buffer/utils.rs (4)
  6. boa_engine/src/builtins/atomics/futex.rs (401)
  7. boa_engine/src/builtins/atomics/mod.rs (605)
  8. boa_engine/src/builtins/mod.rs (4)
  9. boa_engine/src/builtins/temporal/now.rs (2)
  10. boa_engine/src/builtins/typed_array/builtin.rs (31)
  11. boa_engine/src/builtins/typed_array/element.rs (400)
  12. boa_engine/src/builtins/typed_array/element/atomic.rs (115)
  13. boa_engine/src/builtins/typed_array/element/mod.rs (350)
  14. boa_engine/src/builtins/typed_array/mod.rs (48)
  15. boa_engine/src/context/intrinsics.rs (15)
  16. boa_engine/src/context/mod.rs (63)
  17. boa_engine/src/lib.rs (2)
  18. boa_engine/src/small_map/entry.rs (323)
  19. boa_engine/src/small_map/mod.rs (644)
  20. boa_engine/src/string/common.rs (2)
  21. boa_engine/src/sys/fallback/mod.rs (3)
  22. boa_engine/src/sys/js/mod.rs (1)
  23. boa_engine/src/sys/mod.rs (16)
  24. boa_engine/src/value/integer.rs (2)
  25. boa_engine/src/vm/code_block.rs (1)
  26. boa_engine/src/vm/mod.rs (4)
  27. boa_tester/Cargo.toml (2)
  28. boa_tester/src/exec/js262.rs (236)
  29. boa_tester/src/exec/mod.rs (94)
  30. boa_tester/src/main.rs (24)
  31. boa_wasm/Cargo.toml (4)
  32. test262_config.toml (1)

62
Cargo.lock generated

@ -126,6 +126,12 @@ dependencies = [
"derive_arbitrary",
]
[[package]]
name = "arrayvec"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
[[package]]
name = "async-channel"
version = "1.9.0"
@ -382,6 +388,7 @@ dependencies = [
name = "boa_engine"
version = "0.17.0"
dependencies = [
"arrayvec",
"bitflags 2.4.1",
"boa_ast",
"boa_gc",
@ -391,6 +398,7 @@ dependencies = [
"boa_parser",
"boa_profiler",
"bytemuck",
"cfg-if",
"chrono",
"criterion",
"dashmap",
@ -411,6 +419,7 @@ dependencies = [
"icu_segmenter",
"indexmap 2.0.2",
"indoc",
"intrusive-collections",
"itertools 0.11.0",
"jemallocator",
"num-bigint",
@ -434,6 +443,7 @@ dependencies = [
"textwrap",
"thin-vec",
"thiserror",
"web-time",
"writeable",
"yoke",
"zerofrom",
@ -556,6 +566,8 @@ dependencies = [
"bitflags 2.4.1",
"boa_engine",
"boa_gc",
"boa_runtime",
"bus",
"clap",
"color-eyre",
"colored",
@ -589,6 +601,17 @@ version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
[[package]]
name = "bus"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b7118d0221d84fada881b657c2ddb7cd55108db79c8764c9ee212c0c259b783"
dependencies = [
"crossbeam-channel",
"num_cpus",
"parking_lot_core 0.9.9",
]
[[package]]
name = "bytecheck"
version = "0.6.11"
@ -926,6 +949,16 @@ version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216"
[[package]]
name = "crossbeam-channel"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.3"
@ -1996,6 +2029,15 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "intrusive-collections"
version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b694dc9f70c3bda874626d2aed13b780f137aab435f4e9814121955cf706122e"
dependencies = [
"memoffset 0.9.0",
]
[[package]]
name = "io-lifetimes"
version = "1.0.11"
@ -2337,6 +2379,16 @@ dependencies = [
"autocfg",
]
[[package]]
name = "num_cpus"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "num_enum"
version = "0.7.1"
@ -4259,6 +4311,16 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "web-time"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8208e3fdbc243c8fd30805721869242a7f6de3e2e9f3b057652ab36e52ae1e87"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "webpki-roots"
version = "0.25.2"

2
Cargo.toml

@ -20,7 +20,7 @@ members = [
[workspace.package]
edition = "2021"
version = "0.17.0"
rust-version = "1.71.0"
rust-version = "1.73.0"
authors = ["boa-dev"]
repository = "https://github.com/boa-dev/boa"
license = "Unlicense OR MIT"

11
boa_engine/Cargo.toml

@ -41,7 +41,7 @@ fuzz = ["boa_ast/arbitrary", "boa_interner/arbitrary"]
flowgraph = []
# Enable Boa's VM instruction tracing.
trace = []
trace = ["js"]
# Enable Boa's additional ECMAScript features for web browsers.
annex-b = ["boa_parser/annex-b"]
@ -52,6 +52,9 @@ temporal = ["boa_parser/temporal", "dep:icu_calendar"]
# Enable experimental features, like Stage 3 proposals.
experimental = ["temporal"]
# Enable binding to JS APIs for system related utilities.
js = ["dep:web-time"]
[dependencies]
boa_interner.workspace = true
boa_gc = { workspace = true, features = [ "thinvec" ] }
@ -86,6 +89,9 @@ icu_normalizer = "~1.3.0"
paste = "1.0"
portable-atomic = "1.5.1"
bytemuck = { version = "1.14.0", features = ["derive"] }
arrayvec = "0.7.4"
intrusive-collections = "0.9.6"
cfg-if = "1.0.0"
# intl deps
boa_icu_provider = {workspace = true, features = ["std"], optional = true }
@ -105,6 +111,9 @@ yoke = { workspace = true, optional = true }
zerofrom = { workspace = true, optional = true }
fixed_decimal = { workspace = true, features = ["ryu"], optional = true}
[target.'cfg(all(target_family = "wasm", not(any(target_os = "emscripten", target_os = "wasi"))))'.dependencies]
web-time = { version = "0.2.2", optional = true }
[dev-dependencies]
criterion = "0.5.1"
float-cmp = "0.9.0"

7
boa_engine/src/builtins/array_buffer/shared.rs

@ -34,6 +34,13 @@ pub struct SharedArrayBuffer {
}
impl SharedArrayBuffer {
/// Creates a `SharedArrayBuffer` with an empty buffer.
#[must_use]
pub fn empty() -> Self {
Self {
data: Arc::default(),
}
}
/// Gets the length of this `SharedArrayBuffer`.
pub(crate) fn len(&self) -> usize {
self.data.len()

4
boa_engine/src/builtins/array_buffer/utils.rs

@ -91,7 +91,7 @@ impl SliceRef<'_> {
// 9. Return RawBytesToNumeric(type, rawValue, isLittleEndian).
// SAFETY: The invariants of this operation are ensured by the caller.
unsafe { T::read_from_buffer(buffer, order) }
unsafe { T::read(buffer).load(order) }
}
let buffer = *self;
@ -259,7 +259,7 @@ impl SliceRefMut<'_> {
// SAFETY: The invariants of this operation are ensured by the caller.
unsafe {
T::write_to_buffer(buffer, value, order);
T::read_mut(buffer).store(value, order);
}
}

401
boa_engine/src/builtins/atomics/futex.rs

@ -0,0 +1,401 @@
// Implementation mostly based on https://github.com/v8/v8/blob/main/src/execution/futex-emulation.cc
// TODO: track https://github.com/rust-lang/rfcs/pull/3467 to see if we can use `UnsafeAliased` instead
// of raw pointers.
// A bit of context about how exactly this thing works.
//
// `Atomics.wait/notify` is basically an emulation of the "futex" syscall, which internally uses
// a wait queue attached to a certain memory address, where processes and threads can manipulate
// it to synchronize between them.
// More information: https://en.wikipedia.org/wiki/Futex
//
// Our emulation of the API is composed of three components:
//
// - `FutexWaiters`, which is a map of addresses to the corresponding wait queue for that address.
// Internally uses intrusive linked lists to avoid allocating when adding a new waiter, which
// reduces the time spent by a thread in the critical section.
//
// - `FutexWaiter`, which contains all the data necessary to be able to wake a waiter from another
// thread. It also contains a `waiting` boolean that is checked after waking up to see
// if the waiter was indeed notified or if it just woke up spuriously (yes, this is a thing that
// can happen per the documentation of `Condvar`).
//
// - `CRITICAL_SECTION`, a global static that must be locked before registering or notifying any
// waiter. This guarantees that only one agent can write to the wait queues at any point in time.
//
// We can emulate a typical execution using the API for demonstration purposes.
// At the start of the program, we initially have an empty map of wait queues. We represent this
// graphically as:
//
// Address │
// │
// ────────────┼────────────────────────────────────────────────────────────────────
// │
// │
// <empty> │
// │
// │
//
// Each row here will represent an address and the corresponding wait queue for that address.
//
// Let's suppose that "Thread 2" wants to wait on the address 50. After locking the global mutex,
// it first creates a new instance of a `FutexWaiter` and passes a pointer to it to
// `FutexWaiters::add_waiter`:
//
// Address │
// │
// ────────────┼──────────────────────────────────────────────────────────────────────
// │
// │ ┌───────────────┐
// │ ┌─►│ │
// │ │ │ Thread 2 │
// │ │ │ FutexWaiter │
// 50 ├────┘ │ │
// │ │ │
// │ │ cond_var │
// │ │ waiting: true │
// │ │ │
// │ └───────────────┘
// │
//
// Immediately after this, "Thread 2" calls `cond_var.wait`, which unlocks the global mutex and
// sleeps until it is notified again (ignoring spurious wakeups; those are handled by the infinite
// loop anyway).
//
// Now, let's suppose that "Thread 1" has acquired the lock and also wants to
// wait on the address `50`. Following the same procedure as "Thread 2", our map now looks like:
//
// Address │
// │
// ────────────┼──────────────────────────────────────────────────────────────────────
// │
// │ ┌───────────────┐ ┌───────────────┐
// │ ┌─►│ ├───────►│ │
// │ │ │ Thread 2 │ │ Thread 1 │
// │ │ │ FutexWaiter │ │ FutexWaiter │
// 50 ├────┘ │ │ │ │
// │ │ │ │ │
// │ │ cond_var │ │ cond_var │
// │ │ waiting: true │◄───────┤ waiting: true │
// │ │ │ │ │
// │ └───────────────┘ └───────────────┘
// │
//
// Note how the head of our list contains the first waiter which was registered, and the
// tail of our list is our most recent waiter.
//
// After "Thread 1" sleeps, "Thread 3" has the opportunity to lock the global mutex.
// In this case, "Thread 3" will notify one waiter of the address 50 using the `cond_var` inside
// `FutexWaiter`, and will also remove it from the linked list. In this case
// the notified thread is "Thread 2":
//
// Address │
// │
// ────────────┼──────────────────────────────────────────────────────────────────────
// │
// │ ┌────────────────┐ ┌────────────────┐
// │ │ │ ┌──►│ │
// │ │ Thread 2 │ │ │ Thread 1 │
// │ │ FutexWaiter │ │ │ FutexWaiter │
// 50 ├───┐ │ │ │ │ │
// │ │ │ │ │ │ │
// │ │ │ cond_var │ │ │ cond_var │
// │ │ │ waiting: false │ │ │ waiting: true │
// │ │ │ │ │ │ │
// │ │ └────────────────┘ │ └────────────────┘
// │ │ │
// │ └────────────────────────┘
// │
//
// Then, when the lock is released and "Thread 2" has woken up, it tries to lock the global mutex
// again, checking if `waiting` is true to manually remove itself from the queue if that's the case.
// In this case, `waiting` is false, which doesn't require any other handling, so it just
// removes the `FutexWaiter` from its stack and returns `AtomicsWaitResult::Ok`.
//
// Address │
// │
// ────────────┼──────────────────────────────────────────────────────────────────────
// │
// │ ┌────────────────┐
// │ ┌──────────────────────────►│ │
// │ │ │ Thread 1 │
// │ │ │ FutexWaiter │
// 50 ├────┘ │ │
// │ │ │
// │ │ cond_var │
// │ │ waiting: true │
// │ │ │
// │ └────────────────┘
// │
// │
// │
//
// At some point in the future, "Thread 1" will be notified and will proceed with the
// exact same steps as "Thread 2", emptying the wait queue and finishing the execution of our
// program.
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(clippy::undocumented_unsafe_blocks)]
#![allow(clippy::expl_impl_clone_on_copy)]
#![allow(unstable_name_collisions)]
use std::{
cell::UnsafeCell,
sync::{atomic::Ordering, Condvar, Mutex},
};
use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListLink, UnsafeRef};
use sptr::Strict;
use crate::{
builtins::{
array_buffer::{utils::SliceRef, SharedArrayBuffer},
typed_array::Element,
},
small_map::{Entry, SmallMap},
sys::time::{Duration, Instant},
JsNativeError, JsResult,
};
/// Map of shared data addresses to the corresponding list of agents waiting on each address.
pub(crate) static CRITICAL_SECTION: Mutex<FutexWaiters> = Mutex::new(FutexWaiters {
waiters: SmallMap::new(),
});
/// A waiter of a memory address.
#[derive(Debug, Default)]
pub(crate) struct FutexWaiter {
pub(super) link: LinkedListLink,
pub(super) cond_var: Condvar,
pub(super) waiting: bool,
addr: usize,
}
intrusive_adapter!(FutexWaiterAdapter = UnsafeRef<FutexWaiter>: FutexWaiter { link: LinkedListLink });
/// Map of memory addresses to the corresponding list of waiters for each address.
#[derive(Debug)]
pub(crate) struct FutexWaiters {
waiters: SmallMap<usize, LinkedList<FutexWaiterAdapter>, 16>,
}
impl FutexWaiters {
/// Notifies at most `max_count` waiters that are waiting on the address `addr`, and
/// returns the number of waiters that were notified.
///
/// Equivalent to [`RemoveWaiters`][remove] and [`NotifyWaiter`][notify], but in a single operation.
///
/// [remove]: https://tc39.es/ecma262/#sec-removewaiters
/// [notify]: https://tc39.es/ecma262/#sec-notifywaiter
pub(crate) fn notify_many(&mut self, addr: usize, max_count: u64) -> u64 {
let Entry::Occupied(mut wl) = self.waiters.entry(addr) else {
return 0;
};
for i in 0..max_count {
let Some(elem) = wl.get_mut().pop_front() else {
wl.remove();
return i;
};
elem.cond_var.notify_one();
// SAFETY: all elements of the waiters list are guaranteed to be valid.
unsafe {
(*UnsafeRef::into_raw(elem)).waiting = false;
}
}
if wl.get().is_empty() {
wl.remove();
}
max_count
}
/// # Safety
///
/// - `node` must NOT be linked to an existing waiter list.
/// - `node` must always point to a valid instance of `FutexWaiter` until `node` is
/// removed from its linked list, either by `remove_waiter` or by `notify_many`.
pub(crate) unsafe fn add_waiter(&mut self, node: *mut FutexWaiter, addr: usize) {
// SAFETY: `node` must point to a valid instance.
let node = unsafe {
debug_assert!(!(*node).link.is_linked());
(*node).waiting = true;
(*node).addr = addr;
UnsafeRef::from_raw(node)
};
self.waiters
.entry(addr)
.or_insert_with(|| LinkedList::new(FutexWaiterAdapter::new()))
.push_back(node);
}
/// # Safety
///
/// - `node` must point to a valid instance of `FutexWaiter`.
/// - `node` must be inside the wait list associated with `node.addr`.
pub(crate) unsafe fn remove_waiter(&mut self, node: *mut FutexWaiter) {
// SAFETY: `node` must point to a valid instance.
let addr = unsafe { (*node).addr };
let mut wl = match self.waiters.entry(addr) {
crate::small_map::Entry::Occupied(wl) => wl,
crate::small_map::Entry::Vacant(_) => return,
};
// SAFETY: `node` must be inside the wait list associated with `node.addr`.
unsafe {
wl.get_mut().cursor_mut_from_ptr(node).remove();
}
if wl.get().is_empty() {
wl.remove();
}
}
}
#[derive(Debug, Clone, Copy)]
pub(super) enum AtomicsWaitResult {
NotEqual,
TimedOut,
Ok,
}
/// Adds this agent to the wait queue for the address pointed to by `buffer[offset..]`.
///
/// # Safety
///
/// - `offset` must be a multiple of `std::mem::size_of::<E>()`.
/// - `buffer` must contain at least `std::mem::size_of::<E>()` bytes to read starting from `offset`.
// our implementation guarantees that `SharedArrayBuffer` is always aligned to `u64` at minimum.
pub(super) unsafe fn wait<E: Element + PartialEq>(
buffer: &SharedArrayBuffer,
offset: usize,
check: E,
timeout: Option<Duration>,
) -> JsResult<AtomicsWaitResult> {
// 10. Let block be buffer.[[ArrayBufferData]].
// 11. Let WL be GetWaiterList(block, indexedPosition).
// 12. Perform EnterCriticalSection(WL).
let mut waiters = CRITICAL_SECTION.lock().map_err(|_| {
// avoids exposing internals of our implementation.
JsNativeError::typ().with_message("failed to synchronize with the agent cluster")
})?;
let time_info = timeout.map(|timeout| (Instant::now(), timeout));
let buffer = &buffer.data()[offset..];
// 13. Let elementType be TypedArrayElementType(typedArray).
// 14. Let w be GetValueFromBuffer(buffer, indexedPosition, elementType, true, SeqCst).
// SAFETY: The safety of this operation is guaranteed by the caller.
let value = unsafe { E::read(SliceRef::AtomicSlice(buffer)).load(Ordering::SeqCst) };
// 15. If v ≠ w, then
// a. Perform LeaveCriticalSection(WL).
// b. Return "not-equal".
if check != value {
return Ok(AtomicsWaitResult::NotEqual);
}
// 16. Let W be AgentSignifier().
// 17. Perform AddWaiter(WL, W).
// ensure we can have aliased pointers to the waiter in a sound way.
let waiter = UnsafeCell::new(FutexWaiter::default());
let waiter_ptr = waiter.get();
// SAFETY: waiter is valid and we call `remove_waiter` below.
unsafe {
waiters.add_waiter(waiter_ptr, buffer.as_ptr().addr());
}
// 18. Let notified be SuspendAgent(WL, W, t).
// `SuspendAgent(WL, W, t)`
// https://tc39.es/ecma262/#sec-suspendthisagent
let result = loop {
// SAFETY: waiter is still valid
if unsafe { !(*waiter_ptr).waiting } {
break AtomicsWaitResult::Ok;
}
if let Some((start, timeout)) = time_info {
let Some(remaining) = timeout.checked_sub(start.elapsed()) else {
break AtomicsWaitResult::TimedOut;
};
// Since the mutex is poisoned, `waiter` cannot be read from other threads, meaning
// we can return directly.
// This doesn't use `wait_timeout_while` because it has to mutably borrow `waiter`,
// which is a big no-no since we have pointers to that location while the borrow is
// active.
// SAFETY: waiter is still valid
waiters = unsafe {
(*waiter_ptr)
.cond_var
.wait_timeout(waiters, remaining)
.map_err(|_| {
JsNativeError::typ()
.with_message("failed to synchronize with the agent cluster")
})?
.0
};
} else {
// SAFETY: waiter is still valid
waiters = unsafe {
(*waiter_ptr).cond_var.wait(waiters).map_err(|_| {
JsNativeError::typ()
.with_message("failed to synchronize with the agent cluster")
})?
};
}
};
// SAFETY: waiter is valid and contained in its waiter list if `waiting == true`.
unsafe {
// 20. Else,
// a. Perform RemoveWaiter(WL, W).
if (*waiter_ptr).waiting {
waiters.remove_waiter(waiter_ptr);
} else {
// 19. If notified is true, then
// a. Assert: W is not on the list of waiters in WL.
debug_assert!(!(*waiter_ptr).link.is_linked());
}
}
// 21. Perform LeaveCriticalSection(WL).
drop(waiters);
// 22. If notified is true, return "ok".
// 23. Return "timed-out".
Ok(result)
}
/// Notifies at most `count` agents waiting on the memory address pointed to by `buffer[offset..]`.
pub(super) fn notify(buffer: &SharedArrayBuffer, offset: usize, count: u64) -> JsResult<u64> {
let addr = buffer.data()[offset..].as_ptr().addr();
// 7. Let WL be GetWaiterList(block, indexedPosition).
// 8. Perform EnterCriticalSection(WL).
let mut waiters = CRITICAL_SECTION.lock().map_err(|_| {
// avoids exposing internals of our implementation.
JsNativeError::typ().with_message("failed to synchronize with the agent cluster")
})?;
// 9. Let S be RemoveWaiters(WL, c).
// 10. For each element W of S, do
// a. Perform NotifyWaiter(WL, W).
let count = waiters.notify_many(addr, count);
// 11. Perform LeaveCriticalSection(WL).
drop(waiters);
Ok(count)
}
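
The walkthrough at the top of this file boils down to a classic condition-variable pattern: sleep while a `waiting` flag is set, re-checking the flag to filter out spurious wakeups. A minimal, std-only sketch of that pattern (using a plain `Mutex<bool>` in place of the intrusive wait queues; names mirror `futex.rs`):

use std::sync::{Condvar, Mutex};
use std::time::{Duration, Instant};

// `true` means "still waiting"; this plays the role of `FutexWaiter::waiting`.
static LOCK: Mutex<bool> = Mutex::new(true);
static COND: Condvar = Condvar::new();

// Returns `true` if notified, `false` if the timeout expired.
fn wait_until_notified(timeout: Duration) -> bool {
    let start = Instant::now();
    let mut waiting = LOCK.lock().unwrap();
    // Loop to filter out spurious wakeups, like the `loop` in `futex::wait`.
    while *waiting {
        let Some(remaining) = timeout.checked_sub(start.elapsed()) else {
            return false;
        };
        waiting = COND.wait_timeout(waiting, remaining).unwrap().0;
    }
    true
}

fn notify() {
    // Flip the flag while holding the lock, mirroring how `notify_many`
    // clears `waiting` inside the critical section.
    *LOCK.lock().unwrap() = false;
    COND.notify_one();
}

fn main() {
    let waiter = std::thread::spawn(|| wait_until_notified(Duration::from_secs(5)));
    notify();
    assert!(waiter.join().unwrap());
}

Because the "was I notified?" state lives in the flag rather than in the wakeup itself, a notification that lands before the waiter even starts sleeping is never lost, which is the same property the `waiting` boolean gives `futex::wait`.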

605
boa_engine/src/builtins/atomics/mod.rs

@ -0,0 +1,605 @@
//! Boa's implementation of ECMAScript's global `Atomics` object.
//!
//! The `Atomics` object contains synchronization methods to orchestrate multithreading
//! on contexts that live in separate threads.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-atomics-object
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Atomics
mod futex;
use std::sync::atomic::Ordering;
use crate::{
builtins::BuiltInObject,
context::intrinsics::Intrinsics,
js_string,
object::{JsObject, Object},
property::Attribute,
realm::Realm,
string::common::StaticJsStrings,
symbol::JsSymbol,
sys::time::Duration,
value::IntegerOrInfinity,
Context, JsArgs, JsNativeError, JsResult, JsString, JsValue,
};
use boa_gc::GcRef;
use boa_profiler::Profiler;
use super::{
array_buffer::BufferRef,
typed_array::{
Atomic, ContentType, Element, IntegerIndexed, TypedArrayElement, TypedArrayKind,
},
BuiltInBuilder, IntrinsicObject,
};
/// JavaScript `Atomics` object.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct Atomics;
impl IntrinsicObject for Atomics {
fn init(realm: &Realm) {
let _timer = Profiler::global().start_event(std::any::type_name::<Self>(), "init");
BuiltInBuilder::with_intrinsic::<Self>(realm)
.static_property(
JsSymbol::to_string_tag(),
Self::NAME,
Attribute::READONLY | Attribute::NON_ENUMERABLE | Attribute::CONFIGURABLE,
)
.static_method(Atomics::add, js_string!("add"), 3)
.static_method(Atomics::bit_and, js_string!("and"), 3)
.static_method(Atomics::compare_exchange, js_string!("compareExchange"), 4)
.static_method(Atomics::swap, js_string!("exchange"), 3)
.static_method(Atomics::is_lock_free, js_string!("isLockFree"), 1)
.static_method(Atomics::load, js_string!("load"), 2)
.static_method(Atomics::bit_or, js_string!("or"), 3)
.static_method(Atomics::store, js_string!("store"), 3)
.static_method(Atomics::sub, js_string!("sub"), 3)
.static_method(Atomics::wait, js_string!("wait"), 4)
.static_method(Atomics::notify, js_string!("notify"), 3)
.static_method(Atomics::bit_xor, js_string!("xor"), 3)
.build();
}
fn get(intrinsics: &Intrinsics) -> JsObject {
intrinsics.objects().atomics()
}
}
impl BuiltInObject for Atomics {
const NAME: JsString = StaticJsStrings::ATOMICS;
}
macro_rules! atomic_op {
($(#[$attr:meta])* $name:ident) => {
$(#[$attr])* fn $name(_: &JsValue, args: &[JsValue], context: &mut Context<'_>) -> JsResult<JsValue> {
let array = args.get_or_undefined(0);
let index = args.get_or_undefined(1);
let value = args.get_or_undefined(2);
let ii = validate_integer_typed_array(array, false)?;
let pos = validate_atomic_access(&ii, index, context)?;
let value = ii.kind().get_element(value, context)?;
// revalidate
let mut buffer = ii.viewed_array_buffer().borrow_mut();
let mut buffer = buffer
.as_buffer_mut()
.expect("integer indexed object must contain a valid buffer");
let Some(mut data) = buffer.data_mut() else {
return Err(JsNativeError::typ()
.with_message("cannot execute atomic operation in detached buffer")
.into());
};
let data = data.subslice_mut(pos..);
// SAFETY: The integer indexed object guarantees that the buffer is aligned.
// The call to `validate_atomic_access` guarantees that the index is in-bounds.
let value: TypedArrayElement = unsafe {
match value {
TypedArrayElement::Int8(num) => {
i8::read_mut(data).$name(num, Ordering::SeqCst).into()
}
TypedArrayElement::Uint8(num) => {
u8::read_mut(data).$name(num, Ordering::SeqCst).into()
}
TypedArrayElement::Int16(num) => i16::read_mut(data)
.$name(num, Ordering::SeqCst)
.into(),
TypedArrayElement::Uint16(num) => u16::read_mut(data)
.$name(num, Ordering::SeqCst)
.into(),
TypedArrayElement::Int32(num) => i32::read_mut(data)
.$name(num, Ordering::SeqCst)
.into(),
TypedArrayElement::Uint32(num) => u32::read_mut(data)
.$name(num, Ordering::SeqCst)
.into(),
TypedArrayElement::BigInt64(num) => i64::read_mut(data)
.$name(num, Ordering::SeqCst)
.into(),
TypedArrayElement::BigUint64(num) => u64::read_mut(data)
.$name(num, Ordering::SeqCst)
.into(),
TypedArrayElement::Uint8Clamped(_)
| TypedArrayElement::Float32(_)
| TypedArrayElement::Float64(_) => unreachable!(
"must have been filtered out by the call to `validate_integer_typed_array`"
),
}
};
Ok(value.into())
}
};
}
impl Atomics {
/// [`Atomics.isLockFree ( size )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.islockfree
fn is_lock_free(_: &JsValue, args: &[JsValue], context: &mut Context<'_>) -> JsResult<JsValue> {
// 1. Let n be ? ToIntegerOrInfinity(size).
let n = args.get_or_undefined(0).to_integer_or_infinity(context)?;
// 2. Let AR be the Agent Record of the surrounding agent.
Ok(match n.as_integer() {
// 3. If n = 1, return AR.[[IsLockFree1]].
Some(1) => <<u8 as Element>::Atomic as Atomic>::is_lock_free(),
// 4. If n = 2, return AR.[[IsLockFree2]].
Some(2) => <<u16 as Element>::Atomic as Atomic>::is_lock_free(),
// 5. If n = 4, return true.
Some(4) => true,
// 6. If n = 8, return AR.[[IsLockFree8]].
Some(8) => <<u64 as Element>::Atomic as Atomic>::is_lock_free(),
// 7. Return false.
_ => false,
}
.into())
}
/// [`Atomics.load ( typedArray, index )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.load
fn load(_: &JsValue, args: &[JsValue], context: &mut Context<'_>) -> JsResult<JsValue> {
let array = args.get_or_undefined(0);
let index = args.get_or_undefined(1);
// 1. Let indexedPosition be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index).
let ii = validate_integer_typed_array(array, false)?;
let pos = validate_atomic_access(&ii, index, context)?;
// 2. Perform ? RevalidateAtomicAccess(typedArray, indexedPosition).
let buffer = ii.viewed_array_buffer().borrow();
let buffer = buffer
.as_buffer()
.expect("integer indexed object must contain a valid buffer");
let Some(data) = buffer.data() else {
return Err(JsNativeError::typ()
.with_message("cannot execute atomic operation in detached buffer")
.into());
};
let data = data.subslice(pos..);
// 3. Let buffer be typedArray.[[ViewedArrayBuffer]].
// 4. Let elementType be TypedArrayElementType(typedArray).
// 5. Return GetValueFromBuffer(buffer, indexedPosition, elementType, true, seq-cst).
// SAFETY: The integer indexed object guarantees that the buffer is aligned.
// The call to `validate_atomic_access` guarantees that the index is in-bounds.
let value = unsafe { data.get_value(ii.kind(), Ordering::SeqCst) };
Ok(value.into())
}
/// [`Atomics.store ( typedArray, index, value )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.store
fn store(_: &JsValue, args: &[JsValue], context: &mut Context<'_>) -> JsResult<JsValue> {
let array = args.get_or_undefined(0);
let index = args.get_or_undefined(1);
let value = args.get_or_undefined(2);
// 1. Let indexedPosition be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index).
let ii = validate_integer_typed_array(array, false)?;
let pos = validate_atomic_access(&ii, index, context)?;
// bit of a hack to preserve the converted value
// 2. If typedArray.[[ContentType]] is bigint, let v be ? ToBigInt(value).
let converted: JsValue = if ii.kind().content_type() == ContentType::BigInt {
value.to_bigint(context)?.into()
} else {
// 3. Otherwise, let v be 𝔽(? ToIntegerOrInfinity(value)).
match value.to_integer_or_infinity(context)? {
IntegerOrInfinity::PositiveInfinity => f64::INFINITY,
IntegerOrInfinity::Integer(i) => i as f64,
IntegerOrInfinity::NegativeInfinity => f64::NEG_INFINITY,
}
.into()
};
let value = ii.kind().get_element(&converted, context)?;
// 4. Perform ? RevalidateAtomicAccess(typedArray, indexedPosition).
let mut buffer = ii.viewed_array_buffer().borrow_mut();
let mut buffer = buffer
.as_buffer_mut()
.expect("integer indexed object must contain a valid buffer");
let Some(mut buffer) = buffer.data_mut() else {
return Err(JsNativeError::typ()
.with_message("cannot execute atomic operation in detached buffer")
.into());
};
let mut data = buffer.subslice_mut(pos..);
// 5. Let buffer be typedArray.[[ViewedArrayBuffer]].
// 6. Let elementType be TypedArrayElementType(typedArray).
// 7. Perform SetValueInBuffer(buffer, indexedPosition, elementType, v, true, seq-cst).
// SAFETY: The integer indexed object guarantees that the buffer is aligned.
// The call to `validate_atomic_access` guarantees that the index is in-bounds.
unsafe {
data.set_value(value, Ordering::SeqCst);
}
// 8. Return v.
Ok(converted)
}
/// [`Atomics.compareExchange ( typedArray, index, expectedValue, replacementValue )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.compareexchange
fn compare_exchange(
_: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
let array = args.get_or_undefined(0);
let index = args.get_or_undefined(1);
let expected = args.get_or_undefined(2);
let replacement = args.get_or_undefined(3);
// 1. Let indexedPosition be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index).
// 2. Let buffer be typedArray.[[ViewedArrayBuffer]].
// 3. Let block be buffer.[[ArrayBufferData]].
let ii = validate_integer_typed_array(array, false)?;
let pos = validate_atomic_access(&ii, index, context)?;
let typed_array_kind = ii.kind();
// 4. If typedArray.[[ContentType]] is bigint, then
// a. Let expected be ? ToBigInt(expectedValue).
// b. Let replacement be ? ToBigInt(replacementValue).
// 5. Else,
// a. Let expected be 𝔽(? ToIntegerOrInfinity(expectedValue)).
// b. Let replacement be 𝔽(? ToIntegerOrInfinity(replacementValue)).
let exp = typed_array_kind.get_element(expected, context)?.to_bytes();
let rep = typed_array_kind
.get_element(replacement, context)?
.to_bytes();
// 6. Perform ? RevalidateAtomicAccess(typedArray, indexedPosition).
let mut buffer = ii.viewed_array_buffer().borrow_mut();
let mut buffer = buffer
.as_buffer_mut()
.expect("integer indexed object must contain a valid buffer");
let Some(mut data) = buffer.data_mut() else {
return Err(JsNativeError::typ()
.with_message("cannot execute atomic operation in detached buffer")
.into());
};
let data = data.subslice_mut(pos..);
// 7. Let elementType be TypedArrayElementType(typedArray).
// 8. Let elementSize be TypedArrayElementSize(typedArray).
// 9. Let isLittleEndian be the value of the [[LittleEndian]] field of the surrounding agent's Agent Record.
// 10. Let expectedBytes be NumericToRawBytes(elementType, expected, isLittleEndian).
// 11. Let replacementBytes be NumericToRawBytes(elementType, replacement, isLittleEndian).
// 12. If IsSharedArrayBuffer(buffer) is true, then
// a. Let rawBytesRead be AtomicCompareExchangeInSharedBlock(block, indexedPosition, elementSize, expectedBytes, replacementBytes).
// 13. Else,
// a. Let rawBytesRead be a List of length elementSize whose elements are the sequence of elementSize bytes starting with block[indexedPosition].
// b. If ByteListEqual(rawBytesRead, expectedBytes) is true, then
// i. Store the individual bytes of replacementBytes into block, starting at block[indexedPosition].
// 14. Return RawBytesToNumeric(elementType, rawBytesRead, isLittleEndian).
// SAFETY: The integer indexed object guarantees that the buffer is aligned.
// The call to `validate_atomic_access` guarantees that the index is in-bounds.
let value: TypedArrayElement = unsafe {
match typed_array_kind {
TypedArrayKind::Int8 => i8::read_mut(data)
.compare_exchange(exp as i8, rep as i8, Ordering::SeqCst)
.into(),
TypedArrayKind::Uint8 => u8::read_mut(data)
.compare_exchange(exp as u8, rep as u8, Ordering::SeqCst)
.into(),
TypedArrayKind::Int16 => i16::read_mut(data)
.compare_exchange(exp as i16, rep as i16, Ordering::SeqCst)
.into(),
TypedArrayKind::Uint16 => u16::read_mut(data)
.compare_exchange(exp as u16, rep as u16, Ordering::SeqCst)
.into(),
TypedArrayKind::Int32 => i32::read_mut(data)
.compare_exchange(exp as i32, rep as i32, Ordering::SeqCst)
.into(),
TypedArrayKind::Uint32 => u32::read_mut(data)
.compare_exchange(exp as u32, rep as u32, Ordering::SeqCst)
.into(),
TypedArrayKind::BigInt64 => i64::read_mut(data)
.compare_exchange(exp as i64, rep as i64, Ordering::SeqCst)
.into(),
TypedArrayKind::BigUint64 => u64::read_mut(data)
.compare_exchange(exp, rep, Ordering::SeqCst)
.into(),
TypedArrayKind::Uint8Clamped
| TypedArrayKind::Float32
| TypedArrayKind::Float64 => unreachable!(
"must have been filtered out by the call to `validate_integer_typed_array`"
),
}
};
Ok(value.into())
}
// =========== Atomics.ops start ===========
// Most of the operations here follow the same list of steps:
//
// AtomicReadModifyWrite ( typedArray, index, value, op )
// <https://tc39.es/ecma262/#sec-atomicreadmodifywrite>
//
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
// 2. Let indexedPosition be ? ValidateAtomicAccess(typedArray, index).
// 3. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value).
// 4. Otherwise, let v be 𝔽(? ToIntegerOrInfinity(value)).
// 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
// 6. NOTE: The above check is not redundant with the check in ValidateIntegerTypedArray because the call to ToBigInt or ToIntegerOrInfinity on the preceding lines can have arbitrary side effects, which could cause the buffer to become detached.
// 7. Let elementType be TypedArrayElementType(typedArray).
// 8. Return GetModifySetValueInBuffer(buffer, indexedPosition, elementType, v, op).
//
// However, our implementation differs significantly from this, which is why these steps are
// just here for documentation purposes.
atomic_op! {
/// [`Atomics.add ( typedArray, index, value )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.add
add
}
atomic_op! {
/// [`Atomics.and ( typedArray, index, value )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.and
bit_and
}
atomic_op! {
/// [`Atomics.exchange ( typedArray, index, value )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.exchange
swap
}
atomic_op! {
/// [`Atomics.or ( typedArray, index, value )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.or
bit_or
}
atomic_op! {
/// [`Atomics.sub ( typedArray, index, value )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.sub
sub
}
atomic_op! {
/// [`Atomics.xor ( typedArray, index, value )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.xor
bit_xor
}
/// [`Atomics.wait ( typedArray, index, value, timeout )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.wait
fn wait(_: &JsValue, args: &[JsValue], context: &mut Context<'_>) -> JsResult<JsValue> {
let array = args.get_or_undefined(0);
let index = args.get_or_undefined(1);
let value = args.get_or_undefined(2);
let timeout = args.get_or_undefined(3);
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray, true).
let ii = validate_integer_typed_array(array, true)?;
let buffer = ii.viewed_array_buffer().borrow();
let buffer = buffer
.as_buffer()
.expect("integer indexed object must contain a valid buffer");
// 2. If IsSharedArrayBuffer(buffer) is false, throw a TypeError exception.
let BufferRef::SharedBuffer(buffer) = buffer else {
return Err(JsNativeError::typ()
.with_message("cannot use `ArrayBuffer` for an atomic wait")
.into());
};
// 3. Let indexedPosition be ? ValidateAtomicAccess(typedArray, index).
let offset = validate_atomic_access(&ii, index, context)?;
// The spec expects this value to be evaluated first, then the timeout.
let value = if ii.kind() == TypedArrayKind::BigInt64 {
// 4. If typedArray.[[TypedArrayName]] is "BigInt64Array", let v be ? ToBigInt64(value).
value.to_big_int64(context)?
} else {
// 5. Otherwise, let v be ? ToInt32(value).
i64::from(value.to_i32(context)?)
};
// moving above since we need to make a generic call next.
// 6. Let q be ? ToNumber(timeout).
// 7. If q is either NaN or +∞𝔽, let t be +∞; else if q is -∞𝔽, let t be 0; else let t be max(ℝ(q), 0).
let mut timeout = timeout.to_number(context)?;
// convert to nanoseconds to discard any excessively big timeouts.
timeout = timeout.clamp(0.0, f64::INFINITY) * 1000.0 * 1000.0;
let timeout = if timeout.is_nan() || timeout.is_infinite() || timeout > u64::MAX as f64 {
None
} else {
Some(Duration::from_nanos(timeout as u64))
};
// 8. Let B be AgentCanSuspend().
// 9. If B is false, throw a TypeError exception.
if !context.can_block() {
return Err(JsNativeError::typ()
.with_message("agent cannot be suspended")
.into());
}
// SAFETY: the validity of `addr` is verified by our call to `validate_atomic_access`.
let result = unsafe {
if ii.kind() == TypedArrayKind::BigInt64 {
futex::wait(buffer, offset, value, timeout)?
} else {
// value must fit into `i32` since it came from an `i32` above.
futex::wait(buffer, offset, value as i32, timeout)?
}
};
Ok(match result {
futex::AtomicsWaitResult::NotEqual => js_string!("not-equal"),
futex::AtomicsWaitResult::TimedOut => js_string!("timed-out"),
futex::AtomicsWaitResult::Ok => js_string!("ok"),
}
.into())
}
/// [`Atomics.notify ( typedArray, index, count )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics.notify
fn notify(_: &JsValue, args: &[JsValue], context: &mut Context<'_>) -> JsResult<JsValue> {
let array = args.get_or_undefined(0);
let index = args.get_or_undefined(1);
let count = args.get_or_undefined(2);
// 1. Let indexedPosition be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index, true).
let ii = validate_integer_typed_array(array, true)?;
let offset = validate_atomic_access(&ii, index, context)?;
// 2. If count is undefined, then
let count = if count.is_undefined() {
// a. Let c be +∞.
u64::MAX
} else {
// 3. Else,
// a. Let intCount be ? ToIntegerOrInfinity(count).
// b. Let c be max(intCount, 0).
match count.to_integer_or_infinity(context)? {
IntegerOrInfinity::PositiveInfinity => u64::MAX,
IntegerOrInfinity::Integer(i) => i64::max(i, 0) as u64,
IntegerOrInfinity::NegativeInfinity => 0,
}
};
// 4. Let buffer be typedArray.[[ViewedArrayBuffer]].
// 5. Let block be buffer.[[ArrayBufferData]].
// 6. If IsSharedArrayBuffer(buffer) is false, return +0𝔽.
let buffer = ii.viewed_array_buffer();
let buffer = buffer.borrow();
let Some(shared) = buffer.as_shared_array_buffer() else {
return Ok(0.into());
};
let count = futex::notify(shared, offset, count)?;
// 12. Let n be the number of elements in S.
// 13. Return 𝔽(n).
Ok(count.into())
}
}
/// [`ValidateIntegerTypedArray ( typedArray [ , waitable ] )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-validateintegertypedarray
fn validate_integer_typed_array(
array: &JsValue,
waitable: bool,
) -> JsResult<GcRef<'_, IntegerIndexed>> {
// 1. If waitable is not present, set waitable to false.
// 2. Perform ? ValidateTypedArray(typedArray).
let ii = array
.as_object()
.and_then(|o| GcRef::try_map(o.borrow(), Object::as_typed_array))
.ok_or_else(|| JsNativeError::typ().with_message("value is not a typed array object"))?;
if ii.is_detached() {
return Err(JsNativeError::typ()
.with_message("Buffer of the typed array is detached")
.into());
}
// 3. Let buffer be typedArray.[[ViewedArrayBuffer]].
if waitable {
// 4. If waitable is true, then
// a. If typedArray.[[TypedArrayName]] is neither "Int32Array" nor "BigInt64Array", throw a TypeError exception.
if ![TypedArrayKind::Int32, TypedArrayKind::BigInt64].contains(&ii.kind()) {
return Err(JsNativeError::typ()
.with_message("can only atomically wait using Int32 or BigInt64 arrays")
.into());
}
} else {
// 5. Else,
// a. Let type be TypedArrayElementType(typedArray).
// b. If IsUnclampedIntegerElementType(type) is false and IsBigIntElementType(type) is
// false, throw a TypeError exception.
if !ii.kind().supports_atomic_ops() {
return Err(JsNativeError::typ()
.with_message(
"platform doesn't support atomic operations on the provided `TypedArray`",
)
.into());
}
}
// 6. Return buffer.
Ok(ii)
}
/// [`ValidateAtomicAccess ( iieoRecord, requestIndex )`][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-validateatomicaccess
fn validate_atomic_access(
array: &IntegerIndexed,
request_index: &JsValue,
context: &mut Context<'_>,
) -> JsResult<usize> {
// 1. Let length be typedArray.[[ArrayLength]].
let length = array.array_length();
// 2. Let accessIndex be ? ToIndex(requestIndex).
let access_index = request_index.to_index(context)?;
// 3. Assert: accessIndex ≥ 0.
// ensured by the type.
// 4. If accessIndex ≥ length, throw a RangeError exception.
if access_index >= length {
return Err(JsNativeError::range()
.with_message("index for typed array outside of bounds")
.into());
}
// 5. Let elementSize be TypedArrayElementSize(typedArray).
let element_size = array.kind().element_size();
// 6. Let offset be typedArray.[[ByteOffset]].
let offset = array.byte_offset();
// 7. Return (accessIndex × elementSize) + offset.
Ok(((access_index * element_size) + offset) as usize)
}
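
With the builtin registered, the operations can be smoke-tested through boa's public API. A sketch against the 0.17-era `Context::eval(Source)` interface (the method names here are assumptions about the public API, not part of this diff):

use boa_engine::{Context, Source};

fn main() {
    let mut context = Context::default();
    let result = context
        .eval(Source::from_bytes(
            r#"
                const sab = new SharedArrayBuffer(16);
                const view = new Int32Array(sab);
                Atomics.store(view, 0, 5); // returns 5
                Atomics.add(view, 0, 2);   // returns the old value, 5
                Atomics.load(view, 0)      // completion value: 7
            "#,
        ))
        .expect("Atomics script should not fail");
    assert_eq!(result.as_number(), Some(7.0));
}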

4
boa_engine/src/builtins/mod.rs

@ -5,6 +5,7 @@ pub mod array_buffer;
pub mod async_function;
pub mod async_generator;
pub mod async_generator_function;
pub mod atomics;
pub mod bigint;
pub mod boolean;
pub mod dataview;
@ -86,6 +87,7 @@ use crate::{
array_buffer::{ArrayBuffer, SharedArrayBuffer},
async_generator::AsyncGenerator,
async_generator_function::AsyncGeneratorFunction,
atomics::Atomics,
error::r#type::ThrowTypeError,
generator::Generator,
generator_function::GeneratorFunction,
@ -263,6 +265,7 @@ impl Realm {
WeakRef::init(self);
WeakMap::init(self);
WeakSet::init(self);
Atomics::init(self);
#[cfg(feature = "annex-b")]
{
@ -388,6 +391,7 @@ pub(crate) fn set_default_global_bindings(context: &mut Context<'_>) -> JsResult
global_binding::<WeakRef>(context)?;
global_binding::<WeakMap>(context)?;
global_binding::<WeakSet>(context)?;
global_binding::<Atomics>(context)?;
#[cfg(feature = "annex-b")]
{

2
boa_engine/src/builtins/temporal/now.rs

@ -10,12 +10,12 @@ use crate::{
property::Attribute,
realm::Realm,
string::common::StaticJsStrings,
sys::time::SystemTime,
Context, JsBigInt, JsNativeError, JsObject, JsResult, JsString, JsSymbol, JsValue,
};
use boa_profiler::Profiler;
use super::{ns_max_instant, ns_min_instant};
use std::time::SystemTime;
/// JavaScript `Temporal.Now` object.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]

31
boa_engine/src/builtins/typed_array/builtin.rs

@ -1,4 +1,4 @@
use std::{cmp::Ordering, ptr, sync::atomic};
use std::{cmp, ptr, sync::atomic};
use boa_gc::GcRef;
use boa_macros::utf16;
@ -2204,6 +2204,7 @@ impl BuiltinTypedArray {
.expect("value can only be f64 or BigInt");
// ii. Perform SetValueInBuffer(targetBuffer, targetByteIndex, targetType, value, true, Unordered).
// SAFETY: previous checks preserve the validity of the indices.
unsafe {
target_buffer
.subslice_mut(target_byte_index..)
@ -2584,7 +2585,7 @@ impl BuiltinTypedArray {
// 5. NOTE: The following closure performs a numeric comparison rather than the string comparison used in 23.1.3.30.
// 6. Let SortCompare be a new Abstract Closure with parameters (x, y) that captures comparefn and performs the following steps when called:
let sort_compare =
|x: &JsValue, y: &JsValue, context: &mut Context<'_>| -> JsResult<Ordering> {
|x: &JsValue, y: &JsValue, context: &mut Context<'_>| -> JsResult<cmp::Ordering> {
// a. Return ? CompareTypedArrayElements(x, y, comparefn).
compare_typed_array_elements(x, y, compare_fn, context)
};
@ -2652,7 +2653,7 @@ impl BuiltinTypedArray {
// 6. NOTE: The following closure performs a numeric comparison rather than the string comparison used in 23.1.3.34.
// 7. Let SortCompare be a new Abstract Closure with parameters (x, y) that captures comparefn and performs the following steps when called:
let sort_compare =
|x: &JsValue, y: &JsValue, context: &mut Context<'_>| -> JsResult<Ordering> {
|x: &JsValue, y: &JsValue, context: &mut Context<'_>| -> JsResult<cmp::Ordering> {
// a. Return ? CompareTypedArrayElements(x, y, comparefn).
compare_typed_array_elements(x, y, compare_fn, context)
};
@ -3501,7 +3502,7 @@ fn compare_typed_array_elements(
y: &JsValue,
compare_fn: Option<&JsObject>,
context: &mut Context<'_>,
) -> JsResult<Ordering> {
) -> JsResult<cmp::Ordering> {
// 1. Assert: x is a Number and y is a Number, or x is a BigInt and y is a BigInt.
// 2. If comparefn is not undefined, then
@ -3513,14 +3514,14 @@ fn compare_typed_array_elements(
// b. If v is NaN, return +0𝔽.
if v.is_nan() {
return Ok(Ordering::Equal);
return Ok(cmp::Ordering::Equal);
}
// c. Return v.
if v.is_sign_positive() {
return Ok(Ordering::Greater);
return Ok(cmp::Ordering::Greater);
}
return Ok(Ordering::Less);
return Ok(cmp::Ordering::Less);
}
match (x, y) {
@ -3541,41 +3542,41 @@ fn compare_typed_array_elements(
(JsValue::Rational(x), JsValue::Rational(y)) => {
// 3. If x and y are both NaN, return +0𝔽.
if x.is_nan() && y.is_nan() {
return Ok(Ordering::Equal);
return Ok(cmp::Ordering::Equal);
}
// 4. If x is NaN, return 1𝔽.
if x.is_nan() {
return Ok(Ordering::Greater);
return Ok(cmp::Ordering::Greater);
}
// 5. If y is NaN, return -1𝔽.
if y.is_nan() {
return Ok(Ordering::Less);
return Ok(cmp::Ordering::Less);
}
// 6. If x < y, return -1𝔽.
if x < y {
return Ok(Ordering::Less);
return Ok(cmp::Ordering::Less);
}
// 7. If x > y, return 1𝔽.
if x > y {
return Ok(Ordering::Greater);
return Ok(cmp::Ordering::Greater);
}
// 8. If x is -0𝔽 and y is +0𝔽, return -1𝔽.
if x.is_sign_negative() && x.is_zero() && y.is_sign_positive() && y.is_zero() {
return Ok(Ordering::Less);
return Ok(cmp::Ordering::Less);
}
// 9. If x is +0𝔽 and y is -0𝔽, return 1𝔽.
if x.is_sign_positive() && x.is_zero() && y.is_sign_negative() && y.is_zero() {
return Ok(Ordering::Greater);
return Ok(cmp::Ordering::Greater);
}
// 10. Return +0𝔽.
Ok(Ordering::Equal)
Ok(cmp::Ordering::Equal)
}
_ => unreachable!("x and y must be both Numbers or BigInts"),
}

400
boa_engine/src/builtins/typed_array/element.rs

@ -1,400 +0,0 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![allow(clippy::cast_ptr_alignment)] // Invariants are checked by the caller.
#![allow(clippy::undocumented_unsafe_blocks)] // Invariants are checked by the caller.
use std::sync::atomic;
use bytemuck::{AnyBitPattern, NoUninit};
use num_traits::ToPrimitive;
use portable_atomic::{AtomicU16, AtomicU32, AtomicU64};
use crate::{
builtins::{
array_buffer::utils::{SliceRef, SliceRefMut},
typed_array::TypedArrayElement,
},
value::Numeric,
Context, JsResult, JsValue,
};
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, AnyBitPattern, NoUninit)]
#[repr(transparent)]
pub(crate) struct ClampedU8(pub(crate) u8);
impl ClampedU8 {
pub(crate) fn to_be(self) -> Self {
Self(self.0.to_be())
}
pub(crate) fn to_le(self) -> Self {
Self(self.0.to_le())
}
}
impl From<ClampedU8> for Numeric {
fn from(value: ClampedU8) -> Self {
Numeric::Number(value.0.into())
}
}
pub(crate) trait Element:
Sized + Into<TypedArrayElement> + NoUninit + AnyBitPattern
{
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self>;
/// Gets the little endian representation of `Self`.
fn to_little_endian(self) -> Self;
/// Gets the big endian representation of `Self`.
fn to_big_endian(self) -> Self;
/// Reads `Self` from the `buffer`.
///
/// This will always read values in the native endianness of the target architecture.
///
/// # Safety
///
/// - `buffer` must be aligned to the native alignment of `Self`.
/// - `buffer` must contain enough bytes to read `std::mem::size_of::<Self>()` bytes.
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self;
/// Writes the bytes of this element into `buffer`.
///
/// This will always write values in the native endianness of the target architecture.
///
/// # Safety
///
/// - `buffer` must be aligned to the native alignment of `Self`.
/// - `buffer` must contain enough bytes to store `std::mem::size_of::<Self>()` bytes.
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering);
}
impl Element for u8 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_uint8(context)
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
debug_assert!(buffer.len() >= 1);
match buffer {
SliceRef::Slice(buffer) => unsafe { *buffer.get_unchecked(0) },
SliceRef::AtomicSlice(buffer) => unsafe { buffer.get_unchecked(0).load(order) },
}
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
debug_assert!(buffer.len() >= 1);
match buffer {
SliceRefMut::Slice(buffer) => unsafe {
*buffer.get_unchecked_mut(0) = value;
},
SliceRefMut::AtomicSlice(buffer) => unsafe {
buffer.get_unchecked(0).store(value, order);
},
}
}
}
impl Element for u16 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_uint16(context)
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
if cfg!(debug_assertions) {
assert!(buffer.len() >= std::mem::size_of::<u16>());
assert!(buffer.addr() % std::mem::align_of::<u16>() == 0);
}
match buffer {
SliceRef::Slice(buffer) => unsafe { *buffer.as_ptr().cast() },
SliceRef::AtomicSlice(buffer) => unsafe {
(*buffer.as_ptr().cast::<AtomicU16>()).load(order)
},
}
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
if cfg!(debug_assertions) {
assert!(buffer.len() >= std::mem::size_of::<u16>());
assert!(buffer.addr() % std::mem::align_of::<u16>() == 0);
}
match buffer {
SliceRefMut::Slice(buffer) => unsafe {
*buffer.as_mut_ptr().cast() = value;
},
SliceRefMut::AtomicSlice(buffer) => unsafe {
(*buffer.as_ptr().cast::<AtomicU16>()).store(value, order);
},
}
}
}
impl Element for u32 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_u32(context)
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
if cfg!(debug_assertions) {
assert!(buffer.len() >= std::mem::size_of::<u32>());
assert!(buffer.addr() % std::mem::align_of::<u32>() == 0);
}
match buffer {
SliceRef::Slice(buffer) => unsafe { *buffer.as_ptr().cast() },
SliceRef::AtomicSlice(buffer) => unsafe {
(*buffer.as_ptr().cast::<AtomicU32>()).load(order)
},
}
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
if cfg!(debug_assertions) {
assert!(buffer.len() >= std::mem::size_of::<u32>());
assert!(buffer.addr() % std::mem::align_of::<u32>() == 0);
}
match buffer {
SliceRefMut::Slice(buffer) => unsafe {
*buffer.as_mut_ptr().cast() = value;
},
SliceRefMut::AtomicSlice(buffer) => unsafe {
(*buffer.as_ptr().cast::<AtomicU32>()).store(value, order);
},
}
}
}
impl Element for u64 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
Ok(value.to_big_uint64(context)?.to_u64().unwrap_or(u64::MAX))
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
if cfg!(debug_assertions) {
assert!(buffer.len() >= std::mem::size_of::<u64>());
assert!(buffer.addr() % std::mem::align_of::<u64>() == 0);
}
match buffer {
SliceRef::Slice(buffer) => unsafe { *buffer.as_ptr().cast() },
SliceRef::AtomicSlice(buffer) => unsafe {
(*buffer.as_ptr().cast::<AtomicU64>()).load(order)
},
}
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
if cfg!(debug_assertions) {
assert!(buffer.len() >= std::mem::size_of::<u64>());
assert!(buffer.addr() % std::mem::align_of::<u64>() == 0);
}
match buffer {
SliceRefMut::Slice(buffer) => unsafe {
*buffer.as_mut_ptr().cast() = value;
},
SliceRefMut::AtomicSlice(buffer) => unsafe {
(*buffer.as_ptr().cast::<AtomicU64>()).store(value, order);
},
}
}
}
impl Element for i8 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_int8(context)
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
unsafe { u8::read_from_buffer(buffer, order) as i8 }
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
unsafe { u8::write_to_buffer(buffer, value as u8, order) }
}
}
impl Element for ClampedU8 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_uint8_clamp(context).map(ClampedU8)
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
unsafe { ClampedU8(u8::read_from_buffer(buffer, order)) }
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
unsafe { u8::write_to_buffer(buffer, value.0, order) }
}
}
impl Element for i16 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_int16(context)
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
unsafe { u16::read_from_buffer(buffer, order) as i16 }
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
unsafe { u16::write_to_buffer(buffer, value as u16, order) }
}
}
impl Element for i32 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_i32(context)
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
unsafe { u32::read_from_buffer(buffer, order) as i32 }
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
unsafe { u32::write_to_buffer(buffer, value as u32, order) }
}
}
impl Element for i64 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
let big_int = value.to_big_int64(context)?;
Ok(big_int.to_i64().unwrap_or_else(|| {
if big_int.is_positive() {
i64::MAX
} else {
i64::MIN
}
}))
}
fn to_big_endian(self) -> Self {
self.to_be()
}
fn to_little_endian(self) -> Self {
self.to_le()
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
unsafe { u64::read_from_buffer(buffer, order) as i64 }
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
unsafe { u64::write_to_buffer(buffer, value as u64, order) }
}
}
impl Element for f32 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_number(context).map(|f| f as f32)
}
fn to_big_endian(self) -> Self {
f32::from_bits(self.to_bits().to_be())
}
fn to_little_endian(self) -> Self {
f32::from_bits(self.to_bits().to_le())
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
unsafe { f32::from_bits(u32::read_from_buffer(buffer, order)) }
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
unsafe { u32::write_to_buffer(buffer, value.to_bits(), order) }
}
}
impl Element for f64 {
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
value.to_number(context)
}
fn to_big_endian(self) -> Self {
f64::from_bits(self.to_bits().to_be())
}
fn to_little_endian(self) -> Self {
f64::from_bits(self.to_bits().to_le())
}
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self {
unsafe { f64::from_bits(u64::read_from_buffer(buffer, order)) }
}
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) {
unsafe { u64::write_to_buffer(buffer, value.to_bits(), order) }
}
}

115
boa_engine/src/builtins/typed_array/element/atomic.rs

@ -0,0 +1,115 @@
use std::{convert::identity, sync::atomic::Ordering};
use portable_atomic::{
AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicU16, AtomicU32, AtomicU64, AtomicU8,
};
/// An atomic type that supports atomic operations.
pub(crate) trait Atomic {
/// The "plain" type of the atomic e.g. `AtomicU8::Plain == u8`
type Plain;
/// Loads the value of this atomic.
fn load(&self, order: Ordering) -> Self::Plain;
/// Stores `value` on this atomic.
fn store(&self, val: Self::Plain, order: Ordering);
/// Computes the `+` operation between `self` and `value`, storing the result
/// on `self` and returning the old value. This operation wraps on overflow.
fn add(&self, val: Self::Plain, order: Ordering) -> Self::Plain;
/// Computes the `&` operation between `self` and `value`, storing the result
/// on `self` and returning the old value.
fn bit_and(&self, val: Self::Plain, order: Ordering) -> Self::Plain;
/// Compares the current value of `self` with `expected`, storing `replacement`
/// if they're equal and returning its old value in all cases.
fn compare_exchange(
&self,
expected: Self::Plain,
replacement: Self::Plain,
order: Ordering,
) -> Self::Plain;
/// Swaps `self` with `value`, returning the old value of `self`.
fn swap(&self, val: Self::Plain, order: Ordering) -> Self::Plain;
/// Computes the `|` operation between `self` and `value`, storing the result
/// on `self` and returning the old value.
fn bit_or(&self, val: Self::Plain, order: Ordering) -> Self::Plain;
/// Computes the `-` operation between `self` and `value`, storing the result
/// on `self` and returning the old value. This operation wraps on overflow.
fn sub(&self, val: Self::Plain, order: Ordering) -> Self::Plain;
/// Computes the `^` operation between `self` and `value`, storing the result
/// on `self` and returning the old value.
fn bit_xor(&self, val: Self::Plain, order: Ordering) -> Self::Plain;
/// Checks whether this atomic type implements its operations without locks.
fn is_lock_free() -> bool;
}
macro_rules! atomic {
( $atomic:ty, $plain:ty ) => {
impl Atomic for $atomic {
type Plain = $plain;
fn load(&self, order: Ordering) -> Self::Plain {
<$atomic>::load(self, order)
}
fn store(&self, val: Self::Plain, order: Ordering) {
<$atomic>::store(self, val, order);
}
fn add(&self, val: Self::Plain, order: Ordering) -> Self::Plain {
<$atomic>::fetch_add(self, val, order)
}
fn bit_and(&self, val: Self::Plain, order: Ordering) -> Self::Plain {
<$atomic>::fetch_and(self, val, order)
}
fn compare_exchange(
&self,
expected: Self::Plain,
replacement: Self::Plain,
order: Ordering,
) -> Self::Plain {
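// `compare_exchange` returns `Ok(old)` on success and `Err(old)` on failure,
// so mapping both sides through `identity` yields the old value either way.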
<$atomic>::compare_exchange(self, expected, replacement, order, order)
.map_or_else(identity, identity)
}
fn swap(&self, val: Self::Plain, order: Ordering) -> Self::Plain {
<$atomic>::swap(self, val, order)
}
fn bit_or(&self, val: Self::Plain, order: Ordering) -> Self::Plain {
<$atomic>::fetch_or(self, val, order)
}
fn sub(&self, val: Self::Plain, order: Ordering) -> Self::Plain {
<$atomic>::fetch_sub(self, val, order)
}
fn bit_xor(&self, val: Self::Plain, order: Ordering) -> Self::Plain {
<$atomic>::fetch_xor(self, val, order)
}
fn is_lock_free() -> bool {
<$atomic>::is_lock_free()
}
}
};
}
atomic!(AtomicU8, u8);
atomic!(AtomicI8, i8);
atomic!(AtomicU16, u16);
atomic!(AtomicI16, i16);
atomic!(AtomicU32, u32);
atomic!(AtomicI32, i32);
atomic!(AtomicU64, u64);
atomic!(AtomicI64, i64);
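Since every impl comes from the same macro, callers can stay generic over the storage type. A minimal sketch against the `Atomic` trait as declared above (the `bump` helper and `SeqCst` ordering are illustrative, not part of this PR):
use std::sync::atomic::Ordering;
// Works with any of the `atomic!` impls above; returns the value the
// cell held before the addition.
fn bump<A: Atomic>(cell: &A, delta: A::Plain) -> A::Plain {
    cell.add(delta, Ordering::SeqCst)
}
For example, `bump(&AtomicU32::new(41), 1)` returns `41` and leaves `42` in the cell.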

350
boa_engine/src/builtins/typed_array/element/mod.rs

@ -0,0 +1,350 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![allow(clippy::cast_ptr_alignment)] // Invariants are checked by the caller.
mod atomic;
pub(crate) use self::atomic::Atomic;
use std::ops::{BitOr, BitXor};
use std::sync::atomic::Ordering;
use std::{convert::identity, ops::BitAnd};
use bytemuck::{AnyBitPattern, NoUninit};
use num_traits::{WrappingAdd, WrappingSub};
use portable_atomic::{
AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicU16, AtomicU32, AtomicU64, AtomicU8,
};
use crate::{
builtins::{
array_buffer::utils::{SliceRef, SliceRefMut},
typed_array::TypedArrayElement,
},
value::Numeric,
Context, JsResult, JsValue,
};
/// A reference to an element inside an array buffer.
#[derive(Debug, Copy, Clone)]
pub(crate) enum ElementRef<'a, E: Element> {
Atomic(&'a E::Atomic),
Plain(&'a E),
}
impl<E: Element> ElementRef<'_, E> {
/// Loads the value of this reference.
pub(crate) fn load(&self, order: Ordering) -> E {
match self {
ElementRef::Atomic(num) => E::from_plain(num.load(order)),
ElementRef::Plain(num) => **num,
}
}
}
/// A mutable reference to an element inside an array buffer.
pub(crate) enum ElementRefMut<'a, E: Element> {
Atomic(&'a E::Atomic),
Plain(&'a mut E),
}
impl<E: Element> ElementRefMut<'_, E> {
/// Stores `value` on this mutable reference.
pub(crate) fn store(&mut self, value: E, order: Ordering) {
match self {
ElementRefMut::Atomic(num) => num.store(value.to_plain(), order),
ElementRefMut::Plain(num) => **num = value,
}
}
}
impl<E: Element> ElementRefMut<'_, E>
where
E::Atomic: Atomic<Plain = E>,
{
/// Computes the `+` operation between `self` and `value`, storing the result
/// on `self` and returning the old value. This operation wraps on overflow.
pub(crate) fn add(&mut self, value: E, order: Ordering) -> E
where
E: WrappingAdd,
{
match self {
ElementRefMut::Atomic(num) => num.add(value, order),
ElementRefMut::Plain(num) => {
let new = num.wrapping_add(&value);
std::mem::replace(num, new)
}
}
}
/// Computes the `&` operation between `self` and `value`, storing the result
/// on `self` and returning the old value.
pub(crate) fn bit_and(&mut self, value: E, order: Ordering) -> E
where
E: BitAnd<Output = E>,
{
match self {
ElementRefMut::Atomic(num) => num.bit_and(value, order),
ElementRefMut::Plain(num) => {
let new = **num & value;
std::mem::replace(num, new)
}
}
}
/// Compares the current value of `self` with `expected`, exchanging it with `replacement`
/// if they're equal and returning the old value of `self` in all cases.
pub(crate) fn compare_exchange(&mut self, expected: E, replacement: E, order: Ordering) -> E
where
E: Eq,
{
match self {
ElementRefMut::Atomic(num) => num.compare_exchange(expected, replacement, order),
ElementRefMut::Plain(num) => {
let old = **num;
if old == expected {
**num = replacement;
}
old
}
}
}
/// Swaps `self` with `value`, returning the old value of `self`.
pub(crate) fn swap(&mut self, value: E, order: Ordering) -> E {
match self {
ElementRefMut::Atomic(num) => num.swap(value, order),
ElementRefMut::Plain(num) => std::mem::replace(num, value),
}
}
/// Computes the `|` operation between `self` and `value`, storing the result
/// on `self` and returning the old value.
pub(crate) fn bit_or(&mut self, value: E, order: Ordering) -> E
where
E: BitOr<Output = E>,
{
match self {
ElementRefMut::Atomic(num) => num.bit_or(value, order),
ElementRefMut::Plain(num) => {
let new = **num | value;
std::mem::replace(num, new)
}
}
}
/// Computes the `-` operation between `self` and `value`, storing the result
/// on `self` and returning the old value. This operation wraps on overflow.
pub(crate) fn sub(&mut self, value: E, order: Ordering) -> E
where
E: WrappingSub,
{
match self {
ElementRefMut::Atomic(num) => num.sub(value, order),
ElementRefMut::Plain(num) => {
let new = num.wrapping_sub(&value);
std::mem::replace(num, new)
}
}
}
/// Computes the `^` operation between `self` and `value`, storing the result
/// on `self` and returning the old value.
pub(crate) fn bit_xor(&mut self, value: E, order: Ordering) -> E
where
E: BitXor<Output = E>,
{
match self {
ElementRefMut::Atomic(num) => num.bit_xor(value, order),
ElementRefMut::Plain(num) => {
let new = **num ^ value;
std::mem::replace(num, new)
}
}
}
}
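These methods give plain and shared buffers a single read-modify-write surface: the `Atomic` arm defers to the hardware, while the `Plain` arm emulates the same semantics with an ordinary load and store. A sketch of the emulated path (`ElementRefMut` is crate-internal, so this is illustrative only):
use std::sync::atomic::Ordering;
// Emulated compare-exchange on a non-shared element; the ordering is
// only meaningful for the `Atomic` arm and is ignored here.
let mut slot = 7u32;
let mut elem = ElementRefMut::Plain(&mut slot);
let old = elem.compare_exchange(7, 9, Ordering::SeqCst);
assert_eq!(old, 7);
assert_eq!(slot, 9);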
/// A `u8` that clamps instead of wrapping when converting from a `JsValue`.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, AnyBitPattern, NoUninit)]
#[repr(transparent)]
pub(crate) struct ClampedU8(pub(crate) u8);
impl ClampedU8 {
/// Converts this `ClampedU8` to its big endian representation.
pub(crate) fn to_be(self) -> Self {
Self(self.0.to_be())
}
/// Converts this `ClampedU8` to its little endian representation.
pub(crate) fn to_le(self) -> Self {
Self(self.0.to_le())
}
}
impl From<ClampedU8> for Numeric {
fn from(value: ClampedU8) -> Self {
Numeric::Number(value.0.into())
}
}
/// A native element that can be inside a `TypedArray`.
pub(crate) trait Element:
Sized + Into<TypedArrayElement> + NoUninit + AnyBitPattern
{
/// The atomic type used for shared array buffers.
type Atomic: Atomic;
/// Converts a `JsValue` into the native element `Self`.
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self>;
/// Converts from the plain type of an atomic to `Self`.
fn from_plain(plain: <Self::Atomic as Atomic>::Plain) -> Self;
/// Converts from `Self` to the plain type of an atomic.
fn to_plain(self) -> <Self::Atomic as Atomic>::Plain;
/// Gets the little endian representation of `Self`.
fn to_little_endian(self) -> Self;
/// Gets the big endian representation of `Self`.
fn to_big_endian(self) -> Self;
/// Reads `Self` from the `buffer`.
///
/// This will always read values in the native endianness of the target architecture.
///
/// # Safety
///
/// - `buffer` must be aligned to the native alignment of `Self`.
/// - `buffer` must contain enough bytes to read `std::mem::size_of::<Self>()` bytes.
unsafe fn read(buffer: SliceRef<'_>) -> ElementRef<'_, Self>;
/// Gets a mutable reference to `Self` inside the `buffer`.
///
/// This will always access values in the native endianness of the target architecture.
///
/// # Safety
///
/// - `buffer` must be aligned to the native alignment of `Self`.
/// - `buffer` must contain enough bytes to store `std::mem::size_of::<Self>()` bytes.
unsafe fn read_mut(buffer: SliceRefMut<'_>) -> ElementRefMut<'_, Self>;
}
macro_rules! element {
( $element:ty, $atomic:ty, from_js: $from_js:path $(,)?) => {
element!(
$element,
$atomic,
from_js: $from_js,
from_plain: identity,
to_plain: identity,
to_be: |this: $element| this.to_be(),
to_le: |this: $element| this.to_le()
);
};
(
$element:ty,
$atomic:ty,
from_js: $from_js:expr,
from_plain: $from_plain:expr,
to_plain: $to_plain:expr,
to_be: $to_be:expr,
to_le: $to_le:expr $(,)?
) => {
#[allow(clippy::redundant_closure_call)]
#[allow(clippy::undocumented_unsafe_blocks)] // Invariants are checked by the caller.
impl Element for $element {
type Atomic = $atomic;
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> {
$from_js(value, context)
}
fn from_plain(plain: <Self::Atomic as Atomic>::Plain) -> Self {
$from_plain(plain)
}
fn to_plain(self) -> <Self::Atomic as Atomic>::Plain {
$to_plain(self)
}
fn to_big_endian(self) -> Self {
$to_be(self)
}
fn to_little_endian(self) -> Self {
$to_le(self)
}
unsafe fn read(buffer: SliceRef<'_>) -> ElementRef<'_, Self> {
if cfg!(debug_assertions) {
assert!(buffer.len() >= std::mem::size_of::<Self>());
assert!(buffer.addr() % std::mem::align_of::<Self>() == 0);
}
match buffer {
SliceRef::Slice(buffer) => unsafe {
ElementRef::Plain(&*buffer.as_ptr().cast())
},
SliceRef::AtomicSlice(buffer) => unsafe {
ElementRef::Atomic(&*buffer.as_ptr().cast::<Self::Atomic>())
},
}
}
unsafe fn read_mut(buffer: SliceRefMut<'_>) -> ElementRefMut<'_, Self> {
if cfg!(debug_assertions) {
assert!(buffer.len() >= std::mem::size_of::<Self>());
assert!(buffer.addr() % std::mem::align_of::<Self>() == 0);
}
match buffer {
SliceRefMut::Slice(buffer) => unsafe {
ElementRefMut::Plain(&mut *buffer.as_mut_ptr().cast())
},
SliceRefMut::AtomicSlice(buffer) => unsafe {
ElementRefMut::Atomic(&*buffer.as_ptr().cast::<Self::Atomic>())
},
}
}
}
};
}
element!(u8, AtomicU8, from_js: JsValue::to_uint8);
element!(i8, AtomicI8, from_js: JsValue::to_int8);
element!(u16, AtomicU16, from_js: JsValue::to_uint16);
element!(i16, AtomicI16, from_js: JsValue::to_int16);
element!(u32, AtomicU32, from_js: JsValue::to_u32);
element!(i32, AtomicI32, from_js: JsValue::to_i32);
element!(u64, AtomicU64, from_js: JsValue::to_big_uint64);
element!(i64, AtomicI64, from_js: JsValue::to_big_int64);
element!(
ClampedU8,
AtomicU8,
from_js: |value: &JsValue, context| value.to_uint8_clamp(context).map(ClampedU8),
from_plain: ClampedU8,
to_plain: |c: ClampedU8| c.0,
to_be: |this: ClampedU8| this.to_be(),
to_le: |this: ClampedU8| this.to_le(),
);
element!(
f32,
AtomicU32,
from_js: |value: &JsValue, context| value.to_number(context).map(|f| f as f32),
from_plain: f32::from_bits,
to_plain: |f: f32| f.to_bits(),
to_be: |this: f32| f32::from_bits(this.to_bits().to_be()),
to_le: |this: f32| f32::from_bits(this.to_bits().to_le()),
);
element!(
f64,
AtomicU64,
from_js: |value: &JsValue, context| value.to_number(context),
from_plain: f64::from_bits,
to_plain: |f: f64| f.to_bits(),
to_be: |this: f64| f64::from_bits(this.to_bits().to_be()),
to_le: |this: f64| f64::from_bits(this.to_bits().to_le()),
);
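Notice that `f32` and `f64` borrow the integer atomics and shuttle through `to_bits`/`from_bits`, rather than relying on the uncommon native float atomics. The trick outside the macro, as a hedged sketch (helper names are illustrative):
use portable_atomic::AtomicU32;
use std::sync::atomic::Ordering;
// Atomically store and load an f32 through its bit pattern, exactly as
// the `element!(f32, AtomicU32, ...)` expansion above does.
fn store_f32(cell: &AtomicU32, value: f32) {
    cell.store(value.to_bits(), Ordering::SeqCst);
}
fn load_f32(cell: &AtomicU32) -> f32 {
    f32::from_bits(cell.load(Ordering::SeqCst))
}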

48
boa_engine/src/builtins/typed_array/mod.rs

@ -35,7 +35,7 @@ mod element;
mod integer_indexed_object;
pub(crate) use builtin::{is_valid_integer_index, BuiltinTypedArray};
pub(crate) use element::{ClampedU8, Element};
pub(crate) use element::{Atomic, ClampedU8, Element};
pub use integer_indexed_object::IntegerIndexed;
pub(crate) trait TypedArray {
@ -399,6 +399,29 @@ impl TypedArrayKind {
}
}
/// Returns `true` if this kind of typed array supports `Atomics` operations.
///
/// Equivalent to `IsUnclampedIntegerElementType(type) is true || IsBigIntElementType(type) is true`.
pub(crate) fn supports_atomic_ops(self) -> bool {
match self {
TypedArrayKind::Int8
| TypedArrayKind::Uint8
| TypedArrayKind::Int16
| TypedArrayKind::Uint16
| TypedArrayKind::Int32
| TypedArrayKind::Uint32
| TypedArrayKind::BigInt64
| TypedArrayKind::BigUint64 => true,
// `f32` and `f64` support atomic operations on certain platforms, but that's uncommon and
// could require polyfilling the operations using CAS.
// `Uint8Clamped` clamps to its limits, which atomic operations cannot honor, since
// they always wrap on overflow.
TypedArrayKind::Uint8Clamped | TypedArrayKind::Float32 | TypedArrayKind::Float64 => {
false
}
}
}
/// Gets the size of the type of element of this `TypedArrayKind`.
pub(crate) const fn element_size(self) -> u64 {
match self {
@ -477,6 +500,29 @@ pub(crate) enum TypedArrayElement {
Float64(f64),
}
impl TypedArrayElement {
/// Converts the element into its extended bytes representation as a `u64`.
///
/// This is guaranteed to never fail, since all numeric types supported by JS are at most
/// 8 bytes wide.
pub(crate) fn to_bytes(self) -> u64 {
#[allow(clippy::cast_lossless)]
match self {
TypedArrayElement::Int8(num) => num as u64,
TypedArrayElement::Uint8(num) => num as u64,
TypedArrayElement::Uint8Clamped(num) => num.0 as u64,
TypedArrayElement::Int16(num) => num as u64,
TypedArrayElement::Uint16(num) => num as u64,
TypedArrayElement::Int32(num) => num as u64,
TypedArrayElement::Uint32(num) => num as u64,
TypedArrayElement::BigInt64(num) => num as u64,
TypedArrayElement::BigUint64(num) => num,
TypedArrayElement::Float32(num) => num.to_bits() as u64,
TypedArrayElement::Float64(num) => num.to_bits(),
}
}
}
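For floats the widening goes through the raw bits, while signed integers sign-extend; two illustrative cases (a sketch, since the type is crate-internal):
// Floats widen via their bit pattern; signed integers sign-extend.
assert_eq!(TypedArrayElement::Float32(1.0).to_bytes(), 0x3F80_0000);
assert_eq!(TypedArrayElement::Int8(-1).to_bytes(), u64::MAX);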
impl From<i8> for TypedArrayElement {
fn from(value: i8) -> Self {
Self::Int8(value)

15
boa_engine/src/context/intrinsics.rs

@ -1043,6 +1043,9 @@ pub struct IntrinsicObjects {
/// [`%AsyncGeneratorFunction.prototype.prototype%`](https://tc39.es/ecma262/#sec-properties-of-asyncgenerator-prototype)
async_generator: JsObject,
/// [`%Atomics%`](https://tc39.es/ecma262/#sec-atomics)
atomics: JsObject,
/// [`%eval%`](https://tc39.es/ecma262/#sec-eval-x)
eval: JsFunction,
@ -1098,6 +1101,7 @@ impl Default for IntrinsicObjects {
iterator_prototypes: IteratorPrototypes::default(),
generator: JsObject::default(),
async_generator: JsObject::default(),
atomics: JsObject::default(),
eval: JsFunction::empty_intrinsic_function(false),
uri_functions: UriFunctions::default(),
is_finite: JsFunction::empty_intrinsic_function(false),
@ -1164,7 +1168,7 @@ impl IntrinsicObjects {
self.generator.clone()
}
/// Gets the [`%AsyncGeneratorFunction.prototype.prototype%`] intrinsic object.
/// Gets the [`%AsyncGeneratorFunction.prototype.prototype%`][spec] intrinsic object.
///
/// [spec]: https://tc39.es/ecma262/#sec-asyncgenerator-objects
#[inline]
@ -1173,6 +1177,15 @@ impl IntrinsicObjects {
self.async_generator.clone()
}
/// Gets the [`%Atomics%`][spec] intrinsic object.
///
/// [spec]: https://tc39.es/ecma262/#sec-atomics
#[inline]
#[must_use]
pub fn atomics(&self) -> JsObject {
self.atomics.clone()
}
/// Gets the [`%eval%`][spec] intrinsic function.
///
/// [spec]: https://tc39.es/ecma262/#sec-eval-x

63
boa_engine/src/context/mod.rs

@ -14,7 +14,7 @@ pub use maybe_shared::MaybeShared;
#[cfg(not(feature = "intl"))]
pub use std::marker::PhantomData;
use std::{io::Read, path::Path, rc::Rc};
use std::{cell::Cell, io::Read, path::Path, rc::Rc};
use crate::{
builtins,
@ -39,6 +39,10 @@ use crate::vm::RuntimeLimits;
use self::intrinsics::StandardConstructor;
thread_local! {
static CANNOT_BLOCK_COUNTER: Cell<u64> = Cell::new(0);
}
/// ECMAScript context. It is the primary way to interact with the runtime.
///
/// `Context`s constructed in a thread share the same runtime, therefore it
@ -98,6 +102,8 @@ pub struct Context<'host> {
pub(crate) kept_alive: Vec<JsObject>,
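/// Whether this context can be suspended by `Atomics.wait` calls, i.e. the agent's `[[CanBlock]]` field.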
can_block: bool,
/// ICU related utilities
#[cfg(feature = "intl")]
icu: icu::Icu<'host>,
@ -132,7 +138,15 @@ impl std::fmt::Debug for Context<'_> {
#[cfg(feature = "intl")]
debug.field("icu", &self.icu);
debug.finish()
debug.finish_non_exhaustive()
}
}
impl Drop for Context<'_> {
fn drop(&mut self) {
if !self.can_block {
CANNOT_BLOCK_COUNTER.set(CANNOT_BLOCK_COUNTER.get() - 1);
}
}
}
@ -568,6 +582,13 @@ impl<'host> Context<'host> {
pub fn runtime_limits_mut(&mut self) -> &mut RuntimeLimits {
&mut self.vm.runtime_limits
}
/// Returns `true` if this context can be suspended by an `Atomics.wait` call.
#[inline]
#[must_use]
pub fn can_block(&self) -> bool {
self.can_block
}
}
// ==== Private API ====
@ -844,6 +865,7 @@ pub struct ContextBuilder<'icu, 'hooks, 'queue, 'module> {
host_hooks: Option<MaybeShared<'hooks, dyn HostHooks>>,
job_queue: Option<MaybeShared<'queue, dyn JobQueue>>,
module_loader: Option<MaybeShared<'module, dyn ModuleLoader>>,
can_block: bool,
#[cfg(feature = "intl")]
icu: Option<icu::Icu<'icu>>,
#[cfg(not(feature = "intl"))]
@ -869,7 +891,8 @@ impl std::fmt::Debug for ContextBuilder<'_, '_, '_, '_> {
.field(
"module_loader",
&self.module_loader.as_ref().map(|_| ModuleLoader),
);
)
.field("can_block", &self.can_block);
#[cfg(feature = "intl")]
out.field("icu", &self.icu);
@ -971,6 +994,27 @@ impl<'icu, 'hooks, 'queue, 'module> ContextBuilder<'icu, 'hooks, 'queue, 'module
}
}
/// [`AgentCanSuspend ( )`][spec] aka `[[CanBlock]]`
///
/// Defines if this context can be suspended by calls to the [`Atomics.wait`][wait] function.
///
/// # Note
///
/// Per the specification, an agent whose `[[CanBlock]]` field is set to `true` cannot share a
/// thread with any other agent. The builder verifies at build time that every context on the
/// current thread satisfies this requirement.
///
/// [spec]: https://tc39.es/ecma262/#sec-agentcansuspend
/// [wait]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Atomics/wait
#[must_use]
pub const fn can_block(
mut self,
can_block: bool,
) -> ContextBuilder<'icu, 'hooks, 'queue, 'module> {
self.can_block = can_block;
self
}
/// Specifies the number of instructions remaining to the [`Context`].
///
/// This function is only available if the `fuzz` feature is enabled.
@ -992,6 +1036,18 @@ impl<'icu, 'hooks, 'queue, 'module> ContextBuilder<'icu, 'hooks, 'queue, 'module
'queue: 'host,
'module: 'host,
{
if self.can_block {
if CANNOT_BLOCK_COUNTER.get() > 0 {
return Err(JsNativeError::typ()
.with_message(
"a context that can block must be the only active context in its current thread",
)
.into());
}
} else {
CANNOT_BLOCK_COUNTER.set(CANNOT_BLOCK_COUNTER.get() + 1);
}
let root_shape = RootShape::default();
let host_hooks = self.host_hooks.unwrap_or_else(|| {
@ -1038,6 +1094,7 @@ impl<'icu, 'hooks, 'queue, 'module> ContextBuilder<'icu, 'hooks, 'queue, 'module
optimizer_options: OptimizerOptions::OPTIMIZE_ALL,
root_shape,
parser_identifier: 0,
can_block: self.can_block,
};
builtins::set_default_global_bindings(&mut context)?;
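The thread-local counter is what lets the builder enforce the `[[CanBlock]]` invariant at build time. A sketch of the observable behavior on a single thread (builder method names as above; the assertions are illustrative):
use boa_engine::Context;
// A blocking context can be built while no non-blocking context is
// alive on this thread.
let blocking = Context::builder().can_block(true).build();
assert!(blocking.is_ok());
drop(blocking);
// While a non-blocking context lives on this thread, building a
// blocking one fails with a TypeError.
let _worker = Context::builder().can_block(false).build().unwrap();
assert!(Context::builder().can_block(true).build().is_err());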

2
boa_engine/src/lib.rs

@ -151,6 +151,8 @@ pub mod value;
pub mod vm;
mod host_defined;
mod small_map;
mod sys;
mod tagged;
#[cfg(test)]

323
boa_engine/src/small_map/entry.rs

@ -0,0 +1,323 @@
use std::{
collections::{btree_map, BTreeMap},
fmt::Debug,
};
use arrayvec::ArrayVec;
use super::SmallMap;
use Entry::{Occupied, Vacant};
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`entry`] method on [`SmallMap`].
///
/// [`entry`]: SmallMap::entry
pub enum Entry<'a, K, V, const ARRAY_SIZE: usize> {
/// A vacant entry.
Vacant(VacantEntry<'a, K, V, ARRAY_SIZE>),
/// An occupied entry.
Occupied(OccupiedEntry<'a, K, V, ARRAY_SIZE>),
}
impl<K: Debug + Ord, V: Debug, const ARRAY_SIZE: usize> Debug for Entry<'_, K, V, ARRAY_SIZE> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
Self::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
}
}
}
/// A view into a vacant entry in a `SmallMap`.
/// It is part of the [`Entry`] enum.
pub struct VacantEntry<'a, K, V, const ARRAY_SIZE: usize> {
pub(super) inner: InnerVacant<'a, K, V, ARRAY_SIZE>,
}
pub(super) enum InnerVacant<'a, K, V, const ARRAY_SIZE: usize> {
Inline(InlineVacantEntry<'a, K, V, ARRAY_SIZE>),
Heap(btree_map::VacantEntry<'a, K, V>),
}
impl<K: Debug + Ord, V, const ARRAY_SIZE: usize> Debug for VacantEntry<'_, K, V, ARRAY_SIZE> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("VacantEntry").field(self.key()).finish()
}
}
/// A view into an occupied entry in a `SmallMap`.
/// It is part of the [`Entry`] enum.
pub struct OccupiedEntry<'a, K, V, const ARRAY_SIZE: usize> {
pub(super) inner: InnerOccupied<'a, K, V, ARRAY_SIZE>,
}
pub(super) enum InnerOccupied<'a, K, V, const ARRAY_SIZE: usize> {
Inline(InlineOccupiedEntry<'a, K, V, ARRAY_SIZE>),
Heap(btree_map::OccupiedEntry<'a, K, V>),
}
impl<K: Ord + Debug, V: Debug, const ARRAY_SIZE: usize> Debug
for OccupiedEntry<'_, K, V, ARRAY_SIZE>
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("OccupiedEntry")
.field("key", self.key())
.field("value", self.get())
.finish()
}
}
impl<'a, K: Ord, V, const ARRAY_SIZE: usize> Entry<'a, K, V, ARRAY_SIZE> {
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
pub fn or_insert(self, default: V) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default()),
}
}
/// Ensures a value is in the entry by inserting, if empty, the result of the default function.
/// This method allows for generating key-derived values for insertion by providing the default
/// function a reference to the key that was moved during the `.entry(key)` method call.
///
/// The reference to the moved key is provided so that cloning or copying the key is
/// unnecessary, unlike with `.or_insert_with(|| ... )`.
pub fn or_insert_with_key<F: FnOnce(&K) -> V>(self, default: F) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => {
let value = default(entry.key());
entry.insert(value)
}
}
}
/// Returns a reference to this entry's key.
pub fn key(&self) -> &K {
match self {
Occupied(entry) => entry.key(),
Vacant(entry) => entry.key(),
}
}
/// Provides in-place mutable access to an occupied entry before any
/// potential inserts into the map.
pub fn and_modify<F>(self, f: F) -> Self
where
F: FnOnce(&mut V),
{
match self {
Occupied(mut entry) => {
f(entry.get_mut());
Occupied(entry)
}
Vacant(entry) => Vacant(entry),
}
}
}
impl<'a, K: Ord, V: Default, const ARRAY_SIZE: usize> Entry<'a, K, V, ARRAY_SIZE> {
/// Ensures a value is in the entry by inserting the default value if empty,
/// and returns a mutable reference to the value in the entry.
pub fn or_default(self) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(Default::default()),
}
}
}
impl<'a, K: Ord, V, const ARRAY_SIZE: usize> VacantEntry<'a, K, V, ARRAY_SIZE> {
/// Gets a reference to the key that would be used when inserting a value
/// through the `VacantEntry`.
pub fn key(&self) -> &K {
match &self.inner {
InnerVacant::Inline(i) => i.key(),
InnerVacant::Heap(v) => v.key(),
}
}
/// Takes ownership of the key.
pub fn into_key(self) -> K {
match self.inner {
InnerVacant::Inline(i) => i.into_key(),
InnerVacant::Heap(v) => v.into_key(),
}
}
/// Sets the value of the entry with the `VacantEntry`'s key,
/// and returns a mutable reference to it.
pub fn insert(self, value: V) -> &'a mut V {
match self.inner {
InnerVacant::Inline(i) => i.insert(value),
InnerVacant::Heap(v) => v.insert(value),
}
}
}
impl<'a, K: Ord, V, const ARRAY_SIZE: usize> OccupiedEntry<'a, K, V, ARRAY_SIZE> {
/// Gets a reference to the key in the entry.
pub fn key(&self) -> &K {
match &self.inner {
InnerOccupied::Inline(o) => o.key(),
InnerOccupied::Heap(o) => o.key(),
}
}
/// Takes ownership of the key and value from the map.
pub fn remove_entry(self) -> (K, V) {
match self.inner {
InnerOccupied::Inline(o) => o.remove_entry(),
InnerOccupied::Heap(o) => o.remove_entry(),
}
}
/// Gets a reference to the value in the entry.
pub fn get(&self) -> &V {
match &self.inner {
InnerOccupied::Inline(o) => o.get(),
InnerOccupied::Heap(o) => o.get(),
}
}
/// Gets a mutable reference to the value in the entry.
///
/// If you need a reference to the `OccupiedEntry` that may outlive the
/// destruction of the `Entry` value, see [`into_mut`].
///
/// [`into_mut`]: OccupiedEntry::into_mut
pub fn get_mut(&mut self) -> &mut V {
match &mut self.inner {
InnerOccupied::Inline(o) => o.get_mut(),
InnerOccupied::Heap(o) => o.get_mut(),
}
}
/// Converts the entry into a mutable reference to its value.
///
/// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
///
/// [`get_mut`]: OccupiedEntry::get_mut
pub fn into_mut(self) -> &'a mut V {
match self.inner {
InnerOccupied::Inline(o) => o.into_mut(),
InnerOccupied::Heap(o) => o.into_mut(),
}
}
/// Sets the value of the entry with the `OccupiedEntry`'s key,
/// and returns the entry's old value.
pub fn insert(&mut self, value: V) -> V {
match &mut self.inner {
InnerOccupied::Inline(o) => o.insert(value),
InnerOccupied::Heap(o) => o.insert(value),
}
}
/// Takes the value of the entry out of the map, and returns it.
pub fn remove(self) -> V {
match self.inner {
InnerOccupied::Inline(o) => o.remove(),
InnerOccupied::Heap(o) => o.remove(),
}
}
}
pub(super) struct InlineVacantEntry<'a, K, V, const ARRAY_SIZE: usize> {
pub(super) key: K,
pub(super) map: &'a mut SmallMap<K, V, ARRAY_SIZE>,
}
impl<'a, K: Ord + Eq, V, const ARRAY_SIZE: usize> InlineVacantEntry<'a, K, V, ARRAY_SIZE> {
pub(super) fn key(&self) -> &K {
&self.key
}
pub(super) fn into_key(self) -> K {
self.key
}
pub(super) fn insert(self, value: V) -> &'a mut V {
let InlineVacantEntry { key, map } = self;
let vec = match &mut map.inner {
super::Inner::Inline(vec) => {
if !vec.is_full() {
let len = vec.len();
vec.push((key, value));
// Workaround for Problem case 3 of the current borrow checker.
// https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions
match &mut map.inner {
super::Inner::Inline(vec) => return &mut vec[len].1,
super::Inner::Heap(_) => unreachable!(),
}
}
std::mem::take(vec)
}
super::Inner::Heap(_) => unreachable!(),
};
// Need to convert to a heap allocated map.
let btree = BTreeMap::from_iter(vec);
*map = SmallMap {
inner: super::Inner::Heap(btree),
};
match &mut map.inner {
super::Inner::Inline(_) => unreachable!(),
super::Inner::Heap(h) => h.entry(key).or_insert(value),
}
}
}
pub(super) struct InlineOccupiedEntry<'a, K, V, const ARRAY_SIZE: usize> {
pub(super) index: usize,
pub(super) array: &'a mut ArrayVec<(K, V), ARRAY_SIZE>,
}
impl<'a, K, V, const ARRAY_SIZE: usize> InlineOccupiedEntry<'a, K, V, ARRAY_SIZE> {
pub(super) fn key(&self) -> &K {
&self.array[self.index].0
}
pub(super) fn remove_entry(self) -> (K, V) {
self.array.remove(self.index)
}
pub(super) fn get(&self) -> &V {
&self.array[self.index].1
}
pub(super) fn get_mut(&mut self) -> &mut V {
&mut self.array[self.index].1
}
pub(super) fn into_mut(self) -> &'a mut V {
&mut self.array[self.index].1
}
pub(super) fn insert(&mut self, value: V) -> V {
std::mem::replace(&mut self.array[self.index].1, value)
}
pub(super) fn remove(self) -> V {
self.remove_entry().1
}
}

644
boa_engine/src/small_map/mod.rs

@ -0,0 +1,644 @@
// TODO: Maybe extract to a separate crate? It could be useful for some applications.
#![allow(unreachable_pub)]
#![allow(unused)]
use std::{
borrow::Borrow,
collections::{btree_map, BTreeMap},
fmt,
hash::{Hash, Hasher},
iter::FusedIterator,
ops::{Index, IndexMut},
};
use arrayvec::ArrayVec;
mod entry;
pub use entry::{Entry, OccupiedEntry, VacantEntry};
use Entry::{Occupied, Vacant};
/// A map that is initially backed by an inline array, but moves its contents to a heap-allocated
/// `BTreeMap` once its number of elements exceeds `ARRAY_SIZE`.
#[derive(Clone)]
pub(crate) struct SmallMap<K, V, const ARRAY_SIZE: usize> {
inner: Inner<K, V, ARRAY_SIZE>,
}
#[derive(Debug, Clone)]
enum Inner<K, V, const ARRAY_SIZE: usize> {
Inline(ArrayVec<(K, V), ARRAY_SIZE>),
Heap(BTreeMap<K, V>),
}
/// An iterator over the entries of a `SmallMap`.
///
/// This `struct` is created by the [`iter`] method on [`SmallMap`]. See its
/// documentation for more.
///
/// [`iter`]: SmallMap::iter
#[derive(Clone)]
pub struct Iter<'a, K, V> {
inner: InnerIter<'a, K, V>,
}
#[derive(Clone)]
enum InnerIter<'a, K, V> {
Inline(std::slice::Iter<'a, (K, V)>),
Heap(btree_map::Iter<'a, K, V>),
}
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self.inner {
InnerIter::Inline(i) => f.debug_tuple("Inline").field(i).finish(),
InnerIter::Heap(h) => f.debug_tuple("Heap").field(h).finish(),
}
}
}
impl<K, V> Default for Iter<'_, K, V> {
/// Creates an empty `small_map::Iter`.
fn default() -> Self {
Self {
inner: InnerIter::Inline(std::slice::Iter::default()),
}
}
}
/// A mutable iterator over the entries of a `SmallMap`.
///
/// This `struct` is created by the [`iter_mut`] method on [`SmallMap`]. See its
/// documentation for more.
///
/// [`iter_mut`]: SmallMap::iter_mut
pub struct IterMut<'a, K, V> {
inner: InnerIterMut<'a, K, V>,
}
enum InnerIterMut<'a, K, V> {
Inline(std::slice::IterMut<'a, (K, V)>),
Heap(btree_map::IterMut<'a, K, V>),
}
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self.inner {
InnerIterMut::Inline(i) => f.debug_tuple("Inline").field(i).finish(),
InnerIterMut::Heap(h) => f.debug_tuple("Heap").field(h).finish(),
}
}
}
impl<K, V> Default for IterMut<'_, K, V> {
/// Creates an empty `small_map::IterMut`.
fn default() -> Self {
Self {
inner: InnerIterMut::Inline(std::slice::IterMut::default()),
}
}
}
/// An owning iterator over the entries of a `SmallMap`.
///
/// This `struct` is created by the [`into_iter`] method on [`SmallMap`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: IntoIterator::into_iter
pub struct IntoIter<K, V, const ARRAY_SIZE: usize> {
inner: InnerIntoIter<K, V, ARRAY_SIZE>,
}
enum InnerIntoIter<K, V, const ARRAY_SIZE: usize> {
Inline(arrayvec::IntoIter<(K, V), ARRAY_SIZE>),
Heap(btree_map::IntoIter<K, V>),
}
impl<K: fmt::Debug, V: fmt::Debug, const ARRAY_SIZE: usize> fmt::Debug
for IntoIter<K, V, ARRAY_SIZE>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self.inner {
InnerIntoIter::Inline(i) => f.debug_tuple("Inline").field(i).finish(),
InnerIntoIter::Heap(h) => f.debug_tuple("Heap").field(h).finish(),
}
}
}
impl<K, V, const ARRAY_SIZE: usize> Default for IntoIter<K, V, ARRAY_SIZE> {
/// Creates an empty `small_map::IntoIter`.
fn default() -> Self {
Self {
inner: InnerIntoIter::Inline(ArrayVec::new().into_iter()),
}
}
}
impl<K, V, const ARRAY_SIZE: usize> SmallMap<K, V, ARRAY_SIZE> {
/// Makes a new, empty `SmallMap`.
pub const fn new() -> Self {
Self {
inner: Inner::Inline(ArrayVec::new_const()),
}
}
/// Clears the map, removing all elements.
///
/// The current implementation will preserve the heap map allocation
/// if the map has already transitioned to the fallback heap map.
pub fn clear(&mut self) {
match &mut self.inner {
Inner::Inline(v) => v.clear(),
Inner::Heap(h) => h.clear(),
}
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
pub fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q> + Ord + Eq,
Q: Ord + Eq,
{
match &self.inner {
Inner::Inline(v) => v.iter().find(|(k, _)| k.borrow() == key).map(|(_, v)| v),
Inner::Heap(h) => h.get(key),
}
}
/// Returns the key-value pair corresponding to the supplied key.
///
/// The supplied key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
pub fn get_key_value<Q: ?Sized>(&self, key: &Q) -> Option<(&K, &V)>
where
K: Borrow<Q> + Ord + Eq,
Q: Ord + Eq,
{
match &self.inner {
Inner::Inline(v) => v
.iter()
.find(|(k, _)| k.borrow() == key)
.map(|(k, v)| (k, v)),
Inner::Heap(h) => h.get_key_value(key),
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
where
K: Borrow<Q> + Ord + Eq,
Q: Ord + Eq,
{
self.get(key).is_some()
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q> + Ord + Eq,
Q: Ord + Eq,
{
match &mut self.inner {
Inner::Inline(v) => v
.iter_mut()
.find(|(k, _)| k.borrow() == key)
.map(|(_, v)| v),
Inner::Heap(h) => h.get_mut(key),
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical. See the [**Insert and complex keys**][keys]
/// section from the [`std::collections`] module documentation for more information.
///
/// [keys]: https://doc.rust-lang.org/std/collections/index.html#insert-and-complex-keys
pub fn insert(&mut self, key: K, value: V) -> Option<V>
where
K: Eq + Ord,
{
match self.entry(key) {
Occupied(mut entry) => Some(entry.insert(value)),
Vacant(entry) => {
entry.insert(value);
None
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q> + Ord + Eq,
Q: Ord + Eq,
{
self.remove_entry(key).map(|(_, v)| v)
}
/// Removes a key from the map, returning the stored key and value if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
pub fn remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
where
K: Borrow<Q> + Ord,
Q: Ord,
{
match &mut self.inner {
Inner::Inline(v) => v
.iter()
.position(|(k, _)| k.borrow() == key)
.map(|idx| v.remove(idx)),
Inner::Heap(h) => h.remove_entry(key),
}
}
/// Retains only the elements specified by the predicate.
///
/// In other words, remove all pairs `(k, v)` for which `f(&k, &mut v)` returns `false`.
pub fn retain<F>(&mut self, mut f: F)
where
K: Ord,
F: FnMut(&K, &mut V) -> bool,
{
match &mut self.inner {
Inner::Inline(v) => v.retain(|(k, v)| f(k, v)),
Inner::Heap(h) => h.retain(f),
}
}
/// Moves all elements from `other` into `self`, leaving `other` empty.
///
/// If a key from `other` is already present in `self`, the respective
/// value from `self` will be overwritten with the respective value from `other`.
pub fn append<const OTHER_SIZE: usize>(&mut self, other: &mut SmallMap<K, V, OTHER_SIZE>)
where
K: Ord + Eq,
{
if other.is_empty() {
return;
}
let inline = matches!(other.inner, Inner::Inline(_));
let other = std::mem::replace(
other,
SmallMap {
inner: if inline {
Inner::Inline(ArrayVec::new())
} else {
Inner::Heap(BTreeMap::new())
},
},
);
self.extend(other);
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
pub fn entry(&mut self, key: K) -> Entry<'_, K, V, ARRAY_SIZE>
where
K: Eq + Ord,
{
match &mut self.inner {
Inner::Inline(array) => {
let Some(index) = array.iter().position(|(k, _)| *k == key) else {
return Vacant(VacantEntry {
inner: entry::InnerVacant::Inline(entry::InlineVacantEntry {
key,
map: self,
}),
});
};
// Workaround for Problem case 3 of the current borrow checker.
// https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions
// Hopefully we can remove this with some improvements to the borrow checker.
match &mut self.inner {
Inner::Inline(array) => Occupied(OccupiedEntry {
inner: entry::InnerOccupied::Inline(entry::InlineOccupiedEntry {
index,
array,
}),
}),
Inner::Heap(_) => unreachable!(),
}
}
// Same workaround as above.
Inner::Heap(_) => match &mut self.inner {
Inner::Heap(h) => match h.entry(key) {
btree_map::Entry::Vacant(entry) => Vacant(VacantEntry {
inner: entry::InnerVacant::Heap(entry),
}),
btree_map::Entry::Occupied(entry) => Occupied(OccupiedEntry {
inner: entry::InnerOccupied::Heap(entry),
}),
},
Inner::Inline(_) => unreachable!(),
},
}
}
}
impl<'a, K, V, const ARRAY_SIZE: usize> IntoIterator for &'a SmallMap<K, V, ARRAY_SIZE> {
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
type Item = (&'a K, &'a V);
fn next(&mut self) -> Option<Self::Item> {
match &mut self.inner {
InnerIter::Inline(i) => i.next().map(|(k, v)| (k, v)),
InnerIter::Heap(h) => h.next(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match &self.inner {
InnerIter::Inline(i) => i.size_hint(),
InnerIter::Heap(h) => h.size_hint(),
}
}
fn last(self) -> Option<(&'a K, &'a V)> {
match self.inner {
InnerIter::Inline(i) => i.last().map(|(k, v)| (k, v)),
InnerIter::Heap(h) => h.last(),
}
}
}
impl<K, V> FusedIterator for Iter<'_, K, V> {}
impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
match &mut self.inner {
InnerIter::Inline(i) => i.next_back().map(|(k, v)| (k, v)),
InnerIter::Heap(h) => h.next_back(),
}
}
}
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
fn len(&self) -> usize {
match &self.inner {
InnerIter::Inline(i) => i.len(),
InnerIter::Heap(h) => h.len(),
}
}
}
impl<'a, K, V, const ARRAY_SIZE: usize> IntoIterator for &'a mut SmallMap<K, V, ARRAY_SIZE> {
type Item = (&'a K, &'a mut V);
type IntoIter = IterMut<'a, K, V>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
fn next(&mut self) -> Option<Self::Item> {
match &mut self.inner {
InnerIterMut::Inline(i) => i.next().map(|(k, v)| (&*k, v)),
InnerIterMut::Heap(h) => h.next(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match &self.inner {
InnerIterMut::Inline(i) => i.size_hint(),
InnerIterMut::Heap(h) => h.size_hint(),
}
}
fn last(self) -> Option<(&'a K, &'a mut V)> {
match self.inner {
InnerIterMut::Inline(i) => i.last().map(|(k, v)| (&*k, v)),
InnerIterMut::Heap(h) => h.last(),
}
}
}
impl<K, V> FusedIterator for IterMut<'_, K, V> {}
impl<'a, K: 'a, V: 'a> DoubleEndedIterator for IterMut<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
match &mut self.inner {
InnerIterMut::Inline(i) => i.next_back().map(|(k, v)| (&*k, v)),
InnerIterMut::Heap(h) => h.next_back(),
}
}
}
impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
fn len(&self) -> usize {
match &self.inner {
InnerIterMut::Inline(i) => i.len(),
InnerIterMut::Heap(h) => h.len(),
}
}
}
impl<K, V, const ARRAY_SIZE: usize> IntoIterator for SmallMap<K, V, ARRAY_SIZE> {
type Item = (K, V);
type IntoIter = IntoIter<K, V, ARRAY_SIZE>;
fn into_iter(self) -> Self::IntoIter {
match self.inner {
Inner::Inline(i) => IntoIter {
inner: InnerIntoIter::Inline(i.into_iter()),
},
Inner::Heap(h) => IntoIter {
inner: InnerIntoIter::Heap(h.into_iter()),
},
}
}
}
impl<K, V, const ARRAY_SIZE: usize> Iterator for IntoIter<K, V, ARRAY_SIZE> {
type Item = (K, V);
fn next(&mut self) -> Option<(K, V)> {
match &mut self.inner {
InnerIntoIter::Inline(i) => i.next(),
InnerIntoIter::Heap(h) => h.next(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match &self.inner {
InnerIntoIter::Inline(i) => i.size_hint(),
InnerIntoIter::Heap(h) => h.size_hint(),
}
}
}
impl<K, V, const ARRAY_SIZE: usize> DoubleEndedIterator for IntoIter<K, V, ARRAY_SIZE> {
fn next_back(&mut self) -> Option<(K, V)> {
match &mut self.inner {
InnerIntoIter::Inline(i) => i.next_back(),
InnerIntoIter::Heap(h) => h.next_back(),
}
}
}
impl<K, V, const ARRAY_SIZE: usize> ExactSizeIterator for IntoIter<K, V, ARRAY_SIZE> {
fn len(&self) -> usize {
match &self.inner {
InnerIntoIter::Inline(i) => i.len(),
InnerIntoIter::Heap(h) => h.len(),
}
}
}
impl<K, V, const ARRAY_SIZE: usize> FusedIterator for IntoIter<K, V, ARRAY_SIZE> {}
impl<K: Eq + Ord, V, const ARRAY_SIZE: usize> Extend<(K, V)> for SmallMap<K, V, ARRAY_SIZE> {
fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
iter.into_iter().for_each(move |(k, v)| {
self.insert(k, v);
});
}
}
impl<'a, K: Eq + Ord + Copy, V: Copy, const ARRAY_SIZE: usize> Extend<(&'a K, &'a V)>
for SmallMap<K, V, ARRAY_SIZE>
{
fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
self.extend(iter.into_iter().map(|(&key, &value)| (key, value)));
}
}
impl<K: Hash, V: Hash, const ARRAY_SIZE: usize> Hash for SmallMap<K, V, ARRAY_SIZE> {
fn hash<H: Hasher>(&self, state: &mut H) {
// TODO: track https://github.com/rust-lang/rust/issues/96762
// state.write_length_prefix(self.len());
state.write_usize(self.len());
for elt in self {
elt.hash(state);
}
}
}
impl<K, V, const ARRAY_SIZE: usize> Default for SmallMap<K, V, ARRAY_SIZE> {
/// Creates an empty `SmallMap`.
fn default() -> Self {
Self::new()
}
}
impl<K: PartialEq + Ord, V: PartialEq, const LHS_SIZE: usize, const RHS_SIZE: usize>
PartialEq<SmallMap<K, V, RHS_SIZE>> for SmallMap<K, V, LHS_SIZE>
{
fn eq(&self, other: &SmallMap<K, V, RHS_SIZE>) -> bool {
if let (Inner::Heap(lhs), Inner::Heap(rhs)) = (&self.inner, &other.inner) {
return lhs == rhs;
}
if self.len() != other.len() {
return false;
}
self.iter()
.all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
}
}
impl<K: Eq + Ord, V: Eq, const ARRAY_SIZE: usize> Eq for SmallMap<K, V, ARRAY_SIZE> {}
impl<K: fmt::Debug, V: fmt::Debug, const ARRAY_SIZE: usize> fmt::Debug
for SmallMap<K, V, ARRAY_SIZE>
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
impl<K, Q: ?Sized, V, const ARRAY_SIZE: usize> Index<&Q> for SmallMap<K, V, ARRAY_SIZE>
where
K: Eq + Ord + Borrow<Q>,
Q: Eq + Ord,
{
type Output = V;
fn index(&self, index: &Q) -> &Self::Output {
self.get(index).expect("no entry found for key")
}
}
impl<K, Q: ?Sized, V, const ARRAY_SIZE: usize> IndexMut<&Q> for SmallMap<K, V, ARRAY_SIZE>
where
K: Eq + Ord + Borrow<Q>,
Q: Eq + Ord,
{
fn index_mut(&mut self, index: &Q) -> &mut Self::Output {
self.get_mut(index).expect("no entry found for key")
}
}
impl<K, V, const ARRAY_SIZE: usize> SmallMap<K, V, ARRAY_SIZE> {
/// Gets an iterator over the entries of the map.
pub fn iter(&self) -> Iter<'_, K, V> {
match &self.inner {
Inner::Inline(i) => Iter {
inner: InnerIter::Inline(i.iter()),
},
Inner::Heap(h) => Iter {
inner: InnerIter::Heap(h.iter()),
},
}
}
/// Gets a mutable iterator over the entries of the map.
pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
match &mut self.inner {
Inner::Inline(i) => IterMut {
inner: InnerIterMut::Inline(i.iter_mut()),
},
Inner::Heap(h) => IterMut {
inner: InnerIterMut::Heap(h.iter_mut()),
},
}
}
/// Returns the number of elements in the map.
pub fn len(&self) -> usize {
match &self.inner {
Inner::Inline(i) => i.len(),
Inner::Heap(h) => h.len(),
}
}
/// Returns `true` if the map contains no elements.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
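A hedged sketch of the inline-to-heap promotion from the caller's perspective (`SmallMap` is crate-private, so this is illustrative):
// Stores up to 2 entries inline before spilling to the fallback BTreeMap.
let mut map: SmallMap<u32, &str, 2> = SmallMap::new();
map.insert(1, "one");
map.insert(2, "two"); // still inline
map.insert(3, "three"); // exceeds ARRAY_SIZE: contents move to the heap
assert_eq!(map.get(&3), Some(&"three"));
assert_eq!(map.len(), 3);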

2
boa_engine/src/string/common.rs

@ -114,6 +114,7 @@ impl StaticJsStrings {
(ASYNC_FUNCTION, "AsyncFunction"),
(ASYNC_GENERATOR, "AsyncGenerator"),
(ASYNC_GENERATOR_FUNCTION, "AsyncGeneratorFunction"),
(ATOMICS, "Atomics"),
(BIG_INT, "BigInt"),
(BOOLEAN, "Boolean"),
(DATA_VIEW, "DataView"),
@ -255,6 +256,7 @@ const RAW_STATICS: &[&[u16]] = &[
utf16!("AsyncFunction"),
utf16!("AsyncGenerator"),
utf16!("AsyncGeneratorFunction"),
utf16!("Atomics"),
utf16!("BigInt"),
utf16!("Boolean"),
utf16!("DataView"),

3
boa_engine/src/sys/fallback/mod.rs

@ -0,0 +1,3 @@
// Reexports `std::time` for all other platforms. This could cause panics on
// platforms that don't support `Instant::now()`.
pub(crate) use std::time;

1
boa_engine/src/sys/js/mod.rs

@ -0,0 +1 @@
pub(crate) use web_time as time;

16
boa_engine/src/sys/mod.rs

@ -0,0 +1,16 @@
// We could use `web-time` directly, but that would make it harder to add support
// for other platforms in the future, e.g. `no_std` targets.
// We could also pull in `web-time` and customize it for our target selection.
cfg_if::cfg_if! {
if #[cfg(all(
target_family = "wasm",
not(any(target_os = "emscripten", target_os = "wasi")),
feature = "js"
))] {
mod js;
pub(crate) use self::js::*;
} else {
mod fallback;
pub(crate) use self::fallback::*;
}
}
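Downstream code then imports the clock through this facade regardless of target, as the `vm/mod.rs` hunk below does. A sketch (assuming a module with access to `crate::sys`):
use crate::sys::time::Instant;
// Resolves to `web_time::Instant` on browser-style wasm targets and to
// `std::time::Instant` everywhere else.
let start = Instant::now();
let _elapsed = start.elapsed();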

2
boa_engine/src/value/integer.rs

@ -22,8 +22,6 @@ impl IntegerOrInfinity {
/// Panics if `min > max`.
#[must_use]
pub fn clamp_finite(self, min: i64, max: i64) -> i64 {
assert!(min <= max);
match self {
Self::Integer(i) => i.clamp(min, max),
Self::PositiveInfinity => max,

1
boa_engine/src/vm/code_block.rs

@ -123,7 +123,6 @@ pub(crate) enum Constant {
BigInt(#[unsafe_ignore_trace] JsBigInt),
/// Compile time environments in this function.
///
// Safety: Nothing in CompileTimeEnvironment needs tracing, so this is safe.
//
// TODO(#3034): Maybe changing this to Gc after garbage collection would be better than Rc.

4
boa_engine/src/vm/mod.rs

@ -14,9 +14,9 @@ use boa_profiler::Profiler;
use std::{future::Future, mem::size_of, ops::ControlFlow, pin::Pin, task};
#[cfg(feature = "trace")]
use boa_interner::ToInternedString;
use crate::sys::time::Instant;
#[cfg(feature = "trace")]
use std::time::Instant;
use boa_interner::ToInternedString;
mod call_frame;
mod code_block;

2
boa_tester/Cargo.toml

@ -13,6 +13,7 @@ rust-version.workspace = true
[dependencies]
boa_engine.workspace = true
boa_runtime.workspace = true
boa_gc.workspace = true
clap = { workspace = true, features = ["derive"] }
serde = { workspace = true, features = ["derive"] }
@ -29,6 +30,7 @@ color-eyre = "0.6.2"
phf = { workspace = true, features = ["macros"] }
comfy-table = "7.1.0"
serde_repr = "0.1.17"
bus = "2.4.1"
[features]
default = ["boa_engine/intl", "boa_engine/experimental", "boa_engine/annex-b"]

236
boa_tester/src/exec/js262.rs

@ -1,15 +1,79 @@
use std::{
cell::RefCell,
rc::Rc,
sync::mpsc::{self, Sender},
thread::JoinHandle,
time::Duration,
};
use boa_engine::{
builtins::array_buffer::SharedArrayBuffer,
js_string,
native_function::NativeFunction,
object::{JsObject, ObjectInitializer},
object::{builtins::JsSharedArrayBuffer, JsObject, ObjectInitializer},
property::Attribute,
Context, JsArgs, JsNativeError, JsResult, JsValue, Source,
};
use bus::BusReader;
use crate::START;
pub(super) enum WorkerResult {
Ok,
Err(String),
Panic(String),
}
pub(super) type WorkerHandle = JoinHandle<Result<(), String>>;
#[derive(Debug, Clone)]
pub(super) struct WorkerHandles(Rc<RefCell<Vec<WorkerHandle>>>);
impl WorkerHandles {
pub(super) fn new() -> Self {
Self(Rc::default())
}
pub(super) fn join_all(&mut self) -> Vec<WorkerResult> {
let handles = std::mem::take(&mut *self.0.borrow_mut());
handles
.into_iter()
.map(|h| {
let result = h.join();
match result {
Ok(Ok(())) => WorkerResult::Ok,
Ok(Err(msg)) => {
eprintln!("Detected error on worker thread: {msg}");
WorkerResult::Err(msg)
}
Err(e) => {
let msg = e
.downcast_ref::<&str>()
.map(|&s| String::from(s))
.unwrap_or_default();
eprintln!("Detected panic on worker thread: {msg}");
WorkerResult::Panic(msg)
}
}
})
.collect()
}
}
impl Drop for WorkerHandles {
fn drop(&mut self) {
self.join_all();
}
}
/// Creates the object $262 in the context.
pub(super) fn register_js262(context: &mut Context<'_>) -> JsObject {
pub(super) fn register_js262(handles: WorkerHandles, context: &mut Context<'_>) -> JsObject {
let global_obj = context.global_object();
let agent = agent_obj(handles, context);
let js262 = ObjectInitializer::new(context)
.function(
NativeFunction::from_fn_ptr(create_realm),
@ -32,7 +96,11 @@ pub(super) fn register_js262(context: &mut Context<'_>) -> JsObject {
global_obj,
Attribute::WRITABLE | Attribute::CONFIGURABLE,
)
// .property("agent", agent, Attribute::default())
.property(
js_string!("agent"),
agent,
Attribute::WRITABLE | Attribute::CONFIGURABLE,
)
.build();
context
@ -54,7 +122,7 @@ pub(super) fn register_js262(context: &mut Context<'_>) -> JsObject {
fn create_realm(_: &JsValue, _: &[JsValue], _: &mut Context<'_>) -> JsResult<JsValue> {
let context = &mut Context::default();
let js262 = register_js262(context);
let js262 = register_js262(WorkerHandles::new(), context);
Ok(JsValue::new(js262))
}
@ -108,3 +176,163 @@ fn gc(_this: &JsValue, _: &[JsValue], _context: &mut Context<'_>) -> JsResult<Js
boa_gc::force_collect();
Ok(JsValue::undefined())
}
/// The `$262.agent.sleep()` function.
fn sleep(_: &JsValue, args: &[JsValue], context: &mut Context<'_>) -> JsResult<JsValue> {
let secs = args.get_or_undefined(0).to_number(context)? / 1000.0;
std::thread::sleep(Duration::from_secs_f64(secs));
Ok(JsValue::undefined())
}
/// The `$262.agent.monotonicNow()` function.
#[allow(clippy::unnecessary_wraps)]
fn monotonic_now(_: &JsValue, _: &[JsValue], _: &mut Context<'_>) -> JsResult<JsValue> {
Ok(JsValue::from(START.elapsed().as_millis() as f64))
}
/// Initializes the `$262.agent` object in the main agent.
fn agent_obj(handles: WorkerHandles, context: &mut Context<'_>) -> JsObject {
// TODO: improve initialization of this by using a `[[HostDefined]]` field on `Context`.
let bus = Rc::new(RefCell::new(bus::Bus::new(1)));
let (reports_tx, reports_rx) = mpsc::channel();
let start = unsafe {
let bus = bus.clone();
NativeFunction::from_closure(move |_, args, context| {
let script = args
.get_or_undefined(0)
.to_string(context)?
.to_std_string()
.map_err(|e| JsNativeError::typ().with_message(e.to_string()))?;
let rx = bus.borrow_mut().add_rx();
let tx = reports_tx.clone();
handles.0.borrow_mut().push(std::thread::spawn(move || {
let context = &mut Context::builder()
.can_block(true)
.build()
.map_err(|e| e.to_string())?;
register_js262_worker(rx, tx, context);
let src = Source::from_bytes(&script);
context.eval(src).map_err(|e| e.to_string())?;
Ok(())
}));
Ok(JsValue::undefined())
})
};
let broadcast = unsafe {
// Should technically also take a second numeric argument, but test262 never uses it.
NativeFunction::from_closure(move |_, args, _| {
let buffer = args.get_or_undefined(0).as_object().ok_or_else(|| {
JsNativeError::typ().with_message("argument was not a shared array")
})?;
let buffer = buffer
.borrow()
.as_shared_array_buffer()
.ok_or_else(|| {
JsNativeError::typ().with_message("argument was not a shared array")
})?
.clone();
bus.borrow_mut().broadcast(buffer);
Ok(JsValue::undefined())
})
};
let get_report = unsafe {
NativeFunction::from_closure(move |_, _, _| {
let Ok(msg) = reports_rx.try_recv() else {
return Ok(JsValue::null());
};
Ok(js_string!(msg).into())
})
};
ObjectInitializer::new(context)
.function(start, js_string!("start"), 1)
.function(broadcast, js_string!("broadcast"), 2)
.function(get_report, js_string!("getReport"), 0)
.function(NativeFunction::from_fn_ptr(sleep), js_string!("sleep"), 1)
.function(
NativeFunction::from_fn_ptr(monotonic_now),
js_string!("monotonicNow"),
0,
)
.build()
}
/// Initializes the `$262` object in a worker agent.
fn register_js262_worker(
rx: BusReader<SharedArrayBuffer>,
tx: Sender<Vec<u16>>,
context: &mut Context<'_>,
) {
let rx = RefCell::new(rx);
let receive_broadcast = unsafe {
// Should technically also take a second numeric argument, but test262 never uses it.
NativeFunction::from_closure(move |_, args, context| {
let array = rx.borrow_mut().recv().map_err(|err| {
JsNativeError::typ().with_message(format!("failed to receive buffer: {err}"))
})?;
let callable = args
.get_or_undefined(0)
.as_callable()
.ok_or_else(|| JsNativeError::typ().with_message("argument is not callable"))?;
let buffer = JsSharedArrayBuffer::from_buffer(array, context);
callable.call(&JsValue::undefined(), &[buffer.into()], context)
})
};
let report = unsafe {
NativeFunction::from_closure(move |_, args, context| {
let string = args.get_or_undefined(0).to_string(context)?.to_vec();
tx.send(string)
.map_err(|e| JsNativeError::typ().with_message(e.to_string()))?;
Ok(JsValue::undefined())
})
};
let agent = ObjectInitializer::new(context)
.function(receive_broadcast, js_string!("receiveBroadcast"), 1)
.function(report, js_string!("report"), 1)
.function(NativeFunction::from_fn_ptr(sleep), js_string!("sleep"), 1)
// No need to signal leaving; the main thread joins the worker
// threads anyway.
.function(
NativeFunction::from_fn_ptr(|_, _, _| Ok(JsValue::undefined())),
js_string!("leaving"),
0,
)
.function(
NativeFunction::from_fn_ptr(monotonic_now),
js_string!("monotonicNow"),
0,
)
.build();
let js262 = ObjectInitializer::new(context)
.property(
js_string!("agent"),
agent,
Attribute::WRITABLE | Attribute::CONFIGURABLE,
)
.build();
context
.register_global_property(
js_string!("$262"),
js262,
Attribute::WRITABLE | Attribute::CONFIGURABLE,
)
.expect("shouldn't fail with the default global");
}
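The agent wiring above pairs a one-to-many broadcast bus (main thread to workers) with an mpsc channel (workers back to main). A minimal standalone sketch of that topology with the same crates (names are illustrative):
use std::sync::mpsc;
// Broadcast one payload to a worker and collect its report back.
let mut bus = bus::Bus::<u32>::new(1);
let (report_tx, report_rx) = mpsc::channel::<String>();
let mut rx = bus.add_rx();
let worker = std::thread::spawn(move || {
    let payload = rx.recv().expect("bus closed");
    report_tx.send(format!("got {payload}")).expect("main thread gone");
});
bus.broadcast(42);
assert_eq!(report_rx.recv().unwrap(), "got 42");
worker.join().unwrap();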

94
boa_tester/src/exec/mod.rs

@ -22,6 +22,8 @@ use rayon::prelude::*;
use rustc_hash::FxHashSet;
use std::{cell::RefCell, eprintln, rc::Rc};
use self::js262::WorkerHandles;
impl TestSuite {
/// Runs the test suite.
pub(crate) fn run(
@ -31,6 +33,7 @@ impl TestSuite {
parallel: bool,
max_edition: SpecEdition,
optimizer_options: OptimizerOptions,
console: bool,
) -> SuiteResult {
if verbose != 0 {
println!("Suite {}:", self.path.display());
@ -39,12 +42,30 @@ impl TestSuite {
let suites: Vec<_> = if parallel {
self.suites
.par_iter()
.map(|suite| suite.run(harness, verbose, parallel, max_edition, optimizer_options))
.map(|suite| {
suite.run(
harness,
verbose,
parallel,
max_edition,
optimizer_options,
console,
)
})
.collect()
} else {
self.suites
.iter()
.map(|suite| suite.run(harness, verbose, parallel, max_edition, optimizer_options))
.map(|suite| {
suite.run(
harness,
verbose,
parallel,
max_edition,
optimizer_options,
console,
)
})
.collect()
};
@ -52,13 +73,13 @@ impl TestSuite {
self.tests
.par_iter()
.filter(|test| test.edition <= max_edition)
.flat_map(|test| test.run(harness, verbose, optimizer_options))
.flat_map(|test| test.run(harness, verbose, optimizer_options, console))
.collect()
} else {
self.tests
.iter()
.filter(|test| test.edition <= max_edition)
.flat_map(|test| test.run(harness, verbose, optimizer_options))
.flat_map(|test| test.run(harness, verbose, optimizer_options, console))
.collect()
};
@ -146,17 +167,18 @@ impl Test {
harness: &Harness,
verbose: u8,
optimizer_options: OptimizerOptions,
console: bool,
) -> Vec<TestResult> {
let mut results = Vec::new();
if self.flags.contains(TestFlags::MODULE) {
results.push(self.run_once(harness, false, verbose, optimizer_options));
results.push(self.run_once(harness, false, verbose, optimizer_options, console));
} else {
if self.flags.contains(TestFlags::STRICT) && !self.flags.contains(TestFlags::RAW) {
results.push(self.run_once(harness, true, verbose, optimizer_options));
results.push(self.run_once(harness, true, verbose, optimizer_options, console));
}
if self.flags.contains(TestFlags::NO_STRICT) || self.flags.contains(TestFlags::RAW) {
results.push(self.run_once(harness, false, verbose, optimizer_options));
results.push(self.run_once(harness, false, verbose, optimizer_options, console));
}
}
@ -170,6 +192,7 @@ impl Test {
strict: bool,
verbose: u8,
optimizer_options: OptimizerOptions,
console: bool,
) -> TestResult {
let Ok(source) = Source::from_filepath(&self.path) else {
if verbose > 1 {
@ -227,10 +250,19 @@ impl Test {
let dyn_loader: &dyn ModuleLoader = loader;
let context = &mut Context::builder()
.module_loader(dyn_loader)
.can_block(!self.flags.contains(TestFlags::CAN_BLOCK_IS_FALSE))
.build()
.expect("cannot fail with default global object");
if let Err(e) = self.set_up_env(harness, context, async_result.clone()) {
let mut handles = WorkerHandles::new();
if let Err(e) = self.set_up_env(
harness,
context,
async_result.clone(),
handles.clone(),
console,
) {
return (false, e);
}
@ -298,6 +330,16 @@ impl Test {
_ => {}
}
let results = handles.join_all();
for result in results {
match result {
js262::WorkerResult::Err(msg) => return (false, msg),
js262::WorkerResult::Panic(msg) => panic!("Worker thread panicked: {msg}"),
js262::WorkerResult::Ok => {}
}
}
(true, value.display().to_string())
}
Outcome::Negative {
@ -394,12 +436,21 @@ impl Test {
let dyn_loader: &dyn ModuleLoader = loader;
let context = &mut Context::builder()
.module_loader(dyn_loader)
.can_block(!self.flags.contains(TestFlags::CAN_BLOCK_IS_FALSE))
.build()
.expect("cannot fail with default global object");
context.strict(strict);
context.set_optimizer_options(optimizer_options);
if let Err(e) = self.set_up_env(harness, context, AsyncResult::default()) {
let mut handles = WorkerHandles::new();
if let Err(e) = self.set_up_env(
harness,
context,
AsyncResult::default(),
handles.clone(),
console,
) {
return (false, e);
}
let error = if self.is_module() {
@ -457,6 +508,16 @@ impl Test {
}
};
let results = handles.join_all();
for result in results {
match result {
js262::WorkerResult::Err(msg) => return (false, msg),
js262::WorkerResult::Panic(msg) => panic!("Worker thread panicked: {msg}"),
js262::WorkerResult::Ok => {}
}
}
(
is_error_type(&error, error_type, context),
format!("Uncaught {error}"),
@ -527,12 +588,25 @@ impl Test {
harness: &Harness,
context: &mut Context<'_>,
async_result: AsyncResult,
handles: WorkerHandles,
console: bool,
) -> Result<(), String> {
// Register the print() function.
register_print_fn(context, async_result);
// add the $262 object.
let _js262 = js262::register_js262(context);
let _js262 = js262::register_js262(handles, context);
if console {
let console = boa_runtime::Console::init(context);
context
.register_global_property(
js_string!(boa_runtime::Console::NAME),
console,
Attribute::all(),
)
.expect("the console builtin shouldn't exist");
}
if self.flags.contains(TestFlags::RAW) {
return Ok(());
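
Note the new `can_block` builder option threaded through both run paths above: it maps the test262 `CanBlockIsFalse` flag onto the engine, since agents that may not block must reject `Atomics.wait`. A minimal illustrative sketch of the observable behavior, assuming the `Context::builder().can_block(...)` API exactly as used in the diff (the script and assertion are examples, not part of the PR):

use boa_engine::{Context, Source};

fn main() {
    // A context that reports itself as a non-blocking agent, as built
    // above for tests flagged `CanBlockIsFalse`.
    let context = &mut Context::builder()
        .can_block(false)
        .build()
        .expect("cannot fail with default global object");

    // Per the spec, `Atomics.wait` throws a `TypeError` when the
    // surrounding agent cannot be suspended.
    let result = context.eval(Source::from_bytes(
        "const ta = new Int32Array(new SharedArrayBuffer(4));
         Atomics.wait(ta, 0, 0);",
    ));
    assert!(result.is_err());
}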

24
boa_tester/src/main.rs

@ -85,6 +85,7 @@ use color_eyre::{
};
use colored::Colorize;
use edition::SpecEdition;
use once_cell::sync::Lazy;
use read::ErrorType;
use rustc_hash::{FxHashMap, FxHashSet};
use serde::{
@ -95,8 +96,11 @@ use std::{
ops::{Add, AddAssign},
path::{Path, PathBuf},
process::Command,
time::Instant,
};
static START: Lazy<Instant> = Lazy::new(Instant::now);
/// Structure that contains the configuration of the tester.
#[derive(Debug, Deserialize)]
struct Config {
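
`monotonicNow`, registered on `$262.agent` in `js262.rs`, presumably reads off this `START` instant; forcing the `Lazy` at the top of `main` (below) pins the epoch before any worker thread is spawned. A plausible sketch of the native function, assuming it returns elapsed milliseconds (the exact body and rounding are guesses):

use boa_engine::{Context, JsResult, JsValue};

/// Hypothetical body for the `monotonicNow` host hook: milliseconds
/// elapsed since the tester-wide `START` instant. The signature matches
/// what `NativeFunction::from_fn_ptr` expects in this version of Boa.
fn monotonic_now(_this: &JsValue, _args: &[JsValue], _ctx: &mut Context<'_>) -> JsResult<JsValue> {
    Ok(JsValue::from(START.elapsed().as_millis() as f64))
}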
@ -216,6 +220,10 @@ enum Cli {
/// Displays the conformance results per ECMAScript edition.
#[arg(long)]
versioned: bool,
/// Injects the `Console` object into every context created.
#[arg(long)]
console: bool,
},
/// Compare two test suite results.
Compare {
@ -237,6 +245,8 @@ const DEFAULT_TEST262_DIRECTORY: &str = "test262";
/// Program entry point.
fn main() -> Result<()> {
// initializes the monotonic clock.
Lazy::force(&START);
color_eyre::install()?;
match Cli::parse() {
Cli::Run {
@ -250,6 +260,7 @@ fn main() -> Result<()> {
config: config_path,
edition,
versioned,
console,
} => {
let config: Config = {
let input = std::fs::read_to_string(config_path)?;
@ -283,6 +294,7 @@ fn main() -> Result<()> {
} else {
OptimizerOptions::empty()
},
console,
)
}
Cli::Compare {
@ -441,6 +453,7 @@ fn run_test_suite(
edition: SpecEdition,
versioned: bool,
optimizer_options: OptimizerOptions,
console: bool,
) -> Result<()> {
if let Some(path) = output {
if path.exists() {
@ -467,7 +480,7 @@ fn run_test_suite(
if verbose != 0 {
println!("Test loaded, starting...");
}
test.run(&harness, verbose, optimizer_options);
test.run(&harness, verbose, optimizer_options, console);
} else {
println!(
"Minimum spec edition of test is bigger than the specified edition. Skipping."
@ -485,7 +498,14 @@ fn run_test_suite(
if verbose != 0 {
println!("Test suite loaded, starting tests...");
}
let results = suite.run(&harness, verbose, parallel, edition, optimizer_options);
let results = suite.run(
&harness,
verbose,
parallel,
edition,
optimizer_options,
console,
);
if versioned {
let mut table = comfy_table::Table::new();

4
boa_wasm/Cargo.toml

@ -12,8 +12,8 @@ repository.workspace = true
rust-version.workspace = true
[dependencies]
boa_engine.workspace = true
wasm-bindgen = "0.2.87"
boa_engine = { workspace = true, features = ["js"] }
wasm-bindgen = { version = "0.2.87", default-features = false }
getrandom = { version = "0.2.10", features = ["js"] }
chrono = { workspace = true, default-features = false, features = ["clock", "std", "wasmbind"] }
console_error_panic_hook = "0.1.7"

1
test262_config.toml

@ -9,7 +9,6 @@ features = [
"FinalizationRegistry",
"IsHTMLDDA",
"Atomics",
"resizable-arraybuffer",
"symbols-as-weakmap-keys",
"intl-normative-optional",
