mirror of https://github.com/boa-dev/boa.git
Browse Source
* Implement `SharedArrayBuffer` * Add small documentation * Fix docs * Apply reviewpull/3387/head
José Julián Espina
1 year ago
committed by
GitHub
29 changed files with 5765 additions and 4598 deletions
@ -0,0 +1,374 @@
|
||||
#![allow(unstable_name_collisions)] |
||||
|
||||
use std::{alloc, sync::Arc}; |
||||
|
||||
use boa_profiler::Profiler; |
||||
use portable_atomic::AtomicU8; |
||||
|
||||
use boa_gc::{Finalize, Trace}; |
||||
use sptr::Strict; |
||||
|
||||
use crate::{ |
||||
builtins::{BuiltInBuilder, BuiltInConstructor, BuiltInObject, IntrinsicObject}, |
||||
context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors}, |
||||
js_string, |
||||
object::{internal_methods::get_prototype_from_constructor, ObjectData}, |
||||
property::Attribute, |
||||
realm::Realm, |
||||
string::common::StaticJsStrings, |
||||
Context, JsArgs, JsNativeError, JsObject, JsResult, JsString, JsSymbol, JsValue, |
||||
}; |
||||
|
||||
use super::{get_slice_range, utils::copy_shared_to_shared, SliceRange}; |
||||
|
||||
/// The internal representation of a `SharedArrayBuffer` object.
|
||||
///
|
||||
/// This struct implements `Send` and `Sync`, meaning it can be shared between threads
|
||||
/// running different JS code at the same time.
|
||||
#[derive(Debug, Clone, Trace, Finalize)] |
||||
pub struct SharedArrayBuffer { |
||||
/// The `[[ArrayBufferData]]` internal slot.
|
||||
// Shared buffers cannot be detached.
|
||||
#[unsafe_ignore_trace] |
||||
data: Arc<Box<[AtomicU8]>>, |
||||
} |
||||
|
||||
impl SharedArrayBuffer { |
||||
/// Gets the length of this `SharedArrayBuffer`.
|
||||
pub(crate) fn len(&self) -> usize { |
||||
self.data.len() |
||||
} |
||||
|
||||
/// Gets the inner bytes of this `SharedArrayBuffer`.
|
||||
pub(crate) fn data(&self) -> &[AtomicU8] { |
||||
&self.data |
||||
} |
||||
} |
||||
|
||||
impl IntrinsicObject for SharedArrayBuffer { |
||||
fn init(realm: &Realm) { |
||||
let _timer = Profiler::global().start_event(std::any::type_name::<Self>(), "init"); |
||||
|
||||
let flag_attributes = Attribute::CONFIGURABLE | Attribute::NON_ENUMERABLE; |
||||
|
||||
let get_species = BuiltInBuilder::callable(realm, Self::get_species) |
||||
.name(js_string!("get [Symbol.species]")) |
||||
.build(); |
||||
|
||||
let get_byte_length = BuiltInBuilder::callable(realm, Self::get_byte_length) |
||||
.name(js_string!("get byteLength")) |
||||
.build(); |
||||
|
||||
BuiltInBuilder::from_standard_constructor::<Self>(realm) |
||||
.accessor( |
||||
js_string!("byteLength"), |
||||
Some(get_byte_length), |
||||
None, |
||||
flag_attributes, |
||||
) |
||||
.static_accessor( |
||||
JsSymbol::species(), |
||||
Some(get_species), |
||||
None, |
||||
Attribute::CONFIGURABLE, |
||||
) |
||||
.method(Self::slice, js_string!("slice"), 2) |
||||
.property( |
||||
JsSymbol::to_string_tag(), |
||||
Self::NAME, |
||||
Attribute::READONLY | Attribute::NON_ENUMERABLE | Attribute::CONFIGURABLE, |
||||
) |
||||
.build(); |
||||
} |
||||
|
||||
fn get(intrinsics: &Intrinsics) -> JsObject { |
||||
Self::STANDARD_CONSTRUCTOR(intrinsics.constructors()).constructor() |
||||
} |
||||
} |
||||
|
||||
impl BuiltInObject for SharedArrayBuffer { |
||||
const NAME: JsString = StaticJsStrings::SHARED_ARRAY_BUFFER; |
||||
} |
||||
|
||||
impl BuiltInConstructor for SharedArrayBuffer { |
||||
const LENGTH: usize = 1; |
||||
|
||||
const STANDARD_CONSTRUCTOR: fn(&StandardConstructors) -> &StandardConstructor = |
||||
StandardConstructors::shared_array_buffer; |
||||
|
||||
/// `25.1.3.1 SharedArrayBuffer ( length [ , options ] )`
|
||||
///
|
||||
/// More information:
|
||||
/// - [ECMAScript reference][spec]
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-sharedarraybuffer-constructor
|
||||
fn constructor( |
||||
new_target: &JsValue, |
||||
args: &[JsValue], |
||||
context: &mut Context<'_>, |
||||
) -> JsResult<JsValue> { |
||||
// 1. If NewTarget is undefined, throw a TypeError exception.
|
||||
if new_target.is_undefined() { |
||||
return Err(JsNativeError::typ() |
||||
.with_message("ArrayBuffer.constructor called with undefined new target") |
||||
.into()); |
||||
} |
||||
|
||||
// 2. Let byteLength be ? ToIndex(length).
|
||||
let byte_length = args.get_or_undefined(0).to_index(context)?; |
||||
|
||||
// 3. Return ? AllocateSharedArrayBuffer(NewTarget, byteLength, requestedMaxByteLength).
|
||||
Ok(Self::allocate(new_target, byte_length, context)?.into()) |
||||
} |
||||
} |
||||
|
||||
impl SharedArrayBuffer { |
||||
/// `get SharedArrayBuffer [ @@species ]`
|
||||
///
|
||||
/// More information:
|
||||
/// - [ECMAScript reference][spec]
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-sharedarraybuffer-@@species
|
||||
#[allow(clippy::unnecessary_wraps)] |
||||
fn get_species(this: &JsValue, _: &[JsValue], _: &mut Context<'_>) -> JsResult<JsValue> { |
||||
// 1. Return the this value.
|
||||
Ok(this.clone()) |
||||
} |
||||
|
||||
/// `get SharedArrayBuffer.prototype.byteLength`
|
||||
///
|
||||
/// More information:
|
||||
/// - [ECMAScript reference][spec]
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-get-sharedarraybuffer.prototype.bytelength
|
||||
pub(crate) fn get_byte_length( |
||||
this: &JsValue, |
||||
_args: &[JsValue], |
||||
_: &mut Context<'_>, |
||||
) -> JsResult<JsValue> { |
||||
// 1. Let O be the this value.
|
||||
// 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
|
||||
let obj = this.as_object().ok_or_else(|| { |
||||
JsNativeError::typ() |
||||
.with_message("SharedArrayBuffer.byteLength called with non-object value") |
||||
})?; |
||||
let obj = obj.borrow(); |
||||
// 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
|
||||
let buf = obj.as_shared_array_buffer().ok_or_else(|| { |
||||
JsNativeError::typ() |
||||
.with_message("SharedArrayBuffer.byteLength called with invalid object") |
||||
})?; |
||||
|
||||
// TODO: 4. Let length be ArrayBufferByteLength(O, seq-cst).
|
||||
// 5. Return 𝔽(length).
|
||||
let len = buf.data().len() as u64; |
||||
Ok(len.into()) |
||||
} |
||||
|
||||
/// `SharedArrayBuffer.prototype.slice ( start, end )`
|
||||
///
|
||||
/// More information:
|
||||
/// - [ECMAScript reference][spec]
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-sharedarraybuffer.prototype.slice
|
||||
fn slice(this: &JsValue, args: &[JsValue], context: &mut Context<'_>) -> JsResult<JsValue> { |
||||
// 1. Let O be the this value.
|
||||
// 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
|
||||
let obj = this.as_object().ok_or_else(|| { |
||||
JsNativeError::typ().with_message("ArrayBuffer.slice called with non-object value") |
||||
})?; |
||||
let obj_borrow = obj.borrow(); |
||||
|
||||
// 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
|
||||
let buf = obj_borrow.as_shared_array_buffer().ok_or_else(|| { |
||||
JsNativeError::typ().with_message("ArrayBuffer.slice called with invalid object") |
||||
})?; |
||||
|
||||
let SliceRange { |
||||
start: first, |
||||
length: new_len, |
||||
} = get_slice_range( |
||||
buf.len() as u64, |
||||
args.get_or_undefined(0), |
||||
args.get_or_undefined(1), |
||||
context, |
||||
)?; |
||||
|
||||
// 14. Let ctor be ? SpeciesConstructor(O, %SharedArrayBuffer%).
|
||||
let ctor = obj.species_constructor(StandardConstructors::shared_array_buffer, context)?; |
||||
|
||||
// 15. Let new be ? Construct(ctor, « 𝔽(newLen) »).
|
||||
let new = ctor.construct(&[new_len.into()], Some(&ctor), context)?; |
||||
|
||||
{ |
||||
// 16. Perform ? RequireInternalSlot(new, [[ArrayBufferData]]).
|
||||
// 17. If IsSharedArrayBuffer(new) is false, throw a TypeError exception.
|
||||
let new_obj = new.borrow(); |
||||
let new_buf = new_obj.as_shared_array_buffer().ok_or_else(|| { |
||||
JsNativeError::typ() |
||||
.with_message("SharedArrayBuffer constructor returned invalid object") |
||||
})?; |
||||
|
||||
// 18. If new.[[ArrayBufferData]] is O.[[ArrayBufferData]], throw a TypeError exception.
|
||||
if std::ptr::eq(buf.data().as_ptr(), new_buf.data().as_ptr()) { |
||||
return Err(JsNativeError::typ() |
||||
.with_message("cannot reuse the same `SharedArrayBuffer` for a slice operation") |
||||
.into()); |
||||
} |
||||
|
||||
// TODO: 19. If ArrayBufferByteLength(new, seq-cst) < newLen, throw a TypeError exception.
|
||||
if (new_buf.len() as u64) < new_len { |
||||
return Err(JsNativeError::typ() |
||||
.with_message("invalid size of constructed shared array") |
||||
.into()); |
||||
} |
||||
|
||||
// 20. Let fromBuf be O.[[ArrayBufferData]].
|
||||
let from_buf = buf.data(); |
||||
|
||||
// 21. Let toBuf be new.[[ArrayBufferData]].
|
||||
let to_buf = new_buf.data(); |
||||
|
||||
// 22. Perform CopyDataBlockBytes(toBuf, 0, fromBuf, first, newLen).
|
||||
let first = first as usize; |
||||
let new_len = new_len as usize; |
||||
|
||||
// SAFETY: `get_slice_range` will always return indices that are in-bounds.
|
||||
// This also means that the newly created buffer will have at least `new_len` elements
|
||||
// to write to.
|
||||
unsafe { copy_shared_to_shared(&from_buf[first..], to_buf, new_len) } |
||||
} |
||||
|
||||
// 23. Return new.
|
||||
Ok(new.into()) |
||||
} |
||||
|
||||
/// `AllocateSharedArrayBuffer ( constructor, byteLength [ , maxByteLength ] )`
|
||||
///
|
||||
/// More information:
|
||||
/// - [ECMAScript reference][spec]
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-allocatesharedarraybuffer
|
||||
pub(crate) fn allocate( |
||||
constructor: &JsValue, |
||||
byte_length: u64, |
||||
context: &mut Context<'_>, |
||||
) -> JsResult<JsObject> { |
||||
// TODO:
|
||||
// 1. Let slots be « [[ArrayBufferData]] ».
|
||||
// 2. If maxByteLength is present and maxByteLength is not empty, let allocatingGrowableBuffer
|
||||
// be true; otherwise let allocatingGrowableBuffer be false.
|
||||
// 3. If allocatingGrowableBuffer is true, then
|
||||
// a. If byteLength > maxByteLength, throw a RangeError exception.
|
||||
// b. Append [[ArrayBufferByteLengthData]] and [[ArrayBufferMaxByteLength]] to slots.
|
||||
// 4. Else,
|
||||
// a. Append [[ArrayBufferByteLength]] to slots.
|
||||
|
||||
// 5. Let obj be ? OrdinaryCreateFromConstructor(constructor, "%SharedArrayBuffer.prototype%", slots).
|
||||
let prototype = get_prototype_from_constructor( |
||||
constructor, |
||||
StandardConstructors::shared_array_buffer, |
||||
context, |
||||
)?; |
||||
|
||||
// TODO: 6. If allocatingGrowableBuffer is true, let allocLength be maxByteLength;
|
||||
// otherwise let allocLength be byteLength.
|
||||
|
||||
// 7. Let block be ? CreateSharedByteDataBlock(allocLength).
|
||||
// 8. Set obj.[[ArrayBufferData]] to block.
|
||||
let data = create_shared_byte_data_block(byte_length, context)?; |
||||
|
||||
// TODO:
|
||||
// 9. If allocatingGrowableBuffer is true, then
|
||||
// a. Assert: byteLength ≤ maxByteLength.
|
||||
// b. Let byteLengthBlock be ? CreateSharedByteDataBlock(8).
|
||||
// c. Perform SetValueInBuffer(byteLengthBlock, 0, biguint64, ℤ(byteLength), true, seq-cst).
|
||||
// d. Set obj.[[ArrayBufferByteLengthData]] to byteLengthBlock.
|
||||
// e. Set obj.[[ArrayBufferMaxByteLength]] to maxByteLength.
|
||||
|
||||
// 10. Else,
|
||||
// a. Set obj.[[ArrayBufferByteLength]] to byteLength.
|
||||
let obj = JsObject::from_proto_and_data_with_shared_shape( |
||||
context.root_shape(), |
||||
prototype, |
||||
ObjectData::shared_array_buffer(Self { data }), |
||||
); |
||||
|
||||
// 11. Return obj.
|
||||
Ok(obj) |
||||
} |
||||
} |
||||
|
||||
/// [`CreateSharedByteDataBlock ( size )`][spec] abstract operation.
|
||||
///
|
||||
/// Creates a new `Arc<Vec<AtomicU8>>` that can be used as a backing buffer for a [`SharedArrayBuffer`].
|
||||
///
|
||||
/// For more information, check the [spec][spec].
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-createsharedbytedatablock
|
||||
pub(crate) fn create_shared_byte_data_block( |
||||
size: u64, |
||||
context: &mut Context<'_>, |
||||
) -> JsResult<Arc<Box<[AtomicU8]>>> { |
||||
if size > context.host_hooks().max_buffer_size() { |
||||
return Err(JsNativeError::range() |
||||
.with_message( |
||||
"cannot allocate a buffer that exceeds the maximum buffer size".to_string(), |
||||
) |
||||
.into()); |
||||
} |
||||
|
||||
// 1. Let db be a new Shared Data Block value consisting of size bytes. If it is impossible to
|
||||
// create such a Shared Data Block, throw a RangeError exception.
|
||||
let size = size.try_into().map_err(|e| { |
||||
JsNativeError::range().with_message(format!("couldn't allocate the data block: {e}")) |
||||
})?; |
||||
|
||||
if size == 0 { |
||||
// Must ensure we don't allocate a zero-sized buffer.
|
||||
return Ok(Arc::new(Box::new([]))); |
||||
} |
||||
|
||||
// 2. Let execution be the [[CandidateExecution]] field of the surrounding agent's Agent Record.
|
||||
// 3. Let eventsRecord be the Agent Events Record of execution.[[EventsRecords]] whose
|
||||
// [[AgentSignifier]] is AgentSignifier().
|
||||
// 4. Let zero be « 0 ».
|
||||
// 5. For each index i of db, do
|
||||
// a. Append WriteSharedMemory { [[Order]]: init, [[NoTear]]: true, [[Block]]: db,
|
||||
// [[ByteIndex]]: i, [[ElementSize]]: 1, [[Payload]]: zero } to eventsRecord.[[EventList]].
|
||||
// 6. Return db.
|
||||
|
||||
// Initializing a boxed slice of atomics is almost impossible using safe code.
|
||||
// This replaces that with a simple `alloc` and some casts to convert the allocation
|
||||
// to `Box<[AtomicU8]>`.
|
||||
|
||||
let layout = alloc::Layout::array::<AtomicU8>(size).map_err(|e| { |
||||
JsNativeError::range().with_message(format!("couldn't allocate the data block: {e}")) |
||||
})?; |
||||
|
||||
// SAFETY: We already returned if `size == 0`, making this safe.
|
||||
let ptr: *mut AtomicU8 = unsafe { alloc::alloc_zeroed(layout).cast() }; |
||||
|
||||
if ptr.is_null() { |
||||
return Err(JsNativeError::range() |
||||
.with_message("memory allocator failed to allocate buffer") |
||||
.into()); |
||||
} |
||||
|
||||
// SAFETY:
|
||||
// - It is ensured by the layout that `buffer` has `size` contiguous elements
|
||||
// on its allocation.
|
||||
// - The original `ptr` doesn't escape outside this function.
|
||||
// - `buffer` is a valid pointer by the null check above.
|
||||
let buffer = unsafe { Box::from_raw(std::slice::from_raw_parts_mut(ptr, size)) }; |
||||
|
||||
// Just for good measure, since our implementation depends on having a pointer aligned
|
||||
// to the alignment of `u64`.
|
||||
// This could be replaced with a custom `Box` implementation, but most architectures
|
||||
// already align pointers to 8 bytes, so it's a lot of work for such a small
|
||||
// compatibility improvement.
|
||||
assert_eq!(buffer.as_ptr().addr() % std::mem::align_of::<u64>(), 0); |
||||
|
||||
// 3. Return db.
|
||||
Ok(Arc::new(buffer)) |
||||
} |
@ -1,8 +1,21 @@
|
||||
use crate::Context; |
||||
|
||||
#[test]
fn create_byte_data_block() {
    let context = &mut Context::default();

    // Sunny day: a small allocation succeeds.
    assert!(super::create_byte_data_block(100, context).is_ok());

    // Rainy day: an absurdly large allocation is rejected.
    assert!(super::create_byte_data_block(u64::MAX, context).is_err());
}
||||
|
||||
#[test]
fn create_shared_byte_data_block() {
    let context = &mut Context::default();

    // Sunny day: a small shared allocation succeeds.
    assert!(super::shared::create_shared_byte_data_block(100, context).is_ok());

    // Rainy day: an absurdly large shared allocation is rejected.
    assert!(super::shared::create_shared_byte_data_block(u64::MAX, context).is_err());
}
||||
|
@ -0,0 +1,458 @@
|
||||
#![allow(unstable_name_collisions)] |
||||
|
||||
use std::{ptr, slice::SliceIndex, sync::atomic}; |
||||
|
||||
use portable_atomic::AtomicU8; |
||||
use sptr::Strict; |
||||
|
||||
use crate::{ |
||||
builtins::typed_array::{ClampedU8, Element, TypedArrayElement, TypedArrayKind}, |
||||
Context, JsObject, JsResult, |
||||
}; |
||||
|
||||
use super::ArrayBuffer; |
||||
|
||||
/// A read-only view over a byte buffer: either a plain byte slice or a slice of
/// atomics (the backing storage of a `SharedArrayBuffer`).
#[derive(Debug, Clone, Copy)]
pub(crate) enum SliceRef<'a> {
    /// View over a regular, non-shared byte buffer.
    Slice(&'a [u8]),
    /// View over a shared buffer of atomic bytes.
    AtomicSlice(&'a [AtomicU8]),
}
||||
|
||||
impl SliceRef<'_> { |
||||
/// Gets the byte length of this `SliceRef`.
|
||||
pub(crate) fn len(&self) -> usize { |
||||
match self { |
||||
Self::Slice(buf) => buf.len(), |
||||
Self::AtomicSlice(buf) => buf.len(), |
||||
} |
||||
} |
||||
|
||||
/// Gets a subslice of this `SliceRef`.
|
||||
pub(crate) fn subslice<I>(&self, index: I) -> SliceRef<'_> |
||||
where |
||||
I: SliceIndex<[u8], Output = [u8]> + SliceIndex<[AtomicU8], Output = [AtomicU8]>, |
||||
{ |
||||
match self { |
||||
Self::Slice(buffer) => SliceRef::Slice(buffer.get(index).expect("index out of bounds")), |
||||
Self::AtomicSlice(buffer) => { |
||||
SliceRef::AtomicSlice(buffer.get(index).expect("index out of bounds")) |
||||
} |
||||
} |
||||
} |
||||
|
||||
/// Gets the starting address of this `SliceRef`.
|
||||
pub(crate) fn addr(&self) -> usize { |
||||
match self { |
||||
Self::Slice(buf) => buf.as_ptr().addr(), |
||||
Self::AtomicSlice(buf) => buf.as_ptr().addr(), |
||||
} |
||||
} |
||||
|
||||
/// [`GetValueFromBuffer ( arrayBuffer, byteIndex, type, isTypedArray, order [ , isLittleEndian ] )`][spec]
|
||||
///
|
||||
/// The start offset is determined by the input buffer instead of a `byteIndex` parameter.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// - There must be enough bytes in `buffer` to read an element from an array with type `TypedArrayKind`.
|
||||
/// - `buffer` must be aligned to the alignment of said element.
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-getvaluefrombuffer
|
||||
pub(crate) unsafe fn get_value( |
||||
&self, |
||||
kind: TypedArrayKind, |
||||
order: atomic::Ordering, |
||||
) -> TypedArrayElement { |
||||
unsafe fn read_elem<T: Element>(buffer: SliceRef<'_>, order: atomic::Ordering) -> T { |
||||
// <https://tc39.es/ecma262/#sec-getvaluefrombuffer>
|
||||
|
||||
// 1. Assert: IsDetachedBuffer(arrayBuffer) is false.
|
||||
// 2. Assert: There are sufficient bytes in arrayBuffer starting at byteIndex to represent a value of type.
|
||||
if cfg!(debug_assertions) { |
||||
assert!(buffer.len() >= std::mem::size_of::<T>()); |
||||
assert_eq!(buffer.addr() % std::mem::align_of::<T>(), 0); |
||||
} |
||||
|
||||
// 3. Let block be arrayBuffer.[[ArrayBufferData]].
|
||||
// 4. Let elementSize be the Element Size value specified in Table 70 for Element Type type.
|
||||
// 5. If IsSharedArrayBuffer(arrayBuffer) is true, then
|
||||
// a. Let execution be the [[CandidateExecution]] field of the surrounding agent's Agent Record.
|
||||
// b. Let eventsRecord be the Agent Events Record of execution.[[EventsRecords]] whose [[AgentSignifier]] is AgentSignifier().
|
||||
// c. If isTypedArray is true and IsNoTearConfiguration(type, order) is true, let noTear be true; otherwise let noTear be false.
|
||||
// d. Let rawValue be a List of length elementSize whose elements are nondeterministically chosen byte values.
|
||||
// e. NOTE: In implementations, rawValue is the result of a non-atomic or atomic read instruction on the underlying hardware. The nondeterminism is a semantic prescription of the memory model to describe observable behaviour of hardware with weak consistency.
|
||||
// f. Let readEvent be ReadSharedMemory { [[Order]]: order, [[NoTear]]: noTear, [[Block]]: block, [[ByteIndex]]: byteIndex, [[ElementSize]]: elementSize }.
|
||||
// g. Append readEvent to eventsRecord.[[EventList]].
|
||||
// h. Append Chosen Value Record { [[Event]]: readEvent, [[ChosenValue]]: rawValue } to execution.[[ChosenValues]].
|
||||
// 6. Else,
|
||||
// a. Let rawValue be a List whose elements are bytes from block at indices in the interval from byteIndex (inclusive) to byteIndex + elementSize (exclusive).
|
||||
// 7. Assert: The number of elements in rawValue is elementSize.
|
||||
// 8. If isLittleEndian is not present, set isLittleEndian to the value of the [[LittleEndian]] field of the surrounding agent's Agent Record.
|
||||
// 9. Return RawBytesToNumeric(type, rawValue, isLittleEndian).
|
||||
|
||||
// SAFETY: The invariants of this operation are ensured by the caller.
|
||||
unsafe { T::read_from_buffer(buffer, order) } |
||||
} |
||||
|
||||
let buffer = *self; |
||||
|
||||
// SAFETY: The invariants of this operation are ensured by the caller.
|
||||
unsafe { |
||||
match kind { |
||||
TypedArrayKind::Int8 => read_elem::<i8>(buffer, order).into(), |
||||
TypedArrayKind::Uint8 => read_elem::<u8>(buffer, order).into(), |
||||
TypedArrayKind::Uint8Clamped => read_elem::<ClampedU8>(buffer, order).into(), |
||||
TypedArrayKind::Int16 => read_elem::<i16>(buffer, order).into(), |
||||
TypedArrayKind::Uint16 => read_elem::<u16>(buffer, order).into(), |
||||
TypedArrayKind::Int32 => read_elem::<i32>(buffer, order).into(), |
||||
TypedArrayKind::Uint32 => read_elem::<u32>(buffer, order).into(), |
||||
TypedArrayKind::BigInt64 => read_elem::<i64>(buffer, order).into(), |
||||
TypedArrayKind::BigUint64 => read_elem::<u64>(buffer, order).into(), |
||||
TypedArrayKind::Float32 => read_elem::<f32>(buffer, order).into(), |
||||
TypedArrayKind::Float64 => read_elem::<f64>(buffer, order).into(), |
||||
} |
||||
} |
||||
} |
||||
|
||||
/// `25.1.2.4 CloneArrayBuffer ( srcBuffer, srcByteOffset, srcLength )`
|
||||
///
|
||||
/// More information:
|
||||
/// - [ECMAScript reference][spec]
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-clonearraybuffer
|
||||
pub(crate) fn clone(&self, context: &mut Context<'_>) -> JsResult<JsObject> { |
||||
// 1. Assert: IsDetachedBuffer(srcBuffer) is false.
|
||||
|
||||
// 2. Let targetBuffer be ? AllocateArrayBuffer(%ArrayBuffer%, srcLength).
|
||||
let target_buffer = ArrayBuffer::allocate( |
||||
&context |
||||
.realm() |
||||
.intrinsics() |
||||
.constructors() |
||||
.array_buffer() |
||||
.constructor() |
||||
.into(), |
||||
self.len() as u64, |
||||
context, |
||||
)?; |
||||
|
||||
// 3. Let srcBlock be srcBuffer.[[ArrayBufferData]].
|
||||
|
||||
// 4. Let targetBlock be targetBuffer.[[ArrayBufferData]].
|
||||
{ |
||||
let mut target_buffer_mut = target_buffer.borrow_mut(); |
||||
let target_array_buffer = target_buffer_mut |
||||
.as_array_buffer_mut() |
||||
.expect("This must be an ArrayBuffer"); |
||||
let target_block = target_array_buffer |
||||
.data |
||||
.as_deref_mut() |
||||
.expect("ArrayBuffer cannot be detached here"); |
||||
|
||||
// 5. Perform CopyDataBlockBytes(targetBlock, 0, srcBlock, srcByteOffset, srcLength).
|
||||
|
||||
// SAFETY: Both buffers are of the same length, `buffer.len()`, which makes this operation
|
||||
// safe.
|
||||
unsafe { memcpy(*self, SliceRefMut::Slice(target_block), self.len()) } |
||||
} |
||||
|
||||
// 6. Return targetBuffer.
|
||||
Ok(target_buffer) |
||||
} |
||||
} |
||||
|
||||
impl<'a> From<&'a [u8]> for SliceRef<'a> { |
||||
fn from(value: &'a [u8]) -> Self { |
||||
Self::Slice(value) |
||||
} |
||||
} |
||||
|
||||
impl<'a> From<&'a [AtomicU8]> for SliceRef<'a> { |
||||
fn from(value: &'a [AtomicU8]) -> Self { |
||||
Self::AtomicSlice(value) |
||||
} |
||||
} |
||||
|
||||
/// A writable view over a byte buffer: either a mutable byte slice or a slice of
/// atomics (atomics are writable through a shared reference via interior mutability).
#[derive(Debug)]
pub(crate) enum SliceRefMut<'a> {
    /// Writable view over a regular, non-shared byte buffer.
    Slice(&'a mut [u8]),
    /// Writable view over a shared buffer of atomic bytes.
    AtomicSlice(&'a [AtomicU8]),
}
||||
|
||||
impl SliceRefMut<'_> { |
||||
/// Gets the byte length of this `SliceRefMut`.
|
||||
pub(crate) fn len(&self) -> usize { |
||||
match self { |
||||
Self::Slice(buf) => buf.len(), |
||||
Self::AtomicSlice(buf) => buf.len(), |
||||
} |
||||
} |
||||
|
||||
/// Gets a mutable subslice of this `SliceRefMut`.
|
||||
pub(crate) fn subslice_mut<I>(&mut self, index: I) -> SliceRefMut<'_> |
||||
where |
||||
I: SliceIndex<[u8], Output = [u8]> + SliceIndex<[AtomicU8], Output = [AtomicU8]>, |
||||
{ |
||||
match self { |
||||
Self::Slice(buffer) => { |
||||
SliceRefMut::Slice(buffer.get_mut(index).expect("index out of bounds")) |
||||
} |
||||
Self::AtomicSlice(buffer) => { |
||||
SliceRefMut::AtomicSlice(buffer.get(index).expect("index out of bounds")) |
||||
} |
||||
} |
||||
} |
||||
|
||||
/// Gets the starting address of this `SliceRefMut`.
|
||||
pub(crate) fn addr(&self) -> usize { |
||||
match self { |
||||
Self::Slice(buf) => buf.as_ptr().addr(), |
||||
Self::AtomicSlice(buf) => buf.as_ptr().addr(), |
||||
} |
||||
} |
||||
|
||||
/// `25.1.2.12 SetValueInBuffer ( arrayBuffer, byteIndex, type, value, isTypedArray, order [ , isLittleEndian ] )`
|
||||
///
|
||||
/// The start offset is determined by the input buffer instead of a `byteIndex` parameter.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// - There must be enough bytes in `buffer` to write the `TypedArrayElement`.
|
||||
/// - `buffer` must be aligned to the alignment of the `TypedArrayElement`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if the type of `value` is not equal to the content of `kind`.
|
||||
///
|
||||
/// More information:
|
||||
/// - [ECMAScript reference][spec]
|
||||
///
|
||||
/// [spec]: https://tc39.es/ecma262/#sec-setvalueinbuffer
|
||||
pub(crate) unsafe fn set_value(&mut self, value: TypedArrayElement, order: atomic::Ordering) { |
||||
unsafe fn write_elem<T: Element>( |
||||
buffer: SliceRefMut<'_>, |
||||
value: T, |
||||
order: atomic::Ordering, |
||||
) { |
||||
// <https://tc39.es/ecma262/#sec-setvalueinbuffer>
|
||||
|
||||
// 1. Assert: IsDetachedBuffer(arrayBuffer) is false.
|
||||
// 2. Assert: There are sufficient bytes in arrayBuffer starting at byteIndex to represent a value of type.
|
||||
// 3. Assert: value is a BigInt if IsBigIntElementType(type) is true; otherwise, value is a Number.
|
||||
if cfg!(debug_assertions) { |
||||
assert!(buffer.len() >= std::mem::size_of::<T>()); |
||||
assert_eq!(buffer.addr() % std::mem::align_of::<T>(), 0); |
||||
} |
||||
|
||||
// 4. Let block be arrayBuffer.[[ArrayBufferData]].
|
||||
// 5. Let elementSize be the Element Size value specified in Table 70 for Element Type type.
|
||||
// 6. If isLittleEndian is not present, set isLittleEndian to the value of the [[LittleEndian]] field of the surrounding agent's Agent Record.
|
||||
// 7. Let rawBytes be NumericToRawBytes(type, value, isLittleEndian).
|
||||
// 8. If IsSharedArrayBuffer(arrayBuffer) is true, then
|
||||
// a. Let execution be the [[CandidateExecution]] field of the surrounding agent's Agent Record.
|
||||
// b. Let eventsRecord be the Agent Events Record of execution.[[EventsRecords]] whose [[AgentSignifier]] is AgentSignifier().
|
||||
// c. If isTypedArray is true and IsNoTearConfiguration(type, order) is true, let noTear be true; otherwise let noTear be false.
|
||||
// d. Append WriteSharedMemory { [[Order]]: order, [[NoTear]]: noTear, [[Block]]: block, [[ByteIndex]]: byteIndex, [[ElementSize]]: elementSize, [[Payload]]: rawBytes } to eventsRecord.[[EventList]].
|
||||
// 9. Else,
|
||||
// a. Store the individual bytes of rawBytes into block, starting at block[byteIndex].
|
||||
// 10. Return unused.
|
||||
|
||||
// SAFETY: The invariants of this operation are ensured by the caller.
|
||||
unsafe { |
||||
T::write_to_buffer(buffer, value, order); |
||||
} |
||||
} |
||||
|
||||
// Have to rebind in order to remove the outer `&mut` ref.
|
||||
let buffer = match self { |
||||
SliceRefMut::Slice(buf) => SliceRefMut::Slice(buf), |
||||
SliceRefMut::AtomicSlice(buf) => SliceRefMut::AtomicSlice(buf), |
||||
}; |
||||
|
||||
// SAFETY: The invariants of this operation are ensured by the caller.
|
||||
unsafe { |
||||
match value { |
||||
TypedArrayElement::Int8(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::Uint8(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::Uint8Clamped(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::Int16(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::Uint16(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::Int32(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::Uint32(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::BigInt64(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::BigUint64(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::Float32(e) => write_elem(buffer, e, order), |
||||
TypedArrayElement::Float64(e) => write_elem(buffer, e, order), |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
impl<'a> From<&'a mut [u8]> for SliceRefMut<'a> { |
||||
fn from(value: &'a mut [u8]) -> Self { |
||||
Self::Slice(value) |
||||
} |
||||
} |
||||
|
||||
impl<'a> From<&'a [AtomicU8]> for SliceRefMut<'a> { |
||||
fn from(value: &'a [AtomicU8]) -> Self { |
||||
Self::AtomicSlice(value) |
||||
} |
||||
} |
||||
|
||||
/// Copies `count` bytes from `src` into `dest` using atomic relaxed loads and stores.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// - Both `src` and `dest` must have at least `count` bytes to read and write,
|
||||
/// respectively.
|
||||
pub(super) unsafe fn copy_shared_to_shared(src: &[AtomicU8], dest: &[AtomicU8], count: usize) { |
||||
// TODO: this could be optimized with batches of writes using `u32/u64` stores instead.
|
||||
for i in 0..count { |
||||
// SAFETY: The invariants of this operation are ensured by the caller of the function.
|
||||
unsafe { |
||||
dest.get_unchecked(i).store( |
||||
src.get_unchecked(i).load(atomic::Ordering::Relaxed), |
||||
atomic::Ordering::Relaxed, |
||||
); |
||||
} |
||||
} |
||||
} |
||||
|
||||
/// Copies `count` bytes backwards from `src` into `dest` using atomic relaxed loads
/// and stores.
///
/// Iterating from the last byte to the first allows a correct forward shift within
/// one buffer when the views overlap and `dest` starts after `src`.
///
/// # Safety
///
/// - Both `src` and `dest` must have at least `count` bytes to read and write,
///   respectively.
unsafe fn copy_shared_to_shared_backwards(src: &[AtomicU8], dest: &[AtomicU8], count: usize) {
    for idx in (0..count).rev() {
        // SAFETY: The caller guarantees both slices contain at least `count` elements,
        // so `idx` is in-bounds for both.
        unsafe {
            let byte = src.get_unchecked(idx).load(atomic::Ordering::Relaxed);
            dest.get_unchecked(idx).store(byte, atomic::Ordering::Relaxed);
        }
    }
}
||||
|
||||
/// Copies `count` bytes from the buffer `src` into the buffer `dest`, using the atomic ordering `order`
|
||||
/// if any of the buffers are atomic.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// - Both `src` and `dest` must have at least `count` bytes to read and write, respectively.
|
||||
/// - The region of memory referenced by `src` must not overlap with the region of memory
|
||||
/// referenced by `dest`. This is guaranteed if either of them are slices
|
||||
/// (you cannot borrow and mutably borrow a slice at the same time), but cannot be guaranteed
|
||||
/// for atomic slices.
|
||||
pub(crate) unsafe fn memcpy(src: SliceRef<'_>, dest: SliceRefMut<'_>, count: usize) { |
||||
if cfg!(debug_assertions) { |
||||
assert!(src.len() >= count); |
||||
assert!(dest.len() >= count); |
||||
let src_range = src.addr()..src.addr() + src.len(); |
||||
let dest_range = dest.addr()..dest.addr() + dest.len(); |
||||
assert!(!src_range.contains(&dest_range.start)); |
||||
assert!(!src_range.contains(&dest_range.end)); |
||||
} |
||||
|
||||
// TODO: this could be optimized with batches of writes using `u32/u64` stores instead.
|
||||
match (src, dest) { |
||||
// SAFETY: The invariants of this operation are ensured by the caller of the function.
|
||||
(SliceRef::Slice(src), SliceRefMut::Slice(dest)) => unsafe { |
||||
ptr::copy_nonoverlapping(src.as_ptr(), dest.as_mut_ptr(), count); |
||||
}, |
||||
// SAFETY: The invariants of this operation are ensured by the caller of the function.
|
||||
(SliceRef::Slice(src), SliceRefMut::AtomicSlice(dest)) => unsafe { |
||||
for i in 0..count { |
||||
dest.get_unchecked(i) |
||||
.store(*src.get_unchecked(i), atomic::Ordering::Relaxed); |
||||
} |
||||
}, |
||||
// SAFETY: The invariants of this operation are ensured by the caller of the function.
|
||||
(SliceRef::AtomicSlice(src), SliceRefMut::Slice(dest)) => unsafe { |
||||
for i in 0..count { |
||||
*dest.get_unchecked_mut(i) = src.get_unchecked(i).load(atomic::Ordering::Relaxed); |
||||
} |
||||
}, |
||||
// SAFETY: The invariants of this operation are ensured by the caller of the function.
|
||||
(SliceRef::AtomicSlice(src), SliceRefMut::AtomicSlice(dest)) => unsafe { |
||||
copy_shared_to_shared(src, dest, count); |
||||
}, |
||||
} |
||||
} |
||||
|
||||
/// Copies `count` bytes from the position `from` to the position `to` in `buffer`.
///
/// The source and destination regions may overlap; this behaves like a `memmove`.
///
/// # Safety
///
/// - `buffer` must contain at least `from + count` bytes to be read.
/// - `buffer` must contain at least `to + count` bytes to be written.
pub(crate) unsafe fn memmove(buffer: SliceRefMut<'_>, from: usize, to: usize, count: usize) {
    if cfg!(debug_assertions) {
        assert!(from + count <= buffer.len());
        assert!(to + count <= buffer.len());
    }

    match buffer {
        // SAFETY: The invariants of this operation are ensured by the caller of the function.
        SliceRefMut::Slice(buf) => unsafe {
            let ptr = buf.as_mut_ptr();
            let src_ptr = ptr.add(from);
            let dest_ptr = ptr.add(to);
            // `ptr::copy` has `memmove` semantics, so overlapping source and
            // destination ranges are handled correctly by the standard library.
            ptr::copy(src_ptr, dest_ptr, count);
        },
        // SAFETY: The invariants of this operation are ensured by the caller of the function.
        SliceRefMut::AtomicSlice(buf) => unsafe {
            let src = buf.get_unchecked(from..);
            let dest = buf.get_unchecked(to..);

            // Let's draw a simple array.
            //
            // | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
            //
            // Now let's define `from`, `to` and `count` such that the below condition is satisfied.
            // `from = 0`
            // `to = 2`
            // `count = 4`
            //
            // We can now imagine that the array is pointed to by our indices:
            //
            // | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
            //   ^       ^
            //  from     to
            //
            // If we start copying bytes until `from + 2 = to`, we can see that the new array would be:
            //
            // | 0 | 1 | 0 | 1 | 0 | 5 | 6 | 7 | 8 |
            //           ^       ^
            //       from + 2  to + 2
            //
            // However, we've lost the data that was in the index 2! If this process
            // continues, this'll give the incorrect result:
            //
            // | 0 | 1 | 0 | 1 | 0 | 1 | 6 | 7 | 8 |
            //
            // To solve this, we just need to copy backwards to ensure we never override data that
            // we need in next iterations:
            //
            // | 0 | 1 | 2 | 3 | 4 | 3 | 6 | 7 | 8 |
            //   ^       ^
            //  from     to
            //
            // | 0 | 1 | 2 | 3 | 2 | 3 | 6 | 7 | 8 |
            //   ^       ^
            //  from     to
            //
            // | 0 | 1 | 0 | 1 | 2 | 3 | 6 | 7 | 8 |
            //   ^       ^
            //  from     to
            //
            // A forward copy only loses data when the destination range starts inside
            // the source range, which is exactly the condition checked below; every
            // other arrangement is safe to copy front-to-back.
            if from < to && to < from + count {
                copy_shared_to_shared_backwards(src, dest, count);
            } else {
                copy_shared_to_shared(src, dest, count);
            }
        },
    }
}
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,400 @@
|
||||
#![deny(unsafe_op_in_unsafe_fn)] |
||||
#![allow(clippy::cast_ptr_alignment)] // Invariants are checked by the caller.
|
||||
#![allow(clippy::undocumented_unsafe_blocks)] // Invariants are checked by the caller.
|
||||
|
||||
use std::sync::atomic; |
||||
|
||||
use bytemuck::{AnyBitPattern, NoUninit}; |
||||
use num_traits::ToPrimitive; |
||||
use portable_atomic::{AtomicU16, AtomicU32, AtomicU64}; |
||||
|
||||
use crate::{ |
||||
builtins::{ |
||||
array_buffer::utils::{SliceRef, SliceRefMut}, |
||||
typed_array::TypedArrayElement, |
||||
}, |
||||
value::Numeric, |
||||
Context, JsResult, JsValue, |
||||
}; |
||||
|
||||
/// Transparent newtype over a `u8` used for clamped byte elements
/// (the element type of `Uint8ClampedArray`).
///
/// Clamping happens at conversion time (see `to_uint8_clamp` in its `Element`
/// impl); the stored value is a plain byte with the same layout as `u8`.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, AnyBitPattern, NoUninit)]
#[repr(transparent)]
pub(crate) struct ClampedU8(pub(crate) u8);
||||
|
||||
impl ClampedU8 { |
||||
pub(crate) fn to_be(self) -> Self { |
||||
Self(self.0.to_be()) |
||||
} |
||||
|
||||
pub(crate) fn to_le(self) -> Self { |
||||
Self(self.0.to_le()) |
||||
} |
||||
} |
||||
|
||||
impl From<ClampedU8> for Numeric { |
||||
fn from(value: ClampedU8) -> Self { |
||||
Numeric::Number(value.0.into()) |
||||
} |
||||
} |
||||
|
||||
/// An element that can be stored inside a typed array buffer and converted
/// from/to JS values.
pub(crate) trait Element:
    Sized + Into<TypedArrayElement> + NoUninit + AnyBitPattern
{
    /// Converts a `JsValue` into `Self`, applying the conversion semantics of
    /// the corresponding typed array element type.
    fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self>;

    /// Gets the little endian representation of `Self`.
    fn to_little_endian(self) -> Self;

    /// Gets the big endian representation of `Self`.
    fn to_big_endian(self) -> Self;

    /// Reads `Self` from the `buffer`.
    ///
    /// This will always read values in the native endianness of the target architecture.
    ///
    /// # Safety
    ///
    /// - `buffer` must be aligned to the native alignment of `Self`.
    /// - `buffer` must contain enough bytes to read `std::mem::size_of::<Self>()` bytes.
    unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self;

    /// Writes the bytes of this element into `buffer`.
    ///
    /// This will always write values in the native endianness of the target architecture.
    ///
    /// # Safety
    ///
    /// - `buffer` must be aligned to the native alignment of `Self`.
    /// - `buffer` must contain enough bytes to store `std::mem::size_of::<Self>()` bytes.
    unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering);
}
||||
|
||||
impl Element for u8 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_uint8(context) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
debug_assert!(buffer.len() >= 1); |
||||
|
||||
match buffer { |
||||
SliceRef::Slice(buffer) => unsafe { *buffer.get_unchecked(0) }, |
||||
SliceRef::AtomicSlice(buffer) => unsafe { buffer.get_unchecked(0).load(order) }, |
||||
} |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
debug_assert!(buffer.len() >= 1); |
||||
|
||||
match buffer { |
||||
SliceRefMut::Slice(buffer) => unsafe { |
||||
*buffer.get_unchecked_mut(0) = value; |
||||
}, |
||||
SliceRefMut::AtomicSlice(buffer) => unsafe { |
||||
buffer.get_unchecked(0).store(value, order); |
||||
}, |
||||
} |
||||
} |
||||
} |
||||
|
||||
impl Element for u16 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_uint16(context) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
if cfg!(debug_assertions) { |
||||
assert!(buffer.len() >= std::mem::size_of::<u16>()); |
||||
assert!(buffer.addr() % std::mem::align_of::<u16>() == 0); |
||||
} |
||||
|
||||
match buffer { |
||||
SliceRef::Slice(buffer) => unsafe { *buffer.as_ptr().cast() }, |
||||
SliceRef::AtomicSlice(buffer) => unsafe { |
||||
(*buffer.as_ptr().cast::<AtomicU16>()).load(order) |
||||
}, |
||||
} |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
if cfg!(debug_assertions) { |
||||
assert!(buffer.len() >= std::mem::size_of::<u16>()); |
||||
assert!(buffer.addr() % std::mem::align_of::<u16>() == 0); |
||||
} |
||||
|
||||
match buffer { |
||||
SliceRefMut::Slice(buffer) => unsafe { |
||||
*buffer.as_mut_ptr().cast() = value; |
||||
}, |
||||
SliceRefMut::AtomicSlice(buffer) => unsafe { |
||||
(*buffer.as_ptr().cast::<AtomicU16>()).store(value, order); |
||||
}, |
||||
} |
||||
} |
||||
} |
||||
|
||||
impl Element for u32 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_u32(context) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
if cfg!(debug_assertions) { |
||||
assert!(buffer.len() >= std::mem::size_of::<u32>()); |
||||
assert!(buffer.addr() % std::mem::align_of::<u32>() == 0); |
||||
} |
||||
|
||||
match buffer { |
||||
SliceRef::Slice(buffer) => unsafe { *buffer.as_ptr().cast() }, |
||||
SliceRef::AtomicSlice(buffer) => unsafe { |
||||
(*buffer.as_ptr().cast::<AtomicU32>()).load(order) |
||||
}, |
||||
} |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
if cfg!(debug_assertions) { |
||||
assert!(buffer.len() >= std::mem::size_of::<u32>()); |
||||
assert!(buffer.addr() % std::mem::align_of::<u32>() == 0); |
||||
} |
||||
|
||||
match buffer { |
||||
SliceRefMut::Slice(buffer) => unsafe { |
||||
*buffer.as_mut_ptr().cast() = value; |
||||
}, |
||||
SliceRefMut::AtomicSlice(buffer) => unsafe { |
||||
(*buffer.as_ptr().cast::<AtomicU32>()).store(value, order); |
||||
}, |
||||
} |
||||
} |
||||
} |
||||
|
||||
impl Element for u64 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
Ok(value.to_big_uint64(context)?.to_u64().unwrap_or(u64::MAX)) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
if cfg!(debug_assertions) { |
||||
assert!(buffer.len() >= std::mem::size_of::<u64>()); |
||||
assert!(buffer.addr() % std::mem::align_of::<u64>() == 0); |
||||
} |
||||
|
||||
match buffer { |
||||
SliceRef::Slice(buffer) => unsafe { *buffer.as_ptr().cast() }, |
||||
SliceRef::AtomicSlice(buffer) => unsafe { |
||||
(*buffer.as_ptr().cast::<AtomicU64>()).load(order) |
||||
}, |
||||
} |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
if cfg!(debug_assertions) { |
||||
assert!(buffer.len() >= std::mem::size_of::<u64>()); |
||||
assert!(buffer.addr() % std::mem::align_of::<u64>() == 0); |
||||
} |
||||
|
||||
match buffer { |
||||
SliceRefMut::Slice(buffer) => unsafe { |
||||
*buffer.as_mut_ptr().cast() = value; |
||||
}, |
||||
SliceRefMut::AtomicSlice(buffer) => unsafe { |
||||
(*buffer.as_ptr().cast::<AtomicU64>()).store(value, order); |
||||
}, |
||||
} |
||||
} |
||||
} |
||||
|
||||
impl Element for i8 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_int8(context) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
unsafe { u8::read_from_buffer(buffer, order) as i8 } |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
unsafe { u8::write_to_buffer(buffer, value as u8, order) } |
||||
} |
||||
} |
||||
|
||||
impl Element for ClampedU8 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_uint8_clamp(context).map(ClampedU8) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
unsafe { ClampedU8(u8::read_from_buffer(buffer, order)) } |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
unsafe { u8::write_to_buffer(buffer, value.0, order) } |
||||
} |
||||
} |
||||
|
||||
impl Element for i16 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_int16(context) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
unsafe { u16::read_from_buffer(buffer, order) as i16 } |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
unsafe { u16::write_to_buffer(buffer, value as u16, order) } |
||||
} |
||||
} |
||||
|
||||
impl Element for i32 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_i32(context) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
unsafe { u32::read_from_buffer(buffer, order) as i32 } |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
unsafe { u32::write_to_buffer(buffer, value as u32, order) } |
||||
} |
||||
} |
||||
|
||||
impl Element for i64 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
let big_int = value.to_big_int64(context)?; |
||||
|
||||
Ok(big_int.to_i64().unwrap_or_else(|| { |
||||
if big_int.is_positive() { |
||||
i64::MAX |
||||
} else { |
||||
i64::MIN |
||||
} |
||||
})) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
self.to_be() |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
self.to_le() |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
unsafe { u64::read_from_buffer(buffer, order) as i64 } |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
unsafe { u64::write_to_buffer(buffer, value as u64, order) } |
||||
} |
||||
} |
||||
|
||||
impl Element for f32 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_number(context).map(|f| f as f32) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
f32::from_bits(self.to_bits().to_be()) |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
f32::from_bits(self.to_bits().to_le()) |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
unsafe { f32::from_bits(u32::read_from_buffer(buffer, order)) } |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
unsafe { u32::write_to_buffer(buffer, value.to_bits(), order) } |
||||
} |
||||
} |
||||
|
||||
impl Element for f64 { |
||||
fn from_js_value(value: &JsValue, context: &mut Context<'_>) -> JsResult<Self> { |
||||
value.to_number(context) |
||||
} |
||||
|
||||
fn to_big_endian(self) -> Self { |
||||
f64::from_bits(self.to_bits().to_be()) |
||||
} |
||||
|
||||
fn to_little_endian(self) -> Self { |
||||
f64::from_bits(self.to_bits().to_le()) |
||||
} |
||||
|
||||
unsafe fn read_from_buffer(buffer: SliceRef<'_>, order: atomic::Ordering) -> Self { |
||||
unsafe { f64::from_bits(u64::read_from_buffer(buffer, order)) } |
||||
} |
||||
|
||||
unsafe fn write_to_buffer(buffer: SliceRefMut<'_>, value: Self, order: atomic::Ordering) { |
||||
unsafe { u64::write_to_buffer(buffer, value.to_bits(), order) } |
||||
} |
||||
} |
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,125 @@
|
||||
//! A Rust API wrapper for Boa's `SharedArrayBuffer` Builtin ECMAScript Object
|
||||
use crate::{ |
||||
builtins::array_buffer::SharedArrayBuffer, |
||||
error::JsNativeError, |
||||
object::{JsObject, JsObjectType, ObjectData}, |
||||
value::TryFromJs, |
||||
Context, JsResult, JsValue, |
||||
}; |
||||
use boa_gc::{Finalize, Trace}; |
||||
use std::ops::Deref; |
||||
|
||||
/// `JsSharedArrayBuffer` provides a wrapper for Boa's implementation of the ECMAScript
/// `SharedArrayBuffer` object.
#[derive(Debug, Clone, Trace, Finalize)]
pub struct JsSharedArrayBuffer {
    // The wrapped object; the constructors of this type only ever store a
    // `SharedArrayBuffer` object here.
    inner: JsObject,
}
||||
|
||||
impl JsSharedArrayBuffer { |
||||
/// Creates a new [`JsSharedArrayBuffer`] with `byte_length` bytes of allocated space.
|
||||
#[inline] |
||||
pub fn new(byte_length: usize, context: &mut Context<'_>) -> JsResult<Self> { |
||||
let inner = SharedArrayBuffer::allocate( |
||||
&context |
||||
.intrinsics() |
||||
.constructors() |
||||
.shared_array_buffer() |
||||
.constructor() |
||||
.into(), |
||||
byte_length as u64, |
||||
context, |
||||
)?; |
||||
|
||||
Ok(Self { inner }) |
||||
} |
||||
|
||||
/// Creates a [`JsSharedArrayBuffer`] from a shared raw buffer.
|
||||
#[inline] |
||||
pub fn from_buffer(buffer: SharedArrayBuffer, context: &mut Context<'_>) -> Self { |
||||
let proto = context |
||||
.intrinsics() |
||||
.constructors() |
||||
.shared_array_buffer() |
||||
.prototype(); |
||||
|
||||
let inner = JsObject::from_proto_and_data_with_shared_shape( |
||||
context.root_shape(), |
||||
proto, |
||||
ObjectData::shared_array_buffer(buffer), |
||||
); |
||||
|
||||
Self { inner } |
||||
} |
||||
|
||||
/// Creates a [`JsSharedArrayBuffer`] from a [`JsObject`], throwing a `TypeError` if the object
|
||||
/// is not a shared array buffer.
|
||||
///
|
||||
/// This does not clone the fields of the shared array buffer, it only does a shallow clone of
|
||||
/// the object.
|
||||
#[inline] |
||||
pub fn from_object(object: JsObject) -> JsResult<Self> { |
||||
if object.is_shared_array_buffer() { |
||||
Ok(Self { inner: object }) |
||||
} else { |
||||
Err(JsNativeError::typ() |
||||
.with_message("object is not an ArrayBuffer") |
||||
.into()) |
||||
} |
||||
} |
||||
|
||||
/// Returns the byte length of the array buffer.
|
||||
#[inline] |
||||
#[must_use] |
||||
pub fn byte_length(&self) -> usize { |
||||
self.borrow() |
||||
.as_shared_array_buffer() |
||||
.expect("should be an array buffer") |
||||
.len() |
||||
} |
||||
|
||||
/// Gets the raw buffer of this `JsSharedArrayBuffer`.
|
||||
#[inline] |
||||
#[must_use] |
||||
pub fn inner(&self) -> SharedArrayBuffer { |
||||
self.borrow() |
||||
.as_shared_array_buffer() |
||||
.expect("should be an array buffer") |
||||
.clone() |
||||
} |
||||
} |
||||
|
||||
impl From<JsSharedArrayBuffer> for JsObject { |
||||
#[inline] |
||||
fn from(o: JsSharedArrayBuffer) -> Self { |
||||
o.inner.clone() |
||||
} |
||||
} |
||||
|
||||
impl From<JsSharedArrayBuffer> for JsValue { |
||||
#[inline] |
||||
fn from(o: JsSharedArrayBuffer) -> Self { |
||||
o.inner.clone().into() |
||||
} |
||||
} |
||||
|
||||
// Lets callers use `JsObject` methods directly on the wrapper.
impl Deref for JsSharedArrayBuffer {
    type Target = JsObject;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
||||
|
||||
impl JsObjectType for JsSharedArrayBuffer {} |
||||
|
||||
impl TryFromJs for JsSharedArrayBuffer { |
||||
fn try_from_js(value: &JsValue, _context: &mut Context<'_>) -> JsResult<Self> { |
||||
match value { |
||||
JsValue::Object(o) => Self::from_object(o.clone()), |
||||
_ => Err(JsNativeError::typ() |
||||
.with_message("value is not a SharedArrayBuffer object") |
||||
.into()), |
||||
} |
||||
} |
||||
} |
Loading…
Reference in new issue