diff --git a/mmtk/src/api.rs b/mmtk/src/api.rs
index 3f1ad97..b8b41fd 100644
--- a/mmtk/src/api.rs
+++ b/mmtk/src/api.rs
@@ -3,24 +3,53 @@ use libc::c_void;
use mmtk::memory_manager;
use mmtk::scheduler::GCWorker;
use mmtk::util::opaque_pointer::*;
+use mmtk::util::options::PlanSelector;
use mmtk::util::{Address, ObjectReference};
use mmtk::AllocationSemantics;
use mmtk::Mutator;
use mmtk::MMTK;
+use mmtk::policy::space::Space;
use std::ffi::CStr;
use V8_Upcalls;
use UPCALLS;
use V8;
+/// Release an address buffer
+#[no_mangle]
+pub unsafe extern "C" fn mmtk_release_buffer(ptr: *mut Address, length: usize, capacity: usize) {
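+ // Re-assemble the Vec so Rust reclaims and frees a buffer previously handed out to the C++ side.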
+ let _vec = Vec::<Address>::from_raw_parts(ptr, length, capacity);
+}
+
+/// Check whether an object is movable.
+#[no_mangle]
+pub unsafe extern "C" fn mmtk_is_movable(object: ObjectReference) -> i32 {
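+ // V8 tags heap pointers in the low two bits; strip the tag before asking MMTk about the object.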
+ let object = {
+ let untagged_word = object.to_address().as_usize() & !0b11usize;
+ Address::from_usize(untagged_word).to_object_reference()
+ };
+ if object.is_movable() { 1 } else { 0 }
+}
+
+/// Get the forwarding pointer, or NULL if the object is not forwarded
+#[no_mangle]
+pub unsafe extern "C" fn mmtk_get_forwarded_object(object: ObjectReference) -> *mut c_void {
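+ // Keep the low-bit tag aside so it can be re-applied to the forwarded address below.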
+ let tag = object.to_address().as_usize() & 0b11usize;
+ let object = {
+ let untagged_word = object.to_address().as_usize() & !0b11usize;
+ Address::from_usize(untagged_word).to_object_reference()
+ };
+ object.get_forwarded_object().map(|x| (x.to_address().as_usize() | tag) as *mut c_void).unwrap_or(0 as _)
+}
+
#[no_mangle]
pub extern "C" fn v8_new_heap(calls: *const V8_Upcalls, heap_size: usize) -> *mut c_void {
unsafe {
UPCALLS = calls;
};
- let mmtk: Box<MMTK<V8>> = Box::new(MMTK::new());
- let mmtk: *mut MMTK<V8> = Box::into_raw(mmtk);
- memory_manager::gc_init(unsafe { &mut *mmtk }, heap_size);
+ let mmtk: *const MMTK<V8> = &*crate::SINGLETON;
+ memory_manager::gc_init(unsafe { &mut *(mmtk as *mut MMTK<V8>) }, heap_size);
+ initialize_collection(unsafe { &mut *(mmtk as *mut MMTK<V8>) }, VMThread::UNINITIALIZED);
mmtk as *mut c_void
}
@@ -38,6 +67,25 @@ pub extern "C" fn bind_mutator(
Box::into_raw(memory_manager::bind_mutator(mmtk, tls))
}
+#[no_mangle]
+pub unsafe extern "C" fn mmtk_in_space(mmtk: &'static MMTK, object: ObjectReference, space: AllocationSemantics) -> i32 {
+ match space {
+ AllocationSemantics::Default => {
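+ // "Default" is defined as: mapped, but not in any of the special spaces handled below.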
+ (object.is_mapped()
+ && mmtk_in_space(mmtk, object, AllocationSemantics::ReadOnly) == 0
+ && mmtk_in_space(mmtk, object, AllocationSemantics::Immortal) == 0
+ && mmtk_in_space(mmtk, object, AllocationSemantics::Los) == 0
+ && mmtk_in_space(mmtk, object, AllocationSemantics::Code) == 0
+ && mmtk_in_space(mmtk, object, AllocationSemantics::LargeCode) == 0) as _
+ },
+ AllocationSemantics::ReadOnly => mmtk.plan.base().ro_space.in_space(object) as _,
+ AllocationSemantics::Immortal => mmtk.plan.common().immortal.in_space(object) as _,
+ AllocationSemantics::Los => mmtk.plan.common().los.in_space(object) as _,
+ AllocationSemantics::Code => mmtk.plan.base().code_space.in_space(object) as _,
+ AllocationSemantics::LargeCode => mmtk.plan.base().code_lo_space.in_space(object) as _,
+ }
+}
+
#[no_mangle]
// It is fine we turn the pointer back to box, as we turned a boxed value to the raw pointer in bind_mutator()
#[allow(clippy::not_unsafe_ptr_arg_deref)]
@@ -53,7 +101,18 @@ pub extern "C" fn alloc(
offset: isize,
semantics: AllocationSemantics,
) -> Address {
- memory_manager::alloc::<V8>(mutator, size, align, offset, semantics)
+ let a = memory_manager::alloc::<V8>(mutator, size, align, offset, semantics);
+ unsafe { memory_manager::post_alloc::<V8>(mutator, a.to_object_reference(), size, semantics); }
+ if PlanSelector::PageProtect == mutator.plan.options().plan && AllocationSemantics::Default == semantics {
+ // Possible `array_header_size` values that can be passed to [AllocateUninitializedJSArrayWithElements](https://source.chromium.org/chromium/chromium/src/+/main:v8/src/codegen/code-stub-assembler.h;l=1884).
+ let array_header_sizes = [0x20, 0x50, 0x58];
+ for array_header_size in array_header_sizes {
+ unsafe {
+ memory_manager::post_alloc::<V8>(mutator, a.add(array_header_size).to_object_reference(), 0, semantics);
+ }
+ }
+ }
+ a
}
#[no_mangle]
@@ -107,8 +166,12 @@ pub extern "C" fn scan_region(mmtk: &mut MMTK<V8>) {
}
#[no_mangle]
-pub extern "C" fn is_live_object(object: ObjectReference) -> bool {
- object.is_live()
+pub extern "C" fn mmtk_object_is_live(object: ObjectReference) -> usize {
+ debug_assert_eq!(object.to_address().as_usize() & 0b11, 0);
+ if crate::SINGLETON.plan.base().ro_space.in_space(object) {
+ return 1;
+ }
+ if object.is_reachable() { 1 } else { 0 }
}
#[no_mangle]
diff --git a/mmtk/src/collection.rs b/mmtk/src/collection.rs
index f57a1dd..7729687 100644
--- a/mmtk/src/collection.rs
+++ b/mmtk/src/collection.rs
@@ -1,12 +1,17 @@
use mmtk::scheduler::GCWorker;
+use mmtk::util::*;
use mmtk::scheduler::ProcessEdgesWork;
-use mmtk::util::opaque_pointer::*;
use mmtk::vm::Collection;
-use mmtk::MutatorContext;
+use mmtk::{MutatorContext, MMTK};
use UPCALLS;
use V8;
+use crate::object_archive::global_object_archive;
+use crate::scanning::ROOT_OBJECTS;
+use crate::scanning::flush_roots;
+use crate::scanning::trace_root;
+
pub struct VMCollection {}
impl Collection<V8> for VMCollection {
@@ -28,15 +33,20 @@ impl Collection<V8> for VMCollection {
}
}
- fn spawn_worker_thread(tls: VMThread, ctx: Option<&GCWorker<V8>>) {
+ fn spawn_worker_thread(_tls: VMThread, ctx: Option<&GCWorker<V8>>) {
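+ // Spawn the GC controller/worker threads directly from Rust instead of upcalling into V8.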
let ctx_ptr = if let Some(r) = ctx {
r as *const GCWorker<V8> as *mut GCWorker<V8>
} else {
std::ptr::null_mut()
- };
- unsafe {
- ((*UPCALLS).spawn_worker_thread)(tls, ctx_ptr as usize as _);
- }
+ } as usize;
+ std::thread::spawn(move || {
+ let mmtk: *mut MMTK<V8> = &*crate::SINGLETON as *const MMTK<V8> as *mut MMTK<V8>;
+ if ctx_ptr == 0 {
+ crate::api::start_control_collector(unsafe { &mut *mmtk }, VMWorkerThread(VMThread::UNINITIALIZED));
+ } else {
+ crate::api::start_worker(unsafe { &mut *mmtk }, VMWorkerThread(VMThread::UNINITIALIZED), unsafe { &mut *(ctx_ptr as *mut GCWorker<V8>) });
+ }
+ });
}
fn prepare_mutator<T: MutatorContext<V8>>(
@@ -46,4 +56,19 @@ impl Collection<V8> for VMCollection {
) {
unimplemented!()
}
+
+ fn vm_release() {
+ global_object_archive().update();
+ }
+
+ fn process_weak_refs<W: ProcessEdgesWork<VM = V8>>(worker: &mut GCWorker<V8>) {
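+ // Let V8 trace weak references via an upcall; any roots it reports are flushed as closure work.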
+ unsafe {
+ debug_assert!(ROOT_OBJECTS.is_empty());
+ ((*UPCALLS).process_weak_refs)(trace_root::<W> as _, worker as *mut _ as _);
+ if !ROOT_OBJECTS.is_empty() {
+ flush_roots::<W>(worker);
+ }
+ debug_assert!(ROOT_OBJECTS.is_empty());
+ }
+ }
}
diff --git a/mmtk/src/lib.rs b/mmtk/src/lib.rs
index 85bf685..3cfb9fd 100644
--- a/mmtk/src/lib.rs
+++ b/mmtk/src/lib.rs
@@ -1,3 +1,7 @@
+#![feature(vec_into_raw_parts)]
+#![feature(thread_local)]
+#![feature(const_option)]
+
extern crate libc;
extern crate mmtk;
#[macro_use]
@@ -6,6 +10,7 @@ extern crate lazy_static;
#[macro_use]
extern crate log;
+use std::env;
use std::ptr::null_mut;
use libc::c_void;
@@ -22,6 +27,17 @@ mod object_archive;
pub mod object_model;
pub mod reference_glue;
pub mod scanning;
+use mmtk::util::{Address};
+
+#[repr(C)]
+pub struct NewBuffer {
+ pub ptr: *mut Address,
+ pub capacity: usize,
+}
+
+type ProcessEdgesFn = *const extern "C" fn(buf: *mut Address, size: usize, cap: usize) -> NewBuffer;
+type TraceRootFn = *const extern "C" fn(slot: Address, ctx: &'static mut GCWorker<V8>) -> Address;
+type TraceFieldFn = *const extern "C" fn(slot: Address, ctx: &'static mut GCWorker<V8>) -> Address;
#[repr(C)]
pub struct V8_Upcalls {
@@ -39,6 +55,11 @@ pub struct V8_Upcalls {
pub get_object_size: extern "C" fn(object: ObjectReference) -> usize,
pub get_mmtk_mutator: extern "C" fn(tls: VMMutatorThread) -> *mut Mutator<V8>,
pub is_mutator: extern "C" fn(tls: VMThread) -> bool,
+ pub scan_roots: extern "C" fn(trace_root: TraceRootFn, context: *mut c_void, task_id: usize),
+ pub scan_objects: extern "C" fn(objects: *const ObjectReference, count: usize, process_edges: ProcessEdgesFn, trace_field: TraceFieldFn, context: *mut c_void, task_id: usize),
+ pub process_weak_refs: extern "C" fn(trace_root: TraceRootFn, context: *mut c_void),
+ pub on_move_event: extern "C" fn(from: ObjectReference, to: ObjectReference, size: usize),
+ pub process_ephemerons: extern "C" fn(trace_root: TraceRootFn, context: *mut c_void, task_id: usize),
}
pub static mut UPCALLS: *const V8_Upcalls = null_mut();
@@ -58,9 +79,15 @@ impl VMBinding for V8 {
lazy_static! {
pub static ref SINGLETON: MMTK<V8> = {
- #[cfg(feature = "nogc")]
- std::env::set_var("MMTK_PLAN", "NoGC");
-
+ // V8 can only support up to 8 worker threads.
+ // Cap MMTK_THREADS at 7 so the main thread, which also takes on part of the GC work, fits within that limit.
+ if let Ok(threads) = env::var("MMTK_THREADS").map(|x| x.parse::<usize>().unwrap()) {
+ if threads > 7 {
+ env::set_var("MMTK_THREADS", "7");
+ }
+ } else {
+ env::set_var("MMTK_THREADS", "7");
+ }
MMTK::new()
};
}
diff --git a/mmtk/src/object_archive.rs b/mmtk/src/object_archive.rs
index a0a9a96..da52e23 100644
--- a/mmtk/src/object_archive.rs
+++ b/mmtk/src/object_archive.rs
@@ -1,13 +1,20 @@
use libc::c_void;
+use mmtk::util::ObjectReference;
use mmtk::util::address::Address;
-use std::sync::RwLock;
+use mmtk::policy::space::Space;
-const INITIAL_ARCHIVE_SIZE: usize = 10000;
-const ADDITIONAL_ARCHIVE_SIZE: usize = 1000;
+
+lazy_static! {
+ pub static ref OBJECT_ARCHIVE: ObjectArchive = ObjectArchive::new();
+}
+
+pub fn global_object_archive() -> &'static mut ObjectArchive {
+ unsafe { &mut *(&OBJECT_ARCHIVE as &ObjectArchive as *const ObjectArchive as *mut ObjectArchive) }
+}
#[no_mangle]
pub extern "C" fn tph_archive_new() -> *const c_void {
- Box::into_raw(Box::new(ObjectArchive::new())) as *const c_void
+ &OBJECT_ARCHIVE as &ObjectArchive as *const ObjectArchive as *const c_void
}
#[no_mangle]
@@ -43,36 +50,16 @@ pub extern "C" fn tph_archive_inner_to_obj(
res.to_mut_ptr()
}
-#[no_mangle]
-pub extern "C" fn tph_archive_obj_to_isolate(
- arch: *mut c_void,
- obj_ptr: *mut c_void,
-) -> *mut c_void {
- let arch = unsafe { Box::from_raw(arch as *mut ObjectArchive) };
- let res = arch.object_to_isolate(Address::from_mut_ptr(obj_ptr));
- Box::into_raw(arch);
- res.to_mut_ptr()
-}
-
-#[no_mangle]
-pub extern "C" fn tph_archive_obj_to_space(arch: *mut c_void, obj_ptr: *mut c_void) -> u8 {
- let arch = unsafe { Box::from_raw(arch as *mut ObjectArchive) };
- let res = arch.object_to_space(Address::from_mut_ptr(obj_ptr));
- Box::into_raw(arch);
- res
-}
-
#[no_mangle]
pub extern "C" fn tph_archive_insert(
arch: *mut c_void,
obj_ptr: *mut c_void,
iso_ptr: *mut c_void,
- space: u8,
) {
let obj_addr = Address::from_mut_ptr(obj_ptr);
let iso_addr = Address::from_mut_ptr(iso_ptr);
let mut arch = unsafe { Box::from_raw(arch as *mut ObjectArchive) };
- arch.insert_object(obj_addr, iso_addr, space);
+ arch.insert_object(obj_addr, iso_addr);
Box::into_raw(arch);
}
@@ -84,163 +71,77 @@ pub extern "C" fn tph_archive_remove(arch: *mut c_void, obj_ptr: *mut c_void) {
Box::into_raw(arch);
}
+#[derive(Default)]
pub struct ObjectArchive {
- sorted_addr_list: RwLock<Vec<usize>>,
- isolate_list: RwLock<Vec<usize>>,
- space_list: Vec<u8>,
+ untagged_objects: Vec<ObjectReference>,
iter_pos: usize,
iter_len: usize,
}
impl ObjectArchive {
pub fn new() -> ObjectArchive {
- ObjectArchive {
- sorted_addr_list: RwLock::new(Vec::with_capacity(INITIAL_ARCHIVE_SIZE)),
- isolate_list: RwLock::new(Vec::with_capacity(INITIAL_ARCHIVE_SIZE)),
- space_list: Vec::with_capacity(INITIAL_ARCHIVE_SIZE),
- iter_pos: 0usize,
- iter_len: 0usize,
- }
+ Default::default()
}
- pub fn insert_object(&mut self, obj_addr: Address, isolate: Address, space: u8) {
- let mut lst = match self.sorted_addr_list.write() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.insert: LST LOCK ACQ failed with err: {:#?}", err);
- }
- };
- let mut iso_lst = match self.isolate_list.write() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.insert: ISO LOCK ACQ failed with err: {:#?}", err);
- }
+ pub fn insert_object(&mut self, addr: Address, _isolate: Address) {
+ let untagged_object = unsafe {
+ Address::from_usize(addr.as_usize() & !0b11).to_object_reference()
};
-
- assert_eq!(lst.len(), iso_lst.len());
-
- if lst.capacity() == lst.len() {
- lst.reserve(ADDITIONAL_ARCHIVE_SIZE);
- iso_lst.reserve(ADDITIONAL_ARCHIVE_SIZE);
- self.space_list.reserve(ADDITIONAL_ARCHIVE_SIZE);
- }
- match lst.binary_search(&obj_addr.as_usize()) {
- Ok(_) => {
- debug!("OA.insert: Object {:?} already archived", obj_addr);
- }
+ match self.untagged_objects.binary_search_by(|o| o.to_address().cmp(&untagged_object.to_address())) {
+ Ok(_) => unreachable!(),
Err(idx) => {
- lst.insert(idx, obj_addr.as_usize());
- iso_lst.insert(idx, isolate.as_usize());
- self.space_list.insert(idx, space);
+ self.untagged_objects.insert(idx, untagged_object);
}
- };
+ }
}
- pub fn remove_object(&mut self, obj_addr: Address) {
- let mut lst = match self.sorted_addr_list.write() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.remove: LST LOCK ACQ failed with err: {:#?}", err);
- }
- };
- let mut iso_lst = match self.isolate_list.write() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.remove: ISO LOCK ACQ failed with err: {:#?}", err);
- }
- };
- assert_eq!(lst.len(), iso_lst.len());
-
- let idx = match lst.binary_search(&obj_addr.as_usize()) {
- Ok(idx) => idx,
- Err(_) => {
- panic!("OA.remove: Object {:?} not archived!", obj_addr);
- }
- };
- lst.remove(idx);
- iso_lst.remove(idx);
- self.space_list.remove(idx);
+ pub fn remove_object(&mut self, addr: Address) {
+ let untagged_object = unsafe { Address::from_usize(addr.as_usize() & !0b11).to_object_reference() };
+ let index = self.untagged_objects.iter().position(|x| *x == untagged_object).unwrap();
+ self.untagged_objects.remove(index);
}
pub fn inner_addr_to_object(&self, inner_addr: Address) -> Address {
- let lst = match self.sorted_addr_list.read() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.inner_addr_to_object: LOCK ACQ failed: {:#?}", err);
- }
- };
- let idx = match lst.binary_search(&inner_addr.as_usize()) {
- Ok(idx) => idx,
- Err(idx) => idx - 1,
- };
- unsafe { Address::from_usize(lst[idx]) }
- }
-
- pub fn object_to_isolate(&self, obj_addr: Address) -> Address {
- let lst = match self.sorted_addr_list.read() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.object_to_isolate: LST LOCK ACQ failed: {:#?}", err);
- }
- };
- let iso_lst = match self.isolate_list.read() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.object_to_isolate: ISO LOCK ACQ failed: {:#?}", err);
- }
- };
- assert_eq!(lst.len(), iso_lst.len());
- let idx = match lst.binary_search(&obj_addr.as_usize()) {
+ let idx = match self.untagged_objects.binary_search_by(|o| o.to_address().cmp(&inner_addr)) {
Ok(idx) => idx,
Err(idx) => idx - 1,
};
- unsafe { Address::from_usize(iso_lst[idx]) }
+ self.untagged_objects[idx].to_address()
}
- pub fn object_to_space(&self, obj_addr: Address) -> u8 {
- let lst = match self.sorted_addr_list.read() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.object_to_isolate: LST LOCK ACQ failed: {:#?}", err);
+ pub fn update(&mut self) {
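+ // Called during release: drop entries for dead objects and replace survivors with their forwarded references.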
+ let mut new_objects = vec![];
+ for object in &self.untagged_objects {
+ debug_assert_eq!(object.to_address().as_usize() & 0b11, 0);
+ if object.is_reachable() || crate::SINGLETON.plan.base().ro_space.in_space(*object) {
+ let new_object = object.get_forwarded_object().unwrap_or(*object);
+ debug_assert_eq!(new_object.to_address().as_usize() & 0b11, 0);
+ new_objects.push(new_object);
}
- };
- let idx = match lst.binary_search(&obj_addr.as_usize()) {
- Ok(idx) => idx,
- Err(idx) => idx - 1,
- };
- self.space_list[idx]
+ }
+ new_objects.dedup();
+ new_objects.sort_by(|a, b| a.to_address().cmp(&b.to_address()));
+ self.untagged_objects = new_objects;
}
pub fn reset_iterator(&mut self) {
- let lst = match self.sorted_addr_list.read() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.reset_iterator: LOCK ACQ failed: {:#?}", err);
- }
- };
self.iter_pos = 0;
- self.iter_len = lst.len();
+ self.iter_len = self.untagged_objects.len();
}
pub fn next_object(&mut self) -> Address {
- let lst = match self.sorted_addr_list.read() {
- Ok(res) => res,
- Err(err) => {
- panic!("OA.inner_addr_to_object: LOCK ACQ failed: {:#?}", err);
- }
- };
- if self.iter_len != lst.len() {
+ if self.iter_len != self.untagged_objects.len() {
warn!(
"ObjectArchive changed from {} to {}.",
self.iter_len,
- lst.len()
+ self.untagged_objects.len()
);
- self.iter_len = lst.len();
+ self.iter_len = self.untagged_objects.len();
}
- if self.iter_pos < lst.len() {
- let obj = unsafe { Address::from_usize(lst[self.iter_pos]) };
+ if self.iter_pos < self.untagged_objects.len() {
+ let o = self.untagged_objects[self.iter_pos].to_address();
self.iter_pos += 1;
- obj
+ o
} else {
unsafe { Address::zero() }
}
diff --git a/mmtk/src/object_model.rs b/mmtk/src/object_model.rs
index 5fe8cc3..f03690b 100644
--- a/mmtk/src/object_model.rs
+++ b/mmtk/src/object_model.rs
@@ -1,8 +1,8 @@
use std::sync::atomic::Ordering;
-
+use std::ptr;
use super::UPCALLS;
-use mmtk::util::metadata::header_metadata::HeaderMetadataSpec;
-use mmtk::util::{Address, ObjectReference};
+use mmtk::util::metadata::{header_metadata::HeaderMetadataSpec};
+use mmtk::util::{Address, ObjectReference, metadata};
use mmtk::vm::*;
use mmtk::AllocationSemantics;
use mmtk::CopyContext;
@@ -11,33 +11,35 @@ use V8;
pub struct VMObjectModel {}
impl ObjectModel<V8> for VMObjectModel {
- // FIXME: Use proper specs
- const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::in_header(0);
- const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
- VMLocalForwardingPointerSpec::in_header(0);
- const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec =
- VMLocalForwardingBitsSpec::in_header(0);
- const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::in_header(0);
- const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec =
- VMLocalLOSMarkNurserySpec::in_header(0);
+ const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
+ const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec = VMLocalForwardingPointerSpec::in_header(0);
+ const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = VMLocalForwardingBitsSpec::side_first();
+ const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::side_after(&Self::LOCAL_FORWARDING_BITS_SPEC.as_spec());
+ const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec = VMLocalLOSMarkNurserySpec::side_after(&Self::LOCAL_MARK_BIT_SPEC.as_spec());
fn load_metadata(
- _metadata_spec: &HeaderMetadataSpec,
- _object: ObjectReference,
- _mask: Option<usize>,
- _atomic_ordering: Option<Ordering>,
+ metadata_spec: &HeaderMetadataSpec,
+ object: ObjectReference,
+ optional_mask: Option<usize>,
+ atomic_ordering: Option<Ordering>,
) -> usize {
- unimplemented!()
+ metadata::header_metadata::load_metadata(metadata_spec, object, optional_mask, atomic_ordering)
}
fn store_metadata(
- _metadata_spec: &HeaderMetadataSpec,
- _object: ObjectReference,
- _val: usize,
- _mask: Option<usize>,
- _atomic_ordering: Option<Ordering>,
+ metadata_spec: &HeaderMetadataSpec,
+ object: ObjectReference,
+ val: usize,
+ _optional_mask: Option<usize>,
+ atomic_ordering: Option<Ordering>,
) {
- unimplemented!()
+ metadata::header_metadata::store_metadata(
+ metadata_spec,
+ object,
+ val,
+ None,
+ atomic_ordering,
+ );
}
fn compare_exchange_metadata(
@@ -70,12 +72,22 @@ impl ObjectModel<V8> for VMObjectModel {
unimplemented!()
}
+ #[inline(always)]
fn copy(
- _from: ObjectReference,
- _allocator: AllocationSemantics,
- _copy_context: &mut impl CopyContext,
+ from: ObjectReference,
+ allocator: AllocationSemantics,
+ copy_context: &mut impl CopyContext,
) -> ObjectReference {
- unimplemented!()
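+ // Query the object size via an upcall, bump-allocate a copy, copy the payload, then notify V8 of the move.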
+ let bytes = unsafe { ((*UPCALLS).get_object_size)(from) };
+ let dst = copy_context.alloc_copy(from, bytes, ::std::mem::size_of::<usize>(), 0, allocator);
+ // Copy
+ unsafe {
+ ptr::copy_nonoverlapping::<u8>(from.to_address().to_ptr(), dst.to_mut_ptr(), bytes);
+ }
+ let to_obj = unsafe { dst.to_object_reference() };
+ copy_context.post_copy(to_obj, unsafe { Address::zero() }, bytes, allocator);
+ unsafe { ((*UPCALLS).on_move_event)(from, to_obj, bytes) };
+ to_obj
}
fn copy_to(_from: ObjectReference, _to: ObjectReference, _region: Address) -> Address {
@@ -104,7 +116,7 @@ impl ObjectModel<V8> for VMObjectModel {
}
}
- fn ref_to_address(_object: ObjectReference) -> Address {
- unimplemented!()
+ fn ref_to_address(object: ObjectReference) -> Address {
+ unsafe { Address::from_usize(object.to_address().as_usize() & !0b1) }
}
}
diff --git a/mmtk/src/scanning.rs b/mmtk/src/scanning.rs
index 2101896..93e6775 100644
--- a/mmtk/src/scanning.rs
+++ b/mmtk/src/scanning.rs
@@ -5,6 +5,10 @@ use mmtk::util::ObjectReference;
use mmtk::vm::Scanning;
use mmtk::{Mutator, TransitiveClosure};
use V8;
+use mmtk::scheduler::*;
+use crate::*;
+
+use std::marker::PhantomData;
pub struct VMScanning {}
@@ -25,10 +29,20 @@ impl Scanning<V8> for VMScanning {
}
fn scan_objects<W: ProcessEdgesWork<VM = V8>>(
- _objects: &[ObjectReference],
- _worker: &mut GCWorker<V8>,
+ objects: &[ObjectReference],
+ worker: &mut GCWorker<V8>,
) {
- unimplemented!()
+ unsafe {
+ debug_assert!(OBJECTS_TO_SCAN.is_empty());
+ OBJECTS_TO_SCAN = objects.to_vec();
+ while !OBJECTS_TO_SCAN.is_empty() {
+ let objects = OBJECTS_TO_SCAN.clone();
+ OBJECTS_TO_SCAN = vec![];
+ let buf = objects.as_ptr();
+ let len = objects.len();
+ ((*UPCALLS).scan_objects)(buf, len, create_process_edges_work::<W> as _, trace_slot::<W> as _, worker as *mut _ as _, worker.ordinal);
+ }
+ }
}
fn scan_thread_roots<W: ProcessEdgesWork<VM = V8>>() {
@@ -42,11 +56,159 @@ impl Scanning<V8> for VMScanning {
unimplemented!()
}
- fn scan_vm_specific_roots<W: ProcessEdgesWork<VM = V8>>() {
- unimplemented!()
+ fn scan_vm_specific_roots<W: ProcessEdgesWork<VM = V8>>(worker: &mut GCWorker<V8>) {
+ let x = worker as *mut GCWorker<V8> as usize;
+ mmtk::memory_manager::on_closure_end(&SINGLETON, Box::new(move || {
+ unsafe {
+ let w = x as *mut GCWorker<V8>;
+ debug_assert!(ROOT_OBJECTS.is_empty());
+ ROOT_FLUSHED = false;
+ ((*UPCALLS).process_ephemerons)(trace_root::<W> as _, w as _, (*w).ordinal);
+ if !ROOT_OBJECTS.is_empty() {
+ flush_roots::<W>(&mut *w);
+ }
+ debug_assert!(ROOT_OBJECTS.is_empty());
+ ROOT_FLUSHED
+ }
+ }));
+ mmtk::memory_manager::add_work_packet(
+ &SINGLETON,
+ WorkBucketStage::Closure,
+ ScanAndForwardRoots::<W>::new(),
+ );
}
fn supports_return_barrier() -> bool {
unimplemented!()
}
}
+
+pub struct ScanAndForwardRoots<W: ProcessEdgesWork<VM = V8>>(PhantomData<W>);
+
+impl<W: ProcessEdgesWork<VM = V8>> ScanAndForwardRoots<W> {
+ pub fn new() -> Self {
+ Self(PhantomData)
+ }
+}
+
+impl<W: ProcessEdgesWork<VM = V8>> GCWork<V8> for ScanAndForwardRoots<W> {
+ fn do_work(&mut self, worker: &mut GCWorker<V8>, _mmtk: &'static MMTK<V8>) {
+ unsafe {
+ debug_assert!(ROOT_OBJECTS.is_empty());
+ ((*UPCALLS).scan_roots)(trace_root::<W> as _, worker as *mut _ as _, worker.ordinal);
+ if !ROOT_OBJECTS.is_empty() {
+ flush_roots::<W>(worker);
+ }
+ debug_assert!(ROOT_OBJECTS.is_empty());
+ }
+ }
+}
+
+/// No locks since we always use single-threaded root scanning.
+pub(crate) static mut ROOT_OBJECTS: Vec<ObjectReference> = Vec::new();
+pub(crate) static mut ROOT_FLUSHED: bool = false;
+
+pub(crate) fn flush_roots<W: ProcessEdgesWork<VM = V8>>(_worker: &mut GCWorker<V8>) {
+ unsafe { ROOT_FLUSHED = true; }
+ let mut buf = vec![];
+ unsafe { std::mem::swap(&mut buf, &mut ROOT_OBJECTS); }
+ pub struct ScanRootObjects<W: ProcessEdgesWork<VM = V8>> {
+ buffer: Vec<ObjectReference>,
+ phantom: PhantomData<W>,
+ }
+ impl<W: ProcessEdgesWork<VM = V8>> ScanRootObjects<W> {
+ pub fn new(buffer: Vec<ObjectReference>) -> Self {
+ Self {
+ buffer,
+ phantom: PhantomData,
+ }
+ }
+ }
+ impl<W: ProcessEdgesWork<VM = V8>> GCWork<V8> for ScanRootObjects<W> {
+ fn do_work(&mut self, worker: &mut GCWorker<V8>, _mmtk: &'static MMTK<V8>) {
+ <V8 as mmtk::vm::VMBinding>::VMScanning::scan_objects::<W>(&self.buffer, worker);
+ }
+ }
+ let scan_objects_work = ScanRootObjects::<W>::new(buf);
+ mmtk::memory_manager::add_work_packet(
+ &SINGLETON,
+ WorkBucketStage::Closure,
+ scan_objects_work,
+ );
+}
+
+pub(crate) extern "C" fn trace_root>(slot: Address, worker: &'static mut GCWorker) -> ObjectReference {
+ let obj: ObjectReference = unsafe { slot.load() };
+ let tag = obj.to_address().as_usize() & 0b11usize;
+ let mut w = W::new(vec![], false, &SINGLETON);
+ w.set_worker(worker);
+ let object_untagged = unsafe {
+ Address::from_usize(obj.to_address().as_usize() & !0b11usize).to_object_reference()
+ };
+ let new_obj = w.trace_object(object_untagged);
+ // println!("Root {:?} {:?} -> {:?}", slot, obj, new_obj);
+ if W::OVERWRITE_REFERENCE {
+ unsafe {
+ slot.store((new_obj.to_address().as_usize() & !0b11) | tag);
+ }
+ }
+ unsafe {
+ if ROOT_OBJECTS.is_empty() {
+ ROOT_OBJECTS.reserve(W::CAPACITY);
+ }
+ }
+ for o in &w.nodes {
+ unsafe { ROOT_OBJECTS.push(*o); }
+ }
+ unsafe {
+ if ROOT_OBJECTS.len() > W::CAPACITY {
+ flush_roots::<W>(worker);
+ }
+ }
+ new_obj
+}
+
+#[thread_local]
+static mut OBJECTS_TO_SCAN: Vec<ObjectReference> = Vec::new();
+
+pub(crate) extern "C" fn trace_slot>(slot: Address, worker: &'static mut GCWorker) -> ObjectReference {
+ let obj: ObjectReference = unsafe { slot.load() };
+ let tag = obj.to_address().as_usize() & 0b11usize;
+ let mut w = W::new(vec![], false, &SINGLETON);
+ w.set_worker(worker);
+ let object_untagged = unsafe {
+ Address::from_usize(obj.to_address().as_usize() & !0b11usize).to_object_reference()
+ };
+ let new_obj = w.trace_object(object_untagged);
+ if W::OVERWRITE_REFERENCE {
+ unsafe {
+ slot.store((new_obj.to_address().as_usize() & !0b11) | tag);
+ }
+ }
+ unsafe {
+ if OBJECTS_TO_SCAN.is_empty() {
+ OBJECTS_TO_SCAN.reserve(W::CAPACITY);
+ }
+ }
+ for o in &w.nodes {
+ unsafe { OBJECTS_TO_SCAN.push(*o); }
+ }
+ new_obj
+}
+
+pub(crate) extern "C" fn create_process_edges_work>(
+ ptr: *mut Address,
+ length: usize,
+ capacity: usize,
+) -> NewBuffer {
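+ // Hand-off protocol: wrap the filled edge buffer in a new work packet and return a fresh,
+ // empty buffer (with capacity W::CAPACITY) for the C++ visitor to continue filling.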
+ if !ptr.is_null() {
+ let buf = unsafe { Vec::::from_raw_parts(ptr, length, capacity) };
+ mmtk::memory_manager::add_work_packet(
+ &SINGLETON,
+ WorkBucketStage::Closure,
+ W::new(buf, false, &SINGLETON),
+ );
+ }
+ let (ptr, _, capacity) = Vec::with_capacity(W::CAPACITY).into_raw_parts();
+ NewBuffer { ptr, capacity }
+}
\ No newline at end of file
diff --git a/v8/third_party/heap/mmtk/log.h b/v8/third_party/heap/mmtk/log.h
new file mode 100644
index 0000000..c5896d9
--- /dev/null
+++ b/v8/third_party/heap/mmtk/log.h
@@ -0,0 +1,11 @@
+#ifndef MMTK_LOG_H
+#define MMTK_LOG_H
+
+#define ENABLE_LOGGING false
+
+#define MMTK_LOG(...) \
+ if (ENABLE_LOGGING) { \
+ fprintf(stderr, __VA_ARGS__); \
+ }
+
+#endif // MMTK_LOG_H
\ No newline at end of file
diff --git a/v8/third_party/heap/mmtk/main-thread-sync.h b/v8/third_party/heap/mmtk/main-thread-sync.h
new file mode 100644
index 0000000..f27fc1b
--- /dev/null
+++ b/v8/third_party/heap/mmtk/main-thread-sync.h
@@ -0,0 +1,96 @@
+#ifndef MAIN_THREAD_SYNC_H
+#define MAIN_THREAD_SYNC_H
+
+#include <mutex>
+#include <condition_variable>
+#include <deque>
+#include <functional>
+#include "src/heap/safepoint.h"
+#include "log.h"
+
+namespace mmtk {
+
+class MainThreadSynchronizer;
+
+class TaskAwaiter {
+ std::mutex m_ {};
+ std::condition_variable cv_ {};
+ bool task_completed_ = false;
+
+ void Complete() {
+ std::unique_lock<std::mutex> lock(m_);
+ task_completed_ = true;
+ cv_.notify_all();
+ }
+
+ friend class MainThreadSynchronizer;
+
+ public:
+ void Wait() {
+ std::unique_lock<std::mutex> lock(m_);
+ while (!task_completed_) cv_.wait(lock);
+ }
+};
+
+class MainThreadSynchronizer {
+ std::mutex m_ {};
+ std::condition_variable cv_ {};
+ bool gc_in_progress_ = false;
+ std::deque<std::function<void()>> main_thread_tasks_ {};
+ std::unique_ptr<v8::internal::SafepointScope> safepoint_scope_ = nullptr;
+
+ void DrainTasks() {
+ while (!main_thread_tasks_.empty()) {
+ MMTK_LOG("[main-thread] Run One Task\n");
+ auto task = main_thread_tasks_.front();
+ main_thread_tasks_.pop_front();
+ task();
+ }
+ }
+
+ public:
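+ // Parks the main thread while a GC is in progress, running any tasks the GC workers post to it.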
+ void Block() {
+ MMTK_LOG("[main-thread] Blocked\n");
+ gc_in_progress_ = true;
+ std::unique_lock<std::mutex> lock(m_);
+ while (gc_in_progress_) {
+ DrainTasks();
+ MMTK_LOG("[main-thread] Sleep\n");
+ cv_.wait(lock);
+ MMTK_LOG("[main-thread] Wake\n");
+ }
+ DrainTasks();
+ MMTK_LOG("[main-thread] Resumed\n");
+ }
+
+ void WakeUp() {
+ std::unique_lock<std::mutex> lock(m_);
+ gc_in_progress_ = false;
+ cv_.notify_all();
+ }
+
+ void RunMainThreadTask(std::function<void()> task) {
+ auto awaiter = std::make_unique<TaskAwaiter>();
+ {
+ std::unique_lock<std::mutex> lock(m_);
+ main_thread_tasks_.push_back([task, &awaiter]() {
+ task();
+ awaiter->Complete();
+ });
+ cv_.notify_all();
+ }
+ awaiter->Wait();
+ }
+
+ void EnterSafepoint(v8::internal::Heap* heap) {
+ safepoint_scope_.reset(new v8::internal::SafepointScope(heap));
+ }
+
+ void ExitSafepoint() {
+ safepoint_scope_ = nullptr;
+ }
+};
+
+}
+
+#endif // MAIN_THREAD_SYNC_H
\ No newline at end of file
diff --git a/v8/third_party/heap/mmtk/mmtk-visitors.h b/v8/third_party/heap/mmtk/mmtk-visitors.h
new file mode 100644
index 0000000..0530417
--- /dev/null
+++ b/v8/third_party/heap/mmtk/mmtk-visitors.h
@@ -0,0 +1,373 @@
+#ifndef MMTK_VISITORS_H
+#define MMTK_VISITORS_H
+
+#include "mmtkUpcalls.h"
+#include "log.h"
+#include
+#include
+#include "src/objects/slots-inl.h"
+#include "src/heap/safepoint.h"
+#include "src/codegen/reloc-info.h"
+#include "src/objects/code.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/map-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/heap/objects-visiting-inl.h"
+#include "weak-refs.h"
+#include <unordered_set>
+
+namespace mmtk {
+
+namespace i = v8::internal;
+namespace tph = v8::internal::third_party_heap;
+
+class MMTkRootVisitor: public i::RootVisitor {
+ public:
+ explicit MMTkRootVisitor(i::Heap* heap, TraceRootFn trace_root, void* context, int task_id)
+ : heap_(heap), trace_root_(trace_root), context_(context), task_id_(task_id) {
+ USE(heap_);
+ USE(task_id_);
+ DCHECK(task_id <= 8);
+ }
+
+ virtual void VisitRootPointer(i::Root root, const char* description, i::FullObjectSlot p) override final {
+ ProcessRootEdge(root, p);
+ }
+
+ virtual void VisitRootPointers(i::Root root, const char* description, i::FullObjectSlot start, i::FullObjectSlot end) override final {
+ for (auto p = start; p < end; ++p) ProcessRootEdge(root, p);
+ }
+
+ virtual void VisitRootPointers(i::Root root, const char* description, i::OffHeapObjectSlot start, i::OffHeapObjectSlot end) override final {
+ for (auto p = start; p < end; ++p) ProcessRootEdge(root, p);
+ }
+
+ private:
+ V8_INLINE void ProcessRootEdge(i::Root root, i::FullObjectSlot slot) {
+ DCHECK(!i::HasWeakHeapObjectTag(*slot));
+ i::HeapObject object;
+ if ((*slot).GetHeapObject(&object)) {
+ trace_root_((void*) slot.address(), context_);
+ }
+ }
+
+ v8::internal::Heap* heap_;
+ TraceRootFn trace_root_;
+ void* context_;
+ int task_id_;
+};
+
+class MMTkCustomRootBodyVisitor final : public i::ObjectVisitor {
+ public:
+ explicit MMTkCustomRootBodyVisitor(i::Heap* heap, TraceRootFn trace_root, void* context, int task_id)
+ : heap_(heap), trace_root_(trace_root), context_(context), task_id_(task_id) {
+ USE(heap_);
+ USE(task_id_);
+ DCHECK(task_id <= 8);
+ }
+
+ void VisitPointer(i::HeapObject host, i::ObjectSlot p) final {}
+
+ void VisitMapPointer(i::HeapObject host) final {}
+
+ void VisitPointers(i::HeapObject host, i::ObjectSlot start, i::ObjectSlot end) final {}
+
+ void VisitPointers(i::HeapObject host, i::MaybeObjectSlot start,
+ i::MaybeObjectSlot end) final {
+ // At the moment, custom roots cannot contain weak pointers.
+ UNREACHABLE();
+ }
+
+ // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
+ void VisitCodeTarget(i::Code host, i::RelocInfo* rinfo) override {
+ auto target = i::Code::GetCodeFromTargetAddress(rinfo->target_address());
+ DCHECK(!mmtk_is_movable(target));
+ trace_root_((void*) &target, context_);
+ DCHECK_EQ(target, i::Code::GetCodeFromTargetAddress(rinfo->target_address()));
+ }
+
+ void VisitEmbeddedPointer(i::Code host, i::RelocInfo* rinfo) override {
+ auto o = rinfo->target_object();
+ trace_root_((void*) &o, context_);
+ if (o != rinfo->target_object()) rinfo->set_target_object(heap_, o);
+ }
+
+ private:
+ V8_INLINE void ProcessEdge(i::HeapObject host, i::ObjectSlot slot) {
+ auto object = *slot;
+ if (!object.IsHeapObject()) return;
+ trace_root_((void*) &object, context_);
+ *slot = object;
+ }
+
+ v8::internal::Heap* heap_;
+ TraceRootFn trace_root_;
+ void* context_;
+ int task_id_;
+};
+
+class MMTkEdgeVisitor: public i::HeapVisitor<void, MMTkEdgeVisitor> {
+ public:
+ explicit MMTkEdgeVisitor(i::Heap* heap, ProcessEdgesFn process_edges, TraceFieldFn trace_field, void* context, int task_id)
+ : heap_(heap), process_edges_(process_edges), task_id_(task_id) {
+ trace_field_ = [=](i::HeapObject o) -> base::Optional<i::HeapObject> {
+ auto old = o;
+ trace_field((void*) &o, context);
+ return o != old ? base::make_optional(o) : base::nullopt;
+ };
+ USE(heap_);
+ DCHECK(1 <= task_id && task_id <= 7);
+ NewBuffer buf = process_edges(NULL, 0, 0);
+ buffer_ = buf.buf;
+ cap_ = buf.cap;
+ USE(task_id_);
+ }
+
+ virtual ~MMTkEdgeVisitor() {
+ if (cursor_ > 0) flush();
+ if (buffer_ != NULL) {
+ mmtk_release_buffer(buffer_, cursor_, cap_);
+ }
+ }
+
+ V8_INLINE void VisitDescriptorArray(i::Map map, i::DescriptorArray array) {
+ VisitMapPointer(array);
+ VisitPointers(array, array.GetFirstPointerSlot(), array.GetDescriptorSlot(0));
+ VisitDescriptors(array, array.number_of_descriptors());
+ }
+
+ void VisitDescriptors(i::DescriptorArray descriptor_array, int number_of_own_descriptors) {
+ int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
+ // Note: Always trace all the element in descriptor_arrays.
+ // int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
+ // heap_->gc_count(), new_marked);
+ // if (old_marked < new_marked) {
+ VisitPointers(descriptor_array,
+ i::MaybeObjectSlot(descriptor_array.GetDescriptorSlot(0)),
+ i::MaybeObjectSlot(descriptor_array.GetDescriptorSlot(new_marked)));
+ // }
+ }
+
+ V8_INLINE void VisitMap(i::Map meta_map, i::Map map) {
+ int size = i::Map::BodyDescriptor::SizeOf(meta_map, map);
+ size += VisitDescriptorsForMap(map);
+ // Mark the pointer fields of the Map. If there is a transitions array, it has
+ // been marked already, so it is fine that one of these fields contains a
+ // pointer to it.
+ i::Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
+ }
+
+ V8_INLINE int VisitDescriptorsForMap(i::Map map) {
+ if (!map.CanTransition()) return 0;
+ // Maps that can transition share their descriptor arrays and require
+ // special visiting logic to avoid memory leaks.
+ // Since descriptor arrays are potentially shared, ensure that only the
+ // descriptors that belong to this map are marked. The first time a
+ // non-empty descriptor array is marked, its header is also visited. The
+ // slot holding the descriptor array will be implicitly recorded when the
+ // pointer fields of this map are visited.
+ i::Object maybe_descriptors =
+ i::TaggedField<i::HeapObject, i::Map::kInstanceDescriptorsOffset>::Acquire_Load(
+ heap_->isolate(), map);
+ // If the descriptors are a Smi, then this Map is in the process of being
+ // deserialized, and doesn't yet have an initialized descriptor field.
+ if (maybe_descriptors.IsSmi()) {
+ DCHECK_EQ(maybe_descriptors, i::Smi::uninitialized_deserialization_value());
+ return 0;
+ }
+ auto descriptors = i::DescriptorArray::cast(maybe_descriptors);
+ // Don't do any special processing of strong descriptor arrays, let them get
+ // marked through the normal visitor mechanism.
+ if (descriptors.IsStrongDescriptorArray()) {
+ return 0;
+ }
+ // Mark weak DescriptorArray
+ if (auto forwarded = trace_field_(descriptors)) {
+ descriptors = i::DescriptorArray::cast(*forwarded);
+ i::TaggedField<i::HeapObject, i::Map::kInstanceDescriptorsOffset>::Release_Store(map, descriptors);
+ }
+ auto size = i::DescriptorArray::BodyDescriptor::SizeOf(descriptors.map(), descriptors);
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
+ if (number_of_own_descriptors) {
+ // It is possible that the concurrent marker observes the
+ // number_of_own_descriptors out of sync with the descriptors. In that
+ // case the marking write barrier for the descriptor array will ensure
+ // that all required descriptors are marked. The concurrent marker
+ // just should avoid crashing in that case. That's why we need the
+ // std::min() below.
+ VisitDescriptors(descriptors, std::min(number_of_own_descriptors, descriptors.number_of_descriptors()));
+ }
+ return size;
+ }
+
+ virtual void VisitPointers(i::HeapObject host, i::ObjectSlot start, i::ObjectSlot end) override final {
+ for (auto p = start; p < end; ++p) ProcessEdge(host, p);
+ }
+
+ virtual void VisitPointers(i::HeapObject host, i::MaybeObjectSlot start, i::MaybeObjectSlot end) override final {
+ for (auto p = start; p < end; ++p) ProcessEdge(host, p);
+ }
+
+ virtual void VisitCodeTarget(i::Code host, i::RelocInfo* rinfo) override final {
+ auto target = i::Code::GetCodeFromTargetAddress(rinfo->target_address());
+ DCHECK(!mmtk_is_movable(target));
+ auto forwarded = trace_field_(target);
+ DCHECK(!forwarded);
+ USE(forwarded);
+ }
+
+ virtual void VisitEmbeddedPointer(i::Code host, i::RelocInfo* rinfo) override final {
+ auto o = rinfo->target_object();
+ if (auto forwarded = mmtk::get_forwarded_ref(o)) {
+ rinfo->set_target_object(heap_, *forwarded);
+ } else if (host.IsWeakObject(o) && WEAKREF_PROCESSING_BOOL) {
+ // TODO: Enable weak ref processing
+ UNIMPLEMENTED();
+ } else {
+ if (auto forwarded = trace_field_(o)) {
+ rinfo->set_target_object(heap_, *forwarded);
+ }
+ }
+ }
+
+ virtual void VisitMapPointer(i::HeapObject host) override final {
+ ProcessEdge(host, host.map_slot());
+ }
+
+ private:
+ template <class TSlot>
+ V8_INLINE void ProcessEdge(i::HeapObject host, TSlot slot) {
+ DCHECK(mmtk::is_live(host));
+ DCHECK(!mmtk::get_forwarded_ref(host));
+ i::HeapObject object;
+ if ((*slot).GetHeapObjectIfStrong(&object)) {
+ PushEdge((void*) slot.address());
+ } else if (TSlot::kCanBeWeak && (*slot).GetHeapObjectIfWeak(&object)) {
+ if (!WEAKREF_PROCESSING_BOOL) {
+ PushEdge((void*) slot.address());
+ } else {
+ // TODO: Enable weak ref processing
+ UNIMPLEMENTED();
+ }
+ }
+ }
+
+ V8_INLINE void PushEdge(void* edge) {
+ buffer_[cursor_++] = edge;
+ if (cursor_ >= cap_) flush();
+ }
+
+ void flush() {
+ if (cursor_ > 0) {
+ NewBuffer buf = process_edges_(buffer_, cursor_, cap_);
+ buffer_ = buf.buf;
+ cap_ = buf.cap;
+ cursor_ = 0;
+ }
+ }
+
+ v8::internal::Heap* heap_;
+ ProcessEdgesFn process_edges_;
+ int task_id_;
+ void** buffer_ = nullptr;
+ size_t cap_ = 0;
+ size_t cursor_ = 0;
+ i::WeakObjects* weak_objects_ = mmtk::global_weakref_processor->weak_objects();
+ std::function<base::Optional<i::HeapObject>(i::HeapObject)> trace_field_;
+};
+
+
+
+class MMTkHeapVerifier: public i::RootVisitor, public i::ObjectVisitor {
+ public:
+ explicit MMTkHeapVerifier() {
+ }
+
+ virtual ~MMTkHeapVerifier() {}
+
+ virtual void VisitRootPointer(i::Root root, const char* description, i::FullObjectSlot p) override final {
+ VerifyRootEdge(root, p);
+ }
+
+ virtual void VisitRootPointers(i::Root root, const char* description, i::FullObjectSlot start, i::FullObjectSlot end) override final {
+ for (auto p = start; p < end; ++p) VerifyRootEdge(root, p);
+ }
+
+ virtual void VisitRootPointers(i::Root root, const char* description, i::OffHeapObjectSlot start, i::OffHeapObjectSlot end) override final {
+ for (auto p = start; p < end; ++p) VerifyRootEdge(root, p);
+ }
+
+ virtual void VisitPointers(i::HeapObject host, i::ObjectSlot start, i::ObjectSlot end) override final {
+ for (auto p = start; p < end; ++p) VerifyEdge(host, p);
+ }
+
+ virtual void VisitPointers(i::HeapObject host, i::MaybeObjectSlot start, i::MaybeObjectSlot end) override final {
+ for (auto p = start; p < end; ++p) VerifyEdge(host, p);
+ }
+
+ virtual void VisitCodeTarget(i::Code host, i::RelocInfo* rinfo) override final {
+ auto target = i::Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObject(host, 0, target);
+ }
+
+ virtual void VisitEmbeddedPointer(i::Code host, i::RelocInfo* rinfo) override final {
+ VerifyHeapObject(host, 0, rinfo->target_object());
+ }
+
+ virtual void VisitMapPointer(i::HeapObject host) override final {
+ VerifyEdge(host, host.map_slot());
+ }
+
+ void TransitiveClosure() {
+ while (mark_stack_.size() != 0) {
+ auto o = mark_stack_.back();
+ mark_stack_.pop_back();
+ o.Iterate(this);
+ }
+ }
+
+ static void Verify(i::Heap* heap) {
+ MMTkHeapVerifier visitor;
+ heap->IterateRoots(&visitor, {});
+ visitor.TransitiveClosure();
+ }
+
+ private:
+ V8_INLINE void VerifyRootEdge(i::Root root, i::FullObjectSlot p) {
+ VerifyEdge(i::HeapObject(), p);
+ }
+
+ template <class T>
+ V8_INLINE void VerifyEdge(i::HeapObject host, T p) {
+ i::HeapObject object;
+ if ((*p).GetHeapObject(&object)) {
+ VerifyHeapObject(host, p.address(), object);
+ }
+ }
+
+ V8_INLINE void VerifyHeapObject(i::HeapObject host, i::Address edge, i::HeapObject o) {
+ if (marked_objects_.find(o.ptr()) == marked_objects_.end()) {
+ marked_objects_.insert(o.ptr());
+ if (!tph::Heap::IsValidHeapObject(o)) {
+ printf("Dead edge %p.%p -> %p\n", (void*) host.ptr(), (void*) edge, (void*) o.ptr());
+ }
+ CHECK(tph::Heap::IsValidHeapObject(o));
+ if (!is_live(o)) {
+ printf("Dead edge %p.%p -> %p\n", (void*) host.ptr(), (void*) edge, (void*) o.ptr());
+ }
+ CHECK(is_live(o));
+ if (get_forwarded_ref(o)) {
+ printf("Unforwarded edge %p.%p -> %p\n", (void*) host.ptr(), (void*) edge, (void*) o.ptr());
+ }
+ CHECK(!get_forwarded_ref(o));
+ mark_stack_.push_back(o);
+ }
+ }
+
+ std::unordered_set<i::Address> marked_objects_;
+ std::vector<i::HeapObject> mark_stack_;
+};
+
+} // namespace mmtk
+
+#endif // MMTK_VISITORS_H
\ No newline at end of file
diff --git a/v8/third_party/heap/mmtk/mmtk.cc b/v8/third_party/heap/mmtk/mmtk.cc
index 5c552a6..e761e5c 100644
--- a/v8/third_party/heap/mmtk/mmtk.cc
+++ b/v8/third_party/heap/mmtk/mmtk.cc
@@ -3,147 +3,82 @@
// found in the LICENSE file.
#include "mmtk.h"
+#include "src/heap/heap-inl.h"
+#include "log.h"
+
+namespace mmtk {
+thread_local MMTk_Mutator mutator = nullptr;
+}
namespace v8 {
namespace internal {
namespace third_party_heap {
-class TPHData {
- Heap* v8_tph_;
- MMTk_Heap mmtk_heap_;
- v8::internal::Isolate* isolate_;
- MMTk_Heap_Archive tph_archive_;
-
- public:
- Heap* v8_tph() { return v8_tph_; }
- MMTk_Heap mmtk_heap() { return mmtk_heap_; }
- v8::internal::Isolate * isolate() { return isolate_; }
- MMTk_Heap_Archive archive() { return tph_archive_; }
-
- TPHData(Heap* v8_tph, MMTk_Heap mmtk_heap, Isolate* isolate, MMTk_Heap_Archive tph_archive):
- v8_tph_(v8_tph), mmtk_heap_(mmtk_heap), isolate_(isolate), tph_archive_(tph_archive) {}
-};
-
-// Data structure required for Rust-MMTK
-class BumpAllocator {
- public:
- TPHData* tph_data;
- uintptr_t cursor;
- uintptr_t limit;
- void* space;
-};
-
+v8::internal::Heap* v8_heap = nullptr;
base::AddressRegion code_range_;
-thread_local BumpAllocator* tph_mutator_ = nullptr;
-
-std::vector<TPHData*>* tph_data_list = new std::vector<TPHData*>();
-
extern V8_Upcalls mmtk_upcalls;
-TPHData* get_tph_data(Heap* tph) {
- for (size_t i = 0; i < tph_data_list->size(); i++)
- {
- TPHData* tph_data_ = reinterpret_cast<TPHData*>((*tph_data_list)[i]);
- if (tph_data_->v8_tph() == tph) {
- return tph_data_;
- }
- }
- UNREACHABLE();
-}
-
inline void CheckMutator(Heap* tph) {
- TPHData* tph_data_ = get_tph_data(tph);
- if (tph_mutator_ == nullptr) {
- tph_mutator_ = reinterpret_cast<BumpAllocator*>(
- bind_mutator(tph_data_->mmtk_heap(), &tph_mutator_));
- tph_mutator_->tph_data = tph_data_;
+ if (mmtk::mutator == nullptr) {
+ mmtk::mutator = bind_mutator(mmtk::get_mmtk_instance(tph), &mmtk::mutator);
+ tph->impl()->mutators_.push_back(mmtk::mutator);
}
}
-MMTk_Heap GetMMTkHeap(Address object_pointer) {
- for (size_t i = 0; i < tph_data_list->size(); i++)
- {
- TPHData* tph_data_ = reinterpret_cast<TPHData*>((*tph_data_list)[i]);
- void* ptr = tph_archive_obj_to_isolate(
- tph_data_->archive(), reinterpret_cast<void*>(object_pointer));
- if (ptr != nullptr) {
- return tph_data_->mmtk_heap();
- }
- }
- UNREACHABLE();
-}
-
static std::atomic_bool IsolateCreated { false };
#define GB (1ull << 30)
-#define FIXED_HEAP_SIZE (1ull * GB)
+#define FIXED_HEAP_SIZE (4ull * GB)
size_t Heap::Capacity() {
return FIXED_HEAP_SIZE;
}
std::unique_ptr<Heap> Heap::New(v8::internal::Isolate* isolate) {
+ DCHECK(!v8_heap);
+ v8_heap = isolate->heap();
// MMTK current default maximum heap size is 1GB.
auto isolate_created = IsolateCreated.exchange(true);
DCHECK_WITH_MSG(!isolate_created, "Multiple isolates are not supported.");
- fprintf(stderr, "New Isolate: %lx\n", (unsigned long) isolate);
- MMTk_Heap new_heap = v8_new_heap(&mmtk_upcalls, FIXED_HEAP_SIZE);
- tph_mutator_ = reinterpret_cast<BumpAllocator*>(bind_mutator(new_heap, &tph_mutator_));
+ MMTK_LOG("New Isolate: %lx\n", (unsigned long) isolate);
+ MMTk_Heap mmtk_heap = v8_new_heap(&mmtk_upcalls, FIXED_HEAP_SIZE);
// FIXME
code_range_ = base::AddressRegion(0x60000000, (0xb0000000- 0x60000000)); // isolate->AddCodeRange(code_range_.begin(), code_range_.size());
auto v8_tph = std::make_unique<Heap>();
- TPHData* tph_data = new TPHData(v8_tph.get(), new_heap, isolate, tph_archive_new());
- tph_mutator_->tph_data = tph_data;
- tph_data_list->push_back(tph_data);
+ v8_tph->impl_ = new Impl(mmtk_heap, isolate, tph_archive_new());
return v8_tph;
}
v8::internal::Isolate* Heap::GetIsolate(Address object_pointer) {
- for (size_t i = 0; i < tph_data_list->size(); i++)
- {
- TPHData* tph_data_ = reinterpret_cast<TPHData*>((*tph_data_list)[i]);
- void* ptr = tph_archive_obj_to_isolate(
- tph_data_->archive(), reinterpret_cast<void*>(object_pointer));
- if (ptr != nullptr) {
- return reinterpret_cast<v8::internal::Isolate*>(ptr);
- }
- }
- UNREACHABLE();
+ return v8_heap->isolate();
}
// Address space in Rust is statically from 0x60000000 - 0xb0000000
AllocationResult Heap::Allocate(size_t size_in_bytes, AllocationType type, AllocationAlignment align) {
CheckMutator(this);
- TPHData* tph_data_ = get_tph_data(this);
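+ // Until heap deserialization completes, route old-space allocations to the map space,
+ // which is mapped to MMTk's immortal semantic below.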
+ if (!v8_heap->deserialization_complete() && type == AllocationType::kOld) {
+ type = AllocationType::kMap;
+ }
bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
size_t align_bytes = (type == AllocationType::kCode) ? kCodeAlignment : (align == kWordAligned) ? kSystemPointerSize : (align == kDoubleAligned) ? kDoubleSize : kSystemPointerSize;
- // Get MMTk space that the object should be allocated to.
- int space = (type == AllocationType::kCode) ? 3 : (type == AllocationType::kReadOnly) ? 4 : (large_object) ? 2 : 0;
+ auto mmtk_allocator = mmtk::GetAllocationSemanticForV8AllocationType(type, large_object);
Address result =
- reinterpret_cast<Address>(alloc(tph_mutator_, size_in_bytes, align_bytes, 0, space));
- // Remember the V8 internal `AllocationSpace` for this object.
- // This is required to pass various V8 internal space checks.
- // TODO(wenyuzhao): Use MMTk's vm-specific spaces for allocation instead of remembering the `AllocationSpace`s.
- AllocationSpace allocation_space;
- if (type == AllocationType::kCode) {
- allocation_space = large_object ? CODE_LO_SPACE : CODE_SPACE;
- } else if (type == AllocationType::kReadOnly) {
- allocation_space = RO_SPACE;
- } else {
- allocation_space = large_object ? LO_SPACE : OLD_SPACE;
- }
- tph_archive_insert(tph_data_->archive(), reinterpret_cast<void*>(result), tph_data_->isolate(), uint8_t(allocation_space));
+ reinterpret_cast<Address>(alloc(mmtk::mutator, size_in_bytes, align_bytes, 0, (int) mmtk_allocator));
+ tph_archive_insert(mmtk::get_object_archive(this), reinterpret_cast<void*>(result), mmtk::get_isolate(this));
HeapObject rtn = HeapObject::FromAddress(result);
return rtn;
}
+bool Heap::IsPendingAllocation(HeapObject object) {
+ return false;
+}
+
Address Heap::GetObjectFromInnerPointer(Address inner_pointer) {
- TPHData* tph_data_ = get_tph_data(this);
return reinterpret_cast<Address>(
- tph_archive_inner_to_obj(tph_data_->archive(),
+ tph_archive_inner_to_obj(mmtk::get_object_archive(this),
reinterpret_cast<void*>(inner_pointer)));
}
@@ -152,25 +87,22 @@ const v8::base::AddressRegion& Heap::GetCodeRange() {
}
bool Heap::CollectGarbage() {
+ v8_heap->gc_count_++;
+ v8_heap->SetGCState(v8::internal::Heap::MARK_COMPACT);
+ handle_user_collection_request(mmtk::get_mmtk_instance(this), (void*) 0);
+ v8_heap->SetGCState(v8::internal::Heap::NOT_IN_GC);
return true;
}
-// Uninitialized space tag
-constexpr AllocationSpace kNoSpace = AllocationSpace(255);
-
-// Checks whether the address is *logically* in the allocation_space.
-// This does not related the real MMTk space that contains the address,
-// but the V8 internal space expected by the runtime.
-//
-// TODO: Currently we record the space tag for each object. In the future we
-// need to link each allocation_space to a real MMTk space.
-bool Heap::InSpace(Address address, AllocationSpace allocation_space) {
- for (auto tph_data : *tph_data_list) {
- auto space = AllocationSpace(tph_archive_obj_to_space(tph_data->archive(), reinterpret_cast(address)));
- if (space == kNoSpace) continue;
- return space == allocation_space;
- }
- UNREACHABLE();
+bool Heap::InSpace(Address address, AllocationSpace v8_space) {
+ auto mmtk_space = mmtk::GetAllocationSemanticForV8Space(v8_space);
+ // TODO(wenyuzhao): Infer isolate from address. May involves consulting the SFT.
+ auto mmtk = mmtk::get_mmtk_instance(v8_heap);
+ return mmtk_in_space(mmtk, (void*) address, (size_t) mmtk_space) != 0;
+}
+
+bool Heap::IsImmovable(HeapObject object) {
+ return mmtk_is_movable(object) == 0;
}
bool Heap::InOldSpace(Address address) {
@@ -191,17 +123,15 @@ bool Heap::InLargeObjectSpace(Address address) {
}
bool Heap::IsValidHeapObject(HeapObject object) {
- return is_live_object(reinterpret_cast<void*>(object.address()));
+ return mmtk_object_is_live(reinterpret_cast<void*>(object.address())) != 0;
}
void Heap::ResetIterator() {
- TPHData* tph_data_ = get_tph_data(this);
- tph_archive_iter_reset(tph_data_->archive());
+ tph_archive_iter_reset(mmtk::get_object_archive(this));
}
HeapObject Heap::NextObject() {
- TPHData* tph_data_ = get_tph_data(this);
- void* obj_addr = tph_archive_iter_next(tph_data_->archive());
+ void* obj_addr = tph_archive_iter_next(mmtk::get_object_archive(this));
if (obj_addr != nullptr) {
return HeapObject::FromAddress(reinterpret_cast(obj_addr));
} else {
diff --git a/v8/third_party/heap/mmtk/mmtk.h b/v8/third_party/heap/mmtk/mmtk.h
index 6e394ea..4352c02 100644
--- a/v8/third_party/heap/mmtk/mmtk.h
+++ b/v8/third_party/heap/mmtk/mmtk.h
@@ -4,6 +4,8 @@
#include "src/heap/third-party/heap-api.h"
#include "src/base/address-region.h"
#include "src/heap/heap.h"
+#include "src/objects/objects.h"
+#include "src/objects/objects-inl.h"
#include "src/execution/isolate.h"
#include
#include
@@ -34,7 +36,7 @@ extern void* alloc_slow(MMTk_Mutator mutator, size_t size,
extern void post_alloc(MMTk_Mutator mutator, void* refer,
int bytes, int allocator);
-extern bool is_live_object(void* ref);
+extern size_t mmtk_object_is_live(void* ref);
extern bool is_mapped_object(void* ref);
extern bool is_mapped_address(void* addr);
extern void modify_check(void *mmtk, void* ref);
@@ -62,13 +64,23 @@ extern void start_worker(void *tls, void* worker);
extern MMTk_Heap v8_new_heap(void* calls, size_t heap_size);
extern void* tph_archive_new();
extern void tph_archive_delete(void* arch);
-extern void tph_archive_insert(void* arch, void* object, void* isolate, uint8_t space);
+extern void tph_archive_insert(void* arch, void* object, void* isolate);
extern void tph_archive_remove(void* arch, void* object);
extern void tph_archive_iter_reset(void* arch);
extern void* tph_archive_iter_next(void* arch);
extern void* tph_archive_inner_to_obj(void* arch, void* inner_ptr);
-extern void* tph_archive_obj_to_isolate(void* arch, void* obj_ptr);
-extern uint8_t tph_archive_obj_to_space(void* arch, void* obj_ptr);
+extern int mmtk_in_space(void* mmtk, void* object, size_t space);
+
+extern void mmtk_release_buffer(void** buffer, size_t len, size_t cap);
+
+typedef struct {
+ void** buf;
+ size_t cap;
+} NewBuffer;
+
+typedef NewBuffer (*ProcessEdgesFn)(void** buf, size_t len, size_t cap);
+typedef void* (*TraceRootFn)(void* slot, void* ctx);
+typedef void* (*TraceFieldFn)(void* slot, void* ctx);
typedef struct {
void (*stop_all_mutators) (void *tls);
@@ -85,6 +97,11 @@ typedef struct {
size_t (*get_object_size) (void* object);
void* (*get_mmtk_mutator) (void* tls);
bool (*is_mutator) (void* tls);
+ void (*scan_roots) (TraceRootFn process_edges, void* context, int task_id);
+ void (*scan_objects) (void** objects, size_t count, ProcessEdgesFn process_edges, TraceFieldFn trace_field, void* context, int task_id);
+ void (*process_weak_refs) (TraceRootFn process_edges, void* context);
+ void (*on_move_event) (void* from, void* to, size_t size);
+ void (*process_ephemerons) (TraceRootFn process_edges, void* context, int task_id);
} V8_Upcalls;
/**
@@ -103,8 +120,165 @@ extern void add_phantom_candidate(void* ref, void* referent);
extern void harness_begin(void* ref, void *tls);
extern void harness_end(void* ref);
+extern int mmtk_is_movable(v8::internal::Object o);
+extern void* mmtk_get_forwarded_object(v8::internal::Object o);
+
#ifdef __cplusplus
}
#endif
+// Helpers
+
+namespace v8 {
+namespace internal {
+namespace third_party_heap {
+
+namespace i = v8::internal;
+
+class Impl {
+ public:
+ template <class T>
+ static v8::internal::Object VisitWeakList(v8::internal::Heap* heap, v8::internal::Object list, v8::internal::WeakObjectRetainer* retainer);
+
+ V8_INLINE static void ProcessAllWeakReferences(v8::internal::Heap* heap, v8::internal::WeakObjectRetainer* retainer) {
+ heap->set_native_contexts_list(VisitWeakList<Context>(heap, heap->native_contexts_list(), retainer));
+ heap->set_allocation_sites_list(VisitWeakList<AllocationSite>(heap, heap->allocation_sites_list(), retainer));
+ auto head = VisitWeakList<JSFinalizationRegistry>(heap, heap->dirty_js_finalization_registries_list(), retainer);
+ heap->set_dirty_js_finalization_registries_list(head);
+ if (head.IsUndefined(heap->isolate())) {
+ heap->set_dirty_js_finalization_registries_list_tail(head);
+ }
+ }
+
+ V8_INLINE static void UpdateExternalStringTable(v8::internal::Heap* heap, RootVisitor* external_visitor) {
+ heap->external_string_table_.IterateAll(external_visitor);
+ heap->external_string_table_.CleanUpAll();
+ }
+
+ V8_INLINE static void EphemeronHashTable_RemoveEntry(EphemeronHashTable& table, InternalIndex entry) {
+ table.RemoveEntry(entry);
+ }
+
+ V8_INLINE static void TransitionArray_SetNumberOfTransitions(TransitionArray& array, int number_of_transitions) {
+ array.SetNumberOfTransitions(number_of_transitions);
+ }
+
+ V8_INLINE static int TransitionArray_Capacity(TransitionArray& array) {
+ return array.Capacity();
+ }
+
+ V8_INLINE static Map TransitionsAccessor_GetTargetFromRaw(MaybeObject raw) {
+ return TransitionsAccessor::GetTargetFromRaw(raw);
+ }
+
+ V8_INLINE static bool TransitionsAccessor_HasSimpleTransitionTo(Isolate* isolate, Map parent, Map target, DisallowGarbageCollection* no_gc) {
+ return TransitionsAccessor(isolate, parent, no_gc).HasSimpleTransitionTo(target);
+ }
+
+ V8_INLINE static void FlushNumberStringCache(v8::internal::Heap* heap) {
+ heap->FlushNumberStringCache();
+ }
+
+ V8_INLINE static Heap* get_tp_heap(v8::internal::Heap* heap) {
+ return heap->tp_heap_.get();
+ }
+
+ V8_INLINE Impl(MMTk_Heap mmtk_heap, Isolate* isolate, MMTk_Heap_Archive tph_archive)
+ : mmtk_heap_(mmtk_heap), isolate_(isolate), tph_archive_(tph_archive) {}
+
+ MMTk_Heap mmtk_heap_;
+ v8::internal::Isolate* isolate_;
+ MMTk_Heap_Archive tph_archive_;
+ std::vector<MMTk_Mutator> mutators_ {};
+};
+
+// TODO(wenyuzhao): We only support one heap at the moment.
+extern v8::internal::Heap* v8_heap;
+
+}
+}
+}
+
+namespace mmtk {
+
+namespace i = v8::internal;
+namespace base = v8::base;
+namespace tph = v8::internal::third_party_heap;
+
+// TODO(wenyuzhao): Using of thread_local is incorrect for multiple isolates.
+extern thread_local MMTk_Mutator mutator;
+
+enum class MMTkAllocationSemantic: uint8_t {
+ kDefault = 0,
+ kImmortal = 1,
+ kLos = 2,
+ kCode = 3,
+ kReadOnly = 4,
+ kLargeCode = 5,
+};
+
+V8_INLINE MMTkAllocationSemantic GetAllocationSemanticForV8Space(i::AllocationSpace space) {
+ switch (space) {
+ case i::RO_SPACE: return mmtk::MMTkAllocationSemantic::kReadOnly;
+ case i::OLD_SPACE: return mmtk::MMTkAllocationSemantic::kDefault;
+ case i::CODE_SPACE: return mmtk::MMTkAllocationSemantic::kCode;
+ case i::MAP_SPACE: return mmtk::MMTkAllocationSemantic::kImmortal;
+ case i::LO_SPACE: return mmtk::MMTkAllocationSemantic::kLos;
+ case i::CODE_LO_SPACE: return mmtk::MMTkAllocationSemantic::kLargeCode;
+ default: UNREACHABLE();
+ }
+}
+
+V8_INLINE MMTkAllocationSemantic GetAllocationSemanticForV8AllocationType(i::AllocationType type, bool large) {
+ if (type == i::AllocationType::kCode) {
+ return large ? MMTkAllocationSemantic::kLargeCode : MMTkAllocationSemantic::kCode;
+ } else if (type == i::AllocationType::kReadOnly) {
+ return MMTkAllocationSemantic::kReadOnly;
+ } else if (type == i::AllocationType::kMap) {
+ return MMTkAllocationSemantic::kImmortal;
+ } else {
+ return large ? MMTkAllocationSemantic::kLos : MMTkAllocationSemantic::kDefault;
+ }
+}
+
+V8_INLINE bool is_live(i::HeapObject o) {
+ return mmtk_object_is_live(reinterpret_cast(o.address())) != 0;
+}
+
+V8_INLINE i::MaybeObject to_weakref(i::HeapObject o) {
+ DCHECK(o.IsStrong());
+ return i::MaybeObject::MakeWeak(i::MaybeObject::FromObject(o));
+}
+
+V8_INLINE base::Optional<i::HeapObject> get_forwarded_ref(i::HeapObject o) {
+ auto f = mmtk_get_forwarded_object(o);
+ if (f != nullptr) {
+ auto x = i::HeapObject::cast(i::Object((i::Address) f));
+ return x;
+ }
+ return base::nullopt;
+}
+
+V8_INLINE std::vector<MMTk_Mutator>& get_mmtk_mutators(i::Heap* heap) {
+ return tph::Impl::get_tp_heap(heap)->impl()->mutators_;
+}
+
+V8_INLINE MMTk_Heap get_mmtk_instance(i::Heap* heap) {
+ return tph::Impl::get_tp_heap(heap)->impl()->mmtk_heap_;
+}
+
+V8_INLINE MMTk_Heap get_mmtk_instance(tph::Heap* tp_heap) {
+ return tp_heap->impl()->mmtk_heap_;
+}
+
+V8_INLINE MMTk_Heap_Archive get_object_archive(tph::Heap* tp_heap) {
+ return tp_heap->impl()->tph_archive_;
+}
+
+V8_INLINE i::Isolate* get_isolate(tph::Heap* tp_heap) {
+ return tp_heap->impl()->isolate_;
+}
+
+}
+
#endif // MMTK_H
\ No newline at end of file
diff --git a/v8/third_party/heap/mmtk/mmtkUpcalls.cc b/v8/third_party/heap/mmtk/mmtkUpcalls.cc
index da7ad43..5e11681 100644
--- a/v8/third_party/heap/mmtk/mmtkUpcalls.cc
+++ b/v8/third_party/heap/mmtk/mmtkUpcalls.cc
@@ -1,85 +1,198 @@
-#include "src/base/logging.h"
#include "mmtkUpcalls.h"
-
+#include "src/objects/slots-inl.h"
+#include "src/heap/safepoint.h"
+#include "src/heap/array-buffer-sweeper.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/regexp/regexp.h"
+#include "mmtk-visitors.h"
+#include "main-thread-sync.h"
+#include "log.h"
+#include "weak-refs.h"
+#include "src/codegen/compilation-cache.h"
namespace v8 {
namespace internal {
namespace third_party_heap {
+extern v8::internal::Heap* v8_heap;
+
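+// GC upcalls are invoked from MMTk worker threads; any work that must run on
+// the V8 main thread (safepoints, cache flushing, root scanning) is funnelled
+// through this synchronizer.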
+mmtk::MainThreadSynchronizer* main_thread_synchronizer = new mmtk::MainThreadSynchronizer();
+
static void mmtk_stop_all_mutators(void *tls) {
- UNIMPLEMENTED();
+ MMTK_LOG("[mmtk_stop_all_mutators] START\n");
+ main_thread_synchronizer->RunMainThreadTask([=]() {
+ main_thread_synchronizer->EnterSafepoint(v8_heap);
+ MMTK_LOG("[mmtk_stop_all_mutators] Flush cache\n");
+ v8_heap->isolate()->descriptor_lookup_cache()->Clear();
+ RegExpResultsCache::Clear(v8_heap->string_split_cache());
+ RegExpResultsCache::Clear(v8_heap->regexp_multiple_cache());
+ v8_heap->isolate()->compilation_cache()->MarkCompactPrologue();
+ Impl::FlushNumberStringCache(v8_heap);
+ int len = v8_heap->number_string_cache().length();
+ for (int i = 0; i < len; i++) {
+ v8_heap->number_string_cache().set_undefined(i);
+ }
+ });
+ MMTK_LOG("[mmtk_stop_all_mutators] END\n");
+}
+
+static void mmtk_process_weak_refs(TraceRootFn trace_root, void* context) {
+ main_thread_synchronizer->RunMainThreadTask([=]() {
+ MMTK_LOG("[mmtk_process_weak_refs]\n");
+ mmtk::global_weakref_processor->trace_ = [=](void* slot) {
+ trace_root(slot, context);
+ };
+ mmtk::global_weakref_processor->ClearNonLiveReferences();
+ });
}
static void mmtk_resume_mutators(void *tls) {
- UNIMPLEMENTED();
+ MMTK_LOG("[mmtk_resume_mutators] START\n");
+ main_thread_synchronizer->RunMainThreadTask([=]() {
+ MMTK_LOG("[mmtk_resume_mutators] Verify heap\n");
+ mmtk::MMTkHeapVerifier::Verify(v8_heap);
+ MMTK_LOG("[mmtk_resume_mutators] Flush cache\n");
+ v8_heap->isolate()->inner_pointer_to_code_cache()->Flush();
+ // The stub caches are not traversed during GC; clear them to force
+ // their lazy re-initialization. This must be done after the
+ // GC, because it relies on the new address of certain old space
+ // objects (empty string, illegal builtin).
+ v8_heap->isolate()->load_stub_cache()->Clear();
+ v8_heap->isolate()->store_stub_cache()->Clear();
+ // v8_heap->array_buffer_sweeper()->RequestSweepFull();
+ // Some code objects were marked for deoptimization during the GC.
+ // Deoptimizer::DeoptimizeMarkedCode(v8_heap->isolate());
+ main_thread_synchronizer->ExitSafepoint();
+ });
+ main_thread_synchronizer->WakeUp();
+ MMTK_LOG("[mmtk_resume_mutators] END\n");
}
static void mmtk_spawn_collector_thread(void* tls, void* ctx) {
- UNIMPLEMENTED();
+ UNIMPLEMENTED();
}
static void mmtk_block_for_gc() {
- UNIMPLEMENTED();
+ v8_heap->SetGCState(v8::internal::Heap::MARK_COMPACT);
+ main_thread_synchronizer->Block();
+ v8_heap->SetGCState(v8::internal::Heap::NOT_IN_GC);
}
static void* mmtk_get_mmtk_mutator(void* tls) {
- UNIMPLEMENTED();
+ UNIMPLEMENTED();
}
static bool mmtk_is_mutator(void* tls) {
- return false;
+ return mmtk::mutator != nullptr;
}
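+// Cursor for the mutator-iteration protocol below: mmtk_get_next_mutator()
+// hands out the registered mutators one by one and
+// mmtk_reset_mutator_iterator() rewinds the cursor before a new pass.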
+size_t index = 0;
+
static void* mmtk_get_next_mutator() {
- UNIMPLEMENTED();
+ auto& mutators = mmtk::get_mmtk_mutators(v8_heap);
+ if (index >= mutators.size()) return nullptr;
+ return mutators[index++];
}
static void mmtk_reset_mutator_iterator() {
- UNIMPLEMENTED();
+ index = 0;
}
static void mmtk_compute_global_roots(void* trace, void* tls) {
- UNIMPLEMENTED();
+ UNIMPLEMENTED();
}
static void mmtk_compute_static_roots(void* trace, void* tls) {
- UNIMPLEMENTED();
+ UNIMPLEMENTED();
}
static void mmtk_compute_thread_roots(void* trace, void* tls) {
- UNIMPLEMENTED();
+ UNIMPLEMENTED();
}
static void mmtk_scan_object(void* trace, void* object, void* tls) {
- UNIMPLEMENTED();
+ UNIMPLEMENTED();
}
static void mmtk_dump_object(void* object) {
- UNIMPLEMENTED();
+ UNIMPLEMENTED();
}
static size_t mmtk_get_object_size(void* object) {
- UNIMPLEMENTED();
+ auto o = HeapObject::FromAddress((Address) object);
+ auto m = o.map();
+ return o.SizeFromMap(m);
+}
+
+static void mmtk_on_move_event(void* from_address, void* to_address, size_t size) {
+ auto from = HeapObject::FromAddress((Address) from_address);
+ auto to = HeapObject::FromAddress((Address) to_address);
+ v8_heap->OnMoveEvent(to, from, (int) size);
+}
+
+static void mmtk_scan_roots(TraceRootFn trace_root, void* context, int task_id) {
+ main_thread_synchronizer->RunMainThreadTask([=]() {
+ mmtk::MMTkRootVisitor root_visitor(v8_heap, trace_root, context, kMainThreadTask);
+ mmtk::MMTkCustomRootBodyVisitor custom_root_body_visitor(v8_heap, trace_root, context, kMainThreadTask);
+ v8_heap->IterateRoots(&root_visitor, {});
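+ // Treat the code of the topmost optimized frame as a root: if it cannot be
+ // deoptimized at the current pc, trace it (and its body) so the running
+ // frame stays alive and gets updated.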
+ for (i::StackFrameIterator it(v8_heap->isolate(), v8_heap->isolate()->thread_local_top()); !it.done(); it.Advance()) {
+ if (it.frame()->is_unoptimized()) break;
+ if (it.frame()->type() == StackFrame::OPTIMIZED) {
+ auto code = it.frame()->LookupCode();
+ if (!code.CanDeoptAt(v8_heap->isolate(), it.frame()->pc())) {
+ trace_root((void*) &code, context);
+ v8::internal::Code::BodyDescriptor::IterateBody(code.map(), code, &custom_root_body_visitor);
+ }
+ break;
+ }
+ }
+ });
+}
+
+static void mmtk_scan_objects(void** objects, size_t count, ProcessEdgesFn process_edges, TraceFieldFn trace_field, void* context, int task_id) {
+ mmtk::MMTkEdgeVisitor visitor(v8_heap, process_edges, trace_field, context, task_id + 1);
+ for (size_t i = 0; i < count; i++) {
+ auto ptr = *(objects + i);
+ DCHECK_EQ(((Address) ptr) & 1, 0);
+ auto obj = HeapObject::FromAddress(((Address) ptr));
+ visitor.Visit(obj);
+ }
+}
+
+static void mmtk_process_ephemerons(TraceRootFn trace_root, void* context, int task_id) {
+ main_thread_synchronizer->RunMainThreadTask([=]() {
+ mmtk::global_weakref_processor->trace_ = [=](void* slot) {
+ trace_root(slot, context);
+ };
+ mmtk::global_weakref_processor->ProcessEphemerons();
+ });
}
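+// Upcall table passed to MMTk core. The initializer order must match the
+// field order of the V8_Upcalls struct declared on the Rust binding side.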
V8_Upcalls mmtk_upcalls = {
- mmtk_stop_all_mutators,
- mmtk_resume_mutators,
- mmtk_spawn_collector_thread,
- mmtk_block_for_gc,
- mmtk_get_next_mutator,
- mmtk_reset_mutator_iterator,
- mmtk_compute_static_roots,
- mmtk_compute_global_roots,
- mmtk_compute_thread_roots,
- mmtk_scan_object,
- mmtk_dump_object,
- mmtk_get_object_size,
- mmtk_get_mmtk_mutator,
- mmtk_is_mutator,
+ mmtk_stop_all_mutators,
+ mmtk_resume_mutators,
+ mmtk_spawn_collector_thread,
+ mmtk_block_for_gc,
+ mmtk_get_next_mutator,
+ mmtk_reset_mutator_iterator,
+ mmtk_compute_static_roots,
+ mmtk_compute_global_roots,
+ mmtk_compute_thread_roots,
+ mmtk_scan_object,
+ mmtk_dump_object,
+ mmtk_get_object_size,
+ mmtk_get_mmtk_mutator,
+ mmtk_is_mutator,
+ mmtk_scan_roots,
+ mmtk_scan_objects,
+ mmtk_process_weak_refs,
+ mmtk_on_move_event,
+ mmtk_process_ephemerons,
};
-
-} // namespace third_party_heap
+} // namespace third_party_heap
} // namespace internal
-} // namespace v8
\ No newline at end of file
+} // namespace v8
\ No newline at end of file
diff --git a/v8/third_party/heap/mmtk/third-party-heap-impl.cc b/v8/third_party/heap/mmtk/third-party-heap-impl.cc
new file mode 100644
index 0000000..dd074ed
--- /dev/null
+++ b/v8/third_party/heap/mmtk/third-party-heap-impl.cc
@@ -0,0 +1,230 @@
+#include "mmtk.h"
+#include "src/heap/heap.h"
+#include "src/objects/string-table.h"
+#include "src/objects/visitors.h"
+#include "src/objects/transitions-inl.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/heap/mark-compact-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace third_party_heap {
+
+namespace {
+
+using namespace v8::internal;
+using Heap = v8::internal::Heap;
+
+static bool MustRecordSlots(Heap* heap) {
+ return false;
+}
+
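+// Weak-list walking machinery mirroring V8's objects-visiting: each
+// WeakListVisitor<T> specialization below describes how to read, update and
+// clear the "weak next" link embedded in objects of type T.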
+template <typename T>
+struct WeakListVisitor;
+
+template <typename T>
+Object InternalVisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
+ Object undefined = ReadOnlyRoots(heap).undefined_value();
+ Object head = undefined;
+ T tail;
+ bool record_slots = MustRecordSlots(heap);
+
+ while (list != undefined) {
+ // Check whether to keep the candidate in the list.
+ T candidate = T::cast(list);
+
+ Object retained = retainer->RetainAs(list);
+
+ // Move to the next element before the WeakNext is cleared.
+ list = WeakListVisitor<T>::WeakNext(retained != Object() ? T::cast(retained)
+ : candidate);
+
+ if (retained != Object()) {
+ if (head == undefined) {
+ // First element in the list.
+ head = retained;
+ } else {
+ // Subsequent elements in the list.
+ DCHECK(!tail.is_null());
+ WeakListVisitor<T>::SetWeakNext(tail, retained);
+ if (record_slots) {
+ HeapObject slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
+ int slot_offset = WeakListVisitor<T>::WeakNextOffset();
+ ObjectSlot slot = slot_holder.RawField(slot_offset);
+ MarkCompactCollector::RecordSlot(slot_holder, slot,
+ HeapObject::cast(retained));
+ }
+ }
+ // Retained object is new tail.
+ DCHECK(!retained.IsUndefined(heap->isolate()));
+ candidate = T::cast(retained);
+ tail = candidate;
+
+ // tail is a live object, visit it.
+ WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
+
+ } else {
+ WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
+ }
+ }
+
+ // Terminate the list if there is one or more elements.
+ if (!tail.is_null()) WeakListVisitor<T>::SetWeakNext(tail, undefined);
+ return head;
+}
+
+template <typename T>
+static void ClearWeakList(Heap* heap, Object list) {
+ Object undefined = ReadOnlyRoots(heap).undefined_value();
+ while (list != undefined) {
+ T candidate = T::cast(list);
+ list = WeakListVisitor<T>::WeakNext(candidate);
+ WeakListVisitor<T>::SetWeakNext(candidate, undefined);
+ }
+}
+
+template <>
+struct WeakListVisitor<CodeT> {
+ static void SetWeakNext(CodeT code, Object next) {
+ CodeDataContainerFromCodeT(code).set_next_code_link(
+ next, UPDATE_WEAK_WRITE_BARRIER);
+ }
+
+ static Object WeakNext(CodeT code) {
+ return CodeDataContainerFromCodeT(code).next_code_link();
+ }
+
+ static HeapObject WeakNextHolder(CodeT code) {
+ return CodeDataContainerFromCodeT(code);
+ }
+
+ static int WeakNextOffset() { return CodeDataContainer::kNextCodeLinkOffset; }
+
+ static void VisitLiveObject(Heap*, CodeT, WeakObjectRetainer*) {}
+
+ static void VisitPhantomObject(Heap* heap, CodeT code) {
+ // Even though the code is dying, its code_data_container can still be
+ // alive. Clear the next_code_link slot to avoid a dangling pointer.
+ SetWeakNext(code, ReadOnlyRoots(heap).undefined_value());
+ }
+};
+
+template <>
+struct WeakListVisitor<Context> {
+ static void SetWeakNext(Context context, Object next) {
+ context.set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WEAK_WRITE_BARRIER);
+ }
+
+ static Object WeakNext(Context context) {
+ return context.next_context_link();
+ }
+
+ static HeapObject WeakNextHolder(Context context) { return context; }
+
+ static int WeakNextOffset() {
+ return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
+ }
+
+ static void VisitLiveObject(Heap* heap, Context context,
+ WeakObjectRetainer* retainer) {
+ if (heap->gc_state() == Heap::MARK_COMPACT) {
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ // Record the slots of the weak entries in the native context.
+ for (int idx = Context::FIRST_WEAK_SLOT;
+ idx < Context::NATIVE_CONTEXT_SLOTS; ++idx) {
+ ObjectSlot slot = context.RawField(Context::OffsetOfElementAt(idx));
+ MarkCompactCollector::RecordSlot(context, slot,
+ HeapObject::cast(*slot));
+ }
+ }
+ // Code objects are always allocated in Code space, we do not have to
+ // visit them during scavenges.
+ DoWeakList<CodeT>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
+ DoWeakList<CodeT>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
+ }
+ }
+
+ template <class T>
+ static void DoWeakList(i::Heap* heap, Context context,
+ WeakObjectRetainer* retainer, int index) {
+ // Visit the weak list, removing dead intermediate elements.
+ Object list_head = VisitWeakList<T>(heap, context.get(index), retainer);
+
+ // Update the list head.
+ context.set(index, list_head, UPDATE_WRITE_BARRIER);
+
+ if (MustRecordSlots(heap)) {
+ // Record the updated slot if necessary.
+ ObjectSlot head_slot = context.RawField(FixedArray::SizeFor(index));
+ heap->mark_compact_collector()->RecordSlot(context, head_slot,
+ HeapObject::cast(list_head));
+ }
+ }
+
+ static void VisitPhantomObject(Heap* heap, Context context) {
+ ClearWeakList<CodeT>(heap, context.get(Context::OPTIMIZED_CODE_LIST));
+ ClearWeakList<CodeT>(heap, context.get(Context::DEOPTIMIZED_CODE_LIST));
+ }
+};
+
+
+template <>
+struct WeakListVisitor<AllocationSite> {
+ static void SetWeakNext(AllocationSite obj, Object next) {
+ obj.set_weak_next(next, UPDATE_WEAK_WRITE_BARRIER);
+ }
+
+ static Object WeakNext(AllocationSite obj) { return obj.weak_next(); }
+
+ static HeapObject WeakNextHolder(AllocationSite obj) { return obj; }
+
+ static int WeakNextOffset() { return AllocationSite::kWeakNextOffset; }
+
+ static void VisitLiveObject(Heap*, AllocationSite, WeakObjectRetainer*) {}
+
+ static void VisitPhantomObject(Heap*, AllocationSite) {}
+};
+
+template <>
+struct WeakListVisitor<JSFinalizationRegistry> {
+ static void SetWeakNext(JSFinalizationRegistry obj, Object next) {
+ obj.set_next_dirty(next, UPDATE_WEAK_WRITE_BARRIER);
+ }
+
+ static Object WeakNext(JSFinalizationRegistry obj) {
+ return obj.next_dirty();
+ }
+
+ static HeapObject WeakNextHolder(JSFinalizationRegistry obj) { return obj; }
+
+ static int WeakNextOffset() {
+ return JSFinalizationRegistry::kNextDirtyOffset;
+ }
+
+ static void VisitLiveObject(Heap* heap, JSFinalizationRegistry obj,
+ WeakObjectRetainer*) {
+ heap->set_dirty_js_finalization_registries_list_tail(obj);
+ }
+
+ static void VisitPhantomObject(Heap*, JSFinalizationRegistry) {}
+};
+
+}
+
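+// Out-of-line definition of the Impl::VisitWeakList<T> member template,
+// followed by the explicit instantiations needed by callers outside this
+// translation unit.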
+template <class T>
+i::Object Impl::VisitWeakList(i::Heap* heap, i::Object list, i::WeakObjectRetainer* retainer) {
+ return InternalVisitWeakList<T>(heap, list, retainer);
+}
+
+template i::Object Impl::VisitWeakList<i::Context>(i::Heap* heap, i::Object list, i::WeakObjectRetainer* retainer);
+
+template i::Object Impl::VisitWeakList<i::AllocationSite>(i::Heap* heap, i::Object list, i::WeakObjectRetainer* retainer);
+
+template i::Object Impl::VisitWeakList<i::JSFinalizationRegistry>(i::Heap* heap, i::Object list, i::WeakObjectRetainer* retainer);
+
+}
+}
+}
\ No newline at end of file
diff --git a/v8/third_party/heap/mmtk/weak-refs.h b/v8/third_party/heap/mmtk/weak-refs.h
new file mode 100644
index 0000000..482ae1d
--- /dev/null
+++ b/v8/third_party/heap/mmtk/weak-refs.h
@@ -0,0 +1,84 @@
+#ifndef MMTK_WEAK_REFS_H
+#define MMTK_WEAK_REFS_H
+
+#include "src/heap/heap.h"
+#include "src/objects/string-table.h"
+#include "src/objects/visitors.h"
+#include "src/objects/transitions-inl.h"
+#include "src/heap/objects-visiting.h"
+
+// TODO: Enable weak ref processing
+// #define WEAKREF_PROCESSING
+
+#ifdef WEAKREF_PROCESSING
+#define WEAKREF_PROCESSING_BOOL true
+#else
+#define WEAKREF_PROCESSING_BOOL false
+#endif
+
+namespace v8 {
+namespace internal {
+namespace third_party_heap {
+
+extern v8::internal::Heap* v8_heap;
+
+}
+}
+}
+
+namespace mmtk {
+
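+// WeakObjectRetainer used when pruning V8's weak lists after a collection:
+// live objects are retained (following MMTk's forwarding pointer if they were
+// moved), dead ones are returned as a null Object so the walker unlinks them.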
+class MMTkWeakObjectRetainer: public i::WeakObjectRetainer {
+ public:
+ virtual i::Object RetainAs(i::Object object) override final {
+ if (object == i::Object()) return object;
+ auto heap_object = i::HeapObject::cast(object);
+ if (is_live(heap_object)) {
+ auto f = mmtk_get_forwarded_object(heap_object);
+ if (f != nullptr) return i::Object((i::Address) f);
+ return object;
+ } else {
+ return i::Object();
+ }
+ }
+};
+
+class WeakRefs {
+ static constexpr int kMainThreadTask = 0;
+
+ i::WeakObjects weak_objects_;
+ bool have_code_to_deoptimize_ = false;
+
+ public:
+ void ProcessEphemerons() {
+ // Do nothing at the moment.
+ // TODO: Fix this
+ }
+
+ static i::Isolate* isolate() {
+ return heap()->isolate();
+ }
+ static i::Heap* heap() {
+ return tph::v8_heap;
+ }
+ i::WeakObjects* weak_objects() {
+ return &weak_objects_;
+ }
+
+ void ClearNonLiveReferences() {
+ have_code_to_deoptimize_ = false;
+ {
+ MMTkWeakObjectRetainer retainer;
+ tph::Impl::ProcessAllWeakReferences(heap(), &retainer);
+ }
+ DCHECK(!have_code_to_deoptimize_);
+ }
+
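+ // Tracing callback installed by the weak-ref upcalls before
+ // ClearNonLiveReferences / ProcessEphemerons run; it forwards each slot to
+ // MMTk's trace_root function for the current GC.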
+ std::function<void(void*)> trace_ = [](void*) { UNREACHABLE(); };
+};
+
+WeakRefs* global_weakref_processor = new WeakRefs();
+
+}
+
+#endif // MMTK_WEAK_REFS_H
\ No newline at end of file