From cfc5173044fa178ae8a0e90b47eeb0f26989924e Mon Sep 17 00:00:00 2001
From: Eduardo Souza
Date: Wed, 18 Jan 2023 04:46:22 +0000
Subject: [PATCH 1/2] Remove Julia's bigvalue_t and move the decision to
 allocate large objects to MMTk

---
 julia/mmtk_julia.c       | 168 ++++++++++++++------------------------
 mmtk/src/api.rs          |  59 ++++++++++----
 mmtk/src/lib.rs          |   4 +
 mmtk/src/object_model.rs |  19 +----
 4 files changed, 110 insertions(+), 140 deletions(-)

diff --git a/julia/mmtk_julia.c b/julia/mmtk_julia.c
index de885e37..8fc79c3e 100644
--- a/julia/mmtk_julia.c
+++ b/julia/mmtk_julia.c
@@ -35,28 +35,28 @@ JL_DLLEXPORT void (jl_mmtk_harness_end)(void)
     harness_end();
 }
 
-JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default_llvm(int pool_offset, int osize)
+JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default_llvm(int pool_offset, size_t osize)
 {
     jl_ptls_t ptls = (jl_ptls_t)jl_get_ptls_states();
 
     // safepoint
-    if (__unlikely(jl_atomic_load(&jl_gc_running))) {
-        int8_t old_state = ptls->gc_state;
-        jl_atomic_store_release(&ptls->gc_state, JL_GC_STATE_WAITING);
-        jl_safepoint_wait_gc();
-        jl_atomic_store_release(&ptls->gc_state, old_state);
-    }
+    jl_gc_safepoint_(ptls);
 
     jl_value_t *v;
-
-    ptls->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls->cursor;
+    jl_taggedvalue_t *v_tagged;
 
     // v needs to be 16 byte aligned, therefore v_tagged needs to be offset accordingly to consider the size of header
-    jl_taggedvalue_t *v_tagged =
-        (jl_taggedvalue_t *) alloc(ptls->mmtk_mutator_ptr, osize, 16, 8, 0);
+    if (osize >= 4096) {
+        v_tagged = (jl_taggedvalue_t *) alloc(ptls->mmtk_mutator_ptr, osize, 16, 8, 0);
+    } else {
+        ptls->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls->cursor;
 
-    ptls->cursor = ptls->mmtk_mutator_ptr->allocators.immix[0].cursor;
-    ptls->limit = ptls->mmtk_mutator_ptr->allocators.immix[0].limit;
+        v_tagged =
+            (jl_taggedvalue_t *) alloc(ptls->mmtk_mutator_ptr, osize, 16, 8, 0);
+
+        ptls->cursor = ptls->mmtk_mutator_ptr->allocators.immix[0].cursor;
+        ptls->limit = ptls->mmtk_mutator_ptr->allocators.immix[0].limit;
+    }
 
     v = jl_valueof(v_tagged);
 
@@ -68,32 +68,31 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default_llvm(int pool_offset, int osiz
 }
 
 STATIC_INLINE void* alloc_default_object(jl_ptls_t ptls, size_t size, int offset) {
-    int64_t delta = (-offset -(int64_t)(ptls->cursor)) & 15; // aligned to 16
-    uint64_t aligned_addr = (uint64_t)ptls->cursor + delta;
-
-    if(__unlikely(aligned_addr+size > (uint64_t)ptls->limit)) {
-        jl_ptls_t ptls2 = (jl_ptls_t)jl_get_ptls_states();
-        ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls2->cursor;
-        void* res = alloc(ptls2->mmtk_mutator_ptr, size, 16, offset, 0);
-        ptls2->cursor = ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor;
-        ptls2->limit = ptls2->mmtk_mutator_ptr->allocators.immix[0].limit;
-        return res;
+    if (size >= 4096) {
+        void* res = alloc(ptls->mmtk_mutator_ptr, size, 16, offset, 0);
+        return res;
     } else {
-        ptls->cursor = (void*) (aligned_addr+size);
-        return (void*) aligned_addr;
+        int64_t delta = (-offset -(int64_t)(ptls->cursor)) & 15; // aligned to 16
+        uint64_t aligned_addr = (uint64_t)ptls->cursor + delta;
+
+        if(__unlikely(aligned_addr+size > (uint64_t)ptls->limit)) {
+            jl_ptls_t ptls2 = (jl_ptls_t)jl_get_ptls_states();
+            ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls2->cursor;
+            void* res = alloc(ptls2->mmtk_mutator_ptr, size, 16, offset, 0);
+            ptls2->cursor = ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor;
+            ptls2->limit = ptls2->mmtk_mutator_ptr->allocators.immix[0].limit;
+            return res;
+        } else {
+            ptls->cursor = (void*) (aligned_addr+size);
+            return (void*) aligned_addr;
+        }
     }
 }
 
-JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, int pool_offset,
-                                                  int osize, void *ty)
+JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, size_t osize, void *ty)
 {
     // safepoint
-    if (__unlikely(jl_atomic_load(&jl_gc_running))) {
-        int8_t old_state = ptls->gc_state;
-        jl_atomic_store_release(&ptls->gc_state, JL_GC_STATE_WAITING);
-        jl_safepoint_wait_gc();
-        jl_atomic_store_release(&ptls->gc_state, old_state);
-    }
+    jl_gc_safepoint_(ptls);
 
     jl_value_t *v;
     if ((uintptr_t)ty != jl_buff_tag) {
@@ -116,43 +115,6 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, int pool_offse
     return v;
 }
 
-JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_big(jl_ptls_t ptls, size_t sz)
-{
-    // safepoint
-    if (__unlikely(jl_atomic_load(&jl_gc_running))) {
-        int8_t old_state = ptls->gc_state;
-        jl_atomic_store_release(&ptls->gc_state, JL_GC_STATE_WAITING);
-        jl_safepoint_wait_gc();
-        jl_atomic_store_release(&ptls->gc_state, old_state);
-    }
-
-    size_t offs = offsetof(bigval_t, header);
-    assert(sz >= sizeof(jl_taggedvalue_t) && "sz must include tag");
-    static_assert(offsetof(bigval_t, header) >= sizeof(void*), "Empty bigval header?");
-    static_assert(sizeof(bigval_t) % JL_HEAP_ALIGNMENT == 0, "");
-    size_t allocsz = LLT_ALIGN(sz + offs, JL_CACHE_BYTE_ALIGNMENT);
-    if (allocsz < sz) { // overflow in adding offs, size was "negative"
-        assert(0 && "Error when allocating big object");
-        jl_throw(jl_memory_exception);
-    }
-
-    bigval_t *v = (bigval_t*)alloc_large(ptls->mmtk_mutator_ptr, allocsz, JL_CACHE_BYTE_ALIGNMENT, 0, 2);
-
-    if (v == NULL) {
-        assert(0 && "Allocation failed");
-        jl_throw(jl_memory_exception);
-    }
-    v->sz = allocsz;
-
-    ptls->gc_num.allocd += allocsz;
-    ptls->gc_num.bigalloc++;
-
-    jl_value_t *result = jl_valueof(&v->header);
-    post_alloc(ptls->mmtk_mutator_ptr, result, allocsz, 2);
-
-    return result;
-}
-
 static void mmtk_sweep_malloced_arrays(void) JL_NOTSAFEPOINT
 {
     gc_time_mallocd_array_start();
@@ -326,7 +288,7 @@ size_t get_so_size(void* obj)
         jl_array_t* a = (jl_array_t*) obj;
         if (a->flags.how == 0) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
-            int tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
+            size_t tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
             if (object_is_managed_by_mmtk(a->data)) {
                 size_t pre_data_bytes = ((size_t)a->data - a->offset*a->elsize) - (size_t)a;
                 if (pre_data_bytes > 0 && pre_data_bytes <= ARRAY_INLINE_NBYTES) {
@@ -334,60 +296,38 @@ size_t get_so_size(void* obj)
                     tsz += jl_array_nbytes(a);
                 }
             }
-            int pool_id = jl_gc_szclass(tsz + sizeof(jl_taggedvalue_t));
-            int osize = jl_gc_sizeclasses[pool_id];
-            return osize;
+            return tsz + sizeof(jl_taggedvalue_t);
         } else if (a->flags.how == 1) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
-            int tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
-            int pool_id = jl_gc_szclass(tsz + sizeof(jl_taggedvalue_t));
-            int osize = jl_gc_sizeclasses[pool_id];
-
-            return osize;
+            size_t tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
+            return tsz + sizeof(jl_taggedvalue_t);
         } else if (a->flags.how == 2) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
-            int tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
-            int pool_id = jl_gc_szclass(tsz + sizeof(jl_taggedvalue_t));
-            int osize = jl_gc_sizeclasses[pool_id];
-
-            return osize;
+            size_t tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
+            return tsz + sizeof(jl_taggedvalue_t);
         } else if (a->flags.how == 3) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
-            int tsz = sizeof(jl_array_t) + ndimwords * sizeof(size_t) + sizeof(void*);
-            int pool_id = jl_gc_szclass(tsz + sizeof(jl_taggedvalue_t));
-            int osize = jl_gc_sizeclasses[pool_id];
-            return osize;
+            size_t tsz = sizeof(jl_array_t) + ndimwords * sizeof(size_t) + sizeof(void*);
+            return tsz + sizeof(jl_taggedvalue_t);
         }
     } else if (vt == jl_simplevector_type) {
         size_t l = jl_svec_len(obj);
-        int pool_id = jl_gc_szclass(l * sizeof(void*) + sizeof(jl_svec_t) + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return l * sizeof(void*) + sizeof(jl_svec_t) + sizeof(jl_taggedvalue_t);
     } else if (vt == jl_module_type) {
         size_t dtsz = sizeof(jl_module_t);
-        int pool_id = jl_gc_szclass(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     } else if (vt == jl_task_type) {
         size_t dtsz = sizeof(jl_task_t);
-        int pool_id = jl_gc_szclass(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     } else if (vt == jl_string_type) {
         size_t dtsz = jl_string_len(obj) + sizeof(size_t) + 1;
-        int pool_id = jl_gc_szclass_align8(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
-    } if (vt == jl_method_type) {
+        return dtsz + sizeof(jl_taggedvalue_t);
+    } else if (vt == jl_method_type) {
         size_t dtsz = sizeof(jl_method_t);
-        int pool_id = jl_gc_szclass(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     } else {
         size_t dtsz = jl_datatype_size(vt);
-        int pool_id = jl_gc_szclass(dtsz + sizeof(jl_taggedvalue_t));
-        int osize = jl_gc_sizeclasses[pool_id];
-        return osize;
+        return dtsz + sizeof(jl_taggedvalue_t);
     }
 }
 
@@ -405,6 +345,20 @@ static inline void mmtk_jl_run_finalizers_in_list(bool at_exit) {
     mmtk_run_finalizers(at_exit);
 }
 
+void mmtk_jl_run_pending_finalizers(void* ptls) {
+    if (!((jl_ptls_t)ptls)->in_finalizer && !((jl_ptls_t)ptls)->finalizers_inhibited && ((jl_ptls_t)ptls)->locks.len == 0) {
+        jl_task_t *ct = jl_current_task;
+        ((jl_ptls_t)ptls)->in_finalizer = 1;
+        uint64_t save_rngState[4];
+        memcpy(&save_rngState[0], &ct->rngState[0], sizeof(save_rngState));
+        jl_rng_split(ct->rngState, finalizer_rngState);
+        jl_atomic_store_relaxed(&jl_gc_have_pending_finalizers, 0);
+        mmtk_jl_run_finalizers_in_list(false);
+        memcpy(&ct->rngState[0], &save_rngState[0], sizeof(save_rngState));
+        ((jl_ptls_t)ptls)->in_finalizer = 0;
+    }
+}
+
 void mmtk_jl_run_finalizers(void* ptls) {
     // Only disable finalizers on current thread
     // Doing this on all threads is racy (it's impossible to check
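Note on the get_so_size() change above: the upcall used to round every request up to a Julia pool size class (jl_gc_szclass() followed by a jl_gc_sizeclasses[] lookup) before reporting it; after this patch it reports the exact size, tag included, and leaves any size-class policy to MMTk. A toy Rust sketch of the rounding that was removed (the table below is hypothetical, not Julia's real jl_gc_sizeclasses):

// Toy illustration of the rounding that get_so_size() no longer performs.
const SIZE_CLASSES: [usize; 4] = [64, 256, 1024, 2032]; // hypothetical table

// Old behavior: report the smallest pool size class that fits the object.
fn round_to_size_class(sz: usize) -> Option<usize> {
    SIZE_CLASSES.iter().copied().find(|&c| sz <= c)
}

fn main() {
    // Before the patch: a 100-byte object was reported as its 256-byte class.
    assert_eq!(round_to_size_class(100), Some(256));
    // After the patch: the upcall reports the exact 100 bytes and MMTk
    // applies its own size-class policy internally.
}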
diff --git a/mmtk/src/api.rs b/mmtk/src/api.rs
index 8e92bea0..109be136 100644
--- a/mmtk/src/api.rs
+++ b/mmtk/src/api.rs
@@ -26,6 +26,9 @@ use std::ffi::CStr;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::RwLockWriteGuard;
 
+#[cfg(feature = "immix")]
+use crate::MAX_IMMIX_OBJECT_SIZE;
+
 #[no_mangle]
 pub extern "C" fn gc_init(
     min_heap_size: usize,
@@ -136,6 +139,7 @@ pub extern "C" fn destroy_mutator(mutator: *mut Mutator<JuliaVM>) {
     memory_manager::destroy_mutator(unsafe { &mut *mutator })
 }
 
+#[cfg(feature = "immix")]
 #[no_mangle]
 pub extern "C" fn alloc(
     mutator: *mut Mutator<JuliaVM>,
@@ -144,25 +148,33 @@ pub extern "C" fn alloc(
     offset: isize,
     semantics: AllocationSemantics,
 ) -> Address {
-    memory_manager::alloc::<JuliaVM>(unsafe { &mut *mutator }, size, align, offset, semantics)
+    if size >= MAX_IMMIX_OBJECT_SIZE {
+        // MAX_IMMIX_OBJECT_SIZE
+        memory_manager::alloc::<JuliaVM>(
+            unsafe { &mut *mutator },
+            size,
+            64,
+            offset,
+            AllocationSemantics::Los,
+        )
+    } else {
+        memory_manager::alloc::<JuliaVM>(unsafe { &mut *mutator }, size, align, offset, semantics)
+    }
 }
 
+#[cfg(not(feature = "immix"))]
 #[no_mangle]
-pub extern "C" fn alloc_large(
+pub extern "C" fn alloc(
     mutator: *mut Mutator<JuliaVM>,
     size: usize,
     align: usize,
     offset: isize,
+    semantics: AllocationSemantics,
 ) -> Address {
-    memory_manager::alloc::<JuliaVM>(
-        unsafe { &mut *mutator },
-        size,
-        align,
-        offset,
-        AllocationSemantics::Los,
-    )
+    memory_manager::alloc::<JuliaVM>(unsafe { &mut *mutator }, size, align, offset, semantics)
 }
 
+#[cfg(feature = "immix")]
 #[no_mangle]
 pub extern "C" fn post_alloc(
     mutator: *mut Mutator<JuliaVM>,
@@ -170,16 +182,31 @@ pub extern "C" fn post_alloc(
     bytes: usize,
     semantics: AllocationSemantics,
 ) {
-    match semantics {
-        AllocationSemantics::Los => {
-            memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
-        }
-        _ => {
-            memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
-        }
+    if bytes >= MAX_IMMIX_OBJECT_SIZE {
+        // MAX_IMMIX_OBJECT_SIZE
+        memory_manager::post_alloc::<JuliaVM>(
+            unsafe { &mut *mutator },
+            refer,
+            bytes,
+            AllocationSemantics::Los,
+        )
+    } else {
+        memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
     }
 }
 
+#[cfg(not(feature = "immix"))]
+#[no_mangle]
+pub extern "C" fn post_alloc(
+    mutator: *mut Mutator<JuliaVM>,
+    refer: ObjectReference,
+    bytes: usize,
+    semantics: AllocationSemantics,
+) {
+    memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
+}
+
+
 #[no_mangle]
 pub extern "C" fn will_never_move(object: ObjectReference) -> bool {
     !object.is_movable()
diff --git a/mmtk/src/lib.rs b/mmtk/src/lib.rs
index 71ff205d..82d0fa76 100644
--- a/mmtk/src/lib.rs
+++ b/mmtk/src/lib.rs
@@ -75,6 +75,10 @@ extern "C" {
     pub static BI_METADATA_END_ALIGNED_UP: usize;
 }
 
+#[cfg(feature = "immix")]
+#[no_mangle]
+pub static MAX_IMMIX_OBJECT_SIZE : usize = mmtk::plan::IMMIX_CONSTRAINTS.max_non_los_default_alloc_bytes;
+
 #[no_mangle]
 pub static BLOCK_FOR_GC: AtomicBool = AtomicBool::new(false);
 
diff --git a/mmtk/src/object_model.rs b/mmtk/src/object_model.rs
index ba1fe0ec..e1c6d2dd 100644
--- a/mmtk/src/object_model.rs
+++ b/mmtk/src/object_model.rs
@@ -67,13 +67,7 @@ impl ObjectModel<JuliaVM> for VMObjectModel {
     }
 
     fn get_current_size(object: ObjectReference) -> usize {
-        let size = if is_object_in_los(&object) {
-            unsafe { ((*UPCALLS).get_lo_size)(object) }
-        } else {
-            let obj_size = unsafe { ((*UPCALLS).get_so_size)(object) };
-            obj_size
-        };
-
+        let size = unsafe { ((*UPCALLS).get_so_size)(object) };
         size as usize
     }
 
@@ -99,12 +93,7 @@
 
     #[inline(always)]
     fn ref_to_object_start(object: ObjectReference) -> Address {
-        let res = if is_object_in_los(&object) {
-            object.to_raw_address() - 48
-        } else {
-            unsafe { ((*UPCALLS).get_object_start_ref)(object) }
-        };
-        res
+        unsafe { ((*UPCALLS).get_object_start_ref)(object) }
     }
 
     #[inline(always)]
@@ -127,10 +116,6 @@
     }
 }
 
-pub fn is_object_in_los(object: &ObjectReference) -> bool {
-    (*object).to_raw_address().as_usize() > 0x60000000000
-}
-
 #[no_mangle]
 pub extern "C" fn map_boot_image_metadata(start: Address, end: Address) {
     let start_address_aligned_down = start.align_down(BYTES_IN_PAGE);
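Before moving to the second patch, it is worth picturing in isolation the size-based dispatch that patch 1 moves into the binding's alloc(): requests at or above the large-object threshold are redirected to the large object space (LOS), everything else keeps the caller's semantics. The sketch below is illustrative only, not part of the patch; it assumes a threshold of 4096 bytes (the hard-coded value the C side uses in patch 1) and stubs out the mutator, while the real binding reads the constant from mmtk::plan::IMMIX_CONSTRAINTS.max_non_los_default_alloc_bytes.

// Illustrative sketch of the dispatch added in PATCH 1/2 (threshold assumed).
#[derive(Debug, Clone, Copy, PartialEq)]
enum AllocationSemantics {
    Default,
    Los,
}

const MAX_IMMIX_OBJECT_SIZE: usize = 4096; // assumption; see lead-in above

// Pick the allocation semantics the same way the patched `alloc` does:
// large requests are forced into the large object space, small ones keep
// whatever semantics the caller asked for.
fn pick_semantics(size: usize, requested: AllocationSemantics) -> AllocationSemantics {
    if size >= MAX_IMMIX_OBJECT_SIZE {
        AllocationSemantics::Los
    } else {
        requested
    }
}

fn main() {
    assert_eq!(pick_semantics(64, AllocationSemantics::Default), AllocationSemantics::Default);
    assert_eq!(pick_semantics(8192, AllocationSemantics::Default), AllocationSemantics::Los);
}

The payoff of this arrangement is that the Julia side no longer needs its own bigvalue_t bookkeeping: the runtime always calls the one alloc() entry point and MMTk decides which space the object lands in.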
From 0375ccdfe33f3ff3a870ac969e2d73b91ee682bb Mon Sep 17 00:00:00 2001
From: Luis Eduardo de Souza Amorim
Date: Mon, 23 Jan 2023 02:14:00 +0000
Subject: [PATCH 2/2] Set the allocation threshold to Immix's
 MAX_IMMIX_OBJECT_SIZE

---
 julia/mmtk_julia.c       | 78 ++++++++++++++++++++++++++--------------
 mmtk/julia               |  1 -
 mmtk/src/api.rs          | 23 +++++++++---
 mmtk/src/lib.rs          |  8 ++++-
 mmtk/src/object_model.rs | 19 ++++++++--
 5 files changed, 95 insertions(+), 34 deletions(-)
 delete mode 160000 mmtk/julia

diff --git a/julia/mmtk_julia.c b/julia/mmtk_julia.c
index 8fc79c3e..064a3de0 100644
--- a/julia/mmtk_julia.c
+++ b/julia/mmtk_julia.c
@@ -46,17 +46,12 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default_llvm(int pool_offset, size_t o
     jl_taggedvalue_t *v_tagged;
 
     // v needs to be 16 byte aligned, therefore v_tagged needs to be offset accordingly to consider the size of header
-    if (osize >= 4096) {
-        v_tagged = (jl_taggedvalue_t *) alloc(ptls->mmtk_mutator_ptr, osize, 16, 8, 0);
-    } else {
-        ptls->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls->cursor;
+    ptls->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls->cursor;
 
-        v_tagged =
-            (jl_taggedvalue_t *) alloc(ptls->mmtk_mutator_ptr, osize, 16, 8, 0);
+    v_tagged = (jl_taggedvalue_t *) alloc(ptls->mmtk_mutator_ptr, osize, 16, 8, 0);
 
-        ptls->cursor = ptls->mmtk_mutator_ptr->allocators.immix[0].cursor;
-        ptls->limit = ptls->mmtk_mutator_ptr->allocators.immix[0].limit;
-    }
+    ptls->cursor = ptls->mmtk_mutator_ptr->allocators.immix[0].cursor;
+    ptls->limit = ptls->mmtk_mutator_ptr->allocators.immix[0].limit;
 
     v = jl_valueof(v_tagged);
 
@@ -68,24 +63,19 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default_llvm(int pool_offset, size_t o
 }
 
 STATIC_INLINE void* alloc_default_object(jl_ptls_t ptls, size_t size, int offset) {
-    if (size >= 4096) {
-        void* res = alloc(ptls->mmtk_mutator_ptr, size, 16, offset, 0);
-        return res;
+    int64_t delta = (-offset -(int64_t)(ptls->cursor)) & 15; // aligned to 16
+    uint64_t aligned_addr = (uint64_t)ptls->cursor + delta;
+
+    if(__unlikely(aligned_addr+size > (uint64_t)ptls->limit)) {
+        jl_ptls_t ptls2 = (jl_ptls_t)jl_get_ptls_states();
+        ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls2->cursor;
+        void* res = alloc(ptls2->mmtk_mutator_ptr, size, 16, offset, 0);
+        ptls2->cursor = ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor;
+        ptls2->limit = ptls2->mmtk_mutator_ptr->allocators.immix[0].limit;
+        return res;
     } else {
-        int64_t delta = (-offset -(int64_t)(ptls->cursor)) & 15; // aligned to 16
-        uint64_t aligned_addr = (uint64_t)ptls->cursor + delta;
-
-        if(__unlikely(aligned_addr+size > (uint64_t)ptls->limit)) {
-            jl_ptls_t ptls2 = (jl_ptls_t)jl_get_ptls_states();
-            ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor = ptls2->cursor;
-            void* res = alloc(ptls2->mmtk_mutator_ptr, size, 16, offset, 0);
-            ptls2->cursor = ptls2->mmtk_mutator_ptr->allocators.immix[0].cursor;
-            ptls2->limit = ptls2->mmtk_mutator_ptr->allocators.immix[0].limit;
-            return res;
-        } else {
-            ptls->cursor = (void*) (aligned_addr+size);
-            return (void*) aligned_addr;
-        }
+        ptls->cursor = (void*) (aligned_addr+size);
+        return (void*) aligned_addr;
     }
 }
 
@@ -115,6 +105,38 @@ JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_default(jl_ptls_t ptls, size_t osize,
     return v;
 }
 
+JL_DLLEXPORT jl_value_t *jl_mmtk_gc_alloc_big(jl_ptls_t ptls, size_t sz)
+{
+    // safepoint
+    jl_gc_safepoint_(ptls);
+
+    size_t offs = offsetof(bigval_t, header);
+    assert(sz >= sizeof(jl_taggedvalue_t) && "sz must include tag");
+    static_assert(offsetof(bigval_t, header) >= sizeof(void*), "Empty bigval header?");
+    static_assert(sizeof(bigval_t) % JL_HEAP_ALIGNMENT == 0, "");
+    size_t allocsz = LLT_ALIGN(sz + offs, JL_CACHE_BYTE_ALIGNMENT);
+    if (allocsz < sz) { // overflow in adding offs, size was "negative"
+        assert(0 && "Error when allocating big object");
+        jl_throw(jl_memory_exception);
+    }
+
+    bigval_t *v = (bigval_t*)alloc_large(ptls->mmtk_mutator_ptr, allocsz, JL_CACHE_BYTE_ALIGNMENT, 0, 2);
+
+    if (v == NULL) {
+        assert(0 && "Allocation failed");
+        jl_throw(jl_memory_exception);
+    }
+    v->sz = allocsz;
+
+    ptls->gc_num.allocd += allocsz;
+    ptls->gc_num.bigalloc++;
+
+    jl_value_t *result = jl_valueof(&v->header);
+    post_alloc(ptls->mmtk_mutator_ptr, result, allocsz, 2);
+
+    return result;
+}
+
 static void mmtk_sweep_malloced_arrays(void) JL_NOTSAFEPOINT
 {
     gc_time_mallocd_array_start();
@@ -296,6 +318,10 @@ size_t get_so_size(void* obj)
                     tsz += jl_array_nbytes(a);
                 }
             }
+            if (a->flags.pooled && tsz > 2032) { // a->data is actually a separate object and not inlined
+                tsz = sizeof(jl_array_t) + ndimwords*sizeof(size_t);
+            }
+
             return tsz + sizeof(jl_taggedvalue_t);
         } else if (a->flags.how == 1) {
             int ndimwords = jl_array_ndimwords(jl_array_ndims(a));
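The overflow guard in the re-added jl_mmtk_gc_alloc_big() is easy to miss: LLT_ALIGN(sz + offs, JL_CACHE_BYTE_ALIGNMENT) can wrap around, and the C code detects that only after the fact with the `allocsz < sz` comparison. The same guard can be written up front with checked arithmetic; the Rust sketch below is an illustration only (align_up_checked is not part of the patch, and it assumes align is a power of two).

// Sketch of the wrap-around guard in jl_mmtk_gc_alloc_big, expressed with
// checked arithmetic instead of the post-hoc `allocsz < sz` comparison.
fn align_up_checked(sz: usize, offs: usize, align: usize) -> Option<usize> {
    // Equivalent of LLT_ALIGN(sz + offs, align), returning None on overflow.
    sz.checked_add(offs)?
        .checked_add(align - 1)
        .map(|v| v & !(align - 1))
}

fn main() {
    assert_eq!(align_up_checked(100, 16, 64), Some(128));
    assert_eq!(align_up_checked(usize::MAX - 8, 16, 64), None); // would overflow
}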
diff --git a/mmtk/julia b/mmtk/julia
deleted file mode 160000
index f3792d56..00000000
--- a/mmtk/julia
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f3792d56fcfaf8d3180a60fabadcccb042dee262
diff --git a/mmtk/src/api.rs b/mmtk/src/api.rs
index 109be136..deb4f3f7 100644
--- a/mmtk/src/api.rs
+++ b/mmtk/src/api.rs
@@ -27,7 +27,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::RwLockWriteGuard;
 
 #[cfg(feature = "immix")]
-use crate::MAX_IMMIX_OBJECT_SIZE;
+use crate::MAX_STANDARD_OBJECT_SIZE;
 
 #[no_mangle]
 pub extern "C" fn gc_init(
@@ -148,7 +148,7 @@ pub extern "C" fn alloc(
     offset: isize,
     semantics: AllocationSemantics,
 ) -> Address {
-    if size >= MAX_IMMIX_OBJECT_SIZE {
+    if size >= MAX_STANDARD_OBJECT_SIZE {
         // MAX_IMMIX_OBJECT_SIZE
         memory_manager::alloc::<JuliaVM>(
             unsafe { &mut *mutator },
@@ -174,6 +174,22 @@ pub extern "C" fn alloc(
     memory_manager::alloc::<JuliaVM>(unsafe { &mut *mutator }, size, align, offset, semantics)
 }
 
+#[no_mangle]
+pub extern "C" fn alloc_large(
+    mutator: *mut Mutator<JuliaVM>,
+    size: usize,
+    align: usize,
+    offset: isize,
+) -> Address {
+    memory_manager::alloc::<JuliaVM>(
+        unsafe { &mut *mutator },
+        size,
+        align,
+        offset,
+        AllocationSemantics::Los,
+    )
+}
+
 #[cfg(feature = "immix")]
 #[no_mangle]
 pub extern "C" fn post_alloc(
@@ -182,7 +198,7 @@ pub extern "C" fn post_alloc(
     bytes: usize,
     semantics: AllocationSemantics,
 ) {
-    if bytes >= MAX_IMMIX_OBJECT_SIZE {
+    if bytes >= MAX_STANDARD_OBJECT_SIZE {
         // MAX_IMMIX_OBJECT_SIZE
         memory_manager::post_alloc::<JuliaVM>(
             unsafe { &mut *mutator },
@@ -206,7 +222,6 @@ pub extern "C" fn post_alloc(
     memory_manager::post_alloc::<JuliaVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
 }
 
-
 #[no_mangle]
 pub extern "C" fn will_never_move(object: ObjectReference) -> bool {
     !object.is_movable()
diff --git a/mmtk/src/lib.rs b/mmtk/src/lib.rs
index 82d0fa76..02294013 100644
--- a/mmtk/src/lib.rs
+++ b/mmtk/src/lib.rs
@@ -77,7 +77,13 @@ extern "C" {
 
 #[cfg(feature = "immix")]
 #[no_mangle]
-pub static MAX_IMMIX_OBJECT_SIZE : usize = mmtk::plan::IMMIX_CONSTRAINTS.max_non_los_default_alloc_bytes;
+pub static MAX_STANDARD_OBJECT_SIZE: usize =
+    mmtk::plan::IMMIX_CONSTRAINTS.max_non_los_default_alloc_bytes;
+
+#[cfg(not(feature = "immix"))]
+#[no_mangle]
+pub static MAX_STANDARD_OBJECT_SIZE: usize = // default to size of Julia's max size class
+    2032 - std::mem::size_of::<Address>();
 
 #[no_mangle]
 pub static BLOCK_FOR_GC: AtomicBool = AtomicBool::new(false);
diff --git a/mmtk/src/object_model.rs b/mmtk/src/object_model.rs
index e1c6d2dd..ba1fe0ec 100644
--- a/mmtk/src/object_model.rs
+++ b/mmtk/src/object_model.rs
@@ -67,7 +67,13 @@ impl ObjectModel<JuliaVM> for VMObjectModel {
     }
 
     fn get_current_size(object: ObjectReference) -> usize {
-        let size = unsafe { ((*UPCALLS).get_so_size)(object) };
+        let size = if is_object_in_los(&object) {
+            unsafe { ((*UPCALLS).get_lo_size)(object) }
+        } else {
+            let obj_size = unsafe { ((*UPCALLS).get_so_size)(object) };
+            obj_size
+        };
+
         size as usize
     }
 
@@ -93,7 +99,12 @@
 
     #[inline(always)]
     fn ref_to_object_start(object: ObjectReference) -> Address {
-        unsafe { ((*UPCALLS).get_object_start_ref)(object) }
+        let res = if is_object_in_los(&object) {
+            object.to_raw_address() - 48
+        } else {
+            unsafe { ((*UPCALLS).get_object_start_ref)(object) }
+        };
+        res
     }
 
     #[inline(always)]
@@ -116,6 +127,10 @@
     }
 }
 
+pub fn is_object_in_los(object: &ObjectReference) -> bool {
+    (*object).to_raw_address().as_usize() > 0x60000000000
+}
+
 #[no_mangle]
 pub extern "C" fn map_boot_image_metadata(start: Address, end: Address) {
     let start_address_aligned_down = start.align_down(BYTES_IN_PAGE);
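Finally, a worked check of the non-immix fallback threshold introduced in patch 2. Julia defines GC_MAX_SZCLASS as 2032 - sizeof(void*): the largest object that still fits a pool size class once the pointer-sized tag is accounted for. The constant in lib.rs mirrors that, so whatever pointer-sized type parameter feeds size_of there, the value comes out to 2024 bytes on a 64-bit target. A minimal sketch, assuming 8-byte pointers:

// Worked check of the fallback MAX_STANDARD_OBJECT_SIZE (64-bit assumed).
fn main() {
    let julia_max_pool_object: usize = 2032; // largest pool size class, header included
    let ptr_bytes = std::mem::size_of::<*const u8>(); // stands in for sizeof(void*)
    let max_standard_object_size = julia_max_pool_object - ptr_bytes;
    assert_eq!(max_standard_object_size, 2024);
    println!("non-immix MAX_STANDARD_OBJECT_SIZE = {max_standard_object_size}");
}

Taken together, the two patches leave one tunable in one place: with the immix feature the threshold is whatever the plan's constraints report, and without it the binding falls back to Julia's own pool limit.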