diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index b932808d8f..49b5691cd5 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -252,7 +252,6 @@ impl<VM: VMBinding> ImmixSpace<VM> {
             vec![
                 MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE),
                 MetadataSpec::OnSide(Block::MARK_TABLE),
-                MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE),
                 *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
                 *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC,
                 *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC,
@@ -264,7 +263,6 @@ impl<VM: VMBinding> ImmixSpace<VM> {
                 MetadataSpec::OnSide(Line::MARK_TABLE),
                 MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE),
                 MetadataSpec::OnSide(Block::MARK_TABLE),
-                MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE),
                 *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
                 *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC,
                 *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC,
@@ -299,6 +297,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
         let scheduler = args.scheduler.clone();
         let common =
             CommonSpace::new(args.into_policy_args(true, false, Self::side_metadata_specs()));
+        let space_index = common.descriptor.get_index();
         ImmixSpace {
             pr: if common.vmrequest.is_discontiguous() {
                 BlockPageResource::new_discontiguous(
@@ -316,7 +315,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
                 )
             },
             common,
-            chunk_map: ChunkMap::new(),
+            chunk_map: ChunkMap::new(space_index),
             line_mark_state: AtomicU8::new(Line::RESET_MARK_STATE),
             line_unavail_state: AtomicU8::new(Line::RESET_MARK_STATE),
             lines_consumed: AtomicUsize::new(0),
@@ -524,7 +523,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
             self.defrag.notify_new_clean_block(copy);
             let block = Block::from_aligned_address(block_address);
             block.init(copy);
-            self.chunk_map.set(block.chunk(), ChunkState::Allocated);
+            self.chunk_map.set_allocated(block.chunk(), true);
             self.lines_consumed
                 .fetch_add(Block::LINES, Ordering::SeqCst);
             Some(block)
@@ -899,7 +898,7 @@ struct SweepChunk<VM: VMBinding> {
 
 impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
-        assert_eq!(self.space.chunk_map.get(self.chunk), ChunkState::Allocated);
+        assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());
 
         let mut histogram = self.space.defrag.new_histogram();
         let line_mark_state = if super::BLOCK_ONLY {
@@ -950,7 +949,7 @@ impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
         probe!(mmtk, sweep_chunk, allocated_blocks);
         // Set this chunk as free if there is not live blocks.
         if allocated_blocks == 0 {
-            self.space.chunk_map.set(self.chunk, ChunkState::Free)
+            self.space.chunk_map.set_allocated(self.chunk, false)
         }
         self.space.defrag.add_completed_mark_histogram(histogram);
         self.epilogue.finish_one_work_packet();
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 0b349c4508..26570e41ba 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -288,7 +288,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
         let vm_map = args.vm_map;
         let is_discontiguous = args.vmrequest.is_discontiguous();
         let local_specs = {
-            metadata::extract_side_metadata(&vec![
+            metadata::extract_side_metadata(&[
                 MetadataSpec::OnSide(Block::NEXT_BLOCK_TABLE),
                 MetadataSpec::OnSide(Block::PREV_BLOCK_TABLE),
                 MetadataSpec::OnSide(Block::FREE_LIST_TABLE),
@@ -300,11 +300,11 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
                 MetadataSpec::OnSide(Block::BLOCK_LIST_TABLE),
                 MetadataSpec::OnSide(Block::TLS_TABLE),
                 MetadataSpec::OnSide(Block::MARK_TABLE),
-                MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE),
                 *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
             ])
         };
         let common = CommonSpace::new(args.into_policy_args(false, false, local_specs));
+        let space_index = common.descriptor.get_index();
         MarkSweepSpace {
             pr: if is_discontiguous {
                 BlockPageResource::new_discontiguous(
@@ -322,7 +322,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
                 )
             },
             common,
-            chunk_map: ChunkMap::new(),
+            chunk_map: ChunkMap::new(space_index),
             scheduler,
             abandoned: Mutex::new(AbandonedBlockLists::new()),
             abandoned_in_gc: Mutex::new(AbandonedBlockLists::new()),
@@ -402,7 +402,7 @@
     pub fn record_new_block(&self, block: Block) {
         block.init();
-        self.chunk_map.set(block.chunk(), ChunkState::Allocated);
+        self.chunk_map.set_allocated(block.chunk(), true);
     }
 
     pub fn prepare(&mut self, full_heap: bool) {
@@ -567,7 +567,7 @@ struct PrepareChunkMap<VM: VMBinding> {
 
 impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
-        debug_assert!(self.space.chunk_map.get(self.chunk) == ChunkState::Allocated);
+        debug_assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());
         // number of allocated blocks.
         let mut n_occupied_blocks = 0;
         self.chunk
@@ -581,7 +581,7 @@ impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
             });
         if n_occupied_blocks == 0 {
             // Set this chunk as free if there is no live blocks.
-            self.space.chunk_map.set(self.chunk, ChunkState::Free)
+            self.space.chunk_map.set_allocated(self.chunk, false)
         } else {
             // Otherwise this chunk is occupied, and we reset the mark bit if it is on the side.
             if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC {
@@ -617,7 +617,7 @@ struct SweepChunk<VM: VMBinding> {
 
 impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
-        assert_eq!(self.space.chunk_map.get(self.chunk), ChunkState::Allocated);
+        assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());
 
         // number of allocated blocks.
         let mut allocated_blocks = 0;
@@ -636,7 +636,7 @@ impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
         probe!(mmtk, sweep_chunk, allocated_blocks);
         // Set this chunk as free if there is not live blocks.
         if allocated_blocks == 0 {
-            self.space.chunk_map.set(self.chunk, ChunkState::Free)
+            self.space.chunk_map.set_allocated(self.chunk, false);
         }
         self.epilogue.finish_one_work_packet();
     }
diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs
index 26912b3f89..69caf12b12 100644
--- a/src/util/heap/chunk_map.rs
+++ b/src/util/heap/chunk_map.rs
@@ -44,20 +44,68 @@ impl Chunk {
     }
 }
 
-/// Chunk allocation state
-#[repr(u8)]
-#[derive(Debug, PartialEq, Clone, Copy)]
-pub enum ChunkState {
-    /// The chunk is not allocated.
-    Free = 0,
-    /// The chunk is allocated.
-    Allocated = 1,
+/// The allocation state for a chunk in the chunk map. It records whether the chunk is allocated or free, and, if allocated, the space the chunk belongs to.
+/// Highest bit: 0 = free, 1 = allocated
+/// Lower 4 bits: Space index (0-15) if the chunk is allocated.
+#[repr(transparent)]
+#[derive(PartialEq, Clone, Copy)]
+pub struct ChunkState(u8);
+
+impl ChunkState {
+    const ALLOC_BIT_MASK: u8 = 0x80;
+    const SPACE_INDEX_MASK: u8 = 0x0F;
+
+    /// Create a new ChunkState that represents being allocated in the given space
+    pub fn allocated(space_index: usize) -> ChunkState {
+        debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
+        let mut encode = space_index as u8;
+        encode |= Self::ALLOC_BIT_MASK;
+        ChunkState(encode)
+    }
+    /// Create a new ChunkState that represents being free
+    pub fn free() -> ChunkState {
+        ChunkState(0u8)
+    }
+    /// Is the chunk free?
+    pub fn is_free(&self) -> bool {
+        self.0 == 0
+    }
+    /// Is the chunk allocated?
+    pub fn is_allocated(&self) -> bool {
+        !self.is_free()
+    }
+    /// Get the space index of the chunk
+    pub fn get_space_index(&self) -> usize {
+        debug_assert!(self.is_allocated());
+        let index = (self.0 & Self::SPACE_INDEX_MASK) as usize;
+        debug_assert!(index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
+        index
+    }
+}
+
+impl std::fmt::Debug for ChunkState {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.is_free() {
+            write!(f, "Free")
+        } else {
+            write!(f, "Allocated({})", self.get_space_index())
+        }
+    }
 }
 
 /// A byte-map to record all the allocated chunks.
 /// A plan can use this to maintain records for the chunks that they used, and the states of the chunks.
-/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs
+/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in its global side metadata specs.
+///
+/// A chunk map is created for a space (identified by the space index), and will only update or list chunks for that space.
 pub struct ChunkMap {
+    /// The space that uses this chunk map.
+    space_index: usize,
+    /// The range of chunks that are used by the space. The range only records the lowest chunk and the highest chunk.
+    /// All the chunks that are used by the space are within the range, but not every chunk in the range is necessarily
+    /// used by the space. Spaces may be discontiguous, thus the range may include chunks that do not belong to the space.
+    /// We need to use the space index in the chunk map and the space index encoded with the chunk state to know if
+    /// the chunk belongs to the current space.
     chunk_range: Mutex<Range<Chunk>>,
 }
 
@@ -66,22 +114,40 @@ impl ChunkMap {
     pub const ALLOC_TABLE: SideMetadataSpec =
         crate::util::metadata::side_metadata::spec_defs::CHUNK_MARK;
 
-    pub fn new() -> Self {
+    pub fn new(space_index: usize) -> Self {
         Self {
+            space_index,
             chunk_range: Mutex::new(Chunk::ZERO..Chunk::ZERO),
         }
     }
 
-    /// Set chunk state
-    pub fn set(&self, chunk: Chunk, state: ChunkState) {
+    /// Set a chunk as allocated, or as free.
+    pub fn set_allocated(&self, chunk: Chunk, allocated: bool) {
+        let state = if allocated {
+            ChunkState::allocated(self.space_index)
+        } else {
+            ChunkState::free()
+        };
         // Do nothing if the chunk is already in the expected state.
-        if self.get(chunk) == state {
+        if self.get_internal(chunk) == state {
             return;
         }
+        #[cfg(debug_assertions)]
+        {
+            let old_state = self.get_internal(chunk);
+            // If a chunk is free, any space may use it. If a chunk is not free, only the current space may update its state.
+            assert!(
+                old_state.is_free() || old_state.get_space_index() == self.space_index,
+                "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.",
+                chunk,
+                old_state,
+                state
+            );
+        }
         // Update alloc byte
-        unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state as u8) };
+        unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state.0) };
         // If this is a newly allcoated chunk, then expand the chunk range.
-        if state == ChunkState::Allocated {
+        if allocated {
             debug_assert!(!chunk.start().is_zero());
             let mut range = self.chunk_range.lock();
             if range.start == Chunk::ZERO {
@@ -96,20 +162,23 @@
         }
     }
 
-    /// Get chunk state
-    pub fn get(&self, chunk: Chunk) -> ChunkState {
+    /// Get chunk state. Return None if the chunk does not belong to the space.
+    pub fn get(&self, chunk: Chunk) -> Option<ChunkState> {
+        let state = self.get_internal(chunk);
+        (state.is_allocated() && state.get_space_index() == self.space_index).then_some(state)
+    }
+
+    /// Get chunk state, regardless of the space. This should always be private.
+    fn get_internal(&self, chunk: Chunk) -> ChunkState {
         let byte = unsafe { Self::ALLOC_TABLE.load::<u8>(chunk.start()) };
-        match byte {
-            0 => ChunkState::Free,
-            1 => ChunkState::Allocated,
-            _ => unreachable!(),
-        }
+        ChunkState(byte)
     }
 
-    /// A range of all chunks in the heap.
-    pub fn all_chunks(&self) -> RegionIterator<Chunk> {
+    /// An iterator over all the chunks allocated by this space in the heap.
+    pub fn all_chunks(&self) -> impl Iterator<Item = Chunk> + '_ {
         let chunk_range = self.chunk_range.lock();
         RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
+            .filter(|c| self.get(*c).is_some())
     }
 
     /// Helper function to create per-chunk processing work packets for each allocated chunks.
@@ -118,18 +187,9 @@
         func: impl Fn(Chunk) -> Box<dyn GCWork<VM>>,
     ) -> Vec<Box<dyn GCWork<VM>>> {
         let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
-        for chunk in self
-            .all_chunks()
-            .filter(|c| self.get(*c) == ChunkState::Allocated)
-        {
+        for chunk in self.all_chunks() {
             work_packets.push(func(chunk));
         }
         work_packets
     }
 }
-
-impl Default for ChunkMap {
-    fn default() -> Self {
-        Self::new()
-    }
-}
diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs
index 8730602a4b..387d2f1d75 100644
--- a/src/util/metadata/side_metadata/global.rs
+++ b/src/util/metadata/side_metadata/global.rs
@@ -1345,6 +1345,11 @@ impl SideMetadataContext {
             }
         }
 
+        // Any plan that uses the chunk map needs to reserve the chunk map table.
+        // As we use either mark sweep or (non-moving) immix as the non-moving space,
+        // and both policies use the chunk map, we just add the chunk map table globally.
+        ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE);
+
         ret.extend_from_slice(specs);
         ret
     }
diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs
index be7a7730d3..736bf4ccfd 100644
--- a/src/util/metadata/side_metadata/spec_defs.rs
+++ b/src/util/metadata/side_metadata/spec_defs.rs
@@ -60,6 +60,8 @@ define_side_metadata_specs!(
     MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
     // Track the index in SFT map for a chunk (only used for SFT sparse chunk map)
     SFT_DENSE_CHUNK_MAP_INDEX = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
+    // Mark chunks (any plan that uses the chunk map should include this spec in its global side metadata specs)
+    CHUNK_MARK = (global: true, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES),
 );
 
 // This defines all LOCAL side metadata used by mmtk-core.
@@ -75,8 +77,6 @@ define_side_metadata_specs!(
     IX_BLOCK_DEFRAG = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES),
     // Mark blocks by immix
     IX_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES),
-    // Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs)
-    CHUNK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES),
     // Mark blocks by (native mimalloc) marksweep
     MS_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::marksweepspace::native_ms::Block::LOG_BYTES),
     // Next block in list for native mimalloc
diff --git a/src/util/object_enum.rs b/src/util/object_enum.rs
index 18508f088b..4748cf7565 100644
--- a/src/util/object_enum.rs
+++ b/src/util/object_enum.rs
@@ -5,10 +5,7 @@ use std::marker::PhantomData;
 use crate::vm::VMBinding;
 
 use super::{
-    heap::{
-        chunk_map::{ChunkMap, ChunkState},
-        MonotonePageResource,
-    },
+    heap::{chunk_map::ChunkMap, MonotonePageResource},
     linear_scan::Region,
     metadata::{side_metadata::spec_defs::VO_BIT, vo_bit},
     Address, ObjectReference,
@@ -85,11 +82,9 @@ pub(crate) fn enumerate_blocks_from_chunk_map<B>(
     B: BlockMayHaveObjects,
 {
     for chunk in chunk_map.all_chunks() {
-        if chunk_map.get(chunk) == ChunkState::Allocated {
-            for block in chunk.iter_region::<B>() {
-                if block.may_have_objects() {
-                    enumerator.visit_address_range(block.start(), block.end());
-                }
+        for block in chunk.iter_region::<B>() {
+            if block.may_have_objects() {
+                enumerator.visit_address_range(block.start(), block.end());
             }
         }
     }
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs
new file mode 100644
index 0000000000..82d1c5e958
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs
@@ -0,0 +1,37 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use super::mock_test_prelude::*;
+use crate::plan::AllocationSemantics;
+
+#[test]
+pub fn allocate_nonmoving() {
+    with_mockvm(
+        || -> MockVM {
+            MockVM {
+                is_collection_enabled: MockMethod::new_fixed(Box::new(|_| false)),
+                ..MockVM::default()
+            }
+        },
+        || {
+            // 1MB heap
+            const MB: usize = 1024 * 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(MB);
+
+            // Normal alloc
+            let addr =
+                memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default);
+            assert!(!addr.is_zero());
+
+            // Non moving alloc
+            let addr = memory_manager::alloc(
+                &mut fixture.mutator,
+                16,
+                8,
+                0,
+                AllocationSemantics::NonMoving,
+            );
+            assert!(!addr.is_zero());
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs
index aab9aafd8b..55b08b3b12 100644
--- a/src/vm/tests/mock_tests/mod.rs
+++ b/src/vm/tests/mock_tests/mod.rs
@@ -24,6 +24,7 @@ pub(crate) mod mock_test_prelude {
 }
 
 mod mock_test_allocate_align_offset;
+mod mock_test_allocate_nonmoving;
 mod mock_test_allocate_with_disable_collection;
 mod mock_test_allocate_with_initialize_collection;
 mod mock_test_allocate_with_re_enable_collection;
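
Note on the new chunk state encoding: the heart of this patch is that `ChunkState` is now a single byte packing an allocation bit and a space index, and each `ChunkMap` filters chunks by its own `space_index`. The sketch below mirrors that encoding outside of mmtk-core so it can be compiled and run on its own. It is illustrative only: the masks match the diff above, but the `MAX_SPACES` value of 16 and the free-standing helper names are assumptions made for this sketch, not part of the patch.

```rust
// Standalone sketch of the ChunkState byte layout from src/util/heap/chunk_map.rs.
// Assumption: MAX_SPACES is 16 here, matching the "Lower 4 bits: Space index (0-15)" comment.
const ALLOC_BIT_MASK: u8 = 0x80; // highest bit: 1 = allocated, 0 = free
const SPACE_INDEX_MASK: u8 = 0x0F; // lower 4 bits: owning space index
const MAX_SPACES: usize = 16;

#[derive(Clone, Copy, PartialEq, Debug)]
struct ChunkState(u8);

impl ChunkState {
    fn allocated(space_index: usize) -> Self {
        assert!(space_index < MAX_SPACES);
        ChunkState(space_index as u8 | ALLOC_BIT_MASK)
    }
    fn free() -> Self {
        ChunkState(0)
    }
    fn is_allocated(&self) -> bool {
        self.0 != 0
    }
    fn space_index(&self) -> usize {
        (self.0 & SPACE_INDEX_MASK) as usize
    }
}

fn main() {
    // Space 3 marks a chunk as allocated: the byte stored in the side metadata is 0x83.
    let state = ChunkState::allocated(3);
    assert_eq!(state.0, 0x83);
    assert!(state.is_allocated());
    assert_eq!(state.space_index(), 3);

    // A ChunkMap owned by space 5 would skip this chunk: its get() only returns Some(..)
    // when the encoded space index matches the map's own space_index.
    let owning_space_of_map = 5;
    assert!(!(state.is_allocated() && state.space_index() == owning_space_of_map));

    // A free chunk is always encoded as 0, regardless of which space freed it.
    assert_eq!(ChunkState::free().0, 0);
    assert!(!ChunkState::free().is_allocated());
}
```

This also shows why the patch can move `CHUNK_MARK` from the per-policy local specs to the global side metadata specs: a single shared byte-map can now distinguish which space owns each allocated chunk, so per-space chunk maps no longer need separate tables.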