-
Notifications
You must be signed in to change notification settings - Fork 78
Concurrent Immix #1355
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Concurrent Immix #1355
Changes from all commits
fb12863
9ac22bd
04fee38
100c049
2bbb200
90f4518
521adea
4f14b2f
865e24c
7b13272
1db4743
5c38704
a95e94a
eacc959
028bd0e
fe29529
26c2a54
80024dd
7487c1b
6bb03c0
ad41d7e
a938110
321c6e2
c9315b9
12112d2
77330fe
74572eb
bdcf723
1368ac5
f906db3
4e5c772
47708b7
35cf25a
2653716
9cdcb7a
d43c9e5
e3163d8
4966131
c70c03f
c79fe8e
a23bfbb
3e076a9
553227a
8ad9ed8
acea83a
c442dcd
70ef1db
6dfde0a
a33ba5a
d7ae8e2
8acb5af
2731449
ab75860
d96d2e5
4e1db41
c9d4971
ad0d88e
be33efe
34be825
2ee86af
0c3d9f6
f76e954
b7b79e4
26f74c1
5b5552b
815cc17
446ec36
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,163 @@ | ||
use std::sync::atomic::Ordering; | ||
|
||
use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; | ||
use crate::plan::global::PlanTraceObject; | ||
use crate::policy::gc_work::TraceKind; | ||
use crate::util::VMMutatorThread; | ||
use crate::{ | ||
plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue}, | ||
scheduler::WorkBucketStage, | ||
util::ObjectReference, | ||
vm::{ | ||
slot::{MemorySlice, Slot}, | ||
VMBinding, | ||
}, | ||
MMTK, | ||
}; | ||
|
||
/// Barrier semantics for snapshot-at-the-beginning (SATB) concurrent marking.
///
/// Buffers references that are about to be disconnected from the object graph
/// (the old values of overwritten slots) as well as referents loaded from weak
/// references, and flushes them to the scheduler as `ProcessModBufSATB` work
/// packets once a buffer fills up (or on an explicit `flush`).
pub struct SATBBarrierSemantics<
    VM: VMBinding,
    P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>,
    const KIND: TraceKind,
> {
    // Global MMTk instance; used to reach the scheduler's work buckets.
    mmtk: &'static MMTK<VM>,
    // The mutator thread owning this barrier-semantics instance.
    tls: VMMutatorThread,
    // Buffer of overwritten (snapshot) references awaiting tracing.
    satb: VectorQueue<ObjectReference>,
    // Buffer of referents loaded through weak references during concurrent marking.
    refs: VectorQueue<ObjectReference>,
    // The concurrent plan, downcast once from the global plan in `new`.
    plan: &'static P,
}
|
||
impl<VM: VMBinding, P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>, const KIND: TraceKind> | ||
SATBBarrierSemantics<VM, P, KIND> | ||
{ | ||
pub fn new(mmtk: &'static MMTK<VM>, tls: VMMutatorThread) -> Self { | ||
Self { | ||
mmtk, | ||
tls, | ||
satb: VectorQueue::default(), | ||
refs: VectorQueue::default(), | ||
plan: mmtk.get_plan().downcast_ref::<P>().unwrap(), | ||
} | ||
} | ||
|
||
fn slow(&mut self, _src: Option<ObjectReference>, _slot: VM::VMSlot, old: ObjectReference) { | ||
self.satb.push(old); | ||
if self.satb.is_full() { | ||
self.flush_satb(); | ||
} | ||
} | ||
|
||
fn enqueue_node( | ||
&mut self, | ||
src: Option<ObjectReference>, | ||
slot: VM::VMSlot, | ||
_new: Option<ObjectReference>, | ||
) -> bool { | ||
if let Some(old) = slot.load() { | ||
self.slow(src, slot, old); | ||
} | ||
true | ||
} | ||
|
||
/// Attempt to atomically log an object. | ||
/// Returns true if the object is not logged previously. | ||
fn log_object(&self, object: ObjectReference) -> bool { | ||
Self::UNLOG_BIT_SPEC.store_atomic::<VM, u8>(object, 0, None, Ordering::SeqCst); | ||
true | ||
} | ||
|
||
fn flush_satb(&mut self) { | ||
if !self.satb.is_empty() { | ||
if self.should_create_satb_packets() { | ||
let satb = self.satb.take(); | ||
let bucket = if self.plan.concurrent_work_in_progress() { | ||
WorkBucketStage::Concurrent | ||
} else { | ||
debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark)); | ||
WorkBucketStage::Closure | ||
}; | ||
self.mmtk.scheduler.work_buckets[bucket] | ||
.add(ProcessModBufSATB::<VM, P, KIND>::new(satb)); | ||
} else { | ||
let _ = self.satb.take(); | ||
}; | ||
} | ||
} | ||
|
||
#[cold] | ||
fn flush_weak_refs(&mut self) { | ||
if !self.refs.is_empty() { | ||
let nodes = self.refs.take(); | ||
let bucket = if self.plan.concurrent_work_in_progress() { | ||
WorkBucketStage::Concurrent | ||
} else { | ||
debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark)); | ||
WorkBucketStage::Closure | ||
}; | ||
self.mmtk.scheduler.work_buckets[bucket] | ||
.add(ProcessModBufSATB::<VM, P, KIND>::new(nodes)); | ||
} | ||
} | ||
|
||
fn should_create_satb_packets(&self) -> bool { | ||
self.plan.concurrent_work_in_progress() | ||
|| self.plan.current_pause() == Some(Pause::FinalMark) | ||
} | ||
} | ||
|
||
impl<VM: VMBinding, P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>, const KIND: TraceKind> | ||
BarrierSemantics for SATBBarrierSemantics<VM, P, KIND> | ||
{ | ||
type VM = VM; | ||
|
||
#[cold] | ||
fn flush(&mut self) { | ||
self.flush_satb(); | ||
self.flush_weak_refs(); | ||
} | ||
|
||
fn object_reference_write_slow( | ||
&mut self, | ||
src: ObjectReference, | ||
_slot: <Self::VM as VMBinding>::VMSlot, | ||
_target: Option<ObjectReference>, | ||
) { | ||
self.object_probable_write_slow(src); | ||
self.log_object(src); | ||
} | ||
|
||
fn memory_region_copy_slow( | ||
&mut self, | ||
_src: <Self::VM as VMBinding>::VMMemorySlice, | ||
dst: <Self::VM as VMBinding>::VMMemorySlice, | ||
) { | ||
for s in dst.iter_slots() { | ||
self.enqueue_node(None, s, None); | ||
} | ||
} | ||
|
||
/// Enqueue the referent during concurrent marking. | ||
/// | ||
/// Note: During concurrent marking, a collector based on snapshot-at-the-beginning (SATB) will | ||
/// not reach objects that were weakly reachable at the time of `InitialMark`. But if a mutator | ||
/// loads from a weak reference field during concurrent marking, it will make the referent | ||
/// strongly reachable, yet the referent is still not part of the SATB. We must conservatively | ||
/// enqueue the referent even though its reachability has not yet been established, otherwise it | ||
/// (and its children) may be treated as garbage if it happened to be weakly reachable at the | ||
/// time of `InitialMark`. | ||
fn load_weak_reference(&mut self, o: ObjectReference) { | ||
if !self.plan.concurrent_work_in_progress() { | ||
return; | ||
} | ||
self.refs.push(o); | ||
if self.refs.is_full() { | ||
self.flush_weak_refs(); | ||
} | ||
} | ||
|
||
fn object_probable_write_slow(&mut self, obj: ObjectReference) { | ||
crate::plan::tracing::SlotIterator::<VM>::iterate_fields(obj, self.tls.0, |s| { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Need to enqueue There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Probably I'm wrong, but I guess
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
/// A pre-barrier indicating that some fields of the object will probably be modified soon.
/// Specifically, the caller should ensure that:
/// * The barrier must called before any field modification.
/// * Some fields (unknown at the time of calling this barrier) might be modified soon, without a write barrier.
/// * There are no safepoints between the barrier call and the field writes.
If the fields are assigned during concurrent marking, the (new) values will either come from the snapshot at the beginning, or be a new object allocated during concurrent marking. In either case, they will be kept alive. (Update: But the old children of the fields that are overwritten by the assignments will not be kept alive.) To put it another way, the SATB barrier is a deletion barrier. As long as no objects are disconnected from other objects, there is no need to apply barrier. In the case of OpenJDK, it is assigning objects to fields that are not initialized, so no objects are disconnected.
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Wait. After a second thought, I think if MMTk implements the semantics described in the doc comment of The OpenJDK binding can do VM-specific optimization by eliding the invocation of There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Please don't remove it or change semantics arbitrarily. I (kind of) depend on it in ART as well. ART has a barrier |
||
self.enqueue_node(Some(obj), s, None); | ||
}); | ||
} | ||
} |
Uh oh!
There was an error while loading. Please reload this page.