Skip to content

Commit 62964b7

Browse files
wenyuzhao, Javad Amiri, and qinsoon
authored
PageProtect GC (#345)
* Add a paging GC: each object is allocated to a separate page, where free pages are protected.
* Panic for failed protect/unprotect. Add a warning to PageProtect.gc_init().
* Expose PageProtect constraints, same as other plans.
* Expose .._VM_BASE_ADDRESS instead of .._BASE_ADDRESS, to expose the base address available to bindings.

Co-authored-by: Javad Amiri <javad.amiri@anu.edu.au>
Co-authored-by: Yi Lin <qinsoon@gmail.com>
1 parent 9d7305e commit 62964b7

File tree

13 files changed

+434
-42
lines changed

13 files changed

+434
-42
lines changed

src/plan/global.rs

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,9 @@ pub fn create_mutator<VM: VMBinding>(
126126
PlanSelector::MarkSweep => {
127127
crate::plan::marksweep::mutator::create_ms_mutator(tls, &*mmtk.plan)
128128
}
129+
PlanSelector::PageProtect => {
130+
crate::plan::pageprotect::mutator::create_pp_mutator(tls, &*mmtk.plan)
131+
}
129132
})
130133
}
131134

@@ -146,6 +149,9 @@ pub fn create_plan<VM: VMBinding>(
146149
PlanSelector::MarkSweep => Box::new(crate::plan::marksweep::MarkSweep::new(
147150
vm_map, mmapper, options,
148151
)),
152+
PlanSelector::PageProtect => Box::new(crate::plan::pageprotect::PageProtect::new(
153+
vm_map, mmapper, options,
154+
)),
149155
}
150156
}
151157

@@ -733,6 +739,7 @@ impl<VM: VMBinding> CommonPlan<VM> {
733739
mmapper,
734740
&mut heap,
735741
constraints,
742+
false,
736743
),
737744
base: BasePlan::new(
738745
vm_map,

src/plan/mod.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ pub use transitive_closure::TransitiveClosure;
4343
mod gencopy;
4444
mod marksweep;
4545
mod nogc;
46+
mod pageprotect;
4647
mod semispace;
4748

4849
// Expose plan constraints as public. Though a binding can get them from plan.constraints(),
@@ -51,4 +52,5 @@ mod semispace;
5152
pub use gencopy::GENCOPY_CONSTRAINTS;
5253
pub use marksweep::MS_CONSTRAINTS;
5354
pub use nogc::NOGC_CONSTRAINTS;
55+
pub use pageprotect::PP_CONSTRAINTS;
5456
pub use semispace::SS_CONSTRAINTS;

src/plan/pageprotect/gc_work.rs

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
use super::global::PageProtect;
2+
use crate::plan::global::NoCopy;
3+
use crate::policy::space::Space;
4+
use crate::scheduler::gc_work::*;
5+
use crate::util::{Address, ObjectReference};
6+
use crate::vm::VMBinding;
7+
use crate::MMTK;
8+
use std::ops::{Deref, DerefMut};
9+
10+
/// Edge scanning work packet.
11+
pub struct PPProcessEdges<VM: VMBinding> {
12+
/// Use a static ref to the specific plan to avoid overhead from dynamic dispatch or
13+
/// downcast for each traced object.
14+
plan: &'static PageProtect<VM>,
15+
base: ProcessEdgesBase<PPProcessEdges<VM>>,
16+
}
17+
18+
impl<VM: VMBinding> ProcessEdgesWork for PPProcessEdges<VM> {
19+
const OVERWRITE_REFERENCE: bool = false;
20+
type VM = VM;
21+
fn new(edges: Vec<Address>, _roots: bool, mmtk: &'static MMTK<VM>) -> Self {
22+
let base = ProcessEdgesBase::new(edges, mmtk);
23+
let plan = base.plan().downcast_ref::<PageProtect<VM>>().unwrap();
24+
Self { plan, base }
25+
}
26+
#[inline]
27+
fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
28+
let object = unsafe {
29+
let untagged_word = object.to_address().as_usize() & !0b11usize;
30+
Address::from_usize(untagged_word).to_object_reference()
31+
};
32+
if object.is_null() {
33+
return object;
34+
}
35+
if self.plan.space.in_space(object) {
36+
self.plan.space.trace_object::<Self>(self, object)
37+
} else {
38+
self.plan
39+
.common
40+
.trace_object::<Self, NoCopy<VM>>(self, object)
41+
}
42+
}
43+
}
44+
45+
impl<VM: VMBinding> Deref for PPProcessEdges<VM> {
46+
type Target = ProcessEdgesBase<Self>;
47+
#[inline]
48+
fn deref(&self) -> &Self::Target {
49+
&self.base
50+
}
51+
}
52+
53+
impl<VM: VMBinding> DerefMut for PPProcessEdges<VM> {
54+
#[inline]
55+
fn deref_mut(&mut self) -> &mut Self::Target {
56+
&mut self.base
57+
}
58+
}

src/plan/pageprotect/global.rs

Lines changed: 171 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,171 @@
1+
use super::gc_work::PPProcessEdges;
2+
use super::mutator::ALLOCATOR_MAPPING;
3+
use crate::mmtk::MMTK;
4+
use crate::plan::global::GcStatus;
5+
use crate::plan::AllocationSemantics;
6+
use crate::plan::Plan;
7+
use crate::plan::PlanConstraints;
8+
use crate::policy::space::Space;
9+
use crate::scheduler::gc_work::*;
10+
use crate::scheduler::*;
11+
use crate::util::alloc::allocators::AllocatorSelector;
12+
#[cfg(feature = "analysis")]
13+
use crate::util::analysis::GcHookWork;
14+
use crate::util::heap::layout::heap_layout::Mmapper;
15+
use crate::util::heap::layout::heap_layout::VMMap;
16+
use crate::util::heap::layout::vm_layout_constants::{HEAP_END, HEAP_START};
17+
use crate::util::heap::HeapMeta;
18+
use crate::util::heap::VMRequest;
19+
use crate::util::metadata::side_metadata::SideMetadataContext;
20+
use crate::util::options::UnsafeOptionsWrapper;
21+
#[cfg(feature = "sanity")]
22+
use crate::util::sanity::sanity_checker::*;
23+
use crate::{plan::global::BasePlan, vm::VMBinding};
24+
use crate::{
25+
plan::global::{CommonPlan, NoCopy},
26+
policy::largeobjectspace::LargeObjectSpace,
27+
util::opaque_pointer::VMWorkerThread,
28+
};
29+
use enum_map::EnumMap;
30+
use std::sync::Arc;
31+
32+
pub struct PageProtect<VM: VMBinding> {
33+
pub space: LargeObjectSpace<VM>,
34+
pub common: CommonPlan<VM>,
35+
}
36+
37+
pub const CONSTRAINTS: PlanConstraints = PlanConstraints {
38+
moves_objects: false,
39+
..PlanConstraints::default()
40+
};
41+
42+
impl<VM: VMBinding> Plan for PageProtect<VM> {
43+
type VM = VM;
44+
45+
fn constraints(&self) -> &'static PlanConstraints {
46+
&CONSTRAINTS
47+
}
48+
49+
fn create_worker_local(
50+
&self,
51+
tls: VMWorkerThread,
52+
mmtk: &'static MMTK<Self::VM>,
53+
) -> GCWorkerLocalPtr {
54+
let mut c = NoCopy::new(mmtk);
55+
c.init(tls);
56+
GCWorkerLocalPtr::new(c)
57+
}
58+
59+
fn gc_init(
60+
&mut self,
61+
heap_size: usize,
62+
vm_map: &'static VMMap,
63+
scheduler: &Arc<MMTkScheduler<VM>>,
64+
) {
65+
// Warn users that the plan may fail due to maximum mapping allowed.
66+
warn!(
67+
"PageProtect uses a high volume of memory mappings. \
68+
If you encounter failures in memory protect/unprotect in this plan, \
69+
consider increasing the maximum mapping allowed by the OS{}.",
70+
if cfg!(target_os = "linux") {
71+
" (e.g. sudo sysctl -w vm.max_map_count=655300)"
72+
} else {
73+
""
74+
}
75+
);
76+
self.common.gc_init(heap_size, vm_map, scheduler);
77+
self.space.init(&vm_map);
78+
}
79+
80+
fn schedule_collection(&'static self, scheduler: &MMTkScheduler<VM>) {
81+
self.base().set_collection_kind();
82+
self.base().set_gc_status(GcStatus::GcPrepare);
83+
self.common()
84+
.schedule_common::<PPProcessEdges<VM>>(&CONSTRAINTS, scheduler);
85+
// Stop & scan mutators (mutator scanning can happen before STW)
86+
scheduler.work_buckets[WorkBucketStage::Unconstrained]
87+
.add(StopMutators::<PPProcessEdges<VM>>::new());
88+
// Prepare global/collectors/mutators
89+
scheduler.work_buckets[WorkBucketStage::Prepare]
90+
.add(Prepare::<Self, NoCopy<VM>>::new(self));
91+
// Release global/collectors/mutators
92+
scheduler.work_buckets[WorkBucketStage::Release]
93+
.add(Release::<Self, NoCopy<VM>>::new(self));
94+
// Scheduling all the gc hooks of analysis routines. It is generally recommended
95+
// to take advantage of the scheduling system we have in place for more performance
96+
#[cfg(feature = "analysis")]
97+
scheduler.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
98+
// Resume mutators
99+
#[cfg(feature = "sanity")]
100+
scheduler.work_buckets[WorkBucketStage::Final]
101+
.add(ScheduleSanityGC::<Self, NoCopy<VM>>::new(self));
102+
scheduler.set_finalizer(Some(EndOfGC));
103+
}
104+
105+
fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
106+
&*ALLOCATOR_MAPPING
107+
}
108+
109+
fn prepare(&mut self, tls: VMWorkerThread) {
110+
self.common.prepare(tls, true);
111+
self.space.prepare(true);
112+
}
113+
114+
fn release(&mut self, tls: VMWorkerThread) {
115+
self.common.release(tls, true);
116+
self.space.release(true);
117+
}
118+
119+
fn collection_required(&self, space_full: bool, space: &dyn Space<Self::VM>) -> bool {
120+
self.base().collection_required(self, space_full, space)
121+
}
122+
123+
fn get_collection_reserve(&self) -> usize {
124+
0
125+
}
126+
127+
fn get_pages_used(&self) -> usize {
128+
self.space.reserved_pages() + self.common.get_pages_used()
129+
}
130+
131+
fn base(&self) -> &BasePlan<VM> {
132+
&self.common.base
133+
}
134+
135+
fn common(&self) -> &CommonPlan<VM> {
136+
&self.common
137+
}
138+
}
139+
140+
impl<VM: VMBinding> PageProtect<VM> {
141+
pub fn new(
142+
vm_map: &'static VMMap,
143+
mmapper: &'static Mmapper,
144+
options: Arc<UnsafeOptionsWrapper>,
145+
) -> Self {
146+
let mut heap = HeapMeta::new(HEAP_START, HEAP_END);
147+
let global_metadata_specs = SideMetadataContext::new_global_specs(&[]);
148+
149+
PageProtect {
150+
space: LargeObjectSpace::new(
151+
"los",
152+
true,
153+
VMRequest::discontiguous(),
154+
global_metadata_specs.clone(),
155+
vm_map,
156+
mmapper,
157+
&mut heap,
158+
&CONSTRAINTS,
159+
true,
160+
),
161+
common: CommonPlan::new(
162+
vm_map,
163+
mmapper,
164+
options,
165+
heap,
166+
&CONSTRAINTS,
167+
global_metadata_specs,
168+
),
169+
}
170+
}
171+
}

src/plan/pageprotect/mod.rs

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
//! Plan: pageprotect
2+
//!
3+
//! Allocate each object on a separate page and protect the memory on release.
4+
//! This GC is commonly used for debugging purposes.
5+
6+
pub(super) mod gc_work;
7+
pub(super) mod global;
8+
pub(super) mod mutator;
9+
10+
pub use self::global::PageProtect;
11+
pub use self::global::CONSTRAINTS as PP_CONSTRAINTS;

src/plan/pageprotect/mutator.rs

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
use super::PageProtect;
2+
use crate::plan::mutator_context::Mutator;
3+
use crate::plan::mutator_context::MutatorConfig;
4+
use crate::plan::AllocationSemantics as AllocationType;
5+
use crate::plan::Plan;
6+
use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
7+
use crate::vm::VMBinding;
8+
use crate::{
9+
plan::barriers::NoBarrier,
10+
util::opaque_pointer::{VMMutatorThread, VMWorkerThread},
11+
};
12+
use enum_map::enum_map;
13+
use enum_map::EnumMap;
14+
15+
/// Prepare mutator. Do nothing.
16+
fn pp_mutator_prepare<VM: VMBinding>(_mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {}
17+
18+
/// Release mutator. Do nothing.
19+
fn pp_mutator_release<VM: VMBinding>(_mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {}
20+
21+
lazy_static! {
22+
pub static ref ALLOCATOR_MAPPING: EnumMap<AllocationType, AllocatorSelector> = enum_map! {
23+
AllocationType::Default | AllocationType::Los => AllocatorSelector::LargeObject(0),
24+
// Temporarily put code and readonly objects to immortal space, for v8 support.
25+
AllocationType::Immortal | AllocationType::Code | AllocationType::ReadOnly => AllocatorSelector::BumpPointer(0),
26+
};
27+
}
28+
29+
/// Create a mutator instance.
30+
/// Every object is allocated to LOS.
31+
pub fn create_pp_mutator<VM: VMBinding>(
32+
mutator_tls: VMMutatorThread,
33+
plan: &'static dyn Plan<VM = VM>,
34+
) -> Mutator<VM> {
35+
let page = plan.downcast_ref::<PageProtect<VM>>().unwrap();
36+
let config = MutatorConfig {
37+
allocator_mapping: &*ALLOCATOR_MAPPING,
38+
space_mapping: box vec![
39+
(
40+
AllocatorSelector::BumpPointer(0),
41+
page.common.get_immortal(),
42+
),
43+
(AllocatorSelector::LargeObject(0), &page.space),
44+
],
45+
prepare_func: &pp_mutator_prepare,
46+
release_func: &pp_mutator_release,
47+
};
48+
49+
Mutator {
50+
allocators: Allocators::<VM>::new(mutator_tls, plan, &config.space_mapping),
51+
barrier: box NoBarrier,
52+
mutator_tls,
53+
config,
54+
plan,
55+
}
56+
}

0 commit comments

Comments
 (0)