
Commit c2c4df0

Minimize the scope of acquire_lock (#614)
This PR narrows the scope of `Space.acquire_lock`. The lock was introduced in #555, and it caused poor allocation performance when many threads are running (e.g. more than 24 mutator threads, or GC threads with copying allocators). This pull request reduces the scope of `acquire_lock` so that the overhead from it is mostly negligible. This closes #610.
1 parent a424ef2 commit c2c4df0
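
As a rough illustration of the pattern (a simplified sketch, not the actual mmtk-core code; the `ToySpace` struct and helper methods below are placeholders), the idea is to hold `acquire_lock` only across the pair of steps that must appear atomic to other allocating threads: getting pages in a possibly-new chunk and publishing the SFT for it in `grow_space`, rather than the whole allocation slow path.

```rust
use std::sync::Mutex;

// Placeholder type standing in for a space with a lock that guards chunk growth.
struct ToySpace {
    acquire_lock: Mutex<()>,
}

impl ToySpace {
    fn acquire(&self, pages: usize) -> Option<usize> {
        // Reserving pages and polling for GC do not need the lock.
        let pages_reserved = self.reserve_pages(pages);
        if self.poll_for_gc(pages_reserved) {
            return None; // caller blocks for GC; no lock is held here
        }

        // Hold the lock only across "get new pages" + "publish SFT for a new chunk".
        let lock = self.acquire_lock.lock().unwrap();
        let start = self.get_new_pages(pages_reserved)?;
        self.grow_space(start, pages); // sets metadata (SFT) for any new chunk
        drop(lock);

        // Mapping and zeroing the pages can proceed without the lock.
        self.map_pages(start, pages);
        Some(start)
    }

    // Stubs so the sketch is self-contained; the real methods live on Space/PageResource.
    fn reserve_pages(&self, pages: usize) -> usize { pages }
    fn poll_for_gc(&self, _reserved: usize) -> bool { false }
    fn get_new_pages(&self, reserved: usize) -> Option<usize> { Some(reserved * 4096) }
    fn grow_space(&self, _start: usize, _pages: usize) {}
    fn map_pages(&self, _start: usize, _pages: usize) {}
}
```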

1 file changed (+14, −8)


src/policy/space.rs

Lines changed: 14 additions & 8 deletions
@@ -395,12 +395,6 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         // initialize_collection() has to be called so we know GC is initialized.
         let allow_gc = should_poll && VM::VMActivePlan::global().is_initialized();
 
-        // We need this lock: Othrewise, it is possible that one thread acquires pages in a new chunk, but not yet
-        // set SFT for it (in grow_space()), and another thread acquires pages in the same chunk, which is not
-        // a new chunk so grow_space() won't be called on it. The second thread could return a result in the chunk before
-        // its SFT is properly set.
-        let lock = self.common().acquire_lock.lock().unwrap();
-
         trace!("Reserving pages");
         let pr = self.get_page_resource();
         let pages_reserved = pr.reserve_pages(pages);
@@ -411,12 +405,19 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
             debug!("Collection required");
             assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
             pr.clear_request(pages_reserved);
-            drop(lock); // drop the lock before block
             VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
             unsafe { Address::zero() }
         } else {
             debug!("Collection not required");
 
+            // We need this lock: Othrewise, it is possible that one thread acquires pages in a new chunk, but not yet
+            // set SFT for it (in grow_space()), and another thread acquires pages in the same chunk, which is not
+            // a new chunk so grow_space() won't be called on it. The second thread could return a result in the chunk before
+            // its SFT is properly set.
+            // We need to minimize the scope of this lock for performance when we have many threads (mutator threads, or GC threads with copying allocators).
+            // See: https://github.com/mmtk/mmtk-core/issues/610
+            let lock = self.common().acquire_lock.lock().unwrap();
+
             match pr.get_new_pages(self.common().descriptor, pages_reserved, pages, tls) {
                 Ok(res) => {
                     debug!(
@@ -429,6 +430,10 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
                     );
                     let bytes = conversions::pages_to_bytes(res.pages);
                     self.grow_space(res.start, bytes, res.new_chunk);
+
+                    // Once we finish grow_space, we can drop the lock.
+                    drop(lock);
+
                     // Mmap the pages and the side metadata, and handle error. In case of any error,
                     // we will either call back to the VM for OOM, or simply panic.
                     if let Err(mmap_error) = self
@@ -479,6 +484,8 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
                     res.start
                 }
                 Err(_) => {
+                    drop(lock); // drop the lock immediately
+
                     // We thought we had memory to allocate, but somehow failed the allocation. Will force a GC.
                     assert!(
                         allow_gc,
@@ -488,7 +495,6 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
                     let gc_performed = VM::VMActivePlan::global().poll(true, Some(self.as_space()));
                     debug_assert!(gc_performed, "GC not performed when forced.");
                     pr.clear_request(pages_reserved);
-                    drop(lock); // drop the lock before block
                     VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We asserted that this is mutator.
                     unsafe { Address::zero() }
                 }
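
For intuition about the race the in-code comment describes, here is a minimal, self-contained sketch (hypothetical names, not mmtk-core API): an `AtomicBool` stands in for the SFT entry of a chunk, and the lock is held across both "create the chunk" and "publish its SFT", so a second thread that finds the chunk already created can never hand out an address before the SFT is visible.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    // Stand-in for the SFT entry of chunk C: "has grow_space() published it yet?"
    let sft_set = Arc::new(AtomicBool::new(false));
    // Stand-in for acquire_lock; the bool tracks whether chunk C exists yet.
    let grow_lock = Arc::new(Mutex::new(false));

    let mut handles = Vec::new();
    for _ in 0..2 {
        let sft_set = Arc::clone(&sft_set);
        let grow_lock = Arc::clone(&grow_lock);
        handles.push(thread::spawn(move || {
            // Take the lock across "get pages (maybe creating C)" + "publish SFT".
            let mut chunk_exists = grow_lock.lock().unwrap();
            if !*chunk_exists {
                *chunk_exists = true; // this thread created chunk C
                sft_set.store(true, Ordering::SeqCst); // grow_space(): publish SFT
            }
            drop(chunk_exists);
            // Because both steps happened under the lock, the SFT entry is always
            // visible by the time either thread hands out an address in chunk C.
            assert!(sft_set.load(Ordering::SeqCst));
        }));
    }
    for h in handles {
        h.join().unwrap();
    }
}
```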
