| /* |
| * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. |
| * Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved. |
| * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| * |
| * This code is free software; you can redistribute it and/or modify it |
| * under the terms of the GNU General Public License version 2 only, as |
| * published by the Free Software Foundation. |
| * |
| * This code is distributed in the hope that it will be useful, but WITHOUT |
| * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| * version 2 for more details (a copy is included in the LICENSE file that |
| * accompanied this code). |
| * |
| * You should have received a copy of the GNU General Public License version |
| * 2 along with this work; if not, write to the Free Software Foundation, |
| * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| * |
| * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| * or visit www.oracle.com if you need additional information or have any |
| * questions. |
| * |
| */ |
| |
| #include "precompiled.hpp" |
| #include "gc/epsilon/epsilonHeap.hpp" |
| #include "gc/epsilon/epsilonInitLogger.hpp" |
| #include "gc/epsilon/epsilonMemoryPool.hpp" |
| #include "gc/epsilon/epsilonThreadLocalData.hpp" |
| #include "gc/shared/gcArguments.hpp" |
| #include "gc/shared/locationPrinter.inline.hpp" |
| #include "logging/log.hpp" |
| #include "memory/allocation.hpp" |
| #include "memory/allocation.inline.hpp" |
| #include "memory/metaspaceUtils.hpp" |
| #include "memory/resourceArea.hpp" |
| #include "memory/universe.hpp" |
| #include "runtime/atomic.hpp" |
| #include "runtime/globals.hpp" |
| |
| jint EpsilonHeap::initialize() { |
| size_t align = HeapAlignment; |
| size_t init_byte_size = align_up(InitialHeapSize, align); |
| size_t max_byte_size = align_up(MaxHeapSize, align); |
| |
| // Initialize backing storage |
| ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align); |
| _virtual_space.initialize(heap_rs, init_byte_size); |
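  // Note: the full max_byte_size range is reserved up front, but only
  // init_byte_size of it is committed here. allocate_work() commits more of
  // the reservation on demand, up to the reserved limit.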
| |
| MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high()); |
| |
| initialize_reserved_region(heap_rs); |
| |
| _space = new ContiguousSpace(); |
| _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true); |
| |
| // Precompute hot fields |
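  //   _max_tlab_size: TLAB cap, the smaller of the VM-wide limit and EpsilonMaxTLABSize
  //   _step_counter_update, _step_heap_print: how much occupancy must grow before
  //     the allocation path publishes counters / prints an occupancy line
  //   _decay_time_ns: EpsilonTLABDecayTime (milliseconds) converted to the
  //     nanosecond scale of os::javaTimeNanos()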
| _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize)); |
| _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep); |
| _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps); |
| _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC; |
| |
| // Enable monitoring |
| _monitoring_support = new EpsilonMonitoringSupport(this); |
| _last_counter_update = 0; |
| _last_heap_print = 0; |
| |
| // Install barrier set |
| BarrierSet::set_barrier_set(new EpsilonBarrierSet()); |
| |
| // All done, print out the configuration |
| EpsilonInitLogger::print(); |
| |
| return JNI_OK; |
| } |
| |
| void EpsilonHeap::initialize_serviceability() { |
| _pool = new EpsilonMemoryPool(this); |
| _memory_manager.add_pool(_pool); |
| } |
| |
| GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() { |
| GrowableArray<GCMemoryManager*> memory_managers(1); |
| memory_managers.append(&_memory_manager); |
| return memory_managers; |
| } |
| |
| GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() { |
| GrowableArray<MemoryPool*> memory_pools(1); |
| memory_pools.append(_pool); |
| return memory_pools; |
| } |
| |
| size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const { |
  // Return the max allocatable TLAB size, and let the allocation path figure
  // out the actual allocation size. Note: the result is in bytes, while
  // _max_tlab_size is in heap words.
| return _max_tlab_size * HeapWordSize; |
| } |
| |
| EpsilonHeap* EpsilonHeap::heap() { |
| return named_heap<EpsilonHeap>(CollectedHeap::Epsilon); |
| } |
| |
| HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) { |
| assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size); |
| |
| HeapWord* res = nullptr; |
| while (true) { |
    // Fast path: try to allocate without the lock, assuming space is available
| res = _space->par_allocate(size); |
| if (res != nullptr) { |
| break; |
| } |
| |
| // Allocation failed, attempt expansion, and retry: |
| { |
| MutexLocker ml(Heap_lock); |
| |
      // Try to allocate again under the lock: another thread may have expanded
      // the heap while we were waiting for it
| res = _space->par_allocate(size); |
| if (res != nullptr) { |
| break; |
| } |
| |
| // Expand and loop back if space is available |
| size_t size_in_bytes = size * HeapWordSize; |
| size_t uncommitted_space = max_capacity() - capacity(); |
| size_t unused_space = max_capacity() - used(); |
| size_t want_space = MAX2(size_in_bytes, EpsilonMinHeapExpand); |
| assert(unused_space >= uncommitted_space, |
| "Unused (" SIZE_FORMAT ") >= uncommitted (" SIZE_FORMAT ")", |
| unused_space, uncommitted_space); |
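      // Worked example with hypothetical numbers, assuming the default
      // EpsilonMinHeapExpand of 128M: with 4G reserved, 1G committed and 900M
      // used, uncommitted_space = 3072M and unused_space = 3196M. A 200M
      // request expands in bulk by 200M; a 3100M request cannot expand in bulk
      // but still fits in unused_space, so all 3072M of uncommitted space is
      // committed; a 3300M request returns nullptr.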
| |
| if (want_space < uncommitted_space) { |
| // Enough space to expand in bulk: |
| bool expand = _virtual_space.expand_by(want_space); |
| assert(expand, "Should be able to expand"); |
| } else if (size_in_bytes < unused_space) { |
        // No space to expand in bulk, but this allocation is still possible;
        // take all the remaining space:
| bool expand = _virtual_space.expand_by(uncommitted_space); |
| assert(expand, "Should be able to expand"); |
| } else { |
| // No space left: |
| return nullptr; |
| } |
| |
      _space->set_end((HeapWord*) _virtual_space.high());
| } |
| } |
| |
| size_t used = _space->used(); |
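  // The counter updates and heap prints below are rate-limited: a thread only
  // publishes when occupancy has grown by a full step since the last
  // publication, and cmpxchg ensures that exactly one of the racing threads
  // wins the right to do the slow work.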
| |
| // Allocation successful, update counters |
| if (verbose) { |
| size_t last = _last_counter_update; |
| if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) { |
| _monitoring_support->update_counters(); |
| } |
| } |
| |
| // ...and print the occupancy line, if needed |
| if (verbose) { |
| size_t last = _last_heap_print; |
| if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) { |
| print_heap_info(used); |
| print_metaspace_info(); |
| } |
| } |
| |
| assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res)); |
| return res; |
| } |
| |
| HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size, |
| size_t requested_size, |
| size_t* actual_size) { |
| Thread* thread = Thread::current(); |
| |
| // Defaults in case elastic paths are not taken |
| bool fits = true; |
| size_t size = requested_size; |
| size_t ergo_tlab = requested_size; |
| int64_t time = 0; |
| |
| if (EpsilonElasticTLAB) { |
| ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread); |
| |
| if (EpsilonElasticTLABDecay) { |
| int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread); |
| time = (int64_t) os::javaTimeNanos(); |
| |
| assert(last_time <= time, "time should be monotonic"); |
| |
      // If the thread has not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had an initial burst of allocations
      // and then started allocating only sporadically.
| if (last_time != 0 && (time - last_time > _decay_time_ns)) { |
| ergo_tlab = 0; |
| EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0); |
| } |
| } |
| |
| // If we can fit the allocation under current TLAB size, do so. |
| // Otherwise, we want to elastically increase the TLAB size. |
| fits = (requested_size <= ergo_tlab); |
| if (!fits) { |
| size = (size_t) (ergo_tlab * EpsilonTLABElasticity); |
| } |
| } |
| |
| // Always honor boundaries |
| size = clamp(size, min_size, _max_tlab_size); |
| |
| // Always honor alignment |
| size = align_up(size, MinObjAlignment); |
| |
| // Check that adjustments did not break local and global invariants |
| assert(is_object_aligned(size), |
| "Size honors object alignment: " SIZE_FORMAT, size); |
| assert(min_size <= size, |
| "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size); |
| assert(size <= _max_tlab_size, |
| "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size); |
| assert(size <= CollectedHeap::max_tlab_size(), |
| "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size()); |
| |
| if (log_is_enabled(Trace, gc)) { |
| ResourceMark rm; |
| log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT |
| "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K", |
| thread->name(), |
| requested_size * HeapWordSize / K, |
| min_size * HeapWordSize / K, |
| _max_tlab_size * HeapWordSize / K, |
| ergo_tlab * HeapWordSize / K, |
| size * HeapWordSize / K); |
| } |
| |
| // All prepared, let's do it! |
| HeapWord* res = allocate_work(size); |
| |
| if (res != nullptr) { |
| // Allocation successful |
| *actual_size = size; |
| if (EpsilonElasticTLABDecay) { |
| EpsilonThreadLocalData::set_last_tlab_time(thread, time); |
| } |
| if (EpsilonElasticTLAB && !fits) { |
| // If we requested expansion, this is our new ergonomic TLAB size |
| EpsilonThreadLocalData::set_ergo_tlab_size(thread, size); |
| } |
| } else { |
    // Allocation failed, reset ergonomics to try to fit smaller TLABs
| if (EpsilonElasticTLAB) { |
| EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0); |
| } |
| } |
| |
| return res; |
| } |
| |
| HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) { |
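  // Slow-path entry for allocations that are not satisfied from a TLAB
  // (e.g. requests larger than the TLAB limit). Epsilon never enforces a GC
  // overhead limit, so the flag is unconditionally cleared.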
| *gc_overhead_limit_was_exceeded = false; |
| return allocate_work(size); |
| } |
| |
| HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) { |
| // Cannot use verbose=true because Metaspace is not initialized |
| return allocate_work(size, /* verbose = */false); |
| } |
| |
| void EpsilonHeap::collect(GCCause::Cause cause) { |
| switch (cause) { |
| case GCCause::_metadata_GC_threshold: |
| case GCCause::_metadata_GC_clear_soft_refs: |
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it still has to perform sizing adjustments; otherwise we
      // would re-enter the safepoint very soon.
| |
| assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint"); |
| log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause)); |
| MetaspaceGC::compute_new_size(); |
| print_metaspace_info(); |
| break; |
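    // All other causes, including explicit System.gc() requests that arrive as
    // GCCause::_java_lang_system_gc, are deliberately ignored: Epsilon never
    // reclaims the heap.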
| default: |
| log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause)); |
| } |
| _monitoring_support->update_counters(); |
| } |
| |
| void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) { |
| collect(gc_cause()); |
| } |
| |
| void EpsilonHeap::object_iterate(ObjectClosure *cl) { |
| _space->object_iterate(cl); |
| } |
| |
| void EpsilonHeap::print_on(outputStream *st) const { |
| st->print_cr("Epsilon Heap"); |
| |
| _virtual_space.print_on(st); |
| |
| if (_space != nullptr) { |
| st->print_cr("Allocation space:"); |
| _space->print_on(st); |
| } |
| |
| MetaspaceUtils::print_on(st); |
| } |
| |
| bool EpsilonHeap::print_location(outputStream* st, void* addr) const { |
| return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr); |
| } |
| |
| void EpsilonHeap::print_tracing_info() const { |
| print_heap_info(used()); |
| print_metaspace_info(); |
| } |
| |
| void EpsilonHeap::print_heap_info(size_t used) const { |
| size_t reserved = max_capacity(); |
| size_t committed = capacity(); |
| |
| if (reserved != 0) { |
| log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, " |
| SIZE_FORMAT "%s (%.2f%%) used", |
| byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved), |
| byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed), |
| committed * 100.0 / reserved, |
| byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), |
| used * 100.0 / reserved); |
| } else { |
| log_info(gc)("Heap: no reliable data"); |
| } |
| } |
| |
| void EpsilonHeap::print_metaspace_info() const { |
| MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics(); |
| size_t reserved = stats.reserved(); |
| size_t committed = stats.committed(); |
| size_t used = stats.used(); |
| |
| if (reserved != 0) { |
| log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, " |
| SIZE_FORMAT "%s (%.2f%%) used", |
| byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved), |
| byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed), |
| committed * 100.0 / reserved, |
| byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), |
| used * 100.0 / reserved); |
| } else { |
| log_info(gc, metaspace)("Metaspace: no reliable data"); |
| } |
| } |