|
22 | 22 | #include <malloc.h> // For mallinfo() |
23 | 23 | #endif |
24 | 24 | #include <memory> |
| 25 | +#include <random> |
| 26 | +#include <unistd.h> |
| 27 | +#include <sys/types.h> |
25 | 28 | #include <vector> |
26 | 29 |
|
27 | 30 | #include "android-base/stringprintf.h" |
@@ -316,6 +319,7 @@ Heap::Heap(size_t initial_size, |
316 | 319 | next_gc_type_(collector::kGcTypePartial), |
317 | 320 | capacity_(capacity), |
318 | 321 | growth_limit_(growth_limit), |
| 322 | + initial_heap_size_(initial_size), |
319 | 323 | target_footprint_(initial_size), |
320 | 324 | // Using kPostMonitorLock as a lock at kDefaultMutexLevel is acquired after |
321 | 325 | // this one. |
@@ -2128,6 +2132,27 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() { |
2128 | 2132 | return HomogeneousSpaceCompactResult::kSuccess; |
2129 | 2133 | } |
2130 | 2134 |
|
| 2135 | +void Heap::SetDefaultConcurrentStartBytes() { |
| 2136 | + MutexLock mu(Thread::Current(), *gc_complete_lock_); |
| 2137 | + if (collector_type_running_ != kCollectorTypeNone) { |
| 2138 | + // If a collector is already running, just let it set concurrent_start_bytes_. |
| 2139 | + return; |
| 2140 | + } |
| 2141 | + SetDefaultConcurrentStartBytesLocked(); |
| 2142 | +} |
| 2143 | + |
| 2144 | +void Heap::SetDefaultConcurrentStartBytesLocked() { |
| 2145 | + if (IsGcConcurrent()) { |
| 2146 | + size_t target_footprint = target_footprint_.load(std::memory_order_relaxed); |
| 2147 | + size_t reserve_bytes = target_footprint / 4; |
| 2148 | + reserve_bytes = std::min(reserve_bytes, kMaxConcurrentRemainingBytes); |
| 2149 | + reserve_bytes = std::max(reserve_bytes, kMinConcurrentRemainingBytes); |
| 2150 | + concurrent_start_bytes_ = UnsignedDifference(target_footprint, reserve_bytes); |
| 2151 | + } else { |
| 2152 | + concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); |
| 2153 | + } |
| 2154 | +} |
| 2155 | + |
2131 | 2156 | void Heap::ChangeCollector(CollectorType collector_type) { |
2132 | 2157 | // TODO: Only do this with all mutators suspended to avoid races. |
2133 | 2158 | if (collector_type != collector_type_) { |
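For readers skimming the hunk above, here is a minimal, self-contained sketch of the clamping that SetDefaultConcurrentStartBytesLocked() performs. The constant values and the saturating-subtract helper below are illustrative placeholders written for this note, not the definitions from heap.cc:

#include <algorithm>
#include <cstdio>
#include <stddef.h>

namespace {
constexpr size_t KB = 1024;
// Placeholder values for illustration only; the real constants are defined in heap.cc.
constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;

// Saturating subtraction, in the spirit of Heap::UnsignedDifference().
size_t UnsignedDifference(size_t x, size_t y) { return x > y ? x - y : 0; }

// Mirrors the arithmetic in SetDefaultConcurrentStartBytesLocked(): reserve a quarter of the
// target footprint, clamped to [kMin, kMax], and start concurrent GC that far before the limit.
size_t DefaultConcurrentStartBytes(size_t target_footprint) {
  size_t reserve_bytes = target_footprint / 4;
  reserve_bytes = std::min(reserve_bytes, kMaxConcurrentRemainingBytes);
  reserve_bytes = std::max(reserve_bytes, kMinConcurrentRemainingBytes);
  return UnsignedDifference(target_footprint, reserve_bytes);
}
}  // namespace

int main() {
  // With a 16 MiB target footprint the reserve is capped at kMax, so concurrent GC
  // is requested once allocation reaches 16 MiB - 512 KiB.
  std::printf("%zu\n", DefaultConcurrentStartBytes(16 * 1024 * KB));
  return 0;
}

Compared with the removed code in ChangeCollector(), which always reserved kMinConcurrentRemainingBytes below the footprint, the new helper scales the reserve with the footprint up to kMaxConcurrentRemainingBytes.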
@@ -2174,13 +2199,7 @@ void Heap::ChangeCollector(CollectorType collector_type) { |
2174 | 2199 | UNREACHABLE(); |
2175 | 2200 | } |
2176 | 2201 | } |
2177 | | - if (IsGcConcurrent()) { |
2178 | | - concurrent_start_bytes_ = |
2179 | | - UnsignedDifference(target_footprint_.load(std::memory_order_relaxed), |
2180 | | - kMinConcurrentRemainingBytes); |
2181 | | - } else { |
2182 | | - concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); |
2183 | | - } |
| 2202 | + SetDefaultConcurrentStartBytesLocked(); |
2184 | 2203 | } |
2185 | 2204 | } |
2186 | 2205 |
|
@@ -3544,6 +3563,11 @@ double Heap::HeapGrowthMultiplier() const { |
3544 | 3563 |
|
3545 | 3564 | void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran, |
3546 | 3565 | size_t bytes_allocated_before_gc) { |
| 3566 | + // We're running in the thread that set collector_type_running_ to something other than |
| 3567 | + // none, which ensures that only one of us is running. Hence |
| 3568 | + // collector_type_running_ != kCollectorTypeNone, but that's a little tricky to turn into a |
| 3569 | + // DCHECK. |
| 3570 | + |
3547 | 3571 | // We know what our utilization is at this moment. |
3548 | 3572 | // This doesn't actually resize any memory. It just lets the heap grow more when necessary. |
3549 | 3573 | const size_t bytes_allocated = GetBytesAllocated(); |
@@ -3669,8 +3693,7 @@ void Heap::ClearGrowthLimit() { |
3669 | 3693 | if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_ |
3670 | 3694 | && growth_limit_ < capacity_) { |
3671 | 3695 | target_footprint_.store(capacity_, std::memory_order_relaxed); |
3672 | | - concurrent_start_bytes_ = |
3673 | | - UnsignedDifference(capacity_, kMinConcurrentRemainingBytes); |
| 3696 | + SetDefaultConcurrentStartBytes(); |
3674 | 3697 | } |
3675 | 3698 | growth_limit_ = capacity_; |
3676 | 3699 | ScopedObjectAccess soa(Thread::Current()); |
@@ -4437,32 +4460,97 @@ void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t all |
4437 | 4460 | << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation"; |
4438 | 4461 | } |
4439 | 4462 |
|
| 4463 | +// Run a gc if we haven't run one since initial_gc_num. This forces processes to |
| 4464 | +// reclaim memory allocated during startup, even if they don't do much |
| 4465 | +// allocation post startup. If the process is actively allocating and triggering |
| 4466 | +// GCs, or has moved to the background and hence forced a GC, this does nothing. |
4440 | 4467 | class Heap::TriggerPostForkCCGcTask : public HeapTask { |
4441 | 4468 | public: |
4442 | | - explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {} |
| 4469 | + explicit TriggerPostForkCCGcTask(uint64_t target_time, uint32_t initial_gc_num) : |
| 4470 | + HeapTask(target_time), initial_gc_num_(initial_gc_num) {} |
4443 | 4471 | void Run(Thread* self) override { |
4444 | 4472 | gc::Heap* heap = Runtime::Current()->GetHeap(); |
4445 | | - // Trigger a GC, if not already done. The first GC after fork, whenever it |
4446 | | - // takes place, will adjust the thresholds to normal levels. |
4447 | | - if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) { |
4448 | | - heap->RequestConcurrentGC(self, kGcCauseBackground, false, heap->GetCurrentGcNum()); |
| 4473 | + if (heap->GetCurrentGcNum() == initial_gc_num_) { |
| 4474 | + if (kLogAllGCs) { |
| 4475 | + LOG(INFO) << "Forcing GC for allocation-inactive process"; |
| 4476 | + } |
| 4477 | + heap->RequestConcurrentGC(self, kGcCauseBackground, false, initial_gc_num_); |
4449 | 4478 | } |
4450 | 4479 | } |
| 4480 | + private: |
| 4481 | + uint32_t initial_gc_num_; |
4451 | 4482 | }; |
4452 | 4483 |
|
| 4484 | +// Reduce target footprint, if no GC has occurred since initial_gc_num. |
| 4485 | +// If a GC already occurred, it will have done this for us. |
| 4486 | +class Heap::ReduceTargetFootprintTask : public HeapTask { |
| 4487 | + public: |
| 4488 | + explicit ReduceTargetFootprintTask(uint64_t target_time, size_t new_target_sz, |
| 4489 | + uint32_t initial_gc_num) : |
| 4490 | + HeapTask(target_time), new_target_sz_(new_target_sz), initial_gc_num_(initial_gc_num) {} |
| 4491 | + void Run(Thread* self) override { |
| 4492 | + gc::Heap* heap = Runtime::Current()->GetHeap(); |
| 4493 | + MutexLock mu(self, *(heap->gc_complete_lock_)); |
| 4494 | + if (heap->GetCurrentGcNum() == initial_gc_num_ |
| 4495 | + && heap->collector_type_running_ == kCollectorTypeNone) { |
| 4496 | + size_t target_footprint = heap->target_footprint_.load(std::memory_order_relaxed); |
| 4497 | + if (target_footprint > new_target_sz_) { |
| 4498 | + if (heap->target_footprint_.CompareAndSetStrongRelaxed(target_footprint, new_target_sz_)) { |
| 4499 | + heap->SetDefaultConcurrentStartBytesLocked(); |
| 4500 | + } |
| 4501 | + } |
| 4502 | + } |
| 4503 | + } |
| 4504 | + private: |
| 4505 | + size_t new_target_sz_; |
| 4506 | + uint32_t initial_gc_num_; |
| 4507 | +}; |
| 4508 | + |
| 4509 | +// Return a pseudo-random integer between 0 and 19999, using the uid as a seed. We want this to |
| 4510 | +// be deterministic for a given process, but to vary randomly across processes. Empirically, the |
| 4511 | +// uids for processes for which this matters are distinct. |
| 4512 | +static uint32_t GetPseudoRandomFromUid() { |
| 4513 | + std::default_random_engine rng(getuid()); |
| 4514 | + std::uniform_int_distribution<int> dist(0, 19999); |
| 4515 | + return dist(rng); |
| 4516 | +} |
| 4517 | + |
4453 | 4518 | void Heap::PostForkChildAction(Thread* self) { |
| 4519 | + uint32_t starting_gc_num = GetCurrentGcNum(); |
| 4520 | + uint64_t last_adj_time = NanoTime(); |
| 4521 | + next_gc_type_ = NonStickyGcType(); // Always start with a full gc. |
| 4522 | + |
4454 | 4523 | // Temporarily increase target_footprint_ and concurrent_start_bytes_ to |
4455 | 4524 | // max values to avoid GC during app launch. |
4456 | | - if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) { |
| 4525 | + if (!IsLowMemoryMode()) { |
4457 | 4526 | // Set target_footprint_ to the largest allowed value. |
4458 | 4527 | SetIdealFootprint(growth_limit_); |
4459 | | - // Set concurrent_start_bytes_ to half of the heap size. |
4460 | | - size_t target_footprint = target_footprint_.load(std::memory_order_relaxed); |
4461 | | - concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated()); |
4462 | | - |
4463 | | - GetTaskProcessor()->AddTask( |
4464 | | - self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS))); |
| 4528 | + SetDefaultConcurrentStartBytes(); |
| 4529 | + |
| 4530 | + // Shrink heap after kPostForkMaxHeapDurationMS, to force a memory hog process to GC. |
| 4531 | + // This remains high enough that many processes will continue without a GC. |
| 4532 | + if (initial_heap_size_ < growth_limit_) { |
| 4533 | + size_t first_shrink_size = std::max(growth_limit_ / 4, initial_heap_size_); |
| 4534 | + last_adj_time += MsToNs(kPostForkMaxHeapDurationMS); |
| 4535 | + GetTaskProcessor()->AddTask( |
| 4536 | + self, new ReduceTargetFootprintTask(last_adj_time, first_shrink_size, starting_gc_num)); |
| 4537 | + // Shrink to a small value after a substantial time period. This will typically force a |
| 4538 | + // GC if none has occurred yet. Has no effect if there was a GC before this anyway, which |
| 4539 | + // is commonly the case, e.g. because of a process transition. |
| 4540 | + if (initial_heap_size_ < first_shrink_size) { |
| 4541 | + last_adj_time += MsToNs(4 * kPostForkMaxHeapDurationMS); |
| 4542 | + GetTaskProcessor()->AddTask( |
| 4543 | + self, |
| 4544 | + new ReduceTargetFootprintTask(last_adj_time, initial_heap_size_, starting_gc_num)); |
| 4545 | + } |
| 4546 | + } |
4465 | 4547 | } |
| 4548 | + // Schedule a GC after a substantial period of time. This will become a no-op if another GC |
| 4549 | + // has run in the interim. If not, we want to avoid holding onto start-up garbage. |
| 4550 | + uint64_t post_fork_gc_time = last_adj_time |
| 4551 | + + MsToNs(4 * kPostForkMaxHeapDurationMS + GetPseudoRandomFromUid()); |
| 4552 | + GetTaskProcessor()->AddTask(self, |
| 4553 | + new TriggerPostForkCCGcTask(post_fork_gc_time, starting_gc_num)); |
4466 | 4554 | } |
4467 | 4555 |
|
4468 | 4556 | void Heap::VisitReflectiveTargets(ReflectiveValueVisitor *visit) { |
|
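As a rough, self-contained sketch of the schedule that the rewritten PostForkChildAction() sets up, assuming both shrink tasks are queued (initial_heap_size_ < first_shrink_size < growth_limit_). The value used for kPostForkMaxHeapDurationMS and the uid below are arbitrary stand-ins for illustration, not taken from heap.cc:

#include <random>
#include <stdint.h>
#include <stdio.h>

namespace {
// Stand-in value for illustration; the real constant is defined in heap.cc.
constexpr uint64_t kPostForkMaxHeapDurationMS = 2000;

// Same idea as GetPseudoRandomFromUid(): deterministic for a given uid, varies across uids.
uint32_t PseudoRandomFromUid(uint32_t uid) {
  std::default_random_engine rng(uid);
  return std::uniform_int_distribution<int>(0, 19999)(rng);
}
}  // namespace

int main() {
  uint64_t t_ms = 0;  // time since fork
  // 1) First ReduceTargetFootprintTask: shrink to max(growth_limit_ / 4, initial_heap_size_).
  t_ms += kPostForkMaxHeapDurationMS;
  printf("first shrink:  +%llu ms\n", (unsigned long long)t_ms);
  // 2) Second ReduceTargetFootprintTask: shrink the rest of the way to initial_heap_size_.
  t_ms += 4 * kPostForkMaxHeapDurationMS;
  printf("second shrink: +%llu ms\n", (unsigned long long)t_ms);
  // 3) TriggerPostForkCCGcTask: force a background GC if none has run since fork. The
  //    uid-seeded jitter spreads the forced GCs of different processes over ~20 seconds.
  uint64_t gc_ms = t_ms + 4 * kPostForkMaxHeapDurationMS + PseudoRandomFromUid(10012);
  printf("forced GC:     +%llu ms (uid-dependent)\n", (unsigned long long)gc_ms);
  return 0;
}

Each of the three tasks checks GetCurrentGcNum() (and, for the shrink tasks, collector_type_running_) before acting, so a process that allocates enough to trigger its own GC, or that is forced to collect on a background transition, never sees the artificial shrink or the forced collection.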