/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
#define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
#include <stdint.h>
#include <list>
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc_root.h"
#include "gc_type.h"
#include "iteration.h"
#include "object_byte_pair.h"
#include "object_callbacks.h"
namespace art {
namespace mirror {
class Class;
class Object;
class Reference;
} // namespace mirror
namespace gc {
class Heap;
namespace collector {
class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
public:
class SCOPED_LOCKABLE ScopedPause {
public:
explicit ScopedPause(GarbageCollector* collector, bool with_reporting = true)
EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
~ScopedPause() UNLOCK_FUNCTION();
private:
const uint64_t start_time_;
GarbageCollector* const collector_;
bool with_reporting_;
};
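// Illustrative sketch only (not part of this file): a concrete collector using ScopedPause to
// bracket a stop-the-world step. MyCollector and PausePhase() are hypothetical names; the
// constructor exclusively acquires the mutator lock and the destructor releases it again.
//
//   void MyCollector::PausePhase() {
//     ScopedPause pause(this);
//     // ... work that must not run concurrently with mutator threads ...
//   }  // Pause ends here when the ScopedPause is destroyed.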
GarbageCollector(Heap* heap, const std::string& name);
virtual ~GarbageCollector() { }
const char* GetName() const {
return name_.c_str();
}
virtual GcType GetGcType() const = 0;
virtual CollectorType GetCollectorType() const = 0;
// Run the garbage collector.
void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_);
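// Illustrative sketch only: how the owning Heap (or a test) might drive one collection cycle.
// The collector pointer is assumed to be one of the heap's registered collectors, and
// kGcCauseExplicit is one of the GcCause values from gc_cause.h.
//
//   collector->Run(kGcCauseExplicit, /* clear_soft_references= */ true);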
Heap* GetHeap() const {
return heap_;
}
void RegisterPause(uint64_t nano_length);
const CumulativeLogger& GetCumulativeTimings() const {
return cumulative_timings_;
}
void ResetCumulativeStatistics() REQUIRES(!pause_histogram_lock_);
// Swap the live and mark bitmaps of spaces that are active for the collector. For a partial GC
// this is the allocation space; for a full GC the zygote bitmaps are swapped as well.
void SwapBitmaps()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
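// Illustrative sketch only: satisfying the lock annotations above before swapping bitmaps.
// WriterMutexLock, Thread::Current(), and Locks::heap_bitmap_lock_ are existing ART primitives;
// the surrounding phase and the shared mutator_lock_ hold are assumed to come from the caller.
//
//   {
//     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
//     SwapBitmaps();  // After this, the live bitmaps describe the post-GC state.
//   }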
uint64_t GetTotalCpuTime() const {
return total_thread_cpu_time_ns_;
}
uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
int64_t GetTotalFreedBytes() const {
return total_freed_bytes_;
}
uint64_t GetTotalFreedObjects() const {
return total_freed_objects_;
}
// Reset the cumulative timings and pause histogram.
void ResetMeasurements() REQUIRES(!pause_histogram_lock_);
// Returns the estimated throughput in bytes / second.
uint64_t GetEstimatedMeanThroughput() const;
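// A plausible reading of the estimate (an assumption, not a documented contract): cumulative
// freed bytes divided by cumulative GC time, roughly total_freed_bytes_ / (total_time_ns_ / 1e9),
// so it reflects the collector's long-run average rather than the most recent iteration.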
// Returns how many GC iterations have been run.
size_t NumberOfIterations() const {
return GetCumulativeTimings().GetIterations();
}
// Returns the current GC iteration and associated info.
Iteration* GetCurrentIteration();
const Iteration* GetCurrentIteration() const;
TimingLogger* GetTimings() {
return &GetCurrentIteration()->timings_;
}
// Record a free of normal objects.
void RecordFree(const ObjectBytePair& freed);
// Record a free of large objects.
void RecordFreeLOS(const ObjectBytePair& freed);
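// Illustrative sketch only: how a sweeping step might report what it reclaimed. The counters are
// hypothetical locals; ObjectBytePair comes from object_byte_pair.h included above.
//
//   RecordFree(ObjectBytePair(swept_objects, swept_bytes));              // Normal objects.
//   RecordFreeLOS(ObjectBytePair(swept_los_objects, swept_los_bytes));   // Large objects.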
virtual void DumpPerformanceInfo(std::ostream& os) REQUIRES(!pause_histogram_lock_);
// Extract RSS for GC-specific memory ranges using mincore().
uint64_t ExtractRssFromMincore(std::list<std::pair<void*, void*>>* gc_ranges);
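// Illustrative sketch of the underlying technique (a generic mincore() page count, not
// necessarily this method's exact implementation): mincore() fills one byte per page in the
// queried range, and the low bit of each byte says whether that page is resident, so the
// resident size is the number of set bits times the page size. CountResidentBytes and kPageSize
// stand in for hypothetical helpers; the sketch needs <sys/mman.h> and <vector>.
//
//   size_t CountResidentBytes(void* begin, void* end) {
//     const size_t length =
//         reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(begin);
//     std::vector<unsigned char> vec((length + kPageSize - 1) / kPageSize);
//     if (mincore(begin, length, vec.data()) != 0) {
//       return 0;  // Query failed; report nothing rather than a bogus value.
//     }
//     size_t resident_pages = 0;
//     for (unsigned char page_byte : vec) {
//       resident_pages += page_byte & 1;
//     }
//     return resident_pages * kPageSize;
//   }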
// Helper functions for querying if objects are marked. These are used for processing references,
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Returns true if the given heap reference is null or already marked; if it is already marked,
// also updates the reference (using a CAS when do_atomic_update is true). Otherwise returns false.
virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
bool do_atomic_update)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Used by reference processor.
virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Force mark an object.
virtual mirror::Object* MarkObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
bool do_atomic_update)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
bool IsTransactionActive() const {
return is_transaction_active_;
}
protected:
// Run all of the GC phases.
virtual void RunPhases() = 0;
// Revoke all the thread-local buffers.
virtual void RevokeAllThreadLocalBuffers() = 0;
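// Illustrative sketch only: the overall shape of a concrete collector. MySweepCollector and its
// phase methods are hypothetical; Run() above is what ultimately invokes RunPhases().
//
//   class MySweepCollector : public GarbageCollector {
//     void RunPhases() override {
//       MarkingPhase();              // Trace reachable objects.
//       {
//         ScopedPause pause(this);   // Stop-the-world portion, if any.
//         PausePhase();
//       }
//       ReclaimPhase();              // Sweep unreachable objects and record frees.
//     }
//     void RevokeAllThreadLocalBuffers() override { /* Return thread-local buffers to the heap. */ }
//     // ... remaining pure-virtual methods (GetGcType(), MarkObject(), ...) ...
//   };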
static constexpr size_t kPauseBucketSize = 500;
static constexpr size_t kPauseBucketCount = 32;
static constexpr size_t kMemBucketSize = 10;
static constexpr size_t kMemBucketCount = 16;
Heap* const heap_;
std::string name_;
// Cumulative statistics.
Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
Histogram<uint64_t> rss_histogram_;
Histogram<size_t> freed_bytes_histogram_;
uint64_t total_thread_cpu_time_ns_;
uint64_t total_time_ns_;
uint64_t total_freed_objects_;
int64_t total_freed_bytes_;
CumulativeLogger cumulative_timings_;
mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
bool is_transaction_active_;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(GarbageCollector);
};
} // namespace collector
} // namespace gc
} // namespace art
#endif // ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_