/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_JIT_PROFILING_INFO_H_
#define ART_RUNTIME_JIT_PROFILING_INFO_H_
#include <cstdint>
#include <cstring>
#include <limits>
#include <vector>

#include "base/macros.h"
#include "gc_root.h"
#include "offsets.h"
namespace art {
class ArtMethod;
class ProfilingInfo;
namespace jit {
class JitCodeCache;
} // namespace jit
namespace mirror {
class Class;
} // namespace mirror
// Structure to store the classes seen at runtime for a specific instruction.
// Once the classes_ array is full, we consider the INVOKE to be megamorphic.
class InlineCache {
 public:
  // Maximum number of distinct receiver classes recorded per call site; once
  // the cache is full the INVOKE is considered megamorphic (see class comment).
  // This is hard coded in the assembly stub art_quick_update_inline_cache.
  static constexpr uint8_t kIndividualCacheSize = 5;

  // Byte offset of `classes_` within InlineCache, so generated code (e.g. the
  // stub mentioned above) can update the cache without a runtime call.
  static constexpr MemberOffset ClassesOffset() {
    return MemberOffset(OFFSETOF_MEMBER(InlineCache, classes_));
  }

 private:
  // Dex pc of the INVOKE instruction this cache profiles.
  uint32_t dex_pc_;

  // Receiver classes observed at this call site. Stored as GC roots so the
  // garbage collector can visit (and clear) them.
  GcRoot<mirror::Class> classes_[kIndividualCacheSize];

  friend class jit::JitCodeCache;
  friend class ProfilingInfo;

  DISALLOW_COPY_AND_ASSIGN(InlineCache);
};
/**
* Profiling info for a method, created and filled by the interpreter once the
* method is warm, and used by the compiler to drive optimizations.
*/
class ProfilingInfo {
 public:
  // Create a ProfilingInfo for 'method'. Return whether it succeeded, or if it is
  // not needed in case the method does not have virtual/interface invocations.
  // NOTE(review): the exact effect of `retry_allocation` lives in the .cc /
  // code cache — confirm there before relying on it.
  static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Add information from an executed INVOKE instruction to the profile.
  void AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls)
      // Method should not be interruptible, as it manipulates the ProfilingInfo
      // which can be concurrently collected.
      REQUIRES(Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The method this profiling info describes.
  ArtMethod* GetMethod() const {
    return method_;
  }

  // Returns the inline cache associated with the INVOKE at `dex_pc`.
  // Mutator lock only required for debugging output.
  InlineCache* GetInlineCache(uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Whether a compilation of the method is in progress; `osr` selects the
  // on-stack-replacement flag vs the regular one.
  bool IsMethodBeingCompiled(bool osr) const {
    return osr
        ? is_osr_method_being_compiled_
        : is_method_being_compiled_;
  }

  // Sets/clears the "being compiled" flag (regular or OSR). The flags are
  // implicitly guarded by the JIT code cache lock (see field comments below).
  void SetIsMethodBeingCompiled(bool value, bool osr) {
    if (osr) {
      is_osr_method_being_compiled_ = value;
    } else {
      is_method_being_compiled_ = value;
    }
  }

  // Saves the method's entry point while the JIT code cache probes the
  // liveness of its compiled code (see `saved_entry_point_`).
  void SetSavedEntryPoint(const void* entry_point) {
    saved_entry_point_ = entry_point;
  }

  const void* GetSavedEntryPoint() const {
    return saved_entry_point_;
  }

  // Increments the number of times this method is currently being inlined.
  // Returns whether it was successful, that is it could increment without
  // overflowing.
  bool IncrementInlineUse() {
    if (current_inline_uses_ == std::numeric_limits<uint16_t>::max()) {
      return false;
    }
    current_inline_uses_++;
    return true;
  }

  // Decrements the inline-use counter. Must be balanced with a successful
  // IncrementInlineUse() — DCHECKs that the counter is non-zero.
  void DecrementInlineUse() {
    DCHECK_GT(current_inline_uses_, 0);
    current_inline_uses_--;
  }

  // Whether the compiler currently depends on this ProfilingInfo: the method
  // is being compiled (regular or OSR) or is inlined into another compilation.
  bool IsInUseByCompiler() const {
    return IsMethodBeingCompiled(/*osr=*/ true) || IsMethodBeingCompiled(/*osr=*/ false) ||
        (current_inline_uses_ > 0);
  }

  // Byte offset of `baseline_hotness_count_` within ProfilingInfo, exposed so
  // generated code can update the counter directly (presumably from
  // baseline-compiled method prologues — confirm against the JIT compiler).
  static constexpr MemberOffset BaselineHotnessCountOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ProfilingInfo, baseline_hotness_count_));
  }

  void SetBaselineHotnessCount(uint16_t count) {
    baseline_hotness_count_ = count;
  }

  uint16_t GetBaselineHotnessCount() const {
    return baseline_hotness_count_;
  }

 private:
  // `entries` holds the dex pcs of the instructions to profile; one
  // InlineCache slot in `cache_` is allocated per entry.
  ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries);

  // Hotness count for methods compiled with the JIT baseline compiler. Once
  // a threshold is hit (currently the maximum value of uint16_t), we will
  // JIT compile the method with the optimizing compiler.
  uint16_t baseline_hotness_count_;

  // Method this profiling info is for.
  // Not 'const' as JVMTI introduces obsolete methods that we implement by creating new ArtMethods.
  // See JitCodeCache::MoveObsoleteMethod.
  ArtMethod* method_;

  // Entry point of the corresponding ArtMethod, while the JIT code cache
  // is poking for the liveness of compiled code.
  const void* saved_entry_point_;

  // Number of instructions we are profiling in the ArtMethod.
  const uint32_t number_of_inline_caches_;

  // When the compiler inlines the method associated to this ProfilingInfo,
  // it updates this counter so that the GC does not try to clear the inline caches.
  uint16_t current_inline_uses_;

  // Whether the ArtMethod is currently being compiled. This flag
  // is implicitly guarded by the JIT code cache lock.
  // TODO: Make the JIT code cache lock global.
  bool is_method_being_compiled_;
  bool is_osr_method_being_compiled_;

  // Dynamically allocated array of size `number_of_inline_caches_`.
  InlineCache cache_[0];

  friend class jit::JitCodeCache;

  DISALLOW_COPY_AND_ASSIGN(ProfilingInfo);
};
} // namespace art
#endif // ART_RUNTIME_JIT_PROFILING_INFO_H_