File: allocation_record.h

package info (click to toggle)
android-platform-art 11.0.0%2Br48-5
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 78,932 kB
  • sloc: cpp: 459,858; java: 163,268; asm: 22,644; python: 9,815; sh: 6,330; ansic: 4,117; xml: 2,855; perl: 77; makefile: 73
file content (312 lines) | stat: -rw-r--r-- 9,664 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATION_RECORD_H_
#define ART_RUNTIME_GC_ALLOCATION_RECORD_H_

#include <algorithm>
#include <cstdint>
#include <functional>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"

namespace art {

class ArtMethod;
class IsMarkedVisitor;
class Thread;

namespace mirror {
class Class;
class Object;
}  // namespace mirror

namespace gc {

// One frame of an allocation stack trace: the executing method plus the dex
// pc within it. A plain value type so frames can be stored, compared, and
// hashed by the allocation tracker below.
class AllocRecordStackTraceElement {
 public:
  // Resolves this frame's dex pc to a source line number.
  int32_t ComputeLineNumber() const REQUIRES_SHARED(Locks::mutator_lock_);

  AllocRecordStackTraceElement() = default;
  AllocRecordStackTraceElement(ArtMethod* method, uint32_t dex_pc)
      : method_(method), dex_pc_(dex_pc) {}

  // Frames are equal exactly when both the method and the dex pc match.
  bool operator==(const AllocRecordStackTraceElement& other) const {
    return method_ == other.method_ && dex_pc_ == other.dex_pc_;
  }

  ArtMethod* GetMethod() const { return method_; }

  void SetMethod(ArtMethod* m) { method_ = m; }

  uint32_t GetDexPc() const { return dex_pc_; }

  void SetDexPc(uint32_t pc) { dex_pc_ = pc; }

 private:
  ArtMethod* method_ = nullptr;
  uint32_t dex_pc_ = 0;
};

// The allocating thread's id plus the ordered list of frames captured at
// allocation time. Comparable (and hashed via HashAllocRecordTypes) so that
// identical traces can be deduplicated.
class AllocRecordStackTrace {
 public:
  static constexpr size_t kHashMultiplier = 17;

  AllocRecordStackTrace() = default;

  // The previous hand-written copy/move constructors were exactly memberwise,
  // so default them. The defaulted move constructor is additionally deduced
  // noexcept (pid_t and std::vector moves cannot throw), which lets containers
  // move rather than copy traces on reallocation. Declaring these still
  // suppresses the implicit assignment operators, as before.
  AllocRecordStackTrace(AllocRecordStackTrace&&) = default;
  AllocRecordStackTrace(const AllocRecordStackTrace&) = default;

  pid_t GetTid() const {
    return tid_;
  }

  void SetTid(pid_t t) {
    tid_ = t;
  }

  // Number of frames recorded.
  size_t GetDepth() const {
    return stack_.size();
  }

  const AllocRecordStackTraceElement& GetStackElement(size_t index) const {
    DCHECK_LT(index, GetDepth());
    return stack_[index];
  }

  void AddStackElement(const AllocRecordStackTraceElement& element) {
    stack_.push_back(element);
  }

  // Overwrites an existing frame in place; index must be within GetDepth().
  void SetStackElementAt(size_t index, ArtMethod* m, uint32_t dex_pc) {
    DCHECK_LT(index, stack_.size());
    stack_[index].SetMethod(m);
    stack_[index].SetDexPc(dex_pc);
  }

  // Traces are equal when the thread id and every frame match.
  bool operator==(const AllocRecordStackTrace& other) const {
    if (this == &other) return true;
    return tid_ == other.tid_ && stack_ == other.stack_;
  }

 private:
  pid_t tid_ = 0;
  std::vector<AllocRecordStackTraceElement> stack_;
};

// Hash functors for stack trace elements and whole stack traces. Fields are
// combined with the classic multiply-and-add scheme using kHashMultiplier.
struct HashAllocRecordTypes {
  // Hash of one frame: method pointer combined with dex pc.
  size_t operator()(const AllocRecordStackTraceElement& r) const {
    const size_t method_hash = std::hash<void*>()(reinterpret_cast<void*>(r.GetMethod()));
    return method_hash * AllocRecordStackTrace::kHashMultiplier +
        std::hash<uint32_t>()(r.GetDexPc());
  }

  // Hash of a whole trace: seeded from tid and depth, then folds in each frame.
  size_t operator()(const AllocRecordStackTrace& r) const {
    const size_t depth = r.GetDepth();
    size_t hash = r.GetTid() * AllocRecordStackTrace::kHashMultiplier + depth;
    for (size_t i = 0; i != depth; ++i) {
      hash = hash * AllocRecordStackTrace::kHashMultiplier + (*this)(r.GetStackElement(i));
    }
    return hash;
  }
};

// Adapts HashAllocRecordTypes to pointer keys; a null pointer hashes to 0.
template <typename T> struct HashAllocRecordTypesPtr {
  size_t operator()(const T* r) const {
    return (r != nullptr) ? HashAllocRecordTypes()(*r) : 0;
  }
};

// Equality for pointer keys: equal when both are the same pointer (including
// both null), or when both are non-null and the pointed-to values compare
// equal.
template <typename T> struct EqAllocRecordTypesPtr {
  bool operator()(const T* r1, const T* r2) const {
    return r1 == r2 || (r1 != nullptr && r2 != nullptr && *r1 == *r2);
  }
};

// One recorded allocation: the byte count, the allocated object's class, and
// the stack trace (with thread id) captured at allocation time.
class AllocRecord {
 public:
  // All instances of AllocRecord should be managed by an instance of AllocRecordObjectMap.
  AllocRecord(size_t count, mirror::Class* klass, AllocRecordStackTrace&& trace)
      : byte_count_(count), klass_(klass), trace_(std::move(trace)) {}

  // Size of the allocation in bytes.
  size_t ByteCount() const {
    return byte_count_;
  }

  // Thread id of the allocating thread, taken from the captured trace.
  pid_t GetTid() const {
    return trace_.GetTid();
  }

  // Number of frames in the captured trace.
  size_t GetDepth() const {
    return trace_.GetDepth();
  }

  const AllocRecordStackTrace* GetStackTrace() const {
    return &trace_;
  }

  const AllocRecordStackTraceElement& StackElement(size_t index) const {
    return trace_.GetStackElement(index);
  }

  mirror::Class* GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return klass_.Read();
  }

  const char* GetClassDescriptor(std::string* storage) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Class>& GetClassGcRoot() REQUIRES_SHARED(Locks::mutator_lock_) {
    return klass_;
  }

 private:
  const size_t byte_count_;
  // The klass_ could be a strong or weak root for GC
  GcRoot<mirror::Class> klass_;
  // TODO: Share between alloc records with identical stack traces.
  AllocRecordStackTrace trace_;
};

class AllocRecordObjectMap {
 public:
  static constexpr size_t kDefaultNumAllocRecords = 512 * 1024;
  static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
  static constexpr size_t kDefaultAllocStackDepth = 16;
  static constexpr size_t kMaxSupportedStackDepth = 128;

  // GcRoot<mirror::Object> pointers in the list are weak roots, and the last recent_record_max_
  // number of AllocRecord::klass_ pointers are strong roots (and the rest of klass_ pointers are
  // weak roots). The last recent_record_max_ number of pairs in the list are always kept for DDMS's
  // recent allocation tracking, but GcRoot<mirror::Object> pointers in these pairs can become null.
  // Both types of pointers need read barriers, do not directly access them.
  using EntryPair = std::pair<GcRoot<mirror::Object>, AllocRecord>;
  typedef std::list<EntryPair> EntryList;

  // Caller needs to check that it is enabled before calling since we read the stack trace before
  // checking the enabled boolean.
  void RecordAllocation(Thread* self,
                        ObjPtr<mirror::Object>* obj,
                        size_t byte_count)
      REQUIRES(!Locks::alloc_tracker_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);

  AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_);
  ~AllocRecordObjectMap();

  void Put(mirror::Object* obj, AllocRecord&& record)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    if (entries_.size() == alloc_record_max_) {
      entries_.pop_front();
    }
    entries_.push_back(EntryPair(GcRoot<mirror::Object>(obj), std::move(record)));
  }

  size_t Size() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
    return entries_.size();
  }

  size_t GetRecentAllocationSize() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
    CHECK_LE(recent_record_max_, alloc_record_max_);
    size_t sz = entries_.size();
    return std::min(recent_record_max_, sz);
  }

  void VisitRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  // Allocation tracking could be enabled by user in between DisallowNewAllocationRecords() and
  // AllowNewAllocationRecords(), in which case new allocation records can be added although they
  // should be disallowed. However, this is GC-safe because new objects are not processed in this GC
  // cycle. The only downside of not handling this case is that such new allocation records can be
  // swept from the list. But missing the first few records is acceptable for using the button to
  // enable allocation tracking.
  void DisallowNewAllocationRecords()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void AllowNewAllocationRecords()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void BroadcastForNewAllocationRecords()
      REQUIRES(Locks::alloc_tracker_lock_);

  // TODO: Is there a better way to hide the entries_'s type?
  EntryList::iterator Begin()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.begin();
  }

  EntryList::iterator End()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.end();
  }

  EntryList::reverse_iterator RBegin()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rbegin();
  }

  EntryList::reverse_iterator REnd()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rend();
  }

  void Clear() REQUIRES(Locks::alloc_tracker_lock_);

 private:
  size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumAllocRecords;
  size_t recent_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumRecentRecords;
  size_t max_stack_depth_ = kDefaultAllocStackDepth;
  bool allow_new_record_ GUARDED_BY(Locks::alloc_tracker_lock_) = true;
  ConditionVariable new_record_condition_ GUARDED_BY(Locks::alloc_tracker_lock_);
  // see the comment in typedef of EntryList
  EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_);

  void SetMaxStackDepth(size_t max_stack_depth) REQUIRES(Locks::alloc_tracker_lock_);
};

}  // namespace gc
}  // namespace art
#endif  // ART_RUNTIME_GC_ALLOCATION_RECORD_H_