/*
 * Copyright (C) 2011-2023 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "AbstractSlotVisitor.h"
#include "HandleTypes.h"
#include <wtf/Forward.h>
#include <wtf/IterationStatus.h>
#include <wtf/MonotonicTime.h>
#include <wtf/TZoneMalloc.h>

namespace JSC {

class GCThreadSharedData;
class HeapCell;
class HeapAnalyzer;
class MarkingConstraint;
class MarkingConstraintSolver;

typedef uint32_t HeapVersion;

class SlotVisitor final : public AbstractSlotVisitor {
    WTF_MAKE_NONCOPYABLE(SlotVisitor);
    WTF_MAKE_TZONE_ALLOCATED(SlotVisitor);

    using Base = AbstractSlotVisitor;

    friend class SetCurrentCellScope;
    friend class Heap;

public:
    class ReferrerContext {
    public:
        ALWAYS_INLINE ReferrerContext(AbstractSlotVisitor&, ReferrerToken) { }
        ALWAYS_INLINE ReferrerContext(AbstractSlotVisitor&, OpaqueRootTag) { }
    };

    class SuppressGCVerifierScope {
    public:
        SuppressGCVerifierScope(SlotVisitor&) { }
    };

    class DefaultMarkingViolationAssertionScope {
    public:
#if ASSERT_ENABLED
        DefaultMarkingViolationAssertionScope(SlotVisitor& visitor)
            : m_visitor(visitor)
        {
            m_wasCheckingForDefaultMarkViolation = m_visitor.m_isCheckingForDefaultMarkViolation;
            m_visitor.m_isCheckingForDefaultMarkViolation = false;
        }

        ~DefaultMarkingViolationAssertionScope()
        {
            m_visitor.m_isCheckingForDefaultMarkViolation = m_wasCheckingForDefaultMarkViolation;
        }

    private:
        SlotVisitor& m_visitor;
        bool m_wasCheckingForDefaultMarkViolation;
#else
        DefaultMarkingViolationAssertionScope(SlotVisitor&) { }
#endif
    };

    SlotVisitor(Heap&, CString codeName);
    ~SlotVisitor();

    void append(const ConservativeRoots&) final;

    template<typename T, typename Traits> void append(const WriteBarrierBase<T, Traits>&);
    template<typename T, typename Traits> void appendHidden(const WriteBarrierBase<T, Traits>&);
    void append(const WriteBarrierStructureID&);
    void appendHidden(const WriteBarrierStructureID&);
    template<typename Iterator> void append(Iterator begin, Iterator end);
    ALWAYS_INLINE void appendValues(std::span<const WriteBarrier<Unknown, RawValueTraits<Unknown>>>);
    ALWAYS_INLINE void appendValues(const WriteBarrierBase<Unknown, RawValueTraits<Unknown>>*, size_t count);
    ALWAYS_INLINE void appendValuesHidden(const WriteBarrierBase<Unknown, RawValueTraits<Unknown>>*, size_t count);

    // These don't require you to prove that you have a WriteBarrier<>. That makes sense
    // for:
    //
    // - roots.
    // - sophisticated data structures that barrier through other means (like DFG::Plan and
    //   friends).
    //
    // If you are not a root and you don't know what kind of barrier you have, then you
    // shouldn't call these methods.
    ALWAYS_INLINE void appendUnbarriered(JSValue);
    ALWAYS_INLINE void appendUnbarriered(JSValue*, size_t);
    void appendUnbarriered(JSCell*) final;

    template<typename T>
    void append(const Weak<T>& weak);

    ALWAYS_INLINE void appendHiddenUnbarriered(JSValue);
    void appendHiddenUnbarriered(JSCell*) final;
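
    // As an illustrative sketch of the intended division of labor (MyCell, m_target, and
    // m_cellBarrieredElsewhere are hypothetical names, not part of this interface): a
    // visitChildren implementation appends WriteBarrier<> members directly, and reaches
    // for appendUnbarriered() only for fields it barriers through other means.
    //
    //     template<typename Visitor>
    //     void MyCell::visitChildren(JSCell* cell, Visitor& visitor)
    //     {
    //         MyCell* thisObject = jsCast<MyCell*>(cell);
    //         Base::visitChildren(thisObject, visitor);
    //         visitor.append(thisObject->m_target); // A WriteBarrier<JSObject> member.
    //         visitor.appendUnbarriered(thisObject->m_cellBarrieredElsewhere); // A raw JSCell*.
    //     }
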
    bool isFirstVisit() const final { return m_isFirstVisit; }

    bool isMarked(const void*) const final;
    bool isMarked(MarkedBlock&, HeapCell*) const final;
    bool isMarked(PreciseAllocation&, HeapCell*) const final;

    void didStartMarking();
    void reset();
    void clearMarkStacks();

    size_t bytesVisited() const { return m_bytesVisited; }

    void donate();
    void drain(MonotonicTime timeout = MonotonicTime::infinity());
    void donateAndDrain(MonotonicTime timeout = MonotonicTime::infinity());

    enum SharedDrainMode { HelperDrain, MainDrain };
    enum class SharedDrainResult { Done, TimedOut };
    SharedDrainResult drainFromShared(SharedDrainMode, MonotonicTime timeout = MonotonicTime::infinity());

    SharedDrainResult drainInParallel(MonotonicTime timeout = MonotonicTime::infinity());
    SharedDrainResult drainInParallelPassively(MonotonicTime timeout = MonotonicTime::infinity());

    SharedDrainResult waitForTermination(MonotonicTime timeout = MonotonicTime::infinity());

// Attempts to perform an increment of draining that involves only walking `bytes` worth of data. This
// is likely to accidentally walk more or less than that. It will usually mark more than bytes. It may
// mark less than bytes if we're reaching termination or if the global worklist is empty (which may in
// rare cases happen temporarily even if we're not reaching termination).
size_t performIncrementOfDraining(size_t bytes);
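    // A sketch of how an incremental marking loop might use this (bytesPerIncrement and
    // mutatorNeedsToRun() are hypothetical, and we assume a zero return means no draining
    // work was available):
    //
    //     constexpr size_t bytesPerIncrement = 32 * 1024;
    //     while (visitor.performIncrementOfDraining(bytesPerIncrement)) {
    //         if (mutatorNeedsToRun())
    //             break;
    //     }
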
    // This informs the GC about an auxiliary allocation (a non-cell backing store, such as
    // a butterfly) of some size that we are keeping alive. If you don't do this, then the
    // space will be freed at the end of GC.
    void markAuxiliary(const void* base) final;

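    // A sketch of keeping an out-of-line backing store alive from a visitChildren
    // implementation (m_storage is a hypothetical out-of-line allocation owned by the cell):
    //
    //     if (void* storage = thisObject->m_storage)
    //         visitor.markAuxiliary(storage);
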
    void reportExtraMemoryVisited(size_t) final;
#if ENABLE(RESOURCE_USAGE)
    void reportExternalMemoryVisited(size_t) final;
#endif

    void dump(PrintStream&) const final;

    HeapVersion markingVersion() const { return m_markingVersion; }

    bool mutatorIsStopped() const final { return m_mutatorIsStopped; }

    Lock& rightToRun() WTF_RETURNS_LOCK(m_rightToRun) { return m_rightToRun; }

    void updateMutatorIsStopped(const AbstractLocker&);
    void updateMutatorIsStopped();

    bool hasAcknowledgedThatTheMutatorIsResumed() const;
    bool mutatorIsStoppedIsUpToDate() const;

    void optimizeForStoppedMutator();

    void didRace(const VisitRaceKey&) final;
    void didRace(JSCell* cell, const char* reason) { didRace(VisitRaceKey(cell, reason)); }

    void visitAsConstraint(const JSCell*) final;

    bool didReachTermination();

    void donateAll();

    NO_RETURN_DUE_TO_CRASH void addParallelConstraintTask(RefPtr<SharedTask<void(AbstractSlotVisitor&)>>) final;
    JS_EXPORT_PRIVATE void addParallelConstraintTask(RefPtr<SharedTask<void(SlotVisitor&)>>);

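    // A sketch of fanning a marking constraint out to the parallel markers (the lambda body
    // is illustrative; createSharedTask is WTF's helper for building SharedTask objects):
    //
    //     visitor.addParallelConstraintTask(createSharedTask<void(SlotVisitor&)>(
    //         [] (SlotVisitor& visitor) {
    //             // Visit this thread's shard of the constraint's output set.
    //         }));
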
private:
    friend class ParallelModeEnabler;
    friend class MarkingConstraintSolver;

    void appendJSCellOrAuxiliary(HeapCell*);

    JS_EXPORT_PRIVATE void appendSlow(JSCell*, Dependency);
    JS_EXPORT_PRIVATE void appendHiddenSlow(JSCell*, Dependency);
    void appendHiddenSlowImpl(JSCell*, Dependency);

    template<typename ContainerType>
    void setMarkedAndAppendToMarkStack(ContainerType&, JSCell*, Dependency);

    void appendToMarkStack(JSCell*);

    template<typename ContainerType>
    void appendToMarkStack(ContainerType&, JSCell*);

    void noteLiveAuxiliaryCell(HeapCell*);

    void visitChildren(const JSCell*);

    void propagateExternalMemoryVisitedIfNecessary();

    void donateKnownParallel();
    void donateKnownParallel(MarkStackArray& from, MarkStackArray& to);

    void donateAll(const AbstractLocker&);

    bool hasWork(const AbstractLocker&);
    bool didReachTermination(const AbstractLocker&);

    template<typename Func>
    IterationStatus forEachMarkStack(const Func&);

    MarkStackArray& correspondingGlobalStack(MarkStackArray&);

    HeapVersion m_markingVersion;

    size_t m_bytesVisited { 0 };
    size_t m_nonCellVisitCount { 0 }; // Used for incremental draining, ignored otherwise.
    CheckedSize m_extraMemorySize { 0 };

    HeapAnalyzer* m_heapAnalyzer { nullptr };
    JSCell* m_currentCell { nullptr };

    bool m_isFirstVisit { false };
    bool m_mutatorIsStopped { false };
    bool m_canOptimizeForStoppedMutator { false };
    bool m_isInParallelMode { false };

    Lock m_rightToRun;

    // Put padding here to mitigate false sharing between multiple SlotVisitors.
    char padding[64];

#if ASSERT_ENABLED
    bool m_isCheckingForDefaultMarkViolation { false };
#endif
};

class ParallelModeEnabler {
public:
    ParallelModeEnabler(SlotVisitor& stack)
        : m_stack(stack)
    {
        ASSERT(!m_stack.m_isInParallelMode);
        m_stack.m_isInParallelMode = true;
    }

    ~ParallelModeEnabler()
    {
        ASSERT(m_stack.m_isInParallelMode);
        m_stack.m_isInParallelMode = false;
    }

private:
    SlotVisitor& m_stack;
};
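
// A sketch of the intended use of ParallelModeEnabler (the surrounding marking loop is
// illustrative): enable parallel mode for exactly the scope in which the visitor drains
// from the shared worklist.
//
//     {
//         ParallelModeEnabler enabler(visitor);
//         visitor.drainFromShared(SlotVisitor::HelperDrain);
//     }
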
} // namespace JSC