// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/thread_controller.h"
#include <atomic>
#include <string_view>
#include "base/check.h"
#include "base/feature_list.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/notreached.h"
#include "base/strings/strcat.h"
#include "base/strings/string_util.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
namespace base::sequence_manager::internal {
namespace {
// Enable sample metadata recording in this class, if it's currently disabled.
// Note that even if `kThreadControllerSetsProfilerMetadata` is disabled, sample
// metadata may still be recorded.
BASE_FEATURE(kThreadControllerSetsProfilerMetadata,
"ThreadControllerSetsProfilerMetadata",
base::FEATURE_DISABLED_BY_DEFAULT);
// Thread-safe copy to be updated once the feature list is available. This
// defaults to true to make sure that no metadata is lost on clients that
// need to record. This leads to some over-reporting before feature list
// initialization on other clients, but that's still much better than the
// current situation, which is reporting all the time.
std::atomic<bool> g_thread_controller_sets_profiler_metadata{true};
// ThreadController interval metrics are mostly of interest for intervals that
// are not trivially short. Under a certain threshold it's unlikely that
// intervention from developers would move metrics. Log with a suffix for
// intervals under a threshold chosen via tracing data. To validate that the
// threshold makes sense and does not filter out too many samples,
// ThreadController.ActiveIntervalDuration can be used.
constexpr TimeDelta kNonTrivialActiveIntervalLength = Milliseconds(1);
constexpr TimeDelta kMediumActiveIntervalLength = Milliseconds(100);
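// Illustrative examples of the suffix scheme (thread name "CrProcessMain" is
// just an example): MakeSuffix("Any", "CrProcessMain") yields
// ".Any.CrProcessMain" and MakeSuffix("Short", "CrProcessMain") yields
// ".Short.CrProcessMain"; these are appended to the histogram names below.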
std::string MakeSuffix(std::string_view time_suffix,
std::string_view thread_name) {
return base::StrCat({".", time_suffix, ".", thread_name});
}
} // namespace
ThreadController::ThreadController(const TickClock* time_source)
: associated_thread_(AssociatedThreadId::CreateUnbound()),
time_source_(time_source) {}
ThreadController::~ThreadController() = default;
void ThreadController::SetTickClock(const TickClock* clock) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
time_source_ = clock;
}
ThreadController::RunLevelTracker::RunLevelTracker(
const ThreadController& outer)
: outer_(outer) {}
ThreadController::RunLevelTracker::~RunLevelTracker() {
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
// There shouldn't be any remaining |run_levels_| by the time this unwinds.
DCHECK_EQ(run_levels_.size(), 0u);
}
// static
void ThreadController::InitializeFeatures(
features::EmitThreadControllerProfilerMetadata emit_profiler_metadata) {
g_thread_controller_sets_profiler_metadata.store(
emit_profiler_metadata ==
features::EmitThreadControllerProfilerMetadata::kForce ||
base::FeatureList::IsEnabled(kThreadControllerSetsProfilerMetadata),
std::memory_order_relaxed);
}
bool ThreadController::RunLevelTracker::RunLevel::ShouldRecordSampleMetadata() {
return g_thread_controller_sets_profiler_metadata.load(
std::memory_order_relaxed);
}
std::string_view ThreadController::RunLevelTracker::RunLevel::GetThreadName() {
std::string_view thread_name = "Other";
if (!time_keeper_->thread_name().empty()) {
thread_name = time_keeper_->thread_name();
}
return thread_name;
}
std::string
ThreadController::RunLevelTracker::RunLevel::GetSuffixForCatchAllHistogram() {
return MakeSuffix("Any", GetThreadName());
}
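// Maps `duration` to a time infix: "Short" below 1ms, "Medium" between 1ms
// and 100ms, and an empty infix otherwise. For example (illustrative), a
// 0.4ms interval on a thread named "Other" produces ".Short.Other".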
std::string ThreadController::RunLevelTracker::RunLevel::GetSuffixForHistogram(
TimeDelta duration) {
std::string_view time_suffix;
if (duration < kNonTrivialActiveIntervalLength) {
time_suffix = "Short";
} else if (duration < kMediumActiveIntervalLength) {
time_suffix = "Medium";
}
return MakeSuffix(time_suffix, GetThreadName());
}
void ThreadController::EnableMessagePumpTimeKeeperMetrics(
const char* thread_name,
bool wall_time_based_metrics_enabled_for_testing) {
// MessagePump runs too fast; a low-res clock would result in noisy metrics.
if (!base::TimeTicks::IsHighResolution()) {
return;
}
run_level_tracker_.EnableTimeKeeperMetrics(
thread_name, wall_time_based_metrics_enabled_for_testing);
}
void ThreadController::RunLevelTracker::EnableTimeKeeperMetrics(
const char* thread_name,
bool wall_time_based_metrics_enabled_for_testing) {
time_keeper_.EnableRecording(thread_name,
wall_time_based_metrics_enabled_for_testing);
}
void ThreadController::RunLevelTracker::TimeKeeper::EnableRecording(
const char* thread_name,
bool wall_time_based_metrics_enabled_for_testing) {
DCHECK(!histogram_);
thread_name_ = thread_name;
wall_time_based_metrics_enabled_for_testing_ =
wall_time_based_metrics_enabled_for_testing;
histogram_ = LinearHistogram::FactoryGet(
JoinString({"Scheduling.MessagePumpTimeKeeper", thread_name}, "."), 1,
Phase::kLastPhase, Phase::kLastPhase + 1,
base::HistogramBase::kUmaTargetedHistogramFlag);
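// For example (with a hypothetical thread name "Main"), this creates the
// "Scheduling.MessagePumpTimeKeeper.Main" linear histogram with roughly one
// bucket per Phase value.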
perfetto_track_.emplace(
reinterpret_cast<uint64_t>(this),
// TODO(crbug.com/42050015): Replace with ThreadTrack::Current() after SDK
// migration.
// In the non-SDK version, ThreadTrack::Current() returns a different
// track id on some platforms (for example Mac OS), which results in
// async tracks not being associated with their thread.
perfetto::ThreadTrack::ForThread(
base::PlatformThread::CurrentId().raw()));
// TODO(crbug.com/42050015): Use Perfetto library to name this Track.
// auto desc = perfetto_track_->Serialize();
// desc.set_name(JoinString({"MessagePumpPhases", thread_name}, " "));
// perfetto::internal::TrackEventDataSource::SetTrackDescriptor(
// *perfetto_track_, desc);
}
void ThreadController::RunLevelTracker::OnRunLoopStarted(State initial_state,
LazyNow& lazy_now) {
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
const bool is_nested = !run_levels_.empty();
run_levels_.emplace(initial_state, is_nested, time_keeper_, lazy_now);
// In unit tests, RunLoop::Run() acts as the initial wake-up.
if (!is_nested && initial_state != kIdle) {
time_keeper_.RecordWakeUp(lazy_now);
}
}
void ThreadController::RunLevelTracker::OnRunLoopEnded() {
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
// Normally this will occur while kIdle or kInBetweenWorkItems but it can also
// occur while kRunningWorkItem in rare situations where the owning
// ThreadController is deleted from within a task. Ref.
// SequenceManagerWithTaskRunnerTest.DeleteSequenceManagerInsideATask. Thus we
// can't assert anything about the current state other than that it must be
// exiting an existing RunLevel.
DCHECK(!run_levels_.empty());
LazyNow exit_lazy_now(outer_->time_source_);
run_levels_.top().set_exit_lazy_now(&exit_lazy_now);
run_levels_.pop();
}
void ThreadController::RunLevelTracker::OnWorkStarted(LazyNow& lazy_now) {
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
// Ignore work outside the main run loop.
// The only practical case where this would happen is if a native loop is spun
// outside the main run loop (e.g. a system dialog during startup). We cannot
// support this because we are not guaranteed to be able to observe its exit
// (like we would inside an application task, which is at least guaranteed to
// itself notify us when it ends). Some ThreadControllerWithMessagePumpTest
// cases also drive ThreadController outside a RunLoop and hit this.
if (run_levels_.empty()) {
return;
}
// Already running a work item? => #work-in-work-implies-nested
if (run_levels_.top().state() == kRunningWorkItem) {
run_levels_.emplace(kRunningWorkItem, /*nested=*/true, time_keeper_,
lazy_now);
} else {
if (run_levels_.top().state() == kIdle) {
time_keeper_.RecordWakeUp(lazy_now);
} else {
time_keeper_.RecordEndOfPhase(kPumpOverhead, lazy_now);
}
// Going from kIdle or kInBetweenWorkItems to kRunningWorkItem.
run_levels_.top().UpdateState(kRunningWorkItem, lazy_now);
}
}
void ThreadController::RunLevelTracker::OnApplicationTaskSelected(
TimeTicks queue_time,
LazyNow& lazy_now) {
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
// As in OnWorkStarted(). Early native loops can result in
// ThreadController::DoWork because the lack of a top-level RunLoop means
// `task_execution_allowed` wasn't consumed.
if (run_levels_.empty()) {
return;
}
// OnWorkStarted() is expected to precede OnApplicationTaskSelected().
DCHECK_EQ(run_levels_.top().state(), kRunningWorkItem);
time_keeper_.OnApplicationTaskSelected(queue_time, lazy_now);
}
void ThreadController::RunLevelTracker::OnWorkEnded(LazyNow& lazy_now,
int run_level_depth) {
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
if (run_levels_.empty()) {
return;
}
// #done-work-at-lower-runlevel-implies-done-nested
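// If this work item was started at a lower run-level than the current one,
// the nested RunLevel implicitly created in OnWorkStarted()
// (#work-in-work-implies-nested) is popped here.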
if (run_level_depth != static_cast<int>(num_run_levels())) {
DCHECK_EQ(run_level_depth + 1, static_cast<int>(num_run_levels()));
run_levels_.top().set_exit_lazy_now(&lazy_now);
run_levels_.pop();
} else {
time_keeper_.RecordEndOfPhase(kWorkItem, lazy_now);
}
// Whether we exited a nested run-level or not: the current run-level is now
// transitioning from kRunningWorkItem to kInBetweenWorkItems.
DCHECK_EQ(run_levels_.top().state(), kRunningWorkItem);
run_levels_.top().UpdateState(kInBetweenWorkItems, lazy_now);
}
void ThreadController::RunLevelTracker::OnIdle(LazyNow& lazy_now) {
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
if (run_levels_.empty()) {
return;
}
DCHECK_NE(run_levels_.top().state(), kRunningWorkItem);
time_keeper_.RecordEndOfPhase(kIdleWork, lazy_now);
run_levels_.top().UpdateState(kIdle, lazy_now);
}
void ThreadController::RunLevelTracker::RecordScheduleWork() {
// Matching TerminatingFlow is found at
// ThreadController::RunLevelTracker::RunLevel::UpdateState
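// (which emits it via TimeKeeper::MaybeEmitIncomingWakeupFlow(), keyed on
// this RunLevelTracker's address, matching Flow::FromPointer(this) below).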
if (outer_->associated_thread_->IsBoundToCurrentThread()) {
TRACE_EVENT_INSTANT("wakeup.flow", "ScheduleWorkToSelf");
} else {
TRACE_EVENT_INSTANT("wakeup.flow", "ScheduleWork",
perfetto::Flow::FromPointer(this));
}
}
// static
void ThreadController::RunLevelTracker::SetTraceObserverForTesting(
TraceObserverForTesting* trace_observer_for_testing) {
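// The DCHECK below ensures this only installs an observer when none is set,
// and only clears one when it is set (no silent overwrite).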
DCHECK_NE(!!trace_observer_for_testing_, !!trace_observer_for_testing);
trace_observer_for_testing_ = trace_observer_for_testing;
}
// static
ThreadController::RunLevelTracker::TraceObserverForTesting*
ThreadController::RunLevelTracker::trace_observer_for_testing_ = nullptr;
ThreadController::RunLevelTracker::RunLevel::RunLevel(State initial_state,
bool is_nested,
TimeKeeper& time_keeper,
LazyNow& lazy_now)
: is_nested_(is_nested),
time_keeper_(time_keeper),
thread_controller_sample_metadata_("ThreadController active",
base::SampleMetadataScope::kThread) {
if (is_nested_) {
// Stop the current kWorkItem phase now; it will resume after the kNested
// phase ends.
time_keeper_->RecordEndOfPhase(kWorkItemSuspendedOnNested, lazy_now);
}
UpdateState(initial_state, lazy_now);
}
ThreadController::RunLevelTracker::RunLevel::~RunLevel() {
if (!was_moved_) {
DCHECK(exit_lazy_now_);
UpdateState(kIdle, *exit_lazy_now_);
if (is_nested_) {
// Attribute the entire time in this nested RunLevel to kNested phase. If
// this wasn't the last nested RunLevel, this is ignored and will be
// applied on the final pop().
time_keeper_->RecordEndOfPhase(kNested, *exit_lazy_now_);
if (ShouldRecordSampleMetadata()) {
// Intentionally ordered after UpdateState(kIdle), reinstantiates
// thread_controller_sample_metadata_ when yielding back to a parent
// RunLevel (which is active by definition as it is currently running
// this one).
thread_controller_sample_metadata_.Set(
static_cast<int64_t>(++thread_controller_active_id_));
}
}
}
}
ThreadController::RunLevelTracker::RunLevel::RunLevel(RunLevel&& other) =
default;
void ThreadController::RunLevelTracker::RunLevel::LogPercentageMetric(
const char* name,
int percentage) {
UmaHistogramPercentage(base::StrCat({name, ".", GetThreadName()}),
percentage);
}
void ThreadController::RunLevelTracker::RunLevel::LogPercentageMetric(
const char* name,
int percentage,
base::TimeDelta interval_duration) {
UmaHistogramPercentage(base::StrCat({name, GetSuffixForCatchAllHistogram()}),
percentage);
UmaHistogramPercentage(
base::StrCat({name, GetSuffixForHistogram(interval_duration)}),
percentage);
}
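// Logs `value` to the ".Any" catch-all histogram and, for intervals under
// kMediumActiveIntervalLength, to a higher-resolution suffixed histogram.
// For example (illustrative), a 0.3ms value in a 0.5ms interval is also
// logged with microsecond granularity under the ".Short" suffix.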
void ThreadController::RunLevelTracker::RunLevel::LogIntervalMetric(
const char* name,
base::TimeDelta value,
base::TimeDelta interval_duration) {
// Log towards "Any" time suffix first.
UmaHistogramTimes(base::StrCat({name, GetSuffixForCatchAllHistogram()}),
value);
if (interval_duration < kNonTrivialActiveIntervalLength) {
UmaHistogramCustomMicrosecondsTimes(
base::StrCat({name, GetSuffixForHistogram(interval_duration)}), value,
base::Microseconds(1), kNonTrivialActiveIntervalLength, 100);
} else if (interval_duration < kMediumActiveIntervalLength) {
UmaHistogramCustomTimes(
base::StrCat({name, GetSuffixForHistogram(interval_duration)}), value,
kNonTrivialActiveIntervalLength, kMediumActiveIntervalLength, 100);
}
}
void ThreadController::RunLevelTracker::RunLevel::LogOnActiveMetrics(
LazyNow& lazy_now) {
CHECK(last_active_start_.is_null());
CHECK(last_active_threadtick_start_.is_null());
if (!last_active_end_.is_null()) {
const base::TimeDelta idle_time = lazy_now.Now() - last_active_end_;
LogIntervalMetric("Scheduling.ThreadController.IdleDuration", idle_time,
idle_time);
last_active_end_ = base::TimeTicks();
accumulated_idle_time_ += idle_time;
}
// Taking thread ticks can be expensive. Make sure to do it rarely enough to
// not have a discernible impact on performance.
static const bool thread_ticks_supported = ThreadTicks::IsSupported();
// Disable subsampling to support wall-time based metrics. Only supported for
// testing purposes. By default, the subsampling probability is 0.1%.
const double probability =
time_keeper_->wall_time_based_metrics_enabled_for_testing() ? 1.0 : 0.001;
if (thread_ticks_supported &&
metrics_sub_sampler_.ShouldSample(probability)) {
last_active_start_ = lazy_now.Now();
last_active_threadtick_start_ = ThreadTicks::Now();
}
}
void ThreadController::RunLevelTracker::RunLevel::LogOnIdleMetrics(
LazyNow& lazy_now) {
if (!last_active_start_.is_null()) {
const base::TimeDelta elapsed_ticks = lazy_now.Now() - last_active_start_;
base::TimeDelta elapsed_thread_ticks =
ThreadTicks::Now() - last_active_threadtick_start_;
// Clamp to the elapsed wall time in case clock imprecision makes it look
// like there's impossibly more ThreadTicks than TimeTicks elapsed.
elapsed_thread_ticks = std::min(elapsed_thread_ticks, elapsed_ticks);
LogIntervalMetric("Scheduling.ThreadController.ActiveIntervalDuration",
elapsed_ticks, elapsed_ticks);
LogIntervalMetric(
"Scheduling.ThreadController.ActiveIntervalOffCpuDuration",
elapsed_ticks - elapsed_thread_ticks, elapsed_ticks);
LogIntervalMetric("Scheduling.ThreadController.ActiveIntervalOnCpuDuration",
elapsed_thread_ticks, elapsed_ticks);
// If the interval was shorter than a tick, 100% on-cpu time is assumed.
int active_interval_cpu_percentage =
elapsed_ticks.is_zero()
? 100
: static_cast<int>(
(elapsed_thread_ticks * 100).IntDiv(elapsed_ticks));
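// For example (illustrative), 2ms on-CPU over an 8ms interval yields 25.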
LogPercentageMetric(
"Scheduling.ThreadController.ActiveIntervalOnCpuPercentage",
active_interval_cpu_percentage, elapsed_ticks);
if (time_keeper_->wall_time_based_metrics_enabled_for_testing()) {
accumulated_active_time_ += elapsed_ticks;
accumulated_active_on_cpu_time_ += elapsed_thread_ticks;
accumulated_active_off_cpu_time_ +=
(elapsed_ticks - elapsed_thread_ticks);
// Accumulated wall-time since last wall-time based metric was stored.
const base::TimeDelta accumulated_wall_time =
accumulated_active_time_ + accumulated_idle_time_;
// Add wall-time based ratio metrics (in percent) when the total sum of
// active and idle times is larger than one second.
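// For example (illustrative), 600ms active out of 1.2s of accumulated wall
// time reports 50% for ActiveVsWallTimePercentage.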
if (accumulated_wall_time > Seconds(1)) {
const int active_vs_wall_time_percentage = checked_cast<int>(
(accumulated_active_time_ * 100).IntDiv(accumulated_wall_time));
LogPercentageMetric(
"Scheduling.ThreadController.ActiveVsWallTimePercentage",
active_vs_wall_time_percentage);
const int active_on_cpu_vs_wall_time_percentage =
checked_cast<int>((accumulated_active_on_cpu_time_ * 100)
.IntDiv(accumulated_wall_time));
LogPercentageMetric(
"Scheduling.ThreadController.ActiveOnCpuVsWallTimePercentage",
active_on_cpu_vs_wall_time_percentage);
const int active_off_cpu_vs_wall_time_percentage =
checked_cast<int>((accumulated_active_off_cpu_time_ * 100)
.IntDiv(accumulated_wall_time));
LogPercentageMetric(
"Scheduling.ThreadController.ActiveOffCpuVsWallTimePercentage",
active_off_cpu_vs_wall_time_percentage);
accumulated_idle_time_ = base::TimeDelta();
accumulated_active_time_ = base::TimeDelta();
accumulated_active_on_cpu_time_ = base::TimeDelta();
accumulated_active_off_cpu_time_ = base::TimeDelta();
}
}
// Reset timings.
last_active_start_ = base::TimeTicks();
last_active_threadtick_start_ = base::ThreadTicks();
last_active_end_ = lazy_now.Now();
}
}
void ThreadController::RunLevelTracker::RunLevel::UpdateState(
State new_state,
LazyNow& lazy_now) {
// The only state that can be redeclared is idle; anything else should be a
// transition.
DCHECK(state_ != new_state || new_state == kIdle)
<< state_ << "," << new_state;
const bool was_active = state_ != kIdle;
const bool is_active = new_state != kIdle;
state_ = new_state;
if (was_active == is_active) {
return;
}
// Change of state.
if (is_active) {
LogOnActiveMetrics(lazy_now);
// Flow emission is found at
// ThreadController::RunLevelTracker::RecordScheduleWork.
TRACE_EVENT_BEGIN("base", "ThreadController active", lazy_now.Now(),
[&](perfetto::EventContext& ctx) {
time_keeper_->MaybeEmitIncomingWakeupFlow(ctx);
});
if (ShouldRecordSampleMetadata()) {
// Overriding the annotation from the previous RunLevel is intentional.
// Only the top RunLevel is ever updated, which holds the relevant state.
thread_controller_sample_metadata_.Set(
static_cast<int64_t>(++thread_controller_active_id_));
}
} else {
if (ShouldRecordSampleMetadata()) {
thread_controller_sample_metadata_.Remove();
}
LogOnIdleMetrics(lazy_now);
TRACE_EVENT_END("base", lazy_now.Now());
}
if (trace_observer_for_testing_) {
if (is_active) {
trace_observer_for_testing_->OnThreadControllerActiveBegin();
} else {
trace_observer_for_testing_->OnThreadControllerActiveEnd();
}
}
}
ThreadController::RunLevelTracker::TimeKeeper::TimeKeeper(
const RunLevelTracker& outer)
: outer_(outer) {}
void ThreadController::RunLevelTracker::TimeKeeper::RecordWakeUp(
LazyNow& lazy_now) {
if (!ShouldRecordNow(ShouldRecordReqs::kOnWakeUp)) {
return;
}
// Phase::kScheduled will be accounted against `last_wakeup_` in
// OnApplicationTaskSelected(), if there's an application task in this work
// cycle.
last_wakeup_ = lazy_now.Now();
// Account the next phase starting from now.
last_phase_end_ = last_wakeup_;
// Emit the END of the kScheduled phase right away; this avoids incorrect
// ordering when kScheduled is later emitted and its END matches the BEGIN of
// an already emitted phase (tracing's sort is stable and would keep the late
// END for kScheduled after the earlier BEGIN of the next phase):
// crbug.com/1333460. As we just woke up, there are no events active at this
// point (we don't record MessagePumpPhases while nested). In the absence of
// a kScheduled phase, this unmatched END will be ignored.
TRACE_EVENT_END(TRACE_DISABLED_BY_DEFAULT("base"), *perfetto_track_,
last_wakeup_);
}
void ThreadController::RunLevelTracker::TimeKeeper::OnApplicationTaskSelected(
TimeTicks queue_time,
LazyNow& lazy_now) {
if (!ShouldRecordNow()) {
return;
}
if (!last_wakeup_.is_null()) {
// `queue_time` can be null on threads that did not
// `SetAddQueueTimeToTasks(true)`. `queue_time` can also be ahead of
// `last_wakeup_` in racy cases where the first chrome task is enqueued
// while the pump was already awake (e.g. for native work). Consider the
// kScheduled phase nonexistent in that case.
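// Illustrative timeline when `queue_time` does precede `last_wakeup_`: the
// [queue_time .. last_wakeup_] span is attributed to the kScheduled phase
// below; the remainder of the work cycle is attributed via
// RecordEndOfPhase().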
if (!queue_time.is_null() && queue_time < last_wakeup_) {
if (!last_sleep_.is_null() && queue_time < last_sleep_) {
// Avoid overlapping kScheduled and kIdleWork phases when work is
// scheduled while going to sleep.
queue_time = last_sleep_;
}
RecordTimeInPhase(kScheduled, queue_time, last_wakeup_);
// Match the END event which was already emitted by RecordWakeUp().
TRACE_EVENT_BEGIN(TRACE_DISABLED_BY_DEFAULT("base"),
perfetto::StaticString(PhaseToEventName(kScheduled)),
*perfetto_track_, queue_time);
}
last_wakeup_ = TimeTicks();
}
RecordEndOfPhase(kSelectingApplicationTask, lazy_now);
current_work_item_is_native_ = false;
}
void ThreadController::RunLevelTracker::TimeKeeper::RecordEndOfPhase(
Phase phase,
LazyNow& lazy_now) {
if (!ShouldRecordNow(phase == kNested ? ShouldRecordReqs::kOnEndNested
: ShouldRecordReqs::kRegular)) {
return;
}
if (phase == kWorkItem && !current_work_item_is_native_) {
phase = kApplicationTask;
// Back to assuming future work is native until OnApplicationTaskSelected()
// is invoked.
current_work_item_is_native_ = true;
} else if (phase == kWorkItemSuspendedOnNested) {
// kWorkItemSuspendedOnNested temporarily marks the end of time allocated to
// the current work item. It is reported as a separate phase to skip the
// above `current_work_item_is_native_ = true` which assumes the work item
// is truly complete.
phase = current_work_item_is_native_ ? kNativeWork : kApplicationTask;
}
const TimeTicks phase_end = lazy_now.Now();
RecordTimeInPhase(phase, last_phase_end_, phase_end);
// Ugly hack to name our `perfetto_track_`.
bool is_tracing_enabled = false;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("base"),
&is_tracing_enabled);
if (is_tracing_enabled) {
if (!was_tracing_enabled_) {
// The first event name on the track hackily names the track...
// TODO(crbug.com/42050015): Use the Perfetto library to properly name
// this Track in EnableRecording above.
TRACE_EVENT_INSTANT(TRACE_DISABLED_BY_DEFAULT("base"),
"MessagePumpPhases", *perfetto_track_,
last_phase_end_ - Seconds(1));
}
const char* event_name = PhaseToEventName(phase);
TRACE_EVENT_BEGIN(TRACE_DISABLED_BY_DEFAULT("base"),
perfetto::StaticString(event_name), *perfetto_track_,
last_phase_end_);
TRACE_EVENT_END(TRACE_DISABLED_BY_DEFAULT("base"), *perfetto_track_,
phase_end);
}
was_tracing_enabled_ = is_tracing_enabled;
last_phase_end_ = phase_end;
}
void ThreadController::RunLevelTracker::TimeKeeper::MaybeEmitIncomingWakeupFlow(
perfetto::EventContext& ctx) {
static const uint8_t* flow_enabled =
TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("wakeup.flow");
if (!*flow_enabled) {
return;
}
perfetto::TerminatingFlow::ProcessScoped(
reinterpret_cast<uint64_t>(&(outer_.get())))(ctx);
}
bool ThreadController::RunLevelTracker::TimeKeeper::ShouldRecordNow(
ShouldRecordReqs reqs) {
DCHECK_CALLED_ON_VALID_THREAD(
outer_->outer_->associated_thread_->thread_checker);
// Recording is technically enabled once `histogram_` is set; however,
// `last_phase_end_` will be null until the next RecordWakeUp in the work
// cycle in which `histogram_` is enabled. Only start recording from there.
// Ignore any nested phases. `reqs` may indicate exceptions to this.
//
// TODO(crbug.com/40226913): In a follow-up, we could probably always be
// tracking the phases of the pump and merely ignore the reporting if
// `histogram_` isn't set.
switch (reqs) {
case ShouldRecordReqs::kRegular:
return histogram_ && !last_phase_end_.is_null() &&
outer_->run_levels_.size() == 1;
case ShouldRecordReqs::kOnWakeUp:
return histogram_ && outer_->run_levels_.size() == 1;
case ShouldRecordReqs::kOnEndNested:
return histogram_ && !last_phase_end_.is_null() &&
outer_->run_levels_.size() <= 2;
}
}
void ThreadController::RunLevelTracker::TimeKeeper::RecordTimeInPhase(
Phase phase,
TimeTicks phase_begin,
TimeTicks phase_end) {
DCHECK(ShouldRecordNow(phase == kNested ? ShouldRecordReqs::kOnEndNested
: ShouldRecordReqs::kRegular));
// Report a phase only when at least 100ms has been attributed to it.
static constexpr auto kReportInterval = Milliseconds(100);
// Above 30s in a single phase, assume suspend-resume and ignore the report.
static constexpr auto kSkippedDelta = Seconds(30);
const auto delta = phase_end - phase_begin;
DCHECK(!delta.is_negative()) << delta;
if (delta >= kSkippedDelta) {
return;
}
deltas_[phase] += delta;
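// Illustrative example: once a phase has accumulated 103.4ms, AddCount()
// records 103 (ms) and the remaining 0.4ms carries over to a later report.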
if (deltas_[phase] >= kReportInterval) {
const int count = deltas_[phase] / Milliseconds(1);
histogram_->AddCount(phase, count);
deltas_[phase] -= Milliseconds(count);
}
if (phase == kIdleWork) {
last_sleep_ = phase_end;
}
if (outer_->trace_observer_for_testing_) {
outer_->trace_observer_for_testing_->OnPhaseRecorded(phase);
}
}
// static
const char* ThreadController::RunLevelTracker::TimeKeeper::PhaseToEventName(
Phase phase) {
switch (phase) {
case kScheduled:
return "Scheduled";
case kPumpOverhead:
return "PumpOverhead";
case kNativeWork:
return "NativeTask";
case kSelectingApplicationTask:
return "SelectingApplicationTask";
case kApplicationTask:
return "ApplicationTask";
case kIdleWork:
return "IdleWork";
case kNested:
return "Nested";
case kWorkItemSuspendedOnNested:
// kWorkItemSuspendedOnNested should be transformed into kNativeWork or
// kApplicationTask before this point.
NOTREACHED();
}
}
} // namespace base::sequence_manager::internal