// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/omnibox/browser/autocomplete_scoring_model_service.h"
#include <optional>
#include <utility>
#include "base/functional/callback.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool.h"
#include "base/trace_event/common/trace_event_common.h"
#include "components/omnibox/browser/autocomplete_scoring_model_executor.h"
#include "components/omnibox/browser/autocomplete_scoring_model_handler.h"
#include "components/omnibox/browser/omnibox_field_trial.h"
#include "components/optimization_guide/core/delivery/optimization_guide_model_provider.h"
#include "components/optimization_guide/proto/autocomplete_scoring_model_metadata.pb.h"
#include "components/optimization_guide/proto/models.pb.h"

namespace {

const char kAutocompleteScoringModelMetadataTypeUrl[] =
    "type.googleapis.com/"
    "google.internal.chrome.optimizationguide.v1."
    "AutocompleteScoringModelMetadata";

// The current version of the autocomplete scoring model that this client
// supports. Increment this whenever the client code is updated to emit new
// scoring signals beyond those currently supported for ML scoring.
constexpr int32_t kAutocompleteScoringModelVersion = 2;

void LogMLScoreCacheHit(bool cache_hit) {
  base::UmaHistogramBoolean(
      "Omnibox.URLScoringModelExecuted.MLScoreCache.CacheHit", cache_hit);
}

}  // namespace
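
// Construction sketch (illustrative, not the prescribed ownership model): an
// embedder holding an optimization_guide::OptimizationGuideModelProvider* can
// create the service directly, e.g.
//   auto service =
//       std::make_unique<AutocompleteScoringModelService>(model_provider);
// In practice this is typically done by a profile-keyed factory; passing a
// null provider (as tests do) leaves the URL scoring model handler unset.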
AutocompleteScoringModelService::AutocompleteScoringModelService(
    optimization_guide::OptimizationGuideModelProvider* model_provider)
    : score_cache_(OmniboxFieldTrial::GetMLConfig().max_ml_score_cache_size) {
  // `model_provider` may be null for tests.
  if (OmniboxFieldTrial::IsUrlScoringModelEnabled() && model_provider) {
    model_executor_task_runner_ =
        base::SequencedTaskRunner::GetCurrentDefault();

    optimization_guide::proto::Any any_metadata;
    any_metadata.set_type_url(kAutocompleteScoringModelMetadataTypeUrl);
    optimization_guide::proto::AutocompleteScoringModelMetadata model_metadata;
    model_metadata.set_version(kAutocompleteScoringModelVersion);
    model_metadata.SerializeToString(any_metadata.mutable_value());

    url_scoring_model_handler_ =
        std::make_unique<AutocompleteScoringModelHandler>(
            model_provider, model_executor_task_runner_.get(),
            std::make_unique<AutocompleteScoringModelExecutor>(),
            optimization_guide::proto::OPTIMIZATION_TARGET_OMNIBOX_URL_SCORING,
            /*model_metadata=*/any_metadata);
  }
}

AutocompleteScoringModelService::~AutocompleteScoringModelService() = default;

void AutocompleteScoringModelService::AddOnModelUpdatedCallback(
    base::OnceClosure callback) {
  url_scoring_model_handler_->AddOnModelUpdatedCallback(std::move(callback));
}

int AutocompleteScoringModelService::GetModelVersion() const {
  auto info = url_scoring_model_handler_->GetModelInfo();
  return info.has_value() ? info->GetVersion() : -1;
}
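
// Usage sketch (illustrative; `CollectSignalsForEligibleMatches()` is a
// hypothetical helper, and the pointed-to ScoringSignals must outlive the
// call):
//   std::vector<const ScoringSignals*> signals =
//       CollectSignalsForEligibleMatches();
//   std::vector<AutocompleteScoringModelService::Result> scores =
//       service->BatchScoreAutocompleteUrlMatchesSync(signals);
// Each entry in `scores` is std::nullopt when no score could be produced for
// the corresponding input. When ML score caching is enabled, previously
// computed scores for identical signal vectors are reused from the in-memory
// score cache instead of re-executing the model.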
std::vector<AutocompleteScoringModelService::Result>
AutocompleteScoringModelService::BatchScoreAutocompleteUrlMatchesSync(
    const std::vector<const ScoringSignals*>& batch_scoring_signals) {
  TRACE_EVENT0(
      "omnibox",
      "AutocompleteScoringModelService::BatchScoreAutocompleteUrlMatchesSync");
  if (!UrlScoringModelAvailable()) {
    return {};
  }

  std::optional<std::vector<std::vector<float>>> batch_model_input =
      url_scoring_model_handler_->GetBatchModelInput(batch_scoring_signals);
  if (!batch_model_input) {
    return {};
  }

  // One result per input; entries default to `std::nullopt` and are only
  // populated when a cached or freshly computed score is available.
  std::vector<Result> batch_results(batch_model_input->size());

  if (OmniboxFieldTrial::GetMLConfig().ml_url_score_caching) {
    std::vector<size_t> uncached_positions;
    std::vector<std::vector<float>> uncached_inputs;

    // Source ML scores from the in-memory cache when possible.
    for (size_t i = 0; i < batch_model_input->size(); ++i) {
      const auto& model_input = batch_model_input->at(i);
      const auto it = score_cache_.Get(model_input);
      const bool cache_hit = it != score_cache_.end();
      if (cache_hit) {
        batch_results[i] = it->second;
      } else {
        uncached_positions.push_back(i);
        uncached_inputs.push_back(model_input);
      }
      LogMLScoreCacheHit(cache_hit);
    }

    // Synchronous model execution for the inputs that missed the cache.
    const auto batch_model_output =
        url_scoring_model_handler_->BatchExecuteModelWithInputSync(
            uncached_inputs);
    size_t i = 0;
    for (const auto& model_output : batch_model_output) {
      batch_results[uncached_positions[i]] =
          model_output ? std::make_optional(model_output->at(0))
                       : std::nullopt;
      if (model_output) {
        // Cache the score so identical signal vectors can skip model
        // execution in later passes.
        score_cache_.Put(uncached_inputs.at(i), model_output->at(0));
      }
      ++i;
    }
  } else {
    // Synchronous model execution.
    const auto batch_model_output =
        url_scoring_model_handler_->BatchExecuteModelWithInputSync(
            *batch_model_input);
    size_t i = 0;
    for (const auto& model_output : batch_model_output) {
      batch_results[i++] = model_output
                               ? std::make_optional(model_output->at(0))
                               : std::nullopt;
    }
  }

  return batch_results;
}

bool AutocompleteScoringModelService::UrlScoringModelAvailable() {
  return url_scoring_model_handler_ &&
         url_scoring_model_handler_->ModelAvailable();
}