File: custom_input_processor.h

// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_SEGMENTATION_PLATFORM_INTERNAL_EXECUTION_PROCESSING_CUSTOM_INPUT_PROCESSOR_H_
#define COMPONENTS_SEGMENTATION_PLATFORM_INTERNAL_EXECUTION_PROCESSING_CUSTOM_INPUT_PROCESSOR_H_

#include <memory>
#include <vector>

#include "base/containers/flat_map.h"
#include "base/functional/callback.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
#include "components/segmentation_platform/internal/execution/processing/query_processor.h"
#include "components/segmentation_platform/public/proto/model_metadata.pb.h"

namespace segmentation_platform::processing {
class FeatureProcessorState;
class InputDelegateHolder;
struct Data;

// CustomInputProcessor adds support for a larger variety of data types
// (timestamps, strings mapped to enums, etc.), transforming them into a valid
// input tensor to use when executing the ML model.
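//
// Example usage (illustrative sketch only; `delegate_holder`, `state`, and
// `OnInputsProcessed` are hypothetical names, not real call sites):
//
//   CustomInputProcessor processor(/*prediction_time=*/base::Time::Now(),
//                                  delegate_holder);
//   processor.Process(state, base::BindOnce(&OnInputsProcessed));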
class CustomInputProcessor : public QueryProcessor {
 public:
  CustomInputProcessor(const base::Time prediction_time,
                       InputDelegateHolder* input_delegate_holder);
  CustomInputProcessor(base::flat_map<FeatureIndex, Data>&& data,
                       const base::Time prediction_time,
                       InputDelegateHolder* input_delegate_holder);
  ~CustomInputProcessor() override;

  // QueryProcessor implementation.
  void Process(FeatureProcessorState& feature_processor_state,
               QueryProcessorCallback callback) override;

  template <typename IndexType>
  using TemplateCallback =
      base::OnceCallback<void(base::flat_map<IndexType, Tensor>)>;

  // Processes a data mapping with a customized index type. Appends the
  // processed tensor values to the provided `result` map and returns it
  // through `callback`.
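  //
  // Illustrative sketch only (`custom_inputs`, `state`, and `OnTensorsReady`
  // are hypothetical names, not part of this API):
  //
  //   auto result =
  //       std::make_unique<base::flat_map<FeatureIndex, Tensor>>();
  //   processor.ProcessIndexType<FeatureIndex>(
  //       std::move(custom_inputs), state, std::move(result),
  //       base::BindOnce(&OnTensorsReady));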
  template <typename IndexType>
  void ProcessIndexType(
      base::flat_map<IndexType, proto::CustomInput> custom_inputs,
      FeatureProcessorState& feature_processor_state,
      std::unique_ptr<base::flat_map<IndexType, Tensor>> result,
      TemplateCallback<IndexType> callback);

 private:
  // Helper method to handle async custom inputs for `ProcessIndexType()`.
  template <typename IndexType>
  void OnGotProcessedValue(
      base::flat_map<IndexType, proto::CustomInput> custom_inputs,
      base::WeakPtr<FeatureProcessorState> feature_processor_state,
      std::unique_ptr<base::flat_map<IndexType, Tensor>> result,
      TemplateCallback<IndexType> callback,
      IndexType current_index,
      size_t current_tensor_length,
      bool error,
      Tensor current_value);

  // Helper function for parsing a single sync custom input and inserting the
  // result along with the corresponding feature index.
  QueryProcessor::Tensor ProcessSingleCustomInput(
      const proto::CustomInput& custom_input,
      FeatureProcessorState& feature_processor_state);

  // Add a tensor value for CustomInput::FILL_PREDICTION_TIME type and return
  // whether it succeeded.
  bool AddPredictionTime(const proto::CustomInput& custom_input,
                         std::vector<ProcessedValue>& out_tensor);

  // Add a tensor value for CustomInput::FILL_DEVICE_RAM type and return
  // whether it succeeded.
  bool AddDeviceRAMInMB(const proto::CustomInput& custom_input,
                        std::vector<ProcessedValue>& out_tensor);

  // Add a tensor value for CustomInput::FILL_DEVICE_OS type and return
  // whether it succeeded.
  bool AddDeviceOSVersionNumber(const proto::CustomInput& custom_input,
                                std::vector<ProcessedValue>& out_tensor);

  // Add a tensor value for CustomInput::FILL_DEVICE_PPI type and return
  // whether it succeeded.
  bool AddDevicePPI(const proto::CustomInput& custom_input,
                    std::vector<ProcessedValue>& out_tensor);

  // Add a tensor value for CustomInput::TIME_RANGE_BEFORE_PREDICTION type and
  // return whether it succeeded.
  bool AddTimeRangeBeforePrediction(const proto::CustomInput& custom_input,
                                    std::vector<ProcessedValue>& out_tensor);

  // Add a tensor value for CustomInput::FILL_FROM_INPUT_CONTEXT and return
  // whether it succeeded.
  bool AddFromInputContext(const proto::CustomInput& custom_input,
                           FeatureProcessorState& feature_processor_state,
                           std::vector<ProcessedValue>& out_tensor);

  // Add a random number for CustomInput::FILL_RANDOM and return whether it
  // succeeded.
  bool AddRandom(const proto::CustomInput& custom_input,
                 std::vector<ProcessedValue>& out_tensor);

  const raw_ptr<InputDelegateHolder, AcrossTasksDanglingUntriaged>
      input_delegate_holder_;

  // List of custom inputs to process into input tensors.
  base::flat_map<FeatureIndex, proto::CustomInput> custom_inputs_;

  // Time at which we expect the model execution to run.
  base::Time prediction_time_;

  base::WeakPtrFactory<CustomInputProcessor> weak_ptr_factory_{this};
};

}  // namespace segmentation_platform::processing

#endif  // COMPONENTS_SEGMENTATION_PLATFORM_INTERNAL_EXECUTION_PROCESSING_CUSTOM_INPUT_PROCESSOR_H_