File: log_source.cc

package info (click to toggle)
chromium 138.0.7204.183-1~deb12u1
  • links: PTS, VCS
  • area: main
  • in suites: bookworm-proposed-updates
  • size: 6,080,960 kB
  • sloc: cpp: 34,937,079; ansic: 7,176,967; javascript: 4,110,704; python: 1,419,954; asm: 946,768; xml: 739,971; pascal: 187,324; sh: 89,623; perl: 88,663; objc: 79,944; sql: 50,304; cs: 41,786; fortran: 24,137; makefile: 21,811; php: 13,980; tcl: 13,166; yacc: 8,925; ruby: 7,485; awk: 3,720; lisp: 3,096; lex: 1,327; ada: 727; jsp: 228; sed: 36
file content (195 lines) | stat: -rw-r--r-- 6,400 bytes parent folder | download | duplicates (6)
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/ash/chromebox_for_meetings/artemis/log_source.h"

#include <sys/stat.h>

#include "base/logging.h"
#include "base/strings/string_split.h"
#include "base/task/thread_pool.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/time/time.h"
#include "chrome/browser/ash/chromebox_for_meetings/artemis/specialized_log_sources.h"

// Some files won't appear until after Chrome starts up. We'll try to open
// the file at every `Fetch()` request up to `kMaxFileOpenAttempts` times.
// NOTE(review): declared at global scope (outside ash::cfm and outside the
// anonymous namespace below) in a .cc file; file-local placement in the
// anonymous namespace would be more conventional — confirm intent.
inline constexpr int kMaxFileOpenAttempts = 3;

namespace ash::cfm {

namespace {

// Byte cap for each call to log source's `RetrieveNextLogs()`.
// Ensures that we are not sending back a large amount of data
// when calling `GetNextData()`. Example: we have a log file
// that has unexpectedly large lines consecutively, which leads
// to a larger-than-usual batch.
constexpr size_t kLogBatchByteLimit = 100 * 1000;  // 100 KB

}  // namespace

// Constructs a log source that tails `filepath`, polling at `poll_rate` and
// returning at most `batch_size` lines per fetch. Data from log files is
// always redacted and collected incrementally (see LocalDataSource ctor args).
LogSource::LogSource(const std::string& filepath,
                     base::TimeDelta poll_rate,
                     size_t batch_size)
    : LocalDataSource(poll_rate,
                      /*data_needs_redacting=*/true,
                      /*is_incremental=*/true),
      log_file_(filepath),
      batch_size_(batch_size) {
  // Restore the last persisted read offset (if any) so we resume where the
  // previous session left off, then attempt the first open at that offset.
  // An open failure here is tolerated; Fetch() retries the open later.
  recovery_offset_ = GetLastKnownOffsetFromStorage();
  InitializeFile();
}

LogSource::~LogSource() = default;

// Opens the backing file at the recovery offset. Returns true on success.
// On failure, bumps the failed-attempt counter and logs how many retries
// remain (callers stop retrying once the budget is exhausted).
bool LogSource::InitializeFile() {
  if (log_file_.OpenAtOffset(recovery_offset_)) {
    // Record the inode immediately so rotations can be detected later.
    last_known_inode_ = GetCurrentFileInode();
    return true;
  }

  ++num_failed_open_attempts_;
  LOG(ERROR) << "Unable to open file " << GetDisplayName() << ". Trying "
             << kMaxFileOpenAttempts - num_failed_open_attempts_
             << " more times.";
  return false;
}

// Factory: routes well-known CfM files to their specialized source
// implementations; any other path is handled by the generic LogSource.
std::unique_ptr<LogSource> LogSource::Create(const std::string& filename,
                                             base::TimeDelta poll_rate,
                                             size_t batch_size) {
  if (filename == kCfmAuditLogFile) {
    return std::make_unique<AuditLogSource>(poll_rate, batch_size);
  }
  if (filename == kCfmBiosInfoLogFile) {
    return std::make_unique<BiosInfoLogSource>(poll_rate, batch_size);
  }
  if (filename == kCfmEventlogLogFile) {
    return std::make_unique<EventlogLogSource>(poll_rate, batch_size);
  }
  if (filename == kCfmVariationsListLogFile) {
    return std::make_unique<VariationsListLogSource>(poll_rate, batch_size);
  }
  return std::make_unique<LogSource>(filename, poll_rate, batch_size);
}

// Fetches the next batch of logs. A file that failed to open is retried on
// each Fetch() until the retry budget runs out; after that (or on a failed
// retry) the callback receives an empty result.
void LogSource::Fetch(FetchCallback callback) {
  if (!log_file_.IsOpen()) {
    const bool retries_left = num_failed_open_attempts_ < kMaxFileOpenAttempts;
    if (!retries_left || !InitializeFile()) {
      std::move(callback).Run({});
      return;
    }
  }

  // Cache the current offset to use as a recovery offset in the event of a
  // crash. This is NOT flushed to disk until the next Flush(); if we crash
  // before then, the last flushed offset is used instead.
  //
  // The data buffer keeps filling between this Fetch() and the next Flush(),
  // so the value MUST be captured here or those logs could be dropped.
  recovery_offset_ = log_file_.GetCurrentOffset();
  LocalDataSource::Fetch(std::move(callback));
}

// Called after a successful upload: persists the cached recovery offset and
// lets the base class flush its buffer. A no-op while the file is unopened.
void LogSource::Flush() {
  if (log_file_.IsOpen()) {
    PersistCurrentOffsetToStorage();
    LocalDataSource::Flush();
  }
}

// Returns the backing file's path, used as this source's display name in
// log messages.
const std::string& LogSource::GetDisplayName() {
  return log_file_.GetFilePath();
}

// Reads the next batch of log lines from the file, bounded by `batch_size_`
// lines and `kLogBatchByteLimit` bytes. Returns an empty vector when the
// file is unopened or the stream is in a fail state. Rotation detection and
// EOF refresh below are order-sensitive: rotation is checked first so a
// fresh file is followed from offset 0 before any read is attempted.
std::vector<std::string> LogSource::GetNextData() {
  if (!log_file_.IsOpen()) {
    return {};
  }

  if (log_file_.IsInFailState()) {
    LOG(ERROR) << "Attempted to fetch logs for '" << log_file_.GetFilePath()
               << "', but the stream is dead";
    return {};
  }

  // If the file rotated from under us, reset it to start following the
  // new file. TODO(b/320996557): this might drop newest logs from old
  // rotated file.
  if (DidFileRotate()) {
    VLOG(1) << "Detected rotation in file '" << log_file_.GetFilePath() << "'";
    log_file_.CloseStream();
    log_file_.OpenAtOffset(0);
  }

  // ifstreams for files that have reached an EOF will not yield
  // newly-written lines unless the file is explicitly reset.
  // If we've hit an EOF, refresh the stream (close & re-open).
  //
  // NB: if the last read didn't cause an EOF, new lines will be
  // available immediately without the need to Refresh() first.
  if (log_file_.IsAtEOF()) {
    VLOG(3) << "Refreshing log file '" << log_file_.GetFilePath() << "'";
    log_file_.Refresh();
  }

  return log_file_.RetrieveNextLogs(batch_size_, kLogBatchByteLimit);
}

int LogSource::GetCurrentFileInode() {
  base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
                                                base::BlockingType::MAY_BLOCK);
  struct stat file_info;
  const std::string& filepath = log_file_.GetFilePath();

  if (stat(filepath.c_str(), &file_info) != 0) {
    LOG(ERROR) << "Unable to get inode of " << filepath;
    return kInvalidFileInode;
  }

  return file_info.st_ino;
}

// Returns true when the file at our path now has a different inode than the
// one we opened, i.e. the log was rotated. On rotation, the stale offset
// keyed by the old inode is purged and the new inode is remembered.
bool LogSource::DidFileRotate() {
  const int current_inode = GetCurrentFileInode();

  if (current_inode == kInvalidFileInode ||
      current_inode == last_known_inode_) {
    return false;
  }

  if (PersistentDb::IsInitialized()) {
    PersistentDb::Get()->DeleteKeyIfExists(last_known_inode_);
  }
  last_known_inode_ = current_inode;
  return true;
}

// Looks up the persisted read offset for the file's current inode, falling
// back to 0 (start of file) when the database is unavailable or has no entry.
std::streampos LogSource::GetLastKnownOffsetFromStorage() {
  constexpr int kDefaultOffset = 0;

  if (!PersistentDb::IsInitialized()) {
    return kDefaultOffset;
  }

  return PersistentDb::Get()->GetValueFromKey(GetCurrentFileInode(),
                                              kDefaultOffset);
}

// Saves the cached recovery offset to persistent storage, keyed by the
// file's current inode. Best-effort: logs and bails if the db is inactive.
void LogSource::PersistCurrentOffsetToStorage() {
  if (!PersistentDb::IsInitialized()) {
    LOG(WARNING) << "PersistentDb is inactive; recovery feature is disabled";
    return;
  }

  PersistentDb::Get()->SaveValueToKey(GetCurrentFileInode(), recovery_offset_);
}

}  // namespace ash::cfm