File: log_source_unittest.cc

// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/ash/chromebox_for_meetings/artemis/log_source.h"

#include <filesystem>
#include <fstream>
#include <map>

#include "base/strings/string_number_conversions.h"
#include "base/test/task_environment.h"
#include "base/time/time.h"
#include "chrome/browser/ash/chromebox_for_meetings/artemis/log_file.h"
#include "chrome/browser/ash/chromebox_for_meetings/artemis/persistent_db.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace ash::cfm {
namespace {

constexpr size_t kTestFileNumLines = 10;
constexpr int kLargeOffset = 100000;

// We aren't actually polling, so this value doesn't matter.
constexpr base::TimeDelta kDefaultPollFrequency = base::Seconds(0);

// Default to reading all lines in test file.
constexpr size_t kDefaultBatchSize = kTestFileNumLines;

// Byte cap for each batch read.
constexpr size_t kBatchByteLimit = 1000;  // 1 kB

// Define a barebones db here that uses a std::map as the backing.
class PersistentDbForTesting : public PersistentDb {
 public:
  PersistentDbForTesting() = default;
  PersistentDbForTesting(const PersistentDbForTesting&) = delete;
  PersistentDbForTesting& operator=(const PersistentDbForTesting&) = delete;
  ~PersistentDbForTesting() override = default;

  // PersistentDb:
  int GetValueFromKey(int key, int default_value) override {
    if (map_.count(key) == 0) {
      return default_value;
    }
    return map_[key];
  }

  void SaveValueToKey(int key, int value) override { map_[key] = value; }

  void DeleteKeyIfExists(int key) override {
    if (map_.count(key) != 0) {
      map_.erase(key);
    }
  }

  size_t GetSize() const override { return map_.size(); }

 private:
  std::map<int, int> map_;
};

// Define a testing LogSource that allows us to fill the data buffer
// at will. Note there is no call to StartPollTimer().
class LogSourceForTesting : public LogSource {
 public:
  LogSourceForTesting(const std::string& filepath,
                      base::TimeDelta poll_rate,
                      size_t batch_size)
      : LogSource(filepath, poll_rate, batch_size) {}
  LogSourceForTesting(const LogSourceForTesting&) = delete;
  LogSourceForTesting& operator=(const LogSourceForTesting&) = delete;
  ~LogSourceForTesting() override = default;

  void FillDataBufferForTesting() { FillDataBuffer(); }

 protected:
  // Override this to skip serialization, which greatly complicates testing.
  void SerializeDataBuffer(std::vector<std::string>& buffer) override {}
};

// Define test fixture. This fixture will be used for both the
// LogFile and LogSource objects as they are closely linked and
// require similar setup. Each test will create real (temporary)
// files on the filesystem, to be used as the underlying data
// sources.
class ArtemisLogSourceTest : public testing::Test {
 public:
  ArtemisLogSourceTest()
      : test_file_("test_file.log"),
        rotated_file_("test_file.log.1"),
        rotate_log_prefix_("ROTATE: ") {}
  ArtemisLogSourceTest(const ArtemisLogSourceTest&) = delete;
  ArtemisLogSourceTest& operator=(const ArtemisLogSourceTest&) = delete;

  void SetUp() override {
    test_db_ = std::make_unique<PersistentDbForTesting>();
    PersistentDb::InitializeForTesting(test_db_.get());
    AppendNewLines(test_file_, kTestFileNumLines, "");
  }

  void TearDown() override {
    std::filesystem::remove(test_file_);
    if (std::filesystem::exists(rotated_file_)) {
      std::filesystem::remove(rotated_file_);
    }
    test_db_.reset(nullptr);
    PersistentDb::ShutdownForTesting();
  }

  void RotateFile() {
    std::filesystem::rename(test_file_, rotated_file_);
    AppendNewLines(test_file_, kTestFileNumLines, rotate_log_prefix_);
  }

  void AppendNewLines(const std::string& filename,
                      size_t count,
                      const std::string& prefix) {
    std::ofstream test_file;
    test_file.open(filename, std::ios_base::app);
    for (size_t i = 0; i < count; i++) {
      test_file << prefix << i << std::endl;
    }
    test_file.close();
  }

  int GetFileSize() {
    std::ifstream tmp_stream(test_file_);
    tmp_stream.seekg(0, tmp_stream.end);
    return static_cast<int>(tmp_stream.tellg());
  }

 protected:
  const std::string test_file_;
  const std::string rotated_file_;
  const std::string rotate_log_prefix_;

 private:
  std::unique_ptr<PersistentDb> test_db_;
};

// ------- Start LogFile tests -------

TEST_F(ArtemisLogSourceTest, OpenFileAtVariousOffsets) {
  LogFile logfile_bad(test_file_ + "noexist");
  LogFile logfile(test_file_);

  // File doesn't exist
  EXPECT_FALSE(logfile_bad.OpenAtOffset(0));
  logfile_bad.CloseStream();

  // File opens and is accessible
  EXPECT_TRUE(logfile.OpenAtOffset(0));
  logfile.CloseStream();

  // File opens and can seek to a reasonable offset
  EXPECT_TRUE(logfile.OpenAtOffset(8));
  logfile.CloseStream();

  // Negative offsets are not invalid. The file will seek
  // to the end, minus the offset.
  EXPECT_TRUE(logfile.OpenAtOffset(-1));
  logfile.CloseStream();

  // A too-high offset is not invalid. The file will seek to the end.
  EXPECT_TRUE(logfile.OpenAtOffset(kLargeOffset));
  logfile.CloseStream();
}

TEST_F(ArtemisLogSourceTest, CheckEOFStateAfterVariousOpens) {
  LogFile logfile(test_file_);
  int file_size = GetFileSize();

  // File opened at beginning
  logfile.OpenAtOffset(0);
  EXPECT_FALSE(logfile.IsAtEOF());

  // File opened at the end yields EOF
  logfile.OpenAtOffset(file_size);
  logfile.RetrieveNextLogs(1, kBatchByteLimit);
  EXPECT_TRUE(logfile.IsAtEOF());

  // File opened at high offset yields EOF
  logfile.OpenAtOffset(kLargeOffset);
  logfile.RetrieveNextLogs(1, kBatchByteLimit);
  EXPECT_TRUE(logfile.IsAtEOF());

  // File opened just before the end yields no EOF
  logfile.OpenAtOffset(file_size - 1);
  EXPECT_FALSE(logfile.IsAtEOF());
}

TEST_F(ArtemisLogSourceTest, RequestVaryingAmountOfLogLines) {
  LogFile logfile(test_file_);
  logfile.OpenAtOffset(0);

  // Read all logs. Note EOF is not set until we attempt to
  // read past the end, so expect false here.
  auto lines = logfile.RetrieveNextLogs(kTestFileNumLines, kBatchByteLimit);
  EXPECT_EQ(lines.size(), kTestFileNumLines);
  EXPECT_FALSE(logfile.IsAtEOF());
  logfile.CloseStream();

  // Try to read more than all logs. Verify result is the same,
  // but this time with EOF == true.
  logfile.OpenAtOffset(0);
  lines = logfile.RetrieveNextLogs(kTestFileNumLines + 1, kBatchByteLimit);
  EXPECT_EQ(lines.size(), kTestFileNumLines);
  EXPECT_TRUE(logfile.IsAtEOF());
  logfile.CloseStream();

  // Verify partial reads
  size_t num_to_read = 3;
  logfile.OpenAtOffset(0);
  lines = logfile.RetrieveNextLogs(num_to_read, kBatchByteLimit);
  EXPECT_EQ(lines.size(), num_to_read);
  lines = logfile.RetrieveNextLogs(kTestFileNumLines - num_to_read,
                                   kBatchByteLimit);
  EXPECT_EQ(lines.size(), kTestFileNumLines - num_to_read);
  logfile.CloseStream();
}

TEST_F(ArtemisLogSourceTest, CheckByteCapLimitsDataOutput) {
  // This test fixture normally creates a very basic file where
  // each line is an incrementing integer starting at 0. That is
  // insufficient for this particular test, so recreate the file
  // with larger lines.
  std::filesystem::remove(test_file_);

  size_t new_file_num_lines = 3;
  size_t line_size = 100;

  std::ofstream stream;
  std::string large_line = std::string(line_size, '!');

  stream.open(test_file_, std::ios_base::app);
  for (size_t i = 0; i < new_file_num_lines; i++) {
    stream << large_line << std::endl;
  }
  stream.close();

  LogFile logfile(test_file_);
  logfile.OpenAtOffset(0);
  size_t byte_cap = line_size;

  // Try to read all logs and verify that we only get one back.
  auto lines = logfile.RetrieveNextLogs(new_file_num_lines, byte_cap);
  EXPECT_EQ(lines.size(), 1u);
  EXPECT_FALSE(logfile.IsAtEOF());
  logfile.CloseStream();

  // Use an even more restrictive byte cap and confirm that
  // we drop logs in the worst case.
  logfile.OpenAtOffset(0);
  byte_cap = line_size - 1;

  lines = logfile.RetrieveNextLogs(new_file_num_lines, byte_cap);
  EXPECT_EQ(lines.size(), 0u);
  EXPECT_FALSE(logfile.IsAtEOF());

  // Now try to read the rest (with a normal byte cap) and confirm
  // that the first log was abandoned entirely.
  lines = logfile.RetrieveNextLogs(new_file_num_lines, kBatchByteLimit);
  EXPECT_EQ(lines.size(), new_file_num_lines - 1);
  logfile.CloseStream();
}

TEST_F(ArtemisLogSourceTest, VerifyNewLinesAppearAfterRefresh) {
  LogFile logfile(test_file_);
  logfile.OpenAtOffset(0);

  // Note for the below tests: there are two cases we need to
  // consider:
  // 1. The case where we "exhaust" the data source by attempting
  //    to read more than what's available, which triggers an EOF
  //    and requires a subsequent Refresh().
  // 2. The case where we read exactly (or less than) the amount
  //    of data in the log file and do NOT hit an EOF. New lines
  //    will be available immediately after adding them and will
  //    not require a Refresh() to observe them.
  //
  // This is likely a side effect of how EOF is handled by the
  // underlying C++ iostream API.
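  //
  // As a rough sketch of standard iostream behavior (this is an
  // illustration only, not the LogFile implementation):
  //
  //   std::ifstream in("test.log");
  //   std::string line;
  //   while (std::getline(in, line)) { /* consume every line */ }
  //   // The last getline() tried to read past the end, so eofbit and
  //   // failbit are now set; subsequent getline() calls fail even if
  //   // the file grows, until the stream state is cleared or the file
  //   // is reopened. If reads instead stop exactly at the final
  //   // newline, eofbit is never set and newly appended lines are
  //   // visible to the next read.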

  // Exhaust all the lines in the file, then add more. This is
  // case #1 above.
  logfile.RetrieveNextLogs(kTestFileNumLines + 1, kBatchByteLimit);
  EXPECT_TRUE(logfile.IsAtEOF());
  AppendNewLines(test_file_, kTestFileNumLines, "NEW: ");

  // Verify no new lines are reported before Refresh()
  auto new_lines = logfile.RetrieveNextLogs(kTestFileNumLines, kBatchByteLimit);
  EXPECT_EQ(new_lines.size(), 0u);
  EXPECT_TRUE(logfile.IsAtEOF());

  // Verify Refresh() triggers new lines to appear
  logfile.Refresh();
  EXPECT_FALSE(logfile.IsAtEOF());
  new_lines = logfile.RetrieveNextLogs(kTestFileNumLines, kBatchByteLimit);
  EXPECT_EQ(new_lines.size(), kTestFileNumLines);

  // Verify that the lines are the new lines
  for (auto& line : new_lines) {
    EXPECT_TRUE(line.starts_with("NEW: "));
  }

  // We read the exact log count in the previous operation, so EOF
  // should be false and newly appended lines should be available
  // immediately. Add more lines and test case #2 above.
  EXPECT_FALSE(logfile.IsAtEOF());
  AppendNewLines(test_file_, kTestFileNumLines, "NEW2: ");

  // Verify lines are immediately observable.
  new_lines = logfile.RetrieveNextLogs(kTestFileNumLines, kBatchByteLimit);
  EXPECT_EQ(new_lines.size(), kTestFileNumLines);
  EXPECT_FALSE(logfile.IsAtEOF());

  // Verify that the lines are the new lines
  for (auto& line : new_lines) {
    EXPECT_TRUE(line.starts_with("NEW2: "));
  }
}

// ------- Start LogSource tests -------

TEST_F(ArtemisLogSourceTest, TestBatchSizeCorrectlyLimitsOutput) {
  const size_t batch_size = 2;
  const size_t expected_num_reads = kTestFileNumLines / batch_size;
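  // kTestFileNumLines (10) divides evenly by batch_size (2), so each of
  // the expected_num_reads fetches below should return a full batch, and
  // the one after that should return nothing.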

  auto log_source = LogSource(test_file_, kDefaultPollFrequency, batch_size);

  for (size_t i = 0; i < expected_num_reads; ++i) {
    auto data = log_source.GetNextData();
    EXPECT_EQ(data.size(), batch_size);
  }

  auto data = log_source.GetNextData();
  EXPECT_EQ(data.size(), 0u);
}

TEST_F(ArtemisLogSourceTest, VerifyNewLinesAppearAfterRotation) {
  auto log_source =
      LogSource(test_file_, kDefaultPollFrequency, kDefaultBatchSize);

  // Initial setup. Read everything from original file
  auto data = log_source.GetNextData();
  EXPECT_EQ(data.size(), kTestFileNumLines);
  data = log_source.GetNextData();
  EXPECT_EQ(data.size(), 0u);

  // Add more lines to original file
  AppendNewLines(test_file_, kTestFileNumLines, "NEW: ");

  // Rotate file and verify that next fetch returns lines from new file
  RotateFile();
  data = log_source.GetNextData();
  EXPECT_EQ(data.size(), kTestFileNumLines);
  for (auto& line : data) {
    // TODO(b/320996557): we are expecting the new lines (NEW: ...) from
    // the old file to be dropped here. This will be the case until we
    // add full rotation support.
    EXPECT_TRUE(line.starts_with(rotate_log_prefix_));
  }
}

TEST_F(ArtemisLogSourceTest, TestCrashRecovery) {
  base::test::TaskEnvironment task_environment;
  base::RunLoop run_loop;

  size_t batch_size = 2;
  auto log_source = std::make_unique<LogSourceForTesting>(
      test_file_, kDefaultPollFrequency, batch_size);

  // Add <batch_size> lines to internal buffer. Then fetch & drop.
  log_source->FillDataBufferForTesting();
  log_source->Fetch(base::DoNothing());
  run_loop.RunUntilIdle();

  // We haven't flushed yet, so the db should be empty.
  EXPECT_EQ(PersistentDb::Get()->GetSize(), 0u);

  // Flush to report success. Verify inode was cached.
  log_source->Flush();
  run_loop.RunUntilIdle();
  EXPECT_EQ(PersistentDb::Get()->GetSize(), 1u);

  // Tear down and reset the log source. Then add more data.
  log_source.reset(nullptr);
  log_source = std::make_unique<LogSourceForTesting>(
      test_file_, kDefaultPollFrequency, batch_size);
  log_source->FillDataBufferForTesting();

  // Run the next Fetch and expect the data to continue from the last
  // saved point. Note that the file we're examining is just filled with
  // the integers 0 to kTestFileNumLines - 1, so we expect to start at
  // integer <batch_size>.
  log_source->Fetch(base::BindOnce(
      [](size_t start, const std::vector<std::string>& results) {
        EXPECT_EQ(results[0], base::NumberToString(start));
        EXPECT_EQ(results[1], base::NumberToString(start + 1));
      },
      batch_size));
  run_loop.RunUntilIdle();

  // Explicitly do not Flush()! Tear down and reset again. Add more data.
  log_source.reset(nullptr);
  log_source = std::make_unique<LogSourceForTesting>(
      test_file_, kDefaultPollFrequency, batch_size);
  log_source->FillDataBufferForTesting();

  // Because Flush() was not called, we assume that the last attempt failed,
  // so make sure we start from the same recovery location.
  log_source->Fetch(base::BindOnce(
      [](size_t start, const std::vector<std::string>& results) {
        ASSERT_EQ(results.size(), 2u);
        EXPECT_EQ(results[0], base::NumberToString(start));
        EXPECT_EQ(results[1], base::NumberToString(start + 1));
      },
      batch_size));
  run_loop.RunUntilIdle();

  // Tear down and reset again.
  log_source.reset(nullptr);
  log_source = std::make_unique<LogSourceForTesting>(
      test_file_, kDefaultPollFrequency, batch_size);

  // This time, before adding data, rotate the file.
  RotateFile();
  log_source->FillDataBufferForTesting();

  // Expect that we are now starting from the beginning of the rotated file.
  log_source->Fetch(base::BindOnce(
      [](size_t start, const std::string& prefix,
         const std::vector<std::string>& results) {
        ASSERT_EQ(results.size(), 2u);
        EXPECT_EQ(results[0], prefix + base::NumberToString(start));
        EXPECT_EQ(results[1], prefix + base::NumberToString(start + 1));
      },
      0u, rotate_log_prefix_));
  log_source->Flush();
  run_loop.RunUntilIdle();

  // Expect that the old inode was deleted and replaced with the new one.
  EXPECT_EQ(PersistentDb::Get()->GetSize(), 1u);
}

// Note: not using fixture here to avoid file creation. We want
// to test the scenario where the file doesn't exist at first.
TEST(ArtemisLogSourceTestBadFile, TestFileInitializationIsRetried) {
  base::test::TaskEnvironment task_environment;
  base::RunLoop run_loop;
  const std::string filename = "test.file";

  auto log_source = std::make_unique<LogSourceForTesting>(
      filename, kDefaultPollFrequency, kDefaultBatchSize);

  // Attempt to fill the buffer. Verify that Fetch() returns nothing
  // due to inaccessible file.
  log_source->FillDataBufferForTesting();
  log_source->Fetch(base::BindOnce([](const std::vector<std::string>& results) {
    EXPECT_EQ(results.size(), 0u);
  }));
  run_loop.RunUntilIdle();

  // Create file and add some junk data.
  std::ofstream test_file;
  test_file.open(filename, std::ios_base::app);
  test_file << "fake data" << std::endl;
  test_file.close();

  // Call Fetch() to trigger the file open, then try to fill the buffer.
  log_source->Fetch(base::DoNothing());
  log_source->FillDataBufferForTesting();

  // Verify that we now have data.
  log_source->Fetch(base::BindOnce([](const std::vector<std::string>& results) {
    EXPECT_EQ(results.size(), 1u);
    if (results.size() > 0) {
      EXPECT_EQ(results[0], "fake data");
    }
  }));
  run_loop.RunUntilIdle();

  // Clean up.
  std::filesystem::remove(filename);
}

}  // namespace
}  // namespace ash::cfm