File: SerializedLogChunk.cpp

/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "SerializedLogChunk.h"

#include <algorithm>  // std::find
#include <cstring>    // memcpy

#include <android-base/logging.h>

#include "CompressionEngine.h"
#include "SerializedFlushToState.h"

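// The reader reference count must have dropped to zero before a chunk is destroyed.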
SerializedLogChunk::~SerializedLogChunk() {
    CHECK_EQ(reader_ref_count_, 0U);
}

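// Marks the chunk as no longer accepting writes, compresses its contents, and drops the
// uncompressed buffer if no readers currently reference it.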
void SerializedLogChunk::FinishWriting() {
    writer_active_ = false;
    CHECK_EQ(compressed_log_.size(), 0U);
    CompressionEngine::GetInstance().Compress(contents_, write_offset_, compressed_log_);
    LOG(VERBOSE) << "Compressed Log, buffer max size: " << contents_.size()
                 << " size used: " << write_offset_
                 << " compressed size: " << compressed_log_.size();
    if (reader_ref_count_ == 0) {
        contents_.Resize(0);
    }
}

// TODO: Develop a better reference counting strategy to guard against the case where the writer is
// much faster than the reader, and we needlessly compress / decompress the logs.
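// The first reader to reference the chunk decompresses it back into contents_; if the writer is
// still active, contents_ is already populated and no decompression is needed.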
void SerializedLogChunk::IncReaderRefCount() {
    if (++reader_ref_count_ != 1 || writer_active_) {
        return;
    }
    contents_.Resize(write_offset_);
    CompressionEngine::GetInstance().Decompress(compressed_log_, contents_);
}

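// The last reader to release the chunk frees the uncompressed buffer, unless the writer is still
// appending to it.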
void SerializedLogChunk::DecReaderRefCount() {
    CHECK_NE(reader_ref_count_, 0U);
    if (--reader_ref_count_ != 0) {
        return;
    }
    if (!writer_active_) {
        contents_.Resize(0);
    }
}

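// Registers a reader with this chunk and takes a reference so its contents stay decompressed.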
void SerializedLogChunk::AttachReader(SerializedFlushToState* reader) {
    readers_.emplace_back(reader);
    IncReaderRefCount();
}

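// Unregisters a previously attached reader and drops its reference.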
void SerializedLogChunk::DetachReader(SerializedFlushToState* reader) {
    auto it = std::find(readers_.begin(), readers_.end(), reader);
    CHECK(readers_.end() != it);
    readers_.erase(it);
    DecReaderRefCount();
}

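// Notifies every attached reader that this chunk is being pruned from the given log buffer.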
void SerializedLogChunk::NotifyReadersOfPrune(log_id_t log_id) {
    // Readers will call DetachReader() in their Prune() call, so we make a copy of the list first.
    auto readers = readers_;
    for (auto& reader : readers) {
        reader->Prune(log_id);
    }
}

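// Returns true if |len| more bytes still fit in the write buffer.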
bool SerializedLogChunk::CanLog(size_t len) {
    return write_offset_ + len <= contents_.size();
}

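// Placement-constructs a SerializedLogEntry at the current write offset, copies the message
// payload into it, advances the write offset past the new entry, and records the chunk's
// highest sequence number.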
SerializedLogEntry* SerializedLogChunk::Log(uint64_t sequence, log_time realtime, uid_t uid,
                                            pid_t pid, pid_t tid, const char* msg, uint16_t len) {
    auto new_log_address = contents_.data() + write_offset_;
    auto* entry = new (new_log_address) SerializedLogEntry(uid, pid, tid, sequence, realtime, len);
    memcpy(entry->msg(), msg, len);
    write_offset_ += entry->total_len();
    highest_sequence_number_ = sequence;
    return entry;
}