File: audio_frame_unittest.cc

/*
 *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio/audio_frame.h"

#include <stdint.h>
#include <string.h>  // memcmp

#include "api/audio/audio_view.h"
#include "api/audio/channel_layout.h"
#include "rtc_base/checks.h"
#include "test/gtest.h"

namespace webrtc {

namespace {

// Returns true if every sample in `samples` equals `sample`.
bool AllSamplesAre(int16_t sample, InterleavedView<const int16_t> samples) {
  for (const auto s : samples) {
    if (s != sample) {
      return false;
    }
  }
  return true;
}

bool AllSamplesAre(int16_t sample, const AudioFrame& frame) {
  return AllSamplesAre(sample, frame.data_view());
}

// Checks the values of all samples in the AudioFrame buffer, regardless of
// whether they're valid or not, and disregarding the `muted()` state of the
// frame. I.e. it iterates over `max_16bit_samples()` samples instead of
// `data_view().size()`.
bool AllBufferSamplesAre(int16_t sample, const AudioFrame& frame) {
  auto view = frame.data_view();
  RTC_DCHECK(!view.empty());
  const int16_t* data = view.data();
  for (size_t i = 0; i < frame.max_16bit_samples(); ++i) {
    if (data[i] != sample) {
      return false;
    }
  }
  return true;
}

constexpr uint32_t kTimestamp = 27;
constexpr int kSampleRateHz = 16000;
constexpr size_t kNumChannelsMono = 1;
constexpr size_t kNumChannelsStereo = 2;
constexpr size_t kNumChannels5_1 = 6;
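// 10 ms of audio at 16 kHz, i.e. 160 samples per channel.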
constexpr size_t kSamplesPerChannel = kSampleRateHz / 100;

}  // namespace

TEST(AudioFrameTest, FrameStartsZeroedAndMuted) {
  AudioFrame frame;
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(frame.data_view().empty());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

// TODO: b/335805780 - Delete test when `mutable_data()` returns ArrayView.
TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroedLegacy) {
  AudioFrame frame(kSampleRateHz, kNumChannelsMono, CHANNEL_LAYOUT_NONE);
  frame.mutable_data();
  EXPECT_FALSE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
  EXPECT_TRUE(AllBufferSamplesAre(0, frame));
}

TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroed) {
  AudioFrame frame;
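  // Requesting a mutable view of mono samples unmutes the frame and returns
  // zeroed data.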
  auto data = frame.mutable_data(kSamplesPerChannel, kNumChannelsMono);
  EXPECT_FALSE(frame.muted());
  EXPECT_TRUE(IsMono(data));
  EXPECT_EQ(frame.data_view().size(), kSamplesPerChannel);
  EXPECT_EQ(SamplesPerChannel(data), kSamplesPerChannel);
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

TEST(AudioFrameTest, MutedFrameBufferIsZeroed) {
  AudioFrame frame;
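  // Reserving samples for mono audio unmutes the frame.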
  int16_t* frame_data =
      frame.mutable_data(kSamplesPerChannel, kNumChannelsMono).begin();
  EXPECT_FALSE(frame.muted());
  // Fill the reserved buffer with non-zero data.
  for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
    frame_data[i] = 17;
  }
  ASSERT_TRUE(AllSamplesAre(17, frame));
  ASSERT_TRUE(AllBufferSamplesAre(17, frame));
  frame.Mute();
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
  ASSERT_TRUE(AllBufferSamplesAre(0, frame));
}

TEST(AudioFrameTest, UpdateFrameMono) {
  AudioFrame frame;
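  // Aggregate initialization sets samples[0] to 17 and zero-fills the rest.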
  int16_t samples[kNumChannelsMono * kSamplesPerChannel] = {17};
  frame.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
                    AudioFrame::kPLC, AudioFrame::kVadActive, kNumChannelsMono);

  EXPECT_EQ(kTimestamp, frame.timestamp_);
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kSampleRateHz, frame.sample_rate_hz());
  EXPECT_EQ(AudioFrame::kPLC, frame.speech_type_);
  EXPECT_EQ(AudioFrame::kVadActive, frame.vad_activity_);
  EXPECT_EQ(kNumChannelsMono, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_MONO, frame.channel_layout());

  EXPECT_FALSE(frame.muted());
  EXPECT_EQ(0, memcmp(samples, frame.data(), sizeof(samples)));

  // Updating with nullptr as the source data mutes the frame.
  frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannelsMono);
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

TEST(AudioFrameTest, UpdateFrameMultiChannel) {
  AudioFrame frame;
  frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannelsStereo);
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kNumChannelsStereo, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_STEREO, frame.channel_layout());
  EXPECT_TRUE(frame.muted());

  // Update the frame with non-null `kNumChannels5_1` data to make sure we
  // get an unmuted frame with valid samples. Only samples[0] is non-zero;
  // aggregate initialization zero-fills the remainder.
  int16_t samples[kSamplesPerChannel * kNumChannels5_1] = {17};
  frame.UpdateFrame(kTimestamp, samples /* data */, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannels5_1);
  EXPECT_FALSE(frame.muted());
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kSamplesPerChannel * kNumChannels5_1, frame.data_view().size());
  EXPECT_EQ(kNumChannels5_1, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_5_1, frame.channel_layout());
}

TEST(AudioFrameTest, CopyFrom) {
  AudioFrame frame1;
  AudioFrame frame2;

  // As in UpdateFrameMono, only samples[0] is set to 17.
  int16_t samples[kNumChannelsMono * kSamplesPerChannel] = {17};
  frame2.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
                     AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannelsMono);
  frame1.CopyFrom(frame2);

  EXPECT_EQ(frame2.timestamp_, frame1.timestamp_);
  EXPECT_EQ(frame2.samples_per_channel_, frame1.samples_per_channel_);
  EXPECT_EQ(frame2.sample_rate_hz_, frame1.sample_rate_hz_);
  EXPECT_EQ(frame2.speech_type_, frame1.speech_type_);
  EXPECT_EQ(frame2.vad_activity_, frame1.vad_activity_);
  EXPECT_EQ(frame2.num_channels_, frame1.num_channels_);

  EXPECT_EQ(frame2.data_view().size(), frame1.data_view().size());
  EXPECT_EQ(frame2.muted(), frame1.muted());
  EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));

  // Mute frame2 via nullptr data, then verify CopyFrom() preserves the
  // muted state.
  frame2.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                     kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannelsMono);
  frame1.CopyFrom(frame2);

  EXPECT_EQ(frame2.muted(), frame1.muted());
  EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
}

}  // namespace webrtc