/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_coding/neteq/sync_buffer.h"
#include <cstddef>
#include <cstdint>
#include "api/audio/audio_frame.h"
#include "modules/audio_coding/neteq/audio_multi_vector.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "test/gtest.h"
namespace webrtc {
TEST(SyncBuffer, CreateAndDestroy) {
  // Construct a two-channel buffer holding 10 samples per channel.
  static const size_t kLen = 10;
  static const size_t kChannels = 2;
  SyncBuffer sync_buffer(kChannels, kLen);
  EXPECT_EQ(kChannels, sync_buffer.Channels());
  EXPECT_EQ(kLen, sync_buffer.Size());
  // A freshly created (empty) buffer reports the next playout index at the
  // very end of the buffer.
  EXPECT_EQ(kLen, sync_buffer.next_index());
  // Every sample in every channel must start out zeroed.
  for (size_t ch = 0; ch < kChannels; ++ch) {
    for (size_t sample = 0; sample < kLen; ++sample) {
      EXPECT_EQ(0, sync_buffer[ch][sample]);
    }
  }
}
TEST(SyncBuffer, SetNextIndex) {
  // Construct a two-channel buffer holding 100 samples per channel.
  static const size_t kLen = 100;
  static const size_t kChannels = 2;
  SyncBuffer sync_buffer(kChannels, kLen);
  // Requested index vs. index the buffer should then report. Anything in
  // [0, kLen] is honored verbatim; larger values are capped at the buffer
  // size.
  const size_t kRequested[] = {0, kLen / 2, kLen, kLen + 1};
  const size_t kExpected[] = {0, kLen / 2, kLen, kLen};
  for (size_t i = 0; i < 4; ++i) {
    sync_buffer.set_next_index(kRequested[i]);
    EXPECT_EQ(kExpected[i], sync_buffer.next_index());
  }
}
TEST(SyncBuffer, PushBackAndFlush) {
  // Construct a two-channel buffer holding 100 samples per channel.
  static const size_t kLen = 100;
  static const size_t kChannels = 2;
  SyncBuffer sync_buffer(kChannels, kLen);
  static const size_t kNewLen = 10;
  AudioMultiVector new_data(kChannels, kNewLen);
  // Fill `new_data` with a simple ramp (0, 1, 2, ...) in each channel.
  for (size_t ch = 0; ch < kChannels; ++ch) {
    for (size_t n = 0; n < kNewLen; ++n) {
      new_data[ch][n] = checked_cast<int16_t>(n);
    }
  }
  // PushBack keeps the total size constant: it pops the oldest samples off
  // the front and advances `next_index_` by the inserted length.
  sync_buffer.PushBack(new_data);
  ASSERT_EQ(kLen, sync_buffer.Size());
  EXPECT_EQ(kLen - kNewLen, sync_buffer.next_index());
  // The freshly inserted samples must sit at the tail of the buffer,
  // starting at `next_index_`.
  for (size_t ch = 0; ch < kChannels; ++ch) {
    for (size_t n = 0; n < kNewLen; ++n) {
      EXPECT_EQ(new_data[ch][n],
                sync_buffer[ch][sync_buffer.next_index() + n]);
    }
  }
  // Flushing zeroes the contents without changing the size, and parks
  // `next_index_` at the end again.
  sync_buffer.Flush();
  ASSERT_EQ(kLen, sync_buffer.Size());
  EXPECT_EQ(kLen, sync_buffer.next_index());
  for (size_t ch = 0; ch < kChannels; ++ch) {
    for (size_t n = 0; n < kLen; ++n) {
      EXPECT_EQ(0, sync_buffer[ch][n]);
    }
  }
}
TEST(SyncBuffer, PushFrontZeros) {
  // Construct a two-channel buffer holding 100 samples per channel.
  static const size_t kLen = 100;
  static const size_t kChannels = 2;
  SyncBuffer sync_buffer(kChannels, kLen);
  static const size_t kNewLen = 10;
  AudioMultiVector new_data(kChannels, kNewLen);
  // Fill `new_data` with values 1000, 1001, ... so that real samples are
  // easy to tell apart from inserted zeros.
  for (size_t ch = 0; ch < kChannels; ++ch) {
    for (size_t n = 0; n < kNewLen; ++n) {
      new_data[ch][n] = checked_cast<int16_t>(1000 + n);
    }
  }
  sync_buffer.PushBack(new_data);
  EXPECT_EQ(kLen, sync_buffer.Size());
  // Prepend `kNewLen` - 1 zeros to each channel; the size must not change.
  sync_buffer.PushFrontZeros(kNewLen - 1);
  EXPECT_EQ(kLen, sync_buffer.Size());
  // `next_index_` shifts along with the contents and ends up one sample
  // before the end of the buffer.
  EXPECT_EQ(kLen - 1, sync_buffer.next_index());
  for (size_t ch = 0; ch < kChannels; ++ch) {
    // The prepended zeros occupy the front of each channel...
    for (size_t n = 0; n < kNewLen - 1; ++n) {
      EXPECT_EQ(0, sync_buffer[ch][n]);
    }
    // ...while the first pushed-back sample (1000) survives at `next_index_`.
    EXPECT_EQ(1000, sync_buffer[ch][sync_buffer.next_index()]);
  }
}
TEST(SyncBuffer, GetNextAudioInterleaved) {
  // Construct a two-channel buffer holding 100 samples per channel.
  static const size_t kLen = 100;
  static const size_t kChannels = 2;
  SyncBuffer sync_buffer(kChannels, kLen);
  static const size_t kNewLen = 10;
  AudioMultiVector new_data(kChannels, kNewLen);
  // Fill `new_data` with a simple ramp (0, 1, 2, ...) in each channel.
  for (size_t ch = 0; ch < kChannels; ++ch) {
    for (size_t n = 0; n < kNewLen; ++n) {
      new_data[ch][n] = checked_cast<int16_t>(n);
    }
  }
  // Append the data; the buffer keeps its size constant and `next_index_`
  // advances by the inserted length.
  sync_buffer.PushBack(new_data);
  // Read the inserted samples back as interleaved audio in two halves. Each
  // read must advance `next_index_` automatically so that the second call
  // picks up exactly where the first one stopped. Note that the requested
  // count is per channel: each frame receives (count * kChannels) samples.
  AudioFrame output1;
  sync_buffer.GetNextAudioInterleaved(kNewLen / 2, &output1);
  EXPECT_EQ(kChannels, output1.num_channels_);
  EXPECT_EQ(kNewLen / 2, output1.samples_per_channel_);
  AudioFrame output2;
  sync_buffer.GetNextAudioInterleaved(kNewLen / 2, &output2);
  EXPECT_EQ(kChannels, output2.num_channels_);
  EXPECT_EQ(kNewLen / 2, output2.samples_per_channel_);
  // Verify the interleaving: frame data alternates channels sample by
  // sample, covering the first half in `output1`...
  const int16_t* read_ptr = output1.data();
  for (size_t n = 0; n < kNewLen / 2; ++n) {
    for (size_t ch = 0; ch < kChannels; ++ch) {
      EXPECT_EQ(new_data[ch][n], *read_ptr);
      ++read_ptr;
    }
  }
  // ...and the second half in `output2`.
  read_ptr = output2.data();
  for (size_t n = kNewLen / 2; n < kNewLen; ++n) {
    for (size_t ch = 0; ch < kChannels; ++ch) {
      EXPECT_EQ(new_data[ch][n], *read_ptr);
      ++read_ptr;
    }
  }
}
} // namespace webrtc