File: frame_helpers_unittest.cc

package info (click to toggle)
chromium 138.0.7204.183-1
  • links: PTS, VCS
  • area: main
  • in suites: trixie
  • size: 6,071,908 kB
  • sloc: cpp: 34,937,088; ansic: 7,176,967; javascript: 4,110,704; python: 1,419,953; asm: 946,768; xml: 739,971; pascal: 187,324; sh: 89,623; perl: 88,663; objc: 79,944; sql: 50,304; cs: 41,786; fortran: 24,137; makefile: 21,806; php: 13,980; tcl: 13,166; yacc: 8,925; ruby: 7,485; awk: 3,720; lisp: 3,096; lex: 1,327; ada: 727; jsp: 228; sed: 36
file content (117 lines) | stat: -rw-r--r-- 4,132 bytes parent folder | download | duplicates (5)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
/*
 *  Copyright (c) 2022 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/video_coding/frame_helpers.h"

#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <variant>

#include "absl/container/inlined_vector.h"
#include "api/scoped_refptr.h"
#include "api/units/timestamp.h"
#include "api/video/encoded_frame.h"
#include "api/video/encoded_image.h"
#include "common_video/frame_instrumentation_data.h"
#include "test/gmock.h"
#include "test/gtest.h"

namespace webrtc {
namespace {

using ::testing::ElementsAre;

constexpr uint32_t kRtpTimestamp = 123456710;

// Allocates an `EncodedImageBuffer` of `n` bytes whose contents are the
// ascending byte sequence x, x+1, ..., x+(n-1) (wrapping mod 256).
scoped_refptr<EncodedImageBuffer> CreateEncodedImageBufferOfSizeN(size_t n,
                                                                  uint8_t x) {
  scoped_refptr<EncodedImageBuffer> encoded_buffer =
      EncodedImageBuffer::Create(n);
  uint8_t* payload = encoded_buffer->data();
  for (size_t offset = 0; offset < n; ++offset) {
    payload[offset] = static_cast<uint8_t>(x + offset);
  }
  return encoded_buffer;
}

// Builds an `EncodedFrame` whose payload holds the byte values
// [x, x+1, ..., x+(n-1)] and whose RTP timestamp is `kRtpTimestamp`.
EncodedFrame CreateEncodedImageOfSizeN(size_t n, uint8_t x) {
  EncodedFrame frame;
  frame.SetRtpTimestamp(kRtpTimestamp);
  frame.SetEncodedData(CreateEncodedImageBufferOfSizeN(n, x));
  return frame;
}

// A render time far ahead of the current time (large positive frame delay)
// must be classified as bad render timing.
TEST(FrameHasBadRenderTimingTest, LargePositiveFrameDelayIsBad) {
  const Timestamp now = Timestamp::Seconds(0);
  const Timestamp render_time = Timestamp::Seconds(12);

  EXPECT_TRUE(FrameHasBadRenderTiming(render_time, now));
}

// A render time far behind the current time (large negative frame delay)
// must also be classified as bad render timing.
TEST(FrameHasBadRenderTimingTest, LargeNegativeFrameDelayIsBad) {
  const Timestamp now = Timestamp::Seconds(24);
  const Timestamp render_time = Timestamp::Seconds(12);

  EXPECT_TRUE(FrameHasBadRenderTiming(render_time, now));
}

// Combining the spatial layers of one temporal unit must yield a frame whose
// frame instrumentation data matches that of the highest spatial layer.
TEST(FrameInstrumentationDataTest,
     CombinedFrameHasSameDataAsHighestSpatialLayer) {
  // Assume L2T1 scalability mode.
  EncodedFrame base_layer = CreateEncodedImageOfSizeN(/*n=*/10, /*x=*/1);
  const FrameInstrumentationData base_layer_data = {
      .sequence_index = 100,
      .communicate_upper_bits = false,
      .std_dev = 0.5,
      .luma_error_threshold = 5,
      .chroma_error_threshold = 4,
      .sample_values = {0.2, 0.7, 1.9}};
  base_layer.SetFrameInstrumentationData(base_layer_data);

  EncodedFrame top_layer = CreateEncodedImageOfSizeN(/*n=*/10, /*x=*/11);
  const FrameInstrumentationData top_layer_data = {
      .sequence_index = 10,
      .communicate_upper_bits = false,
      .std_dev = 1.0,
      .luma_error_threshold = 3,
      .chroma_error_threshold = 4,
      .sample_values = {0.1, 0.3, 2.1}};
  top_layer.SetFrameInstrumentationData(top_layer_data);

  absl::InlinedVector<std::unique_ptr<EncodedFrame>, 4> frames;
  frames.push_back(std::make_unique<EncodedFrame>(base_layer));
  frames.push_back(std::make_unique<EncodedFrame>(top_layer));

  auto combined_frame = CombineAndDeleteFrames(std::move(frames));
  const std::optional<
      std::variant<FrameInstrumentationSyncData, FrameInstrumentationData>>&
      data = combined_frame->CodecSpecific()->frame_instrumentation_data;

  ASSERT_TRUE(data.has_value());
  const FrameInstrumentationData* instrumentation =
      std::get_if<FrameInstrumentationData>(&*data);
  ASSERT_NE(instrumentation, nullptr);

  // Expect to have the same frame_instrumentation_data as the highest spatial
  // layer.
  EXPECT_EQ(instrumentation->sequence_index, 10);
  EXPECT_FALSE(instrumentation->communicate_upper_bits);
  EXPECT_EQ(instrumentation->std_dev, 1.0);
  EXPECT_EQ(instrumentation->luma_error_threshold, 3);
  EXPECT_EQ(instrumentation->chroma_error_threshold, 4);
  EXPECT_THAT(instrumentation->sample_values, ElementsAre(0.1, 0.3, 2.1));
}

}  // namespace
}  // namespace webrtc