File: VideoDecoder.cpp

#include "cutscene/ffmpeg/VideoDecoder.h"

#include "tracing/tracing.h"

namespace {
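// Build a libswscale context that converts decoded frames from the codec's
// pixel format to the requested destination format at the original size.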
SwsContext* getSWSContext(int width, int height, AVPixelFormat fmt, AVPixelFormat destination_fmt)
{
	return sws_getContext(width, height, fmt, width, height, destination_fmt, SWS_BILINEAR, nullptr, nullptr, nullptr);
}

// Convert a stream timestamp, expressed in time_base units, to seconds.
double getFrameTime(int64_t pts, AVRational time_base) {
	return pts * av_q2d(time_base);
}
}

namespace cutscene {
namespace ffmpeg {
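// Wraps a converted AVFrame for the cutscene player. The wrapper owns the
// frame and the image buffer attached to it and releases both on destruction.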
class FFMPEGVideoFrame: public VideoFrame {
	size_t _width;
	size_t _height;
	AVFrame* _frame;

  public:
	FFMPEGVideoFrame(size_t width, size_t height, AVFrame* frame) : _width(width), _height(height), _frame(frame) {}

	~FFMPEGVideoFrame() override {
		if (_frame != nullptr) {
			av_freep(&_frame->data[0]);
			av_frame_free(&_frame);
		}
	}
	size_t getPlaneNumber() override
	{
		switch (_frame->format) {
		case AV_PIX_FMT_YUV420P:
			// YUV data is planar
			return 3;
		default:
			// Everything else is packed
			return 1;
		}
	}
	FrameSize getPlaneSize(size_t plane) override
	{
		switch (_frame->format) {
		case AV_PIX_FMT_YUV420P: {
			// YUV data is planar
			auto width  = plane > 0 ? _width / 2 : _width;
			auto height = plane > 0 ? _height / 2 : _height;

			return {width, height, (size_t)_frame->linesize[plane]};
		}
		default:
			// Everything else is packed
			return {_width, _height, (size_t)_frame->linesize[plane]};
		}
	}
	void* getPlaneData(size_t plane) override { return _frame->data[plane]; }
};

VideoDecoder::VideoDecoder(DecoderStatus* status, AVPixelFormat destination_fmt)
    : FFMPEGStreamDecoder(status), m_frameId(0), m_destinationFormat(destination_fmt)
{
	m_swsCtx = getSWSContext(m_status->videoCodecPars.width, m_status->videoCodecPars.height,
	                         m_status->videoCodecPars.pixel_format, destination_fmt);
}

VideoDecoder::~VideoDecoder() {
	sws_freeContext(m_swsCtx);
}

void VideoDecoder::convertAndPushPicture(const AVFrame* frame) {
	// Allocate a frame to hold the converted picture. Copy only the frame
	// properties (timestamps etc.) here; av_frame_copy() would fail on a
	// frame that has no buffers allocated yet.
	AVFrame* yuvFrame = av_frame_alloc();
	av_frame_copy_props(yuvFrame, frame);
	yuvFrame->format = m_destinationFormat;

	// Allocate an image buffer in the destination format; it is released in
	// ~FFMPEGVideoFrame() via av_freep(&_frame->data[0]).
	av_image_alloc(yuvFrame->data, yuvFrame->linesize, m_status->videoCodecPars.width, m_status->videoCodecPars.height,
	               m_destinationFormat, 1);

	if (m_status->videoCodecPars.pixel_format == m_destinationFormat) {
		av_image_copy(yuvFrame->data, yuvFrame->linesize, (const uint8_t**)(frame->data), frame->linesize,
		              m_destinationFormat, m_status->videoCodecPars.width, m_status->videoCodecPars.height);
	} else {
		// Convert frame to destination format
		sws_scale(m_swsCtx, (uint8_t const* const*)frame->data, frame->linesize, 0, m_status->videoCodecPars.height,
		          yuvFrame->data, yuvFrame->linesize);
	}

	std::unique_ptr<FFMPEGVideoFrame> videoFramePtr(
	    new FFMPEGVideoFrame(static_cast<size_t>(m_status->videoCodecPars.width),
	                         static_cast<size_t>(m_status->videoCodecPars.height), yuvFrame));
	videoFramePtr->id = ++m_frameId;
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(58, 3, 102)
	videoFramePtr->frameTime = getFrameTime(frame->best_effort_timestamp, m_status->videoStream->time_base);
#else
	videoFramePtr->frameTime = getFrameTime(av_frame_get_best_effort_timestamp(frame), m_status->videoStream->time_base);
#endif

	pushFrame(VideoFramePtr(videoFramePtr.release()));
}

void VideoDecoder::decodePacket(AVPacket* packet) {
	TRACE_SCOPE(tracing::CutsceneFFmpegVideoDecoder);
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
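	// Feed the packet to the decoder and drain every frame it produces.
	// avcodec_send_packet() returns EAGAIN when the decoder first needs its
	// output read, in which case the same packet is sent again after draining.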
	int send_result;
	do {
		send_result = avcodec_send_packet(m_status->videoCodecCtx, packet);

		while(avcodec_receive_frame(m_status->videoCodecCtx, m_decodeFrame) == 0) {
			convertAndPushPicture(m_decodeFrame);
		}
	} while (send_result == AVERROR(EAGAIN));
#else
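	// Legacy API: decode the packet in a single call; finishedFrame signals
	// whether a complete frame was produced.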
	int finishedFrame = 0;
	auto result = avcodec_decode_video2(m_status->videoCodecCtx, m_decodeFrame, &finishedFrame, packet);

	if (result >= 0 && finishedFrame) {
		convertAndPushPicture(m_decodeFrame);
	}
#endif
}

void VideoDecoder::finishDecoding() {
	TRACE_SCOPE(tracing::CutsceneFFmpegVideoDecoder);

#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(57, 24, 255)
	// Send flush packet
	avcodec_send_packet(m_status->videoCodecCtx, nullptr);

	// Handle those decoders that have a delay
	while (true) {
		auto ret = avcodec_receive_frame(m_status->videoCodecCtx, m_decodeFrame);

		if (ret == 0) {
			convertAndPushPicture(m_decodeFrame);
		} else {
			// Everything consumed or error
			break;
		}
	}
#else
	// Handle those decoders that have a delay
	AVPacket nullPacket;
	memset(&nullPacket, 0, sizeof(nullPacket));
	nullPacket.data = nullptr;
	nullPacket.size = 0;

	while (true) {
		int finishedFrame = 1;
		auto err = avcodec_decode_video2(m_status->videoCodecCtx, m_decodeFrame, &finishedFrame, &nullPacket);

		if (err < 0 || !finishedFrame) {
			break;
		}

		convertAndPushPicture(m_decodeFrame);
	}
#endif
}
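// Reset the codec's internal state and discard any frames it still buffers.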
void VideoDecoder::flushBuffers() {
	avcodec_flush_buffers(m_status->videoCodecCtx);
}
}
}