xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision da4dbfa47a47beb355289f3dd685797cee69ab77)
1 /*
2  * Copyright (C) 2001 Carlos Hasan
3  * Copyright (C) 2001 François Revol
4  * Copyright (C) 2001 Axel Dörfler
5  * Copyright (C) 2004 Marcus Overhagen
6  * Copyright (C) 2009 Stephan Amßus <superstippi@gmx.de>
7  * Copyright (C) 2014 Colin Günther <coling@gmx.de>
8  * Copyright (C) 2015 Adrien Destugues <pulkomandy@pulkomandy.tk>
9  *
10  * All rights reserved. Distributed under the terms of the MIT License.
11  */
12 
13 //! libavcodec based decoder for Haiku
14 
15 
16 #include "AVCodecDecoder.h"
17 
18 #include <new>
19 
20 #include <assert.h>
21 #include <string.h>
22 
23 #include <Bitmap.h>
24 #include <Debug.h>
25 #include <String.h>
26 
27 #include "Utilities.h"
28 
29 
30 #undef TRACE
31 //#define TRACE_AV_CODEC
32 #ifdef TRACE_AV_CODEC
33 #	define TRACE(x...)	printf(x)
34 #	define TRACE_AUDIO(x...)	printf(x)
35 #	define TRACE_VIDEO(x...)	printf(x)
36 #else
37 #	define TRACE(x...)
38 #	define TRACE_AUDIO(x...)
39 #	define TRACE_VIDEO(x...)
40 #endif
41 
42 //#define LOG_STREAM_TO_FILE
43 #ifdef LOG_STREAM_TO_FILE
44 #	include <File.h>
45 	static BFile sAudioStreamLogFile(
46 		"/boot/home/Desktop/AVCodecDebugAudioStream.raw",
47 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
48 	static BFile sVideoStreamLogFile(
49 		"/boot/home/Desktop/AVCodecDebugVideoStream.raw",
50 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
51 	static int sDumpedPackets = 0;
52 #endif
53 
// FFmpeg renamed the codec-id enum to AVCodecID; this file still uses the
// older CodecID spelling, so alias it here.
typedef AVCodecID CodecID;

// On-the-wire layout of a WAV format header (fields match Microsoft's
// WAVEFORMATEX). Setup() overlays this on the info buffer of
// B_WAV_FORMAT_FAMILY streams to extract the block alignment and the codec
// specific extra data that follows the fixed-size part.
struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint16 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;
	// extra_data[extra_size]
} _PACKED;

// Snapshot of the audio properties of the first decoded frame; a block of
// this type is stored in fRawDecodedAudio->opaque (allocated in
// _NegotiateAudioOutputFormat(), zeroed in _ResetRawDecodedAudio()).
struct avformat_codec_context {
	int sample_rate;
	int channels;
};
71 
72 
73 // profiling related globals
74 #define DO_PROFILING 0
75 #if DO_PROFILING
76 static bigtime_t decodingTime = 0;
77 static bigtime_t conversionTime = 0;
78 static long profileCounter = 0;
79 #endif
80 
81 
/*!	\brief Constructs a decoder in its unconfigured state.

	The actual codec is selected later in Setup() and opened in
	NegotiateOutputFormat(); here only default state and the helper
	AVFrame / AVCodecContext allocations are set up.
*/
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fCodecContext(avcodec_alloc_context3(NULL)),
		// allocated without a codec; replaced by a codec-specific context
		// in NegotiateOutputFormat()
	fResampleContext(NULL),
	fDecodedData(NULL),
	fDecodedDataSizeInBytes(0),
	fPostProcessedDecodedPicture(av_frame_alloc()),
	fRawDecodedPicture(av_frame_alloc()),
	fRawDecodedAudio(av_frame_alloc()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fOutputColorSpace(B_NO_COLOR_SPACE),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),
	fInputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fDecodedDataBuffer(av_frame_alloc()),
	fDecodedDataBufferOffset(0),
	fDecodedDataBufferSize(0),
	fTempPacket(NULL),
		// lazily allocated by _ResetTempPacket()
	fBufferSinkContext(NULL),
	fBufferSourceContext(NULL),
	fFilterGraph(NULL),
	fFilterFrame(NULL)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	system_info info;
	get_system_info(&info);

	// Be tolerant of slightly broken streams: careful error detection,
	// maximum error concealment, and one decoding thread per CPU.
	fCodecContext->err_recognition = AV_EF_CAREFUL;
	fCodecContext->error_concealment = 3;
	fCodecContext->thread_count = info.cpu_count;
}
137 
138 
/*!	\brief Releases all FFmpeg and buffer resources owned by the decoder.

	Note the ordering constraints: fRawDecodedAudio->opaque is a plain
	av_realloc() block and must be freed before the frame itself, and the
	codec context's extradata pointer is detached before freeing the
	context because that memory is owned by fExtraData (or the input
	format's meta data), not by FFmpeg.
*/
AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%lld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	swr_free(&fResampleContext);
	free(fChunkBuffer);
	free(fDecodedData);

	av_frame_free(&fPostProcessedDecodedPicture);
	av_frame_free(&fRawDecodedPicture);
	av_free(fRawDecodedAudio->opaque);
		// allocated via av_realloc() in _NegotiateAudioOutputFormat();
		// av_frame_free() below would not release it
	av_frame_free(&fRawDecodedAudio);
	// Detach the extradata (owned by fExtraData / the input format) so
	// avcodec_free_context() does not free memory it does not own.
	fCodecContext->extradata = NULL;
	avcodec_free_context(&fCodecContext);
	av_frame_free(&fDecodedDataBuffer);

	av_frame_free(&fFilterFrame);
	avfilter_graph_free(&fFilterGraph);

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
#endif

	delete[] fExtraData;

	av_packet_free(&fTempPacket);
}
175 
176 
177 void
178 AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
179 {
180 	snprintf(mci->short_name, 32, "%s", fCodec->name);
181 	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
182 	mci->id = 0;
183 	mci->sub_id = fCodec->id;
184 }
185 
186 
187 status_t
188 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
189 	size_t infoSize)
190 {
191 	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
192 		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
193 		return B_ERROR;
194 
195 	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
196 	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));
197 
198 #ifdef TRACE_AV_CODEC
199 	char buffer[1024];
200 	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
201 	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
202 	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
203 	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
204 		ioEncodedFormat->user_data_type);
205 	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
206 		ioEncodedFormat->MetaDataSize());
207 #endif
208 
209 	media_format_description description;
210 	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
211 			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
212 		if (description.u.misc.file_format != 'ffmp')
213 			return B_NOT_SUPPORTED;
214 		fCodec = avcodec_find_decoder(static_cast<CodecID>(
215 			description.u.misc.codec));
216 		if (fCodec == NULL) {
217 			TRACE("  unable to find the correct FFmpeg "
218 				"decoder (id = %lu)\n", description.u.misc.codec);
219 			return B_ERROR;
220 		}
221 		TRACE("  found decoder %s\n", fCodec->name);
222 
223 		const void* extraData = infoBuffer;
224 		fExtraDataSize = infoSize;
225 		if (description.family == B_WAV_FORMAT_FAMILY
226 				&& infoSize >= sizeof(wave_format_ex)) {
227 			TRACE("  trying to use wave_format_ex\n");
228 			// Special case extra data in B_WAV_FORMAT_FAMILY
229 			const wave_format_ex* waveFormatData
230 				= (const wave_format_ex*)infoBuffer;
231 
232 			size_t waveFormatSize = infoSize;
233 			if (waveFormatData != NULL && waveFormatSize > 0) {
234 				fBlockAlign = waveFormatData->block_align;
235 				TRACE("  found block align: %d\n", fBlockAlign);
236 				fExtraDataSize = waveFormatData->extra_size;
237 				// skip the wave_format_ex from the extra data.
238 				extraData = waveFormatData + 1;
239 			}
240 		} else {
241 			if (fIsAudio) {
242 				fBlockAlign
243 					= ioEncodedFormat->u.encoded_audio.output.buffer_size;
244 				TRACE("  using buffer_size as block align: %d\n",
245 					fBlockAlign);
246 			}
247 		}
248 		if (extraData != NULL && fExtraDataSize > 0) {
249 			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
250 			delete[] fExtraData;
251 			fExtraData = new(std::nothrow) char[fExtraDataSize];
252 			if (fExtraData != NULL)
253 				memcpy(fExtraData, infoBuffer, fExtraDataSize);
254 			else
255 				fExtraDataSize = 0;
256 		}
257 
258 		fInputFormat = *ioEncodedFormat;
259 		return B_OK;
260 	} else {
261 		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
262 	}
263 
264 	printf("AVCodecDecoder::Setup failed!\n");
265 	return B_ERROR;
266 }
267 
268 
269 status_t
270 AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
271 {
272 	status_t ret = B_OK;
273 	// Reset the FFmpeg codec to flush buffers, so we keep the sync
274 	if (fCodecInitDone) {
275 		avcodec_flush_buffers(fCodecContext);
276 		_ResetTempPacket();
277 	}
278 
279 	// Flush internal buffers as well.
280 	free(fChunkBuffer);
281 	fChunkBuffer = NULL;
282 	fChunkBufferSize = 0;
283 	fDecodedDataBufferOffset = 0;
284 	fDecodedDataBufferSize = 0;
285 	fDecodedDataSizeInBytes = 0;
286 
287 	fFrame = frame;
288 
289 	return ret;
290 }
291 
292 
293 status_t
294 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
295 {
296 	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
297 		fIsAudio?('a'):('v'));
298 
299 #ifdef TRACE_AV_CODEC
300 	char buffer[1024];
301 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
302 	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
303 #endif
304 
305 	// close any previous instance
306 	fCodecContext->extradata = NULL;
307 	avcodec_free_context(&fCodecContext);
308 	fCodecContext = avcodec_alloc_context3(fCodec);
309 	fCodecInitDone = false;
310 
311 	if (fIsAudio)
312 		return _NegotiateAudioOutputFormat(inOutFormat);
313 	else
314 		return _NegotiateVideoOutputFormat(inOutFormat);
315 }
316 
317 
318 status_t
319 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
320 	media_header* mediaHeader, media_decode_info* info)
321 {
322 	if (!fCodecInitDone)
323 		return B_NO_INIT;
324 
325 	status_t ret;
326 	if (fIsAudio)
327 		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
328 	else
329 		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);
330 
331 	return ret;
332 }
333 
334 
335 // #pragma mark -
336 
337 
338 void
339 AVCodecDecoder::_ResetTempPacket()
340 {
341 	if (fTempPacket == NULL)
342 		fTempPacket = av_packet_alloc();
343 	fTempPacket->size = 0;
344 	fTempPacket->data = NULL;
345 }
346 
347 
/*!	\brief Opens the audio codec and negotiates the raw audio output format.

	A first chunk of audio is decoded right away so that the stream
	properties reported by FFmpeg (sample format, sample rate, channel
	count) are known before the output format is filled in.

	\param inOutFormat On input the caller's requested format, on output
		the raw audio format this decoder will deliver.

	\returns B_OK on success.
	\returns B_ERROR when opening the codec or decoding the first chunk
		fails.
	\returns B_NO_MEMORY on allocation failure.
*/
status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	_ApplyEssentialAudioContainerPropertiesToContext();
		// This makes audio formats play that encode the audio properties in
		// the audio container (e.g. WMA) and not in the audio frames
		// themself (e.g. MP3).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextAudioFrameChunk() will update the essential audio
		// format properties accordingly regardless of the settings here.

	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}
	fCodecInitDone = true;

	// Discard any leftover state from a previous stream.
	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fDecodedDataBufferOffset = 0;
	fDecodedDataBufferSize = 0;

	_ResetTempPacket();

	// Decode one chunk so fCodecContext reflects the actual stream
	// properties instead of the (possibly wildcard) container values.
	status_t statusOfDecodingFirstFrameChunk = _DecodeNextAudioFrameChunk();
	if (statusOfDecodingFirstFrameChunk != B_OK) {
		TRACE("[a] decoding first audio frame chunk failed\n");
		return B_ERROR;
	}

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate = fCodecContext->sample_rate;
	outputAudioFormat.channel_count = fCodecContext->channels;
	ConvertAVSampleFormatToRawAudioFormat(fCodecContext->sample_fmt,
		outputAudioFormat.format);
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size;
	// Check that buffer_size has a sane value
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	if (outputAudioFormat.buffer_size == 0) {
		// no caller preference: default to room for 512 frames
		outputAudioFormat.buffer_size = 512 * sampleSize
			* outputAudioFormat.channel_count;
	}

	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	// Initialize variables needed to manage decoding as much audio frames as
	// needed to fill the buffer_size.
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;
	// NOTE(review): for planar layouts the per-frame input size appears to
	// be a single sample (one plane's worth) rather than a full interleaved
	// frame -- confirm against the frame copying code further below.
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt))
		fInputFrameSize = sampleSize;
	else
		fInputFrameSize = fOutputFrameSize;

	fRawDecodedAudio->opaque
		= av_realloc(fRawDecodedAudio->opaque, sizeof(avformat_codec_context));
	if (fRawDecodedAudio->opaque == NULL)
		return B_NO_MEMORY;

	// Planar sample data gets converted with swresample; set the context
	// up front (same rate and layout, only the sample format differs).
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
		fResampleContext = swr_alloc_set_opts(NULL,
			fCodecContext->channel_layout,
			fCodecContext->request_sample_fmt,
			fCodecContext->sample_rate,
			fCodecContext->channel_layout,
			fCodecContext->sample_fmt,
			fCodecContext->sample_rate,
			0, NULL);
		swr_init(fResampleContext);
	}

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fCodecContext->bit_rate, fCodecContext->sample_rate, fCodecContext->channels,
		fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	return B_OK;
}
442 
443 
/*!	\brief Opens the video codec and negotiates the raw video output format.

	The first video frame is decoded up front so that fHeader and the
	color conversion setup (fSwsContext / fFormatConversionFunc) describe
	the actual stream before the output format is filled in.

	\param inOutFormat On input the caller's requested format, on output
		the raw video format this decoder will deliver.

	\returns B_OK on success.
	\returns B_ERROR when opening the codec or decoding the first frame
		fails.
*/
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	_ApplyEssentialVideoContainerPropertiesToContext();
		// This makes video formats play that encode the video properties in
		// the video container (e.g. WMV) and not in the video frames
		// themself (e.g. MPEG2).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextVideoFrame() will update the essential video format
		// properties accordingly regardless of the settings here.

	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}
	fCodecInitDone = true;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fOutputColorSpace = B_RGB32;
#else
	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;
#endif

	// Reset the conversion state; it is rebuilt once the decoder reports
	// the actual pixel format.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;

	_ResetTempPacket();

	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	// Fill in the output format from what the first decoded frame put
	// into fHeader.
	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;
	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active
		= fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect
		= fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect
		= fHeader.u.raw_video.pixel_height_aspect;
#if 0
	// This was added by Colin Günther in order to handle streams with a
	// variable frame rate. fOutputFrameRate is computed from the stream
	// time_base, but it actually assumes a timebase equal to the FPS. As far
	// as I can see, a stream with a variable frame rate would have a higher
	// resolution time_base and increment the pts (presentation time) of each
	// frame by a value bigger than one.
	//
	// Fixed rate stream:
	// time_base = 1/50s, frame PTS = 1, 2, 3... (for 50Hz)
	//
	// Variable rate stream:
	// time_base = 1/300s, frame PTS = 6, 12, 18, ... (for 50Hz)
	// time_base = 1/300s, frame PTS = 5, 10, 15, ... (for 60Hz)
	//
	// The fOutputFrameRate currently does not take this into account and
	// ignores the PTS. This results in playing the above sample at 300Hz
	// instead of 50 or 60.
	//
	// However, comparing the PTS for two consecutive implies we have already
	// decoded 2 frames, which may not be the case when this method is first
	// called.
	inOutFormat->u.raw_video.field_rate = fOutputFrameRate;
		// Was calculated by first call to _DecodeNextVideoFrame()
#endif
	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width
		= fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count
		= fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row
		= fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}
568 
569 
570 /*! \brief Fills the outBuffer with one or more already decoded audio frames.
571 
572 	Besides the main duty described above, this method also fills out the other
573 	output parameters as documented below.
574 
575 	\param outBuffer Pointer to the output buffer to copy the decoded audio
576 		frames to.
577 	\param outFrameCount Pointer to the output variable to assign the number of
578 		copied audio frames (usually several audio frames at once).
579 	\param mediaHeader Pointer to the output media header that contains the
580 		properties of the decoded audio frame being the first in the outBuffer.
581 	\param info Specifies additional decoding parameters. (Note: unused).
582 
583 	\returns B_OK Decoding audio frames succeeded.
584 	\returns B_LAST_BUFFER_ERROR There are no more audio frames available.
585 	\returns Other error codes
586 */
587 status_t
588 AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
589 	media_header* mediaHeader, media_decode_info* info)
590 {
591 	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
592 		mediaHeader->start_time / 1000000.0);
593 
594 	status_t audioDecodingStatus
595 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextAudioFrame();
596 
597 	if (audioDecodingStatus != B_OK)
598 		return audioDecodingStatus;
599 
600 	*outFrameCount = fDecodedDataSizeInBytes / fOutputFrameSize;
601 	*mediaHeader = fHeader;
602 	memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes);
603 
604 	fDecodedDataSizeInBytes = 0;
605 
606 	return B_OK;
607 }
608 
609 
610 /*! \brief Fills the outBuffer with an already decoded video frame.
611 
612 	Besides the main duty described above, this method also fills out the other
613 	output parameters as documented below.
614 
615 	\param outBuffer Pointer to the output buffer to copy the decoded video
616 		frame to.
617 	\param outFrameCount Pointer to the output variable to assign the number of
618 		copied video frames (usually one video frame).
619 	\param mediaHeader Pointer to the output media header that contains the
620 		decoded video frame properties.
621 	\param info Specifies additional decoding parameters. (Note: unused).
622 
623 	\returns B_OK Decoding a video frame succeeded.
624 	\returns B_LAST_BUFFER_ERROR There are no more video frames available.
625 	\returns Other error codes
626 */
627 status_t
628 AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
629 	media_header* mediaHeader, media_decode_info* info)
630 {
631 	status_t videoDecodingStatus
632 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame();
633 
634 	if (videoDecodingStatus != B_OK)
635 		return videoDecodingStatus;
636 
637 	*outFrameCount = 1;
638 	*mediaHeader = fHeader;
639 	memcpy(outBuffer, fDecodedData, mediaHeader->size_used);
640 
641 	fDecodedDataSizeInBytes = 0;
642 
643 	return B_OK;
644 }
645 
646 
647 /*!	\brief Decodes next audio frame.
648 
649 	We decode at least one audio frame into fDecodedData. To achieve this goal,
650     we might need to request several chunks of encoded data resulting in a
651     variable execution time of this function.
652 
653     The length of the decoded audio frame(s) is stored in
654     fDecodedDataSizeInBytes. If this variable is greater than zero you can
655     assert that all audio frames in fDecodedData are valid.
656 
657 	It is assumed that the number of expected audio frames is stored in
658 	fOutputFrameCount. So _DecodeNextAudioFrame() must be called only after
659 	fOutputFrameCount has been set.
660 
661 	Note: fOutputFrameCount contains the maximum number of frames a caller
662 	of BMediaDecoder::Decode() expects to receive. There is a direct
663 	relationship between fOutputFrameCount and the buffer size a caller of
664 	BMediaDecoder::Decode() will provide so we make sure to respect this limit
665 	for fDecodedDataSizeInBytes.
666 
667 	On return with status code B_OK the following conditions hold true:
668 		1. fDecodedData contains as much audio frames as the caller of
669 		   BMediaDecoder::Decode() expects.
670 		2. fDecodedData contains lesser audio frames as the caller of
671 		   BMediaDecoder::Decode() expects only when one of the following
672 		   conditions hold true:
673 		       i  No more audio frames left. Consecutive calls to
674 		          _DecodeNextAudioFrame() will then result in the return of
675 		          status code B_LAST_BUFFER_ERROR.
676 		       ii TODO: A change in the size of the audio frames.
677 		3. fHeader is populated with the audio frame properties of the first
678 		   audio frame in fDecodedData. Especially the start_time field of
679 		   fHeader relates to that first audio frame. Start times of
680 		   consecutive audio frames in fDecodedData have to be calculated
681 		   manually (using the frame rate and the frame duration) if the
682 		   caller needs them.
683 
684 	TODO: Handle change of channel_count. Such a change results in a change of
685 	the audio frame size and thus has different buffer requirements.
686 	The most sane approach for implementing this is to return the audio frames
687 	that were still decoded with the previous channel_count and inform the
688 	client of BMediaDecoder::Decode() about the change so that it can adapt to
689 	it. Furthermore we need to adapt our fDecodedData to the new buffer size
690 	requirements accordingly.
691 
692 	\returns B_OK when we successfully decoded enough audio frames
693 	\returns B_LAST_BUFFER_ERROR when there are no more audio frames available.
694 	\returns Other Errors
695 */
status_t
AVCodecDecoder::_DecodeNextAudioFrame()
{
	assert(fTempPacket->size >= 0);
	assert(fDecodedDataSizeInBytes == 0);
		// _DecodeNextAudioFrame needs to be called on empty fDecodedData only!
		// If this assert holds wrong we have a bug somewhere.

	// Point fRawDecodedAudio at the (lazily allocated) fDecodedData
	// buffer and clear its bookkeeping fields.
	status_t resetStatus = _ResetRawDecodedAudio();
	if (resetStatus != B_OK)
		return resetStatus;

	// Accumulate frames until we have as many as one Decode() call is
	// expected to deliver (fOutputFrameCount).
	while (fRawDecodedAudio->nb_samples < fOutputFrameCount) {
		_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow();

		// Drain frames already sitting in fDecodedDataBuffer before
		// decoding any new chunk.
		bool decodedDataBufferHasData = fDecodedDataBufferSize > 0;
		if (decodedDataBufferHasData) {
			_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes();
			continue;
		}

		status_t decodeAudioChunkStatus = _DecodeNextAudioFrameChunk();
		if (decodeAudioChunkStatus == B_LAST_BUFFER_ERROR
				&& fRawDecodedAudio->nb_samples > 0)
			break;
			// End of stream but we already gathered some frames: return
			// them now; the next call will report B_LAST_BUFFER_ERROR.
		if (decodeAudioChunkStatus != B_OK)
			return decodeAudioChunkStatus;
	}

	fFrame += fRawDecodedAudio->nb_samples;
	fDecodedDataSizeInBytes = fRawDecodedAudio->linesize[0];

	_UpdateMediaHeaderForAudioFrame();

#ifdef DEBUG
	dump_ffframe_audio(fRawDecodedAudio, "ffaudi");
#endif

	TRACE_AUDIO("  frame count: %ld current: %lld\n",
		fRawDecodedAudio->nb_samples, fFrame);

	return B_OK;
}
739 
740 
741 /*!	\brief Applies all essential audio input properties to fCodecContext that were
742 		passed to AVCodecDecoder when Setup() was called.
743 
744 	Note: This function must be called before the AVCodec is opened via
745 	avcodec_open2(). Otherwise the behaviour of FFMPEG's audio decoding
746 	function avcodec_receive_frame() is undefined.
747 
748 	Essential properties applied from fInputFormat.u.encoded_audio:
749 		- bit_rate copied to fCodecContext->bit_rate
750 		- frame_size copied to fCodecContext->frame_size
751 		- output.format converted to fCodecContext->sample_fmt
752 		- output.frame_rate copied to fCodecContext->sample_rate
753 		- output.channel_count copied to fCodecContext->channels
754 
755 	Other essential properties being applied:
756 		- fBlockAlign to fCodecContext->block_align
757 		- fExtraData to fCodecContext->extradata
758 		- fExtraDataSize to fCodecContext->extradata_size
759 
760 	TODO: Either the following documentation section should be removed or this
761 	TODO when it is clear whether fInputFormat.MetaData() and
762 	fInputFormat.MetaDataSize() have to be applied to fCodecContext. See the related
763 	TODO in the method implementation.
764 	Only applied when fInputFormat.MetaDataSize() is greater than zero:
765 		- fInputFormat.MetaData() to fCodecContext->extradata
766 		- fInputFormat.MetaDataSize() to fCodecContext->extradata_size
767 */
768 void
769 AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
770 {
771 	media_encoded_audio_format containerProperties
772 		= fInputFormat.u.encoded_audio;
773 
774 	fCodecContext->bit_rate
775 		= static_cast<int>(containerProperties.bit_rate);
776 	fCodecContext->frame_size
777 		= static_cast<int>(containerProperties.frame_size);
778 	ConvertRawAudioFormatToAVSampleFormat(
779 		containerProperties.output.format, fCodecContext->sample_fmt);
780 	ConvertRawAudioFormatToAVSampleFormat(
781 		containerProperties.output.format, fCodecContext->request_sample_fmt);
782 	fCodecContext->sample_rate
783 		= static_cast<int>(containerProperties.output.frame_rate);
784 	fCodecContext->channels
785 		= static_cast<int>(containerProperties.output.channel_count);
786 	// Check that channel count is not still a wild card!
787 	if (fCodecContext->channels == 0) {
788 		TRACE("  channel_count still a wild-card, assuming stereo.\n");
789 		fCodecContext->channels = 2;
790 	}
791 
792 	fCodecContext->block_align = fBlockAlign;
793 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
794 	fCodecContext->extradata_size = fExtraDataSize;
795 
796 	// TODO: This probably needs to go away, there is some misconception
797 	// about extra data / info buffer and meta data. See
798 	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
799 	// extradata_size into media_format::MetaData(), but used to ignore
800 	// the infoBuffer passed to GetStreamInfo(). I think this may be why
801 	// the code below was added.
802 	if (fInputFormat.MetaDataSize() > 0) {
803 		fCodecContext->extradata = static_cast<uint8_t*>(
804 			const_cast<void*>(fInputFormat.MetaData()));
805 		fCodecContext->extradata_size = fInputFormat.MetaDataSize();
806 	}
807 
808 	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
809 		"extradata_size %d\n",
810 		fCodecContext->bit_rate,
811 		fCodecContext->sample_rate,
812 		fCodecContext->channels,
813 		fCodecContext->block_align,
814 		fCodecContext->extradata_size);
815 }
816 
817 
818 /*!	\brief Resets important fields in fRawDecodedVideo to their default values.
819 
820 	Note: Also initializes fDecodedData if not done already.
821 
822 	\returns B_OK Resetting successfully completed.
823 	\returns B_NO_MEMORY No memory left for correct operation.
824 */
825 status_t
826 AVCodecDecoder::_ResetRawDecodedAudio()
827 {
828 	if (fDecodedData == NULL) {
829 		size_t maximumSizeOfDecodedData = fOutputFrameCount * fOutputFrameSize;
830 		fDecodedData
831 			= static_cast<uint8_t*>(malloc(maximumSizeOfDecodedData));
832 	}
833 	if (fDecodedData == NULL)
834 		return B_NO_MEMORY;
835 
836 	fRawDecodedAudio->data[0] = fDecodedData;
837 	fRawDecodedAudio->linesize[0] = 0;
838 	fRawDecodedAudio->format = AV_SAMPLE_FMT_NONE;
839 	fRawDecodedAudio->pkt_dts = AV_NOPTS_VALUE;
840 	fRawDecodedAudio->nb_samples = 0;
841 	memset(fRawDecodedAudio->opaque, 0, sizeof(avformat_codec_context));
842 
843 	return B_OK;
844 }
845 
846 
847 /*!	\brief Checks fDecodedDataBufferSize and fTempPacket for invalid values,
848 		reports them and assigns valid values.
849 
850 	Note: This method is intended to be called before any code is executed that
851 	deals with moving, loading or decoding any audio frames.
852 */
853 void
854 AVCodecDecoder::_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow()
855 {
856 	if (fDecodedDataBufferSize < 0) {
857 		fprintf(stderr, "Decoding read past the end of the decoded data "
858 			"buffer! %" B_PRId32 "\n", fDecodedDataBufferSize);
859 		fDecodedDataBufferSize = 0;
860 	}
861 	if (fTempPacket->size < 0) {
862 		fprintf(stderr, "Decoding read past the end of the temp packet! %d\n",
863 			fTempPacket->size);
864 		fTempPacket->size = 0;
865 	}
866 }
867 
868 
869 /*!	\brief Moves audio frames from fDecodedDataBuffer to fRawDecodedAudio (and
870 		thus to fDecodedData) and updates the start times of fRawDecodedAudio,
871 		fDecodedDataBuffer and fTempPacket accordingly.
872 
873 	When moving audio frames to fRawDecodedAudio this method also makes sure
874 	that the following important fields of fRawDecodedAudio are populated and
875 	updated with correct values:
876 		- fRawDecodedAudio->data[0]: Points to first free byte of fDecodedData
877 		- fRawDecodedAudio->linesize[0]: Total size of frames in fDecodedData
878 		- fRawDecodedAudio->format: Format of first audio frame
879 		- fRawDecodedAudio->pkt_dts: Start time of first audio frame
880 		- fRawDecodedAudio->nb_samples: Number of audio frames
881 		- fRawDecodedAudio->opaque: Contains the following fields for the first
882 		  audio frame:
883 		      - channels: Channel count of first audio frame
884 		      - sample_rate: Frame rate of first audio frame
885 
886 	This function assumes to be called only when the following assumptions
887 	hold true:
888 		1. There are decoded audio frames available in fDecodedDataBuffer
889 		   meaning that fDecodedDataBufferSize is greater than zero.
890 		2. There is space left in fRawDecodedAudio to move some audio frames
891 		   in. This means that fRawDecodedAudio has lesser audio frames than
892 		   the maximum allowed (specified by fOutputFrameCount).
893 		3. The audio frame rate is known so that we can calculate the time
894 		   range (covered by the moved audio frames) to update the start times
895 		   accordingly.
896 		4. The field fRawDecodedAudio->opaque points to a memory block
897 		   representing a structure of type avformat_codec_context.
898 
899 	After this function returns the caller can safely make the following
900 	assumptions:
901 		1. The number of decoded audio frames in fDecodedDataBuffer is
		   decreased though it may still be greater than zero.
903 		2. The number of frames in fRawDecodedAudio has increased and all
904 		   important fields are updated (see listing above).
905 		3. Start times of fDecodedDataBuffer and fTempPacket were increased
906 		   with the time range covered by the moved audio frames.
907 
908 	Note: This function raises an exception (by calling the debugger), when
909 	fDecodedDataBufferSize is not a multiple of fOutputFrameSize.
910 */
void
AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
{
	// Calling contract (see doxygen comment above): frames are available,
	// there is room left in fRawDecodedAudio, and the frame rate is known.
	assert(fDecodedDataBufferSize > 0);
	assert(fRawDecodedAudio->nb_samples < fOutputFrameCount);
	assert(fOutputFrameRate > 0);

	// Frames that still fit into the output vs. frames available as input.
	int32 outFrames = fOutputFrameCount - fRawDecodedAudio->nb_samples;
	int32 inFrames = fDecodedDataBufferSize;

	int32 frames = min_c(outFrames, inFrames);
	if (frames == 0)
		debugger("fDecodedDataBufferSize not multiple of frame size!");
		// NOTE(review): given the asserts above, frames can only be 0 if a
		// precondition was violated (asserts may be compiled out); this acts
		// as a last-resort safety net.

	// Some decoders do not support format conversion on themselves, or use
	// "planar" audio (each channel separated instead of interleaved samples).
	// In that case, we use swresample to convert the data
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
#if 0
		const uint8_t* ptr[8];
		for (int i = 0; i < 8; i++) {
			if (fDecodedDataBuffer->data[i] == NULL)
				ptr[i] = NULL;
			else
				ptr[i] = fDecodedDataBuffer->data[i] + fDecodedDataBufferOffset;
		}

		// When there are more input frames than space in the output buffer,
		// we could feed everything to swr and it would buffer the extra data.
		// However, there is no easy way to flush that data without feeding more
		// input, and it makes our timestamp computations fail.
		// So, we feed only as much frames as we can get out, and handle the
		// buffering ourselves.
		// TODO Ideally, we should try to size our output buffer so that it can
		// always hold all the output (swr provides helper functions for this)
		inFrames = frames;
		frames = swr_convert(fResampleContext, fRawDecodedAudio->data,
			outFrames, ptr, inFrames);

		if (frames < 0)
			debugger("resampling failed");
#else
		// interleave planar audio with same format
		// (per output frame, copy one sample from every channel plane)
		uintptr_t out = (uintptr_t)fRawDecodedAudio->data[0];
		int32 offset = fDecodedDataBufferOffset;
		for (int i = 0; i < frames; i++) {
			for (int j = 0; j < fCodecContext->channels; j++) {
				memcpy((void*)out, fDecodedDataBuffer->data[j]
					+ offset, fInputFrameSize);
				out += fInputFrameSize;
			}
			offset += fInputFrameSize;
		}
		outFrames = frames;
		inFrames = frames;
#endif
	} else {
		// Already interleaved: a single copy moves all channels at once.
		memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
				+ fDecodedDataBufferOffset, frames * fOutputFrameSize);
		outFrames = frames;
		inFrames = frames;
	}

	// remainingSize: bytes consumed per input plane;
	// decodedSize: bytes produced in the interleaved output buffer.
	size_t remainingSize = inFrames * fInputFrameSize;
	size_t decodedSize = outFrames * fOutputFrameSize;
	fDecodedDataBufferSize -= inFrames;

	// fRawDecodedAudio->data[0] still equals fDecodedData only before the
	// very first move; use that to populate the first-frame metadata once.
	bool firstAudioFramesCopiedToRawDecodedAudio
		= fRawDecodedAudio->data[0] != fDecodedData;
	if (!firstAudioFramesCopiedToRawDecodedAudio) {
		fRawDecodedAudio->format = fDecodedDataBuffer->format;
		fRawDecodedAudio->pkt_dts = fDecodedDataBuffer->pkt_dts;

		avformat_codec_context* codecContext
			= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
		codecContext->channels = fCodecContext->channels;
		codecContext->sample_rate = fCodecContext->sample_rate;
	}

	// Advance the write position and bookkeeping of fRawDecodedAudio ...
	fRawDecodedAudio->data[0] += decodedSize;
	fRawDecodedAudio->linesize[0] += decodedSize;
	fRawDecodedAudio->nb_samples += outFrames;

	// ... and the read position within fDecodedDataBuffer.
	fDecodedDataBufferOffset += remainingSize;

	// Update start times accordingly
	bigtime_t framesTimeInterval = static_cast<bigtime_t>(
		(1000000LL * frames) / fOutputFrameRate);
	fDecodedDataBuffer->pkt_dts += framesTimeInterval;
	// Start time of buffer is updated in case that it contains
	// more audio frames to move.
	fTempPacket->dts += framesTimeInterval;
	// Start time of fTempPacket is updated in case the fTempPacket
	// contains more audio frames to decode.
}
1006 
1007 
1008 /*!	\brief Decodes next chunk of audio frames.
1009 
1010 	This method handles all the details of loading the input buffer
	(fChunkBuffer) at the right time and of calling FFMPEG often enough until
1012 	some audio frames have been decoded.
1013 
1014 	FFMPEG decides how much audio frames belong to a chunk. Because of that
1015 	it is very likely that _DecodeNextAudioFrameChunk has to be called several
1016 	times to decode enough audio frames to please the caller of
1017 	BMediaDecoder::Decode().
1018 
1019 	This function assumes to be called only when the following assumptions
1020 	hold true:
1021 		1. fDecodedDataBufferSize equals zero.
1022 
1023 	After this function returns successfully the caller can safely make the
1024 	following assumptions:
1025 		1. fDecodedDataBufferSize is greater than zero.
1026 		2. fDecodedDataBufferOffset is set to zero.
1027 		3. fDecodedDataBuffer contains audio frames.
1028 
1029 
1030 	\returns B_OK on successfully decoding one audio frame chunk.
1031 	\returns B_LAST_BUFFER_ERROR No more audio frame chunks available. From
1032 		this point on further calls will return this same error.
1033 	\returns B_ERROR Decoding failed
1034 */
1035 status_t
1036 AVCodecDecoder::_DecodeNextAudioFrameChunk()
1037 {
1038 	assert(fDecodedDataBufferSize == 0);
1039 
1040 	while (fDecodedDataBufferSize == 0) {
1041 		status_t loadingChunkStatus
1042 			= _LoadNextChunkIfNeededAndAssignStartTime();
1043 		if (loadingChunkStatus != B_OK)
1044 			return loadingChunkStatus;
1045 
1046 		status_t decodingStatus
1047 			= _DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer();
1048 		if (decodingStatus != B_OK) {
1049 			// Assume the audio decoded until now is broken so replace it with
1050 			// some silence.
1051 			memset(fDecodedData, 0, fRawDecodedAudio->linesize[0]);
1052 
1053 			if (!fAudioDecodeError) {
1054 				// Report failure if not done already
1055 				int32 chunkBufferOffset = fTempPacket->data - fChunkBuffer;
1056 				printf("########### audio decode error, "
1057 					"fTempPacket->size %d, fChunkBuffer data offset %" B_PRId32
1058 					"\n", fTempPacket->size, chunkBufferOffset);
1059 				fAudioDecodeError = true;
1060 			}
1061 
1062 			// Assume that next audio chunk can be decoded so keep decoding.
1063 			continue;
1064 		}
1065 
1066 		fAudioDecodeError = false;
1067 	}
1068 
1069 	return B_OK;
1070 }
1071 
1072 
1073 /*!	\brief Tries to decode at least one audio frame and store it in the
1074 		fDecodedDataBuffer.
1075 
1076 	This function assumes to be called only when the following assumptions
1077 	hold true:
1078 		1. fDecodedDataBufferSize equals zero.
1079 		2. fTempPacket->size is greater than zero.
1080 
1081 	After this function returns successfully the caller can safely make the
1082 	following assumptions:
1083 		1. fDecodedDataBufferSize is greater than zero in the common case.
1084 		   Also see "Note" below.
1085 		2. fTempPacket was updated to exclude the data chunk that was consumed
1086 		   by avcodec_send_packet().
1087 		3. fDecodedDataBufferOffset is set to zero.
1088 
1089 	When this function failed to decode at least one audio frame due to a
1090 	decoding error the caller can safely make the following assumptions:
1091 		1. fDecodedDataBufferSize equals zero.
1092 		2. fTempPacket->size equals zero.
1093 
1094 	Note: It is possible that there wasn't any audio frame decoded into
1095 	fDecodedDataBuffer after calling this function. This is normal and can
1096 	happen when there was either a decoding error or there is some decoding
1097 	delay in FFMPEGs audio decoder. Another call to this method is totally
1098 	safe and is even expected as long as the calling assumptions hold true.
1099 
1100 	\returns B_OK Decoding successful. fDecodedDataBuffer contains decoded
1101 		audio frames only when fDecodedDataBufferSize is greater than zero.
1102 		fDecodedDataBuffer is empty, when avcodec_receive_frame() didn't return
1103 		audio frames due to delayed decoding or incomplete audio frames.
1104 	\returns B_ERROR Decoding failed thus fDecodedDataBuffer contains no audio
1105 		frames.
1106 */
status_t
AVCodecDecoder::_DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer()
{
	assert(fDecodedDataBufferSize == 0);

	// Recycle the frame and reset the read offset for the fresh data.
	av_frame_unref(fDecodedDataBuffer);
	fDecodedDataBufferOffset = 0;

	// First try to drain a frame the decoder may already have buffered.
	int error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
	if (error == AVERROR_EOF)
		return B_LAST_BUFFER_ERROR;

	if (error == AVERROR(EAGAIN)) {
		// We need to feed more data into the decoder
		avcodec_send_packet(fCodecContext, fTempPacket);
			// NOTE(review): the return value is ignored here; if sending
			// fails the packet is silently dropped and the receive call
			// below reports EAGAIN again, which surfaces as B_ERROR.

		// All the data is always consumed by avcodec_send_packet
		fTempPacket->size = 0;

		// Try again to see if we can get some decoded audio out now
		error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
	}

	// nb_samples can be zero (or the frame untouched) when no frame was
	// produced; normalize so callers can rely on fDecodedDataBufferSize >= 0.
	fDecodedDataBufferSize = fDecodedDataBuffer->nb_samples;
	if (fDecodedDataBufferSize < 0)
		fDecodedDataBufferSize = 0;

	if (error == 0)
		return B_OK;
	else
		return B_ERROR;
}
1139 
1140 
1141 /*! \brief Updates relevant fields of the class member fHeader with the
1142 		properties of the most recently decoded audio frame.
1143 
1144 	The following fields of fHeader are updated:
1145 		- fHeader.type
1146 		- fHeader.file_pos
1147 		- fHeader.orig_size
1148 		- fHeader.start_time
1149 		- fHeader.size_used
1150 		- fHeader.u.raw_audio.frame_rate
1151 		- fHeader.u.raw_audio.channel_count
1152 
1153 	It is assumed that this function is called only	when the following asserts
1154 	hold true:
1155 		1. We actually got a new audio frame decoded by the audio decoder.
1156 		2. fHeader wasn't updated for the new audio frame yet. You MUST call
1157 		   this method only once per decoded audio frame.
1158 		3. fRawDecodedAudio's fields relate to the first audio frame contained
1159 		   in fDecodedData. Especially the following fields are of importance:
1160 		       - fRawDecodedAudio->pkt_dts: Start time of first audio frame
1161 		       - fRawDecodedAudio->opaque: Contains the following fields for
1162 		         the first audio frame:
1163 			         - channels: Channel count of first audio frame
1164 			         - sample_rate: Frame rate of first audio frame
1165 */
1166 void
1167 AVCodecDecoder::_UpdateMediaHeaderForAudioFrame()
1168 {
1169 	fHeader.type = B_MEDIA_RAW_AUDIO;
1170 	fHeader.file_pos = 0;
1171 	fHeader.orig_size = 0;
1172 	fHeader.start_time = fRawDecodedAudio->pkt_dts;
1173 	fHeader.size_used = fRawDecodedAudio->linesize[0];
1174 
1175 	avformat_codec_context* codecContext
1176 		= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
1177 	fHeader.u.raw_audio.channel_count = codecContext->channels;
1178 	fHeader.u.raw_audio.frame_rate = codecContext->sample_rate;
1179 }
1180 
1181 
1182 /*! \brief Decodes next video frame.
1183 
1184     We decode exactly one video frame into fDecodedData. To achieve this goal,
1185     we might need to request several chunks of encoded data resulting in a
1186     variable execution time of this function.
1187 
1188     The length of the decoded video frame is stored in
1189     fDecodedDataSizeInBytes. If this variable is greater than zero, you can
1190     assert that there is a valid video frame available in fDecodedData.
1191 
1192     The decoded video frame in fDecodedData has color space conversion and
1193     deinterlacing already applied.
1194 
1195     To every decoded video frame there is a media_header populated in
1196     fHeader, containing the corresponding video frame properties.
1197 
1198 	Normally every decoded video frame has a start_time field populated in the
1199 	associated fHeader, that determines the presentation time of the frame.
1200 	This relationship will only hold true, when each data chunk that is
1201 	provided via GetNextChunk() contains data for exactly one encoded video
1202 	frame (one complete frame) - not more and not less.
1203 
1204 	We can decode data chunks that contain partial video frame data, too. In
1205 	that case, you cannot trust the value of the start_time field in fHeader.
1206 	We simply have no logic in place to establish a meaningful relationship
1207 	between an incomplete frame and the start time it should be presented.
1208 	Though this	might change in the future.
1209 
1210 	We can decode data chunks that contain more than one video frame, too. In
1211 	that case, you cannot trust the value of the start_time field in fHeader.
1212 	We simply have no logic in place to track the start_time across multiple
1213 	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
1214 	and the start time it should be presented isn't established at the moment.
1215 	Though this	might change in the future.
1216 
	Moreover, the fOutputFrameRate variable is updated for every decoded video
	frame.
1219 
1220 	On first call the member variables fSwsContext / fFormatConversionFunc	are
1221 	initialized.
1222 
1223 	\returns B_OK when we successfully decoded one video frame
1224 	\returns B_LAST_BUFFER_ERROR when there are no more video frames available.
1225 	\returns B_NO_MEMORY when we have no memory left for correct operation.
1226 	\returns Other Errors
1227 */
1228 status_t
1229 AVCodecDecoder::_DecodeNextVideoFrame()
1230 {
1231 	int error;
1232 	int send_error;
1233 
1234 #if DO_PROFILING
1235 	bigtime_t startTime = system_time();
1236 #endif
1237 
1238 	error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1239 
1240 	if (error == AVERROR_EOF)
1241 		return B_LAST_BUFFER_ERROR;
1242 
1243 	if (error == AVERROR(EAGAIN)) {
1244 		do {
1245 			status_t loadingChunkStatus
1246 				= _LoadNextChunkIfNeededAndAssignStartTime();
1247 			if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
1248 				return _FlushOneVideoFrameFromDecoderBuffer();
1249 			if (loadingChunkStatus != B_OK) {
1250 				TRACE("[v] AVCodecDecoder::_DecodeNextVideoFrame(): error from "
1251 					"GetNextChunk(): %s\n", strerror(loadingChunkStatus));
1252 				return loadingChunkStatus;
1253 			}
1254 
1255 			char timestamp[AV_TS_MAX_STRING_SIZE];
1256 			av_ts_make_time_string(timestamp,
1257 				fTempPacket->dts, &fCodecContext->time_base);
1258 			TRACE("[v] Feed %d more bytes (dts %s)\n", fTempPacket->size,
1259 				timestamp);
1260 
1261 			send_error = avcodec_send_packet(fCodecContext, fTempPacket);
1262 			if (send_error < 0 && send_error != AVERROR(EAGAIN)) {
1263 				TRACE("[v] AVCodecDecoder: ignoring error in decoding frame "
1264 				"%lld: %d\n", fFrame, error);
1265 			}
1266 
1267 			// Packet is consumed, clear it
1268 			fTempPacket->data = NULL;
1269 			fTempPacket->size = 0;
1270 
1271 			error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1272 			if (error != 0 && error != AVERROR(EAGAIN)) {
1273 				TRACE("[v] frame %lld - decoding error, error code: %d, "
1274 					"chunk size: %ld\n", fFrame, error, fChunkBufferSize);
1275 			}
1276 
1277 		} while (error != 0);
1278 	}
1279 
1280 #if DO_PROFILING
1281 	bigtime_t formatConversionStart = system_time();
1282 #endif
1283 
1284 	status_t handleStatus = _HandleNewVideoFrameAndUpdateSystemState();
1285 	if (handleStatus != B_OK)
1286 		return handleStatus;
1287 
1288 #if DO_PROFILING
1289 	bigtime_t doneTime = system_time();
1290 	decodingTime += formatConversionStart - startTime;
1291 	conversionTime += doneTime - formatConversionStart;
1292 	profileCounter++;
1293 	if (!(fFrame % 5)) {
1294 		printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required %lld\n",
1295 			decodingTime / profileCounter, conversionTime / profileCounter,
1296 			fFrame, bigtime_t(1000000LL / fOutputFrameRate));
1297 		decodingTime = 0;
1298 		conversionTime = 0;
1299 		profileCounter = 0;
1300 	}
1301 #endif
1302 	return error;
1303 }
1304 
1305 
1306 /*!	\brief Applies all essential video input properties to fCodecContext that were
1307 		passed to AVCodecDecoder when Setup() was called.
1308 
	Note: This function must be called before the AVCodec is opened via
	avcodec_open2(). Otherwise the behaviour of FFMPEG's video decoding
	functions (avcodec_send_packet() / avcodec_receive_frame()) is undefined.
1312 
1313 	Essential properties applied from fInputFormat.u.encoded_video.output:
1314 		- display.line_width copied to fCodecContext->width
1315 		- display.line_count copied to fCodecContext->height
1316 		- pixel_width_aspect and pixel_height_aspect converted to
1317 		  fCodecContext->sample_aspect_ratio
1318 		- field_rate converted to fCodecContext->time_base and
1319 		  fCodecContext->ticks_per_frame
1320 
1321 	Other essential properties being applied:
1322 		- fExtraData to fCodecContext->extradata
1323 		- fExtraDataSize to fCodecContext->extradata_size
1324 */
1325 void
1326 AVCodecDecoder::_ApplyEssentialVideoContainerPropertiesToContext()
1327 {
1328 	media_raw_video_format containerProperties
1329 		= fInputFormat.u.encoded_video.output;
1330 
1331 	fCodecContext->width = containerProperties.display.line_width;
1332 	fCodecContext->height = containerProperties.display.line_count;
1333 
1334 	if (containerProperties.pixel_width_aspect > 0
1335 		&& containerProperties.pixel_height_aspect > 0) {
1336 		ConvertVideoAspectWidthAndHeightToAVCodecContext(
1337 			containerProperties.pixel_width_aspect,
1338 			containerProperties.pixel_height_aspect, *fCodecContext);
1339 	}
1340 
1341 	if (containerProperties.field_rate > 0.0) {
1342 		ConvertVideoFrameRateToAVCodecContext(containerProperties.field_rate,
1343 			*fCodecContext);
1344 	}
1345 
1346 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
1347 	fCodecContext->extradata_size = fExtraDataSize;
1348 }
1349 
1350 
1351 /*! \brief Loads the next  chunk into fChunkBuffer and assigns it (including
1352 		the start time) to fTempPacket but only if fTempPacket is empty.
1353 
1354 	\returns B_OK
1355 		1. meaning: Next chunk is loaded.
1356 		2. meaning: No need to load and assign anything. Proceed as usual.
1357 	\returns B_LAST_BUFFER_ERROR No more chunks available. fChunkBuffer	and
1358 		fTempPacket are left untouched.
1359 	\returns Other errors Caller should bail out because fChunkBuffer and
1360 		fTempPacket are in unknown states. Normal operation cannot be
1361 		guaranteed.
1362 */
status_t
AVCodecDecoder::_LoadNextChunkIfNeededAndAssignStartTime()
{
	// Nothing to do while the current packet still has unconsumed data.
	if (fTempPacket->size > 0)
		return B_OK;

	const void* chunkBuffer = NULL;
	size_t chunkBufferSize = 0;
		// In the case that GetNextChunk() returns an error fChunkBufferSize
		// should be left untouched.
	media_header chunkMediaHeader;

	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer, &chunkBufferSize,
		&chunkMediaHeader);
	if (getNextChunkStatus != B_OK)
		return getNextChunkStatus;

	// Copy the chunk into our own buffer, appending the zeroed padding that
	// FFMPEG requires at the end of decoder input buffers.
	status_t chunkBufferPaddingStatus
		= _CopyChunkToChunkBufferAndAddPadding(chunkBuffer, chunkBufferSize);
	if (chunkBufferPaddingStatus != B_OK)
		return chunkBufferPaddingStatus;

	fTempPacket->data = fChunkBuffer;
	fTempPacket->size = fChunkBufferSize;
	fTempPacket->dts = chunkMediaHeader.start_time;
		// Let FFMPEG handle the correct relationship between start_time and
		// decoded a/v frame. By doing so we are simply copying the way how it
		// is implemented in ffplay.c for video frames (for audio frames it
		// works, too, but isn't used by ffplay.c).
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502
		//
		// FIXME: Research how to establish a meaningful relationship between
		// start_time and decoded a/v frame when the received chunk buffer
		// contains partial a/v frames. Maybe some data formats do contain time
		// stamps (aka pts / dts fields) that can be evaluated by FFMPEG. But
		// as long as I don't have such video data to test it, it makes no
		// sense trying to implement it.
		//
		// FIXME: Implement tracking start_time of video frames originating in
		// data chunks that encode more than one video frame at a time. In that
		// case one would increment the start_time for each consecutive frame
		// of such a data chunk (like it is done for audio frame decoding). But
		// as long as I don't have such video data to test it, it makes no
		// sense to implement it.

#ifdef LOG_STREAM_TO_FILE
	// Debug aid: dump the first 100 packets of the stream to a file.
	BFile* logFile = fIsAudio ? &sAudioStreamLogFile : &sVideoStreamLogFile;
	if (sDumpedPackets < 100) {
		logFile->Write(chunkBuffer, fChunkBufferSize);
		printf("wrote %ld bytes\n", fChunkBufferSize);
		sDumpedPackets++;
	} else if (sDumpedPackets == 100)
		logFile->Unset();
#endif

	return B_OK;
}
1420 
1421 
1422 /*! \brief Copies a chunk into fChunkBuffer and adds a "safety net" of
1423 		additional memory as required by FFMPEG for input buffers to video
1424 		decoders.
1425 
1426 	This is needed so that some decoders can read safely a predefined number of
1427 	bytes at a time for performance optimization purposes.
1428 
1429 	The additional memory has a size of AV_INPUT_BUFFER_PADDING_SIZE as defined
1430 	in avcodec.h.
1431 
1432 	Ownership of fChunkBuffer memory is with the class so it needs to be freed
1433 	at the right times (on destruction, on seeking).
1434 
1435 	Also update fChunkBufferSize to reflect the size of the contained data
1436 	(leaving out the padding).
1437 
1438 	\param chunk The chunk to copy.
1439 	\param chunkSize Size of the chunk in bytes
1440 
1441 	\returns B_OK Padding was successful. You are responsible for releasing the
1442 		allocated memory. fChunkBufferSize is set to chunkSize.
1443 	\returns B_NO_MEMORY Padding failed.
1444 		fChunkBuffer is set to NULL making it safe to call free() on it.
1445 		fChunkBufferSize is set to 0 to reflect the size of fChunkBuffer.
1446 */
1447 status_t
1448 AVCodecDecoder::_CopyChunkToChunkBufferAndAddPadding(const void* chunk,
1449 	size_t chunkSize)
1450 {
1451 	uint8_t* tmpBuffer = static_cast<uint8_t*>(realloc(fChunkBuffer,
1452 		chunkSize + AV_INPUT_BUFFER_PADDING_SIZE));
1453 	if (tmpBuffer == NULL) {
1454 		free(fChunkBuffer);
1455 		fChunkBuffer = NULL;
1456 		fChunkBufferSize = 0;
1457 		return B_NO_MEMORY;
1458 	} else {
1459 		fChunkBuffer = tmpBuffer;
1460 	}
1461 
1462 	memcpy(fChunkBuffer, chunk, chunkSize);
1463 	memset(fChunkBuffer + chunkSize, 0, AV_INPUT_BUFFER_PADDING_SIZE);
1464 		// Establish safety net, by zero'ing the padding area.
1465 
1466 	fChunkBufferSize = chunkSize;
1467 
1468 	return B_OK;
1469 }
1470 
1471 
1472 /*! \brief Executes all steps needed for a freshly decoded video frame.
1473 
1474 	\see _UpdateMediaHeaderForVideoFrame() and
1475 	\see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to
1476 	call this method.
1477 
1478 	\returns B_OK when video frame was handled successfully
	\returns B_NO_MEMORY when no memory is left for correct operation.
1480 */
1481 status_t
1482 AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState()
1483 {
1484 	_UpdateMediaHeaderForVideoFrame();
1485 	status_t postProcessStatus = _DeinterlaceAndColorConvertVideoFrame();
1486 	if (postProcessStatus != B_OK)
1487 		return postProcessStatus;
1488 
1489 	ConvertAVCodecContextToVideoFrameRate(*fCodecContext, fOutputFrameRate);
1490 
1491 #ifdef DEBUG
1492 	dump_ffframe_video(fRawDecodedPicture, "ffpict");
1493 #endif
1494 
1495 	fFrame++;
1496 
1497 	return B_OK;
1498 }
1499 
1500 
1501 /*! \brief Flushes one video frame - if any - still buffered by the decoder.
1502 
1503 	Some FFMPEG decoder are buffering video frames. To retrieve those buffered
1504 	frames the decoder needs to be told so.
1505 
1506 	The intended use of this method is to call it, once there are no more data
1507 	chunks for decoding left. Reframed in other words: Once GetNextChunk()
1508 	returns with status B_LAST_BUFFER_ERROR it is time to start flushing.
1509 
1510 	\returns B_OK Retrieved one video frame, handled it accordingly and updated
1511 		the system state accordingly.
		There may be more video frames left. So it is valid for the client of
1513 		AVCodecDecoder to call it one more time.
1514 
1515 	\returns B_LAST_BUFFER_ERROR No video frame left.
1516 		The client of the AVCodecDecoder should stop calling it now.
1517 
1518 	\returns B_NO_MEMORY No memory left for correct operation.
1519 */
1520 status_t
1521 AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer()
1522 {
1523 	// Tell the decoder there is nothing to send anymore
1524 	avcodec_send_packet(fCodecContext, NULL);
1525 
1526 	// Get any remaining frame
1527 	int error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1528 
1529 	if (error != 0 && error != AVERROR(EAGAIN)) {
1530 		// video buffer is flushed successfully
1531 		// (or there is an error, not much we can do about it)
1532 		return B_LAST_BUFFER_ERROR;
1533 	}
1534 
1535 	return _HandleNewVideoFrameAndUpdateSystemState();
1536 }
1537 
1538 
1539 /*! \brief Updates relevant fields of the class member fHeader with the
1540 		properties of the most recently decoded video frame.
1541 
1542 	It is assumed that this function is called only	when the following asserts
1543 	hold true:
1544 		1. We actually got a new picture decoded by the video decoder.
1545 		2. fHeader wasn't updated for the new picture yet. You MUST call this
1546 		   method only once per decoded video frame.
1547 		3. This function MUST be called after
		   _DeinterlaceAndColorConvertVideoFrame() as it relies on an updated
		    fDecodedDataSizeInBytes.
		4. There will be at maximum only one decoded video frame in our cache
1551 		   at any single point in time. Otherwise you couldn't tell to which
1552 		   cached decoded video frame the properties in fHeader relate to.
		5. AVCodecContext is still valid for this video frame (This is the case
		   when this function is called after avcodec_decode_video2() and
		   before the next call to avcodec_decode_video2()).
1556 */
void
AVCodecDecoder::_UpdateMediaHeaderForVideoFrame()
{
	fHeader.type = B_MEDIA_RAW_VIDEO;
	fHeader.file_pos = 0;
	fHeader.orig_size = 0;
	fHeader.start_time = fRawDecodedPicture->pkt_dts;
		// The pkt_dts is already in microseconds, even if ffmpeg docs says
		// 'in codec time_base units'
	// Size of the frame after conversion to the requested output color space.
	fHeader.size_used = av_image_get_buffer_size(
		colorspace_to_pixfmt(fOutputColorSpace), fRawDecodedPicture->width,
		fRawDecodedPicture->height, 1);
	fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width;
	fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height;
	fHeader.u.raw_video.bytes_per_row
		= CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace,
			fRawDecodedPicture->width);
	fHeader.u.raw_video.field_gamma = 1.0;
	fHeader.u.raw_video.field_sequence = fFrame;
	fHeader.u.raw_video.field_number = 0;
	fHeader.u.raw_video.pulldown_number = 0;
	fHeader.u.raw_video.first_active_line = 1;
	fHeader.u.raw_video.line_count = fRawDecodedPicture->height;

	// Derive the pixel aspect ratio from the codec context.
	ConvertAVCodecContextToVideoAspectWidthAndHeight(*fCodecContext,
		fHeader.u.raw_video.pixel_width_aspect,
		fHeader.u.raw_video.pixel_height_aspect);

	char timestamp[AV_TS_MAX_STRING_SIZE];
	av_ts_make_time_string(timestamp,
		fRawDecodedPicture->best_effort_timestamp, &fCodecContext->time_base);

	TRACE("[v] start_time=%s field_sequence=%lu\n",
		timestamp, fHeader.u.raw_video.field_sequence);
}
1592 
1593 
1594 /*! \brief This function applies deinterlacing (only if needed) and color
1595 	conversion to the video frame in fRawDecodedPicture.
1596 
1597 	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
1598 	converted yet (otherwise this function behaves in unknown manners).
1599 
1600 	This function MUST be called after _UpdateMediaHeaderForVideoFrame() as it
	relies on the fHeader.size_used and fHeader.u.raw_video.bytes_per_row fields
1602 	for correct operation
1603 
1604 	You should only call this function when you	got a new picture decoded by
1605 	the video decoder.
1606 
1607 	When this function finishes the postprocessed video frame will be available
1608 	in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes
1609 	will be set accordingly).
1610 
1611 	\returns B_OK video frame successfully deinterlaced and color converted.
1612 	\returns B_NO_MEMORY Not enough memory available for correct operation.
1613 */
1614 status_t
1615 AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
1616 {
1617 	int displayWidth = fRawDecodedPicture->width;
1618 	int displayHeight = fRawDecodedPicture->height;
1619 	AVFrame deinterlacedPicture;
1620 	bool useDeinterlacedPicture = false;
1621 
1622 	if (fRawDecodedPicture->interlaced_frame) {
1623 		AVFrame rawPicture;
1624 		rawPicture.data[0] = fRawDecodedPicture->data[0];
1625 		rawPicture.data[1] = fRawDecodedPicture->data[1];
1626 		rawPicture.data[2] = fRawDecodedPicture->data[2];
1627 		rawPicture.data[3] = fRawDecodedPicture->data[3];
1628 		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
1629 		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
1630 		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
1631 		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];
1632 
1633 		if (av_image_alloc(deinterlacedPicture.data,
1634 				deinterlacedPicture.linesize, displayWidth, displayHeight,
1635 				fCodecContext->pix_fmt, 1) < 0)
1636 			return B_NO_MEMORY;
1637 
1638 		// deinterlace implemented using avfilter
1639 		_ProcessFilterGraph(&deinterlacedPicture, &rawPicture,
1640 			fCodecContext->pix_fmt, displayWidth, displayHeight);
1641 		useDeinterlacedPicture = true;
1642 	}
1643 
1644 	// Some decoders do not set pix_fmt until they have decoded 1 frame
1645 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1646 	if (fSwsContext == NULL) {
1647 		fSwsContext = sws_getContext(displayWidth, displayHeight,
1648 			fCodecContext->pix_fmt, displayWidth, displayHeight,
1649 			colorspace_to_pixfmt(fOutputColorSpace),
1650 			SWS_FAST_BILINEAR, NULL, NULL, NULL);
1651 	}
1652 #else
1653 	if (fFormatConversionFunc == NULL) {
1654 		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
1655 			fCodecContext->pix_fmt, displayWidth, displayHeight);
1656 	}
1657 #endif
1658 
1659 	fDecodedDataSizeInBytes = fHeader.size_used;
1660 
1661 	if (fDecodedData == NULL) {
1662 		const size_t kOptimalAlignmentForColorConversion = 32;
1663 		posix_memalign(reinterpret_cast<void**>(&fDecodedData),
1664 			kOptimalAlignmentForColorConversion, fDecodedDataSizeInBytes);
1665 	}
1666 	if (fDecodedData == NULL)
1667 		return B_NO_MEMORY;
1668 
1669 	fPostProcessedDecodedPicture->data[0] = fDecodedData;
1670 	fPostProcessedDecodedPicture->linesize[0]
1671 		= fHeader.u.raw_video.bytes_per_row;
1672 
1673 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1674 	if (fSwsContext != NULL) {
1675 #else
1676 	if (fFormatConversionFunc != NULL) {
1677 #endif
1678 		if (useDeinterlacedPicture) {
1679 			AVFrame deinterlacedFrame;
1680 			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
1681 			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
1682 			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
1683 			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
1684 			deinterlacedFrame.linesize[0]
1685 				= deinterlacedPicture.linesize[0];
1686 			deinterlacedFrame.linesize[1]
1687 				= deinterlacedPicture.linesize[1];
1688 			deinterlacedFrame.linesize[2]
1689 				= deinterlacedPicture.linesize[2];
1690 			deinterlacedFrame.linesize[3]
1691 				= deinterlacedPicture.linesize[3];
1692 
1693 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1694 			sws_scale(fSwsContext, deinterlacedFrame.data,
1695 				deinterlacedFrame.linesize, 0, displayHeight,
1696 				fPostProcessedDecodedPicture->data,
1697 				fPostProcessedDecodedPicture->linesize);
1698 #else
1699 			(*fFormatConversionFunc)(&deinterlacedFrame,
1700 				fPostProcessedDecodedPicture, displayWidth, displayHeight);
1701 #endif
1702 		} else {
1703 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1704 			sws_scale(fSwsContext, fRawDecodedPicture->data,
1705 				fRawDecodedPicture->linesize, 0, displayHeight,
1706 				fPostProcessedDecodedPicture->data,
1707 				fPostProcessedDecodedPicture->linesize);
1708 #else
1709 			(*fFormatConversionFunc)(fRawDecodedPicture,
1710 				fPostProcessedDecodedPicture, displayWidth, displayHeight);
1711 #endif
1712 		}
1713 	}
1714 
1715 	if (fRawDecodedPicture->interlaced_frame)
1716 		av_freep(&deinterlacedPicture.data[0]);
1717 
1718 	return B_OK;
1719 }
1720 
1721 
1722 /*! \brief Init the deinterlace filter graph.
1723 
1724 	\returns B_OK the filter graph could be built.
1725 	\returns B_BAD_VALUE something was wrong with building the graph.
1726 */
1727 status_t
1728 AVCodecDecoder::_InitFilterGraph(enum AVPixelFormat pixfmt, int32 width,
1729 	int32 height)
1730 {
1731 	if (fFilterGraph != NULL) {
1732 		av_frame_free(&fFilterFrame);
1733 		avfilter_graph_free(&fFilterGraph);
1734 	}
1735 
1736 	fFilterGraph = avfilter_graph_alloc();
1737 
1738 	BString arguments;
1739 	arguments.SetToFormat("buffer=video_size=%" B_PRId32 "x%" B_PRId32
1740 		":pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];[in]yadif[out];"
1741 		"[out]buffersink", width, height,
1742 		pixfmt);
1743 	AVFilterInOut* inputs = NULL;
1744 	AVFilterInOut* outputs = NULL;
1745 	TRACE("[v] _InitFilterGraph(): %s\n", arguments.String());
1746 	int ret = avfilter_graph_parse2(fFilterGraph, arguments.String(), &inputs,
1747 		&outputs);
1748 	if (ret < 0) {
1749 		fprintf(stderr, "avfilter_graph_parse2() failed\n");
1750 		return B_BAD_VALUE;
1751 	}
1752 
1753 	ret = avfilter_graph_config(fFilterGraph, NULL);
1754 	if (ret < 0) {
1755 		fprintf(stderr, "avfilter_graph_config() failed\n");
1756 		return B_BAD_VALUE;
1757 	}
1758 
1759 	fBufferSourceContext = avfilter_graph_get_filter(fFilterGraph,
1760 		"Parsed_buffer_0");
1761 	fBufferSinkContext = avfilter_graph_get_filter(fFilterGraph,
1762 		"Parsed_buffersink_2");
1763 	if (fBufferSourceContext == NULL || fBufferSinkContext == NULL) {
1764 		fprintf(stderr, "avfilter_graph_get_filter() failed\n");
1765 		return B_BAD_VALUE;
1766 	}
1767 	fFilterFrame = av_frame_alloc();
1768 	fLastWidth = width;
1769 	fLastHeight = height;
1770 	fLastPixfmt = pixfmt;
1771 
1772 	return B_OK;
1773 }
1774 
1775 
/*! \brief Process an AVFrame with the deinterlace filter graph.

	Feeds exactly one video frame into the graph and copies the filtered
	result into \a dst. Replacement for avpicture_deinterlace() from
	ffmpeg 2.x.

	\param dst Destination frame; its data planes must already be allocated
		large enough for \a width x \a height in \a pixfmt.
	\param src Source frame to deinterlace; its pixel data is not modified.

	\returns B_OK video frame successfully deinterlaced.
	\returns B_BAD_DATA No frame could be output.
	\returns B_NO_MEMORY Not enough memory available for correct operation.
*/
status_t
AVCodecDecoder::_ProcessFilterGraph(AVFrame *dst, const AVFrame *src,
	enum AVPixelFormat pixfmt, int32 width, int32 height)
{
	// (Re)build the graph when none exists yet or when the frame geometry
	// or pixel format changed since the last frame.
	if (fFilterGraph == NULL || width != fLastWidth
		|| height != fLastHeight || pixfmt != fLastPixfmt) {

		status_t err = _InitFilterGraph(pixfmt, width, height);
		if (err != B_OK)
			return err;
	}

	// Wrap the source picture in fFilterFrame without copying pixel data:
	// only the plane pointers and line sizes are taken over.
	memcpy(fFilterFrame->data, src->data, sizeof(src->data));
	memcpy(fFilterFrame->linesize, src->linesize, sizeof(src->linesize));
	fFilterFrame->width = width;
	fFilterFrame->height = height;
	fFilterFrame->format = pixfmt;

	int ret = av_buffersrc_add_frame(fBufferSourceContext, fFilterFrame);
	if (ret < 0)
		return B_NO_MEMORY;

	// Pull the filtered frame back out; fFilterFrame is reused as the
	// output frame here, its previous contents get overwritten.
	ret = av_buffersink_get_frame(fBufferSinkContext, fFilterFrame);
	if (ret < 0)
		return B_BAD_DATA;

	// Copy the filtered planes into the caller-provided destination and
	// release the reference obtained from the buffersink.
	av_image_copy(dst->data, dst->linesize, (const uint8**)fFilterFrame->data,
		fFilterFrame->linesize, pixfmt, width, height);
	av_frame_unref(fFilterFrame);
	return B_OK;
}
1816