xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision d579eb9efe82919385bd0e18d9c26baa6d5d46bf)
1 /*
2  * Copyright (C) 2001 Carlos Hasan
3  * Copyright (C) 2001 François Revol
4  * Copyright (C) 2001 Axel Dörfler
5  * Copyright (C) 2004 Marcus Overhagen
6  * Copyright (C) 2009 Stephan Amßus <superstippi@gmx.de>
7  * Copyright (C) 2014 Colin Günther <coling@gmx.de>
8  * Copyright (C) 2015 Adrien Destugues <pulkomandy@pulkomandy.tk>
9  *
10  * All rights reserved. Distributed under the terms of the MIT License.
11  */
12 
13 //! libavcodec based decoder for Haiku
14 
15 
16 #include "AVCodecDecoder.h"
17 
18 #include <new>
19 
20 #include <assert.h>
21 #include <string.h>
22 
23 #include <Bitmap.h>
24 #include <Debug.h>
25 #include <String.h>
26 
27 #include "Utilities.h"
28 
29 
30 #undef TRACE
31 //#define TRACE_AV_CODEC
32 #ifdef TRACE_AV_CODEC
33 #	define TRACE(x...)	printf(x)
34 #	define TRACE_AUDIO(x...)	printf(x)
35 #	define TRACE_VIDEO(x...)	printf(x)
36 #else
37 #	define TRACE(x...)
38 #	define TRACE_AUDIO(x...)
39 #	define TRACE_VIDEO(x...)
40 #endif
41 
42 //#define LOG_STREAM_TO_FILE
43 #ifdef LOG_STREAM_TO_FILE
44 #	include <File.h>
45 	static BFile sAudioStreamLogFile(
46 		"/boot/home/Desktop/AVCodecDebugAudioStream.raw",
47 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
48 	static BFile sVideoStreamLogFile(
49 		"/boot/home/Desktop/AVCodecDebugVideoStream.raw",
50 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
51 	static int sDumpedPackets = 0;
52 #endif
53 
// Compatibility alias: older FFmpeg versions named the codec id enum CodecID
// instead of AVCodecID.
typedef AVCodecID CodecID;

// On-the-wire layout of a Microsoft WAVEFORMATEX header, as delivered in the
// info buffer for B_WAV_FORMAT_FAMILY streams (see Setup()). Must stay packed
// to match the stored format.
struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;		// sample rate in Hz
	uint32 avg_bytes_per_sec;
	uint16 block_align;			// copied into fBlockAlign for the codec
	uint16 bits_per_sample;
	uint16 extra_size;			// size of the codec specific data that follows
	// extra_data[extra_size]
} _PACKED;

// Minimal snapshot of audio stream properties; a block of this shape is kept
// in fRawDecodedAudio->opaque while decoding audio.
struct avformat_codec_context {
	int sample_rate;
	int channels;
};
71 
72 
73 // profiling related globals
74 #define DO_PROFILING 0
75 #if DO_PROFILING
76 static bigtime_t decodingTime = 0;
77 static bigtime_t conversionTime = 0;
78 static long profileCounter = 0;
79 #endif
80 
81 
/*!	\brief Constructs the decoder with all FFmpeg state pre-allocated but no
	codec selected yet — the codec is chosen later in Setup().
*/
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fCodecContext(avcodec_alloc_context3(NULL)),
	fResampleContext(NULL),
	fDecodedData(NULL),
	fDecodedDataSizeInBytes(0),
	fPostProcessedDecodedPicture(av_frame_alloc()),
	fRawDecodedPicture(av_frame_alloc()),
	fRawDecodedAudio(av_frame_alloc()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fOutputColorSpace(B_NO_COLOR_SPACE),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),
	fInputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fDecodedDataBuffer(av_frame_alloc()),
	fDecodedDataBufferOffset(0),
	fDecodedDataBufferSize(0),
	fTempPacket(NULL),
	fBufferSinkContext(NULL),
	fBufferSourceContext(NULL),
	fFilterGraph(NULL),
	fFilterFrame(NULL)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	system_info info;
	get_system_info(&info);

	// Be tolerant towards broken streams: recognize errors carefully and let
	// FFmpeg conceal decoding errors where it can.
	fCodecContext->err_recognition = AV_EF_CAREFUL;
	fCodecContext->error_concealment = 3;
	// Decode with as many threads as the machine has CPU cores.
	fCodecContext->thread_count = info.cpu_count;
}
137 
138 
/*!	\brief Releases every buffer, frame and FFmpeg context owned by the
	decoder.
*/
AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	// Report the average decoding and conversion times over all frames.
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%lld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	swr_free(&fResampleContext);
	free(fChunkBuffer);
	free(fDecodedData);

	av_frame_free(&fPostProcessedDecodedPicture);
	av_frame_free(&fRawDecodedPicture);
	// fRawDecodedAudio->opaque was allocated with av_realloc() in
	// _NegotiateAudioOutputFormat(), so free it before freeing the frame.
	av_free(fRawDecodedAudio->opaque);
	av_frame_free(&fRawDecodedAudio);
	// The context never owns the extradata (it points into fExtraData or the
	// input format's meta data, see
	// _ApplyEssentialAudioContainerPropertiesToContext()). Detach it so
	// avcodec_free_context() does not free foreign memory.
	fCodecContext->extradata = NULL;
	avcodec_free_context(&fCodecContext);
	av_frame_free(&fDecodedDataBuffer);

	av_frame_free(&fFilterFrame);
	avfilter_graph_free(&fFilterGraph);

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
#endif

	delete[] fExtraData;

	av_packet_free(&fTempPacket);
}
175 
176 
177 void
178 AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
179 {
180 	snprintf(mci->short_name, 32, "%s", fCodec->name);
181 	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
182 	mci->id = 0;
183 	mci->sub_id = fCodec->id;
184 }
185 
186 
187 status_t
188 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
189 	size_t infoSize)
190 {
191 	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
192 		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
193 		return B_ERROR;
194 
195 	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
196 	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));
197 
198 #ifdef TRACE_AV_CODEC
199 	char buffer[1024];
200 	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
201 	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
202 	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
203 	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
204 		ioEncodedFormat->user_data_type);
205 	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
206 		ioEncodedFormat->MetaDataSize());
207 #endif
208 
209 	media_format_description description;
210 	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
211 			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
212 		if (description.u.misc.file_format != 'ffmp')
213 			return B_NOT_SUPPORTED;
214 		fCodec = avcodec_find_decoder(static_cast<CodecID>(
215 			description.u.misc.codec));
216 		if (fCodec == NULL) {
217 			TRACE("  unable to find the correct FFmpeg "
218 				"decoder (id = %lu)\n", description.u.misc.codec);
219 			return B_ERROR;
220 		}
221 		TRACE("  found decoder %s\n", fCodec->name);
222 
223 		const void* extraData = infoBuffer;
224 		fExtraDataSize = infoSize;
225 		if (description.family == B_WAV_FORMAT_FAMILY
226 				&& infoSize >= sizeof(wave_format_ex)) {
227 			TRACE("  trying to use wave_format_ex\n");
228 			// Special case extra data in B_WAV_FORMAT_FAMILY
229 			const wave_format_ex* waveFormatData
230 				= (const wave_format_ex*)infoBuffer;
231 
232 			size_t waveFormatSize = infoSize;
233 			if (waveFormatData != NULL && waveFormatSize > 0) {
234 				fBlockAlign = waveFormatData->block_align;
235 				TRACE("  found block align: %d\n", fBlockAlign);
236 				fExtraDataSize = waveFormatData->extra_size;
237 				// skip the wave_format_ex from the extra data.
238 				extraData = waveFormatData + 1;
239 			}
240 		} else {
241 			if (fIsAudio) {
242 				fBlockAlign
243 					= ioEncodedFormat->u.encoded_audio.output.buffer_size;
244 				TRACE("  using buffer_size as block align: %d\n",
245 					fBlockAlign);
246 			}
247 		}
248 		if (extraData != NULL && fExtraDataSize > 0) {
249 			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
250 			delete[] fExtraData;
251 			fExtraData = new(std::nothrow) char[fExtraDataSize];
252 			if (fExtraData != NULL)
253 				memcpy(fExtraData, infoBuffer, fExtraDataSize);
254 			else
255 				fExtraDataSize = 0;
256 		}
257 
258 		fInputFormat = *ioEncodedFormat;
259 		return B_OK;
260 	} else {
261 		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
262 	}
263 
264 	printf("AVCodecDecoder::Setup failed!\n");
265 	return B_ERROR;
266 }
267 
268 
269 status_t
270 AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
271 {
272 	status_t ret = B_OK;
273 	// Reset the FFmpeg codec to flush buffers, so we keep the sync
274 	if (fCodecInitDone) {
275 		avcodec_flush_buffers(fCodecContext);
276 		_ResetTempPacket();
277 	}
278 
279 	// Flush internal buffers as well.
280 	free(fChunkBuffer);
281 	fChunkBuffer = NULL;
282 	fChunkBufferSize = 0;
283 	fDecodedDataBufferOffset = 0;
284 	fDecodedDataBufferSize = 0;
285 	fDecodedDataSizeInBytes = 0;
286 
287 	fFrame = frame;
288 
289 	return ret;
290 }
291 
292 
293 status_t
294 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
295 {
296 	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
297 		fIsAudio?('a'):('v'));
298 
299 #ifdef TRACE_AV_CODEC
300 	char buffer[1024];
301 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
302 	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
303 #endif
304 
305 	// close any previous instance
306 	fCodecContext->extradata = NULL;
307 	avcodec_free_context(&fCodecContext);
308 	fCodecContext = avcodec_alloc_context3(fCodec);
309 	fCodecInitDone = false;
310 
311 	if (fIsAudio)
312 		return _NegotiateAudioOutputFormat(inOutFormat);
313 	else
314 		return _NegotiateVideoOutputFormat(inOutFormat);
315 }
316 
317 
318 status_t
319 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
320 	media_header* mediaHeader, media_decode_info* info)
321 {
322 	if (!fCodecInitDone)
323 		return B_NO_INIT;
324 
325 	status_t ret;
326 	if (fIsAudio)
327 		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
328 	else
329 		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);
330 
331 	return ret;
332 }
333 
334 
335 // #pragma mark -
336 
337 
338 void
339 AVCodecDecoder::_ResetTempPacket()
340 {
341 	if (fTempPacket == NULL)
342 		fTempPacket = av_packet_alloc();
343 	fTempPacket->size = 0;
344 	fTempPacket->data = NULL;
345 }
346 
347 
/*!	\brief Opens the audio codec, decodes a first chunk to learn the stream
	properties and fills \a inOutFormat with the resulting raw audio format.

	Also initializes the bookkeeping fields (fOutputFrameSize,
	fOutputFrameCount, fOutputFrameRate, fInputFrameSize), allocates the
	avformat_codec_context block in fRawDecodedAudio->opaque and, for planar
	sample formats, sets up the swresample context used for interleaving.

	\param inOutFormat In: the requested format (buffer_size is honored).
		Out: the negotiated B_MEDIA_RAW_AUDIO format.

	\returns B_OK on success.
	\returns B_ERROR when the codec could not be opened or the first chunk
		could not be decoded.
	\returns B_NO_MEMORY when the opaque block could not be (re)allocated.
*/
status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	_ApplyEssentialAudioContainerPropertiesToContext();
		// This makes audio formats play that encode the audio properties in
		// the audio container (e.g. WMA) and not in the audio frames
		// themself (e.g. MP3).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextAudioFrameChunk() will update the essential audio
		// format properties accordingly regardless of the settings here.

	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}
	fCodecInitDone = true;

	// Start with empty buffers; any leftovers belong to a previous
	// negotiation.
	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fDecodedDataBufferOffset = 0;
	fDecodedDataBufferSize = 0;

	_ResetTempPacket();

	// Decode one chunk so fCodecContext reflects the actual stream properties
	// (sample rate, channels, sample format) instead of the container hints.
	status_t statusOfDecodingFirstFrameChunk = _DecodeNextAudioFrameChunk();
	if (statusOfDecodingFirstFrameChunk != B_OK) {
		TRACE("[a] decoding first audio frame chunk failed\n");
		return B_ERROR;
	}

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate = fCodecContext->sample_rate;
	outputAudioFormat.channel_count = fCodecContext->channels;
	ConvertAVSampleFormatToRawAudioFormat(fCodecContext->sample_fmt,
		outputAudioFormat.format);
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size;
	// Check that buffer_size has a sane value
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	if (outputAudioFormat.buffer_size == 0) {
		// Default to 512 frames worth of samples per buffer.
		outputAudioFormat.buffer_size = 512 * sampleSize
			* outputAudioFormat.channel_count;
	}

	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	// Initialize variables needed to manage decoding as much audio frames as
	// needed to fill the buffer_size.
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;
	// For planar formats each plane holds one channel, so one input "frame"
	// in a plane is a single sample; interleaved data carries all channels.
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt))
		fInputFrameSize = sampleSize;
	else
		fInputFrameSize = fOutputFrameSize;

	fRawDecodedAudio->opaque
		= av_realloc(fRawDecodedAudio->opaque, sizeof(avformat_codec_context));
	if (fRawDecodedAudio->opaque == NULL)
		return B_NO_MEMORY;

	// Planar output has to be interleaved for the Media Kit; set up a
	// swresample context converting sample_fmt to request_sample_fmt at an
	// unchanged rate and channel layout.
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
		fResampleContext = swr_alloc_set_opts(NULL,
			fCodecContext->channel_layout,
			fCodecContext->request_sample_fmt,
			fCodecContext->sample_rate,
			fCodecContext->channel_layout,
			fCodecContext->sample_fmt,
			fCodecContext->sample_rate,
			0, NULL);
		swr_init(fResampleContext);
	}

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fCodecContext->bit_rate, fCodecContext->sample_rate, fCodecContext->channels,
		fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	return B_OK;
}
442 
443 
/*!	\brief Opens the video codec, decodes a first frame to learn the stream
	properties and fills \a inOutFormat with the resulting raw video format.

	The decoded first frame populates fHeader, whose fields (line counts,
	aspect ratio, bytes per row) are copied into the returned format.

	\param inOutFormat In: the requested format (only the display format is
		considered). Out: the negotiated B_MEDIA_RAW_VIDEO format.

	\returns B_OK on success.
	\returns B_ERROR when the codec could not be opened or the first frame
		could not be decoded.
*/
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	_ApplyEssentialVideoContainerPropertiesToContext();
		// This makes video formats play that encode the video properties in
		// the video container (e.g. WMV) and not in the video frames
		// themself (e.g. MPEG2).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextVideoFrame() will update the essential video format
		// properties accordingly regardless of the settings here.

	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}
	fCodecInitDone = true;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fOutputColorSpace = B_RGB32;
#else
	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;
#endif

	// Drop any converter from a previous negotiation; the first decoded frame
	// below sets up a fresh one matching the actual pixel format.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;

	_ResetTempPacket();

	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;
	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active
		= fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect
		= fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect
		= fHeader.u.raw_video.pixel_height_aspect;
	// The framerate in fCodecContext is set to 0 if the codec doesn't know the framerate. Some
	// codecs work only at a fixed framerate, while others allow each frame to have its owm
	// timestamp. For example a stream may switch from 50 to 60Hz, depending on how it was
	// constructed. In that case, it's fine to leave the field_rate as 0 as well, the media kit
	// will handle that just fine as long as each frame comes with a correct presentation timestamp.
	// In fact, it seems better to not set the field_rate at all, rather than set it to a wrong
	// value.
	//
	// TODO The field_rate is twice the frame rate for interlaced streams, so we need to determine
	// if we are decoding an interlaced stream, and wether ffmpeg delivers every half-frame or not
	// in that case (since we let ffmpeg do the deinterlacing).
	inOutFormat->u.raw_video.field_rate = av_q2d(fCodecContext->framerate);
	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width
		= fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count
		= fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row
		= fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}
555 
556 
557 /*! \brief Fills the outBuffer with one or more already decoded audio frames.
558 
559 	Besides the main duty described above, this method also fills out the other
560 	output parameters as documented below.
561 
562 	\param outBuffer Pointer to the output buffer to copy the decoded audio
563 		frames to.
564 	\param outFrameCount Pointer to the output variable to assign the number of
565 		copied audio frames (usually several audio frames at once).
566 	\param mediaHeader Pointer to the output media header that contains the
567 		properties of the decoded audio frame being the first in the outBuffer.
568 	\param info Specifies additional decoding parameters. (Note: unused).
569 
570 	\returns B_OK Decoding audio frames succeeded.
571 	\returns B_LAST_BUFFER_ERROR There are no more audio frames available.
572 	\returns Other error codes
573 */
574 status_t
575 AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
576 	media_header* mediaHeader, media_decode_info* info)
577 {
578 	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
579 		mediaHeader->start_time / 1000000.0);
580 
581 	status_t audioDecodingStatus
582 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextAudioFrame();
583 
584 	if (audioDecodingStatus != B_OK)
585 		return audioDecodingStatus;
586 
587 	*outFrameCount = fDecodedDataSizeInBytes / fOutputFrameSize;
588 	*mediaHeader = fHeader;
589 	memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes);
590 
591 	fDecodedDataSizeInBytes = 0;
592 
593 	return B_OK;
594 }
595 
596 
597 /*! \brief Fills the outBuffer with an already decoded video frame.
598 
599 	Besides the main duty described above, this method also fills out the other
600 	output parameters as documented below.
601 
602 	\param outBuffer Pointer to the output buffer to copy the decoded video
603 		frame to.
604 	\param outFrameCount Pointer to the output variable to assign the number of
605 		copied video frames (usually one video frame).
606 	\param mediaHeader Pointer to the output media header that contains the
607 		decoded video frame properties.
608 	\param info Specifies additional decoding parameters. (Note: unused).
609 
610 	\returns B_OK Decoding a video frame succeeded.
611 	\returns B_LAST_BUFFER_ERROR There are no more video frames available.
612 	\returns Other error codes
613 */
614 status_t
615 AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
616 	media_header* mediaHeader, media_decode_info* info)
617 {
618 	status_t videoDecodingStatus
619 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame();
620 
621 	if (videoDecodingStatus != B_OK)
622 		return videoDecodingStatus;
623 
624 	*outFrameCount = 1;
625 	*mediaHeader = fHeader;
626 	memcpy(outBuffer, fDecodedData, mediaHeader->size_used);
627 
628 	fDecodedDataSizeInBytes = 0;
629 
630 	return B_OK;
631 }
632 
633 
634 /*!	\brief Decodes next audio frame.
635 
636 	We decode at least one audio frame into fDecodedData. To achieve this goal,
637     we might need to request several chunks of encoded data resulting in a
638     variable execution time of this function.
639 
640     The length of the decoded audio frame(s) is stored in
641     fDecodedDataSizeInBytes. If this variable is greater than zero you can
642     assert that all audio frames in fDecodedData are valid.
643 
644 	It is assumed that the number of expected audio frames is stored in
645 	fOutputFrameCount. So _DecodeNextAudioFrame() must be called only after
646 	fOutputFrameCount has been set.
647 
648 	Note: fOutputFrameCount contains the maximum number of frames a caller
649 	of BMediaDecoder::Decode() expects to receive. There is a direct
650 	relationship between fOutputFrameCount and the buffer size a caller of
651 	BMediaDecoder::Decode() will provide so we make sure to respect this limit
652 	for fDecodedDataSizeInBytes.
653 
654 	On return with status code B_OK the following conditions hold true:
655 		1. fDecodedData contains as much audio frames as the caller of
656 		   BMediaDecoder::Decode() expects.
657 		2. fDecodedData contains lesser audio frames as the caller of
658 		   BMediaDecoder::Decode() expects only when one of the following
659 		   conditions hold true:
660 		       i  No more audio frames left. Consecutive calls to
661 		          _DecodeNextAudioFrame() will then result in the return of
662 		          status code B_LAST_BUFFER_ERROR.
663 		       ii TODO: A change in the size of the audio frames.
664 		3. fHeader is populated with the audio frame properties of the first
665 		   audio frame in fDecodedData. Especially the start_time field of
666 		   fHeader relates to that first audio frame. Start times of
667 		   consecutive audio frames in fDecodedData have to be calculated
668 		   manually (using the frame rate and the frame duration) if the
669 		   caller needs them.
670 
671 	TODO: Handle change of channel_count. Such a change results in a change of
672 	the audio frame size and thus has different buffer requirements.
673 	The most sane approach for implementing this is to return the audio frames
674 	that were still decoded with the previous channel_count and inform the
675 	client of BMediaDecoder::Decode() about the change so that it can adapt to
676 	it. Furthermore we need to adapt our fDecodedData to the new buffer size
677 	requirements accordingly.
678 
679 	\returns B_OK when we successfully decoded enough audio frames
680 	\returns B_LAST_BUFFER_ERROR when there are no more audio frames available.
681 	\returns Other Errors
682 */
status_t
AVCodecDecoder::_DecodeNextAudioFrame()
{
	assert(fTempPacket->size >= 0);
	assert(fDecodedDataSizeInBytes == 0);
		// _DecodeNextAudioFrame needs to be called on empty fDecodedData only!
		// If this assert holds wrong we have a bug somewhere.

	// Rewind fRawDecodedAudio to the start of fDecodedData (allocating
	// fDecodedData on first use).
	status_t resetStatus = _ResetRawDecodedAudio();
	if (resetStatus != B_OK)
		return resetStatus;

	// Accumulate decoded frames until the output buffer quota
	// (fOutputFrameCount) is reached: first drain fDecodedDataBuffer, then
	// decode new chunks as needed.
	while (fRawDecodedAudio->nb_samples < fOutputFrameCount) {
		_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow();

		bool decodedDataBufferHasData = fDecodedDataBufferSize > 0;
		if (decodedDataBufferHasData) {
			_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes();
			continue;
		}

		// End of stream is not an error if we already gathered some frames —
		// return the partial buffer instead.
		status_t decodeAudioChunkStatus = _DecodeNextAudioFrameChunk();
		if (decodeAudioChunkStatus == B_LAST_BUFFER_ERROR
				&& fRawDecodedAudio->nb_samples > 0)
			break;
		if (decodeAudioChunkStatus != B_OK)
			return decodeAudioChunkStatus;
	}

	fFrame += fRawDecodedAudio->nb_samples;
	// linesize[0] tracks the total byte size of the frames moved into
	// fDecodedData.
	fDecodedDataSizeInBytes = fRawDecodedAudio->linesize[0];

	_UpdateMediaHeaderForAudioFrame();

#ifdef DEBUG
	dump_ffframe_audio(fRawDecodedAudio, "ffaudi");
#endif

	TRACE_AUDIO("  frame count: %ld current: %lld\n",
		fRawDecodedAudio->nb_samples, fFrame);

	return B_OK;
}
726 
727 
728 /*!	\brief Applies all essential audio input properties to fCodecContext that were
729 		passed to AVCodecDecoder when Setup() was called.
730 
731 	Note: This function must be called before the AVCodec is opened via
732 	avcodec_open2(). Otherwise the behaviour of FFMPEG's audio decoding
733 	function avcodec_receive_frame() is undefined.
734 
735 	Essential properties applied from fInputFormat.u.encoded_audio:
736 		- bit_rate copied to fCodecContext->bit_rate
737 		- frame_size copied to fCodecContext->frame_size
738 		- output.format converted to fCodecContext->sample_fmt
739 		- output.frame_rate copied to fCodecContext->sample_rate
740 		- output.channel_count copied to fCodecContext->channels
741 
742 	Other essential properties being applied:
743 		- fBlockAlign to fCodecContext->block_align
744 		- fExtraData to fCodecContext->extradata
745 		- fExtraDataSize to fCodecContext->extradata_size
746 
747 	TODO: Either the following documentation section should be removed or this
748 	TODO when it is clear whether fInputFormat.MetaData() and
749 	fInputFormat.MetaDataSize() have to be applied to fCodecContext. See the related
750 	TODO in the method implementation.
751 	Only applied when fInputFormat.MetaDataSize() is greater than zero:
752 		- fInputFormat.MetaData() to fCodecContext->extradata
753 		- fInputFormat.MetaDataSize() to fCodecContext->extradata_size
754 */
void
AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
{
	media_encoded_audio_format containerProperties
		= fInputFormat.u.encoded_audio;

	fCodecContext->bit_rate
		= static_cast<int>(containerProperties.bit_rate);
	fCodecContext->frame_size
		= static_cast<int>(containerProperties.frame_size);
	// Map the Media Kit sample format onto FFmpeg's, both for the actual and
	// the requested sample format.
	ConvertRawAudioFormatToAVSampleFormat(
		containerProperties.output.format, fCodecContext->sample_fmt);
	ConvertRawAudioFormatToAVSampleFormat(
		containerProperties.output.format, fCodecContext->request_sample_fmt);
	fCodecContext->sample_rate
		= static_cast<int>(containerProperties.output.frame_rate);
	fCodecContext->channels
		= static_cast<int>(containerProperties.output.channel_count);
	// Check that channel count is not still a wild card!
	if (fCodecContext->channels == 0) {
		TRACE("  channel_count still a wild-card, assuming stereo.\n");
		fCodecContext->channels = 2;
	}

	fCodecContext->block_align = fBlockAlign;
	// Note: the context is only lent these pointers — ownership stays with
	// fExtraData / fInputFormat; the destructor and NegotiateOutputFormat()
	// detach extradata before freeing the context.
	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
	fCodecContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fCodecContext->extradata = static_cast<uint8_t*>(
			const_cast<void*>(fInputFormat.MetaData()));
		fCodecContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n",
		fCodecContext->bit_rate,
		fCodecContext->sample_rate,
		fCodecContext->channels,
		fCodecContext->block_align,
		fCodecContext->extradata_size);
}
803 
804 
805 /*!	\brief Resets important fields in fRawDecodedVideo to their default values.
806 
807 	Note: Also initializes fDecodedData if not done already.
808 
809 	\returns B_OK Resetting successfully completed.
810 	\returns B_NO_MEMORY No memory left for correct operation.
811 */
812 status_t
813 AVCodecDecoder::_ResetRawDecodedAudio()
814 {
815 	if (fDecodedData == NULL) {
816 		size_t maximumSizeOfDecodedData = fOutputFrameCount * fOutputFrameSize;
817 		fDecodedData
818 			= static_cast<uint8_t*>(malloc(maximumSizeOfDecodedData));
819 	}
820 	if (fDecodedData == NULL)
821 		return B_NO_MEMORY;
822 
823 	fRawDecodedAudio->data[0] = fDecodedData;
824 	fRawDecodedAudio->linesize[0] = 0;
825 	fRawDecodedAudio->format = AV_SAMPLE_FMT_NONE;
826 	fRawDecodedAudio->pkt_dts = AV_NOPTS_VALUE;
827 	fRawDecodedAudio->nb_samples = 0;
828 	memset(fRawDecodedAudio->opaque, 0, sizeof(avformat_codec_context));
829 
830 	return B_OK;
831 }
832 
833 
834 /*!	\brief Checks fDecodedDataBufferSize and fTempPacket for invalid values,
835 		reports them and assigns valid values.
836 
837 	Note: This method is intended to be called before any code is executed that
838 	deals with moving, loading or decoding any audio frames.
839 */
840 void
841 AVCodecDecoder::_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow()
842 {
843 	if (fDecodedDataBufferSize < 0) {
844 		fprintf(stderr, "Decoding read past the end of the decoded data "
845 			"buffer! %" B_PRId32 "\n", fDecodedDataBufferSize);
846 		fDecodedDataBufferSize = 0;
847 	}
848 	if (fTempPacket->size < 0) {
849 		fprintf(stderr, "Decoding read past the end of the temp packet! %d\n",
850 			fTempPacket->size);
851 		fTempPacket->size = 0;
852 	}
853 }
854 
855 
856 /*!	\brief Moves audio frames from fDecodedDataBuffer to fRawDecodedAudio (and
857 		thus to fDecodedData) and updates the start times of fRawDecodedAudio,
858 		fDecodedDataBuffer and fTempPacket accordingly.
859 
860 	When moving audio frames to fRawDecodedAudio this method also makes sure
861 	that the following important fields of fRawDecodedAudio are populated and
862 	updated with correct values:
863 		- fRawDecodedAudio->data[0]: Points to first free byte of fDecodedData
864 		- fRawDecodedAudio->linesize[0]: Total size of frames in fDecodedData
865 		- fRawDecodedAudio->format: Format of first audio frame
866 		- fRawDecodedAudio->pkt_dts: Start time of first audio frame
867 		- fRawDecodedAudio->nb_samples: Number of audio frames
868 		- fRawDecodedAudio->opaque: Contains the following fields for the first
869 		  audio frame:
870 		      - channels: Channel count of first audio frame
871 		      - sample_rate: Frame rate of first audio frame
872 
873 	This function assumes to be called only when the following assumptions
874 	hold true:
875 		1. There are decoded audio frames available in fDecodedDataBuffer
876 		   meaning that fDecodedDataBufferSize is greater than zero.
877 		2. There is space left in fRawDecodedAudio to move some audio frames
878 		   in. This means that fRawDecodedAudio has lesser audio frames than
879 		   the maximum allowed (specified by fOutputFrameCount).
880 		3. The audio frame rate is known so that we can calculate the time
881 		   range (covered by the moved audio frames) to update the start times
882 		   accordingly.
883 		4. The field fRawDecodedAudio->opaque points to a memory block
884 		   representing a structure of type avformat_codec_context.
885 
886 	After this function returns the caller can safely make the following
887 	assumptions:
888 		1. The number of decoded audio frames in fDecodedDataBuffer is
		   decreased though it may still be greater than zero.
890 		2. The number of frames in fRawDecodedAudio has increased and all
891 		   important fields are updated (see listing above).
892 		3. Start times of fDecodedDataBuffer and fTempPacket were increased
893 		   with the time range covered by the moved audio frames.
894 
895 	Note: This function raises an exception (by calling the debugger), when
896 	fDecodedDataBufferSize is not a multiple of fOutputFrameSize.
897 */
void
AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
{
	assert(fDecodedDataBufferSize > 0);
	assert(fRawDecodedAudio->nb_samples < fOutputFrameCount);
	assert(fOutputFrameRate > 0);

	// Frames the output (fRawDecodedAudio) can still take, and decoded
	// frames waiting in fDecodedDataBuffer to be moved over.
	int32 outFrames = fOutputFrameCount - fRawDecodedAudio->nb_samples;
	int32 inFrames = fDecodedDataBufferSize;

	int32 frames = min_c(outFrames, inFrames);
	if (frames == 0)
		debugger("fDecodedDataBufferSize not multiple of frame size!");

	// Some decoders do not support format conversion on themselves, or use
	// "planar" audio (each channel separated instead of interleaved samples).
	// In that case, we use swresample to convert the data
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
#if 0
		const uint8_t* ptr[8];
		for (int i = 0; i < 8; i++) {
			if (fDecodedDataBuffer->data[i] == NULL)
				ptr[i] = NULL;
			else
				ptr[i] = fDecodedDataBuffer->data[i] + fDecodedDataBufferOffset;
		}

		// When there are more input frames than space in the output buffer,
		// we could feed everything to swr and it would buffer the extra data.
		// However, there is no easy way to flush that data without feeding more
		// input, and it makes our timestamp computations fail.
		// So, we feed only as much frames as we can get out, and handle the
		// buffering ourselves.
		// TODO Ideally, we should try to size our output buffer so that it can
		// always hold all the output (swr provides helper functions for this)
		inFrames = frames;
		frames = swr_convert(fResampleContext, fRawDecodedAudio->data,
			outFrames, ptr, inFrames);

		if (frames < 0)
			debugger("resampling failed");
#else
		// interleave planar audio with same format
		// For every frame, one sample is copied from each channel plane in
		// turn, so the destination ends up interleaved. This assumes
		// fInputFrameSize is the size of a single channel's sample here
		// (the per-plane stride) — TODO confirm against the audio setup code.
		uintptr_t out = (uintptr_t)fRawDecodedAudio->data[0];
		int32 offset = fDecodedDataBufferOffset;
		for (int i = 0; i < frames; i++) {
			for (int j = 0; j < fCodecContext->channels; j++) {
				memcpy((void*)out, fDecodedDataBuffer->data[j]
					+ offset, fInputFrameSize);
				out += fInputFrameSize;
			}
			offset += fInputFrameSize;
		}
		outFrames = frames;
		inFrames = frames;
#endif
	} else {
		// Packed (interleaved) audio: one contiguous copy moves everything.
		memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
				+ fDecodedDataBufferOffset, frames * fOutputFrameSize);
		outFrames = frames;
		inFrames = frames;
	}

	size_t remainingSize = inFrames * fInputFrameSize;
	size_t decodedSize = outFrames * fOutputFrameSize;
	fDecodedDataBufferSize -= inFrames;

	// While fRawDecodedAudio->data[0] still points at the very start of
	// fDecodedData no frames have been moved yet; latch the properties of
	// the first audio frame into fRawDecodedAudio exactly once.
	bool firstAudioFramesCopiedToRawDecodedAudio
		= fRawDecodedAudio->data[0] != fDecodedData;
	if (!firstAudioFramesCopiedToRawDecodedAudio) {
		fRawDecodedAudio->format = fDecodedDataBuffer->format;
		fRawDecodedAudio->pkt_dts = fDecodedDataBuffer->pkt_dts;

		avformat_codec_context* codecContext
			= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
		codecContext->channels = fCodecContext->channels;
		codecContext->sample_rate = fCodecContext->sample_rate;
	}

	// Advance the write pointer and bookkeeping past the frames just moved.
	fRawDecodedAudio->data[0] += decodedSize;
	fRawDecodedAudio->linesize[0] += decodedSize;
	fRawDecodedAudio->nb_samples += outFrames;

	fDecodedDataBufferOffset += remainingSize;

	// Update start times accordingly
	bigtime_t framesTimeInterval = static_cast<bigtime_t>(
		(1000000LL * frames) / fOutputFrameRate);
	fDecodedDataBuffer->pkt_dts += framesTimeInterval;
		// Start time of buffer is updated in case that it contains
		// more audio frames to move.
	fTempPacket->dts += framesTimeInterval;
		// Start time of fTempPacket is updated in case the fTempPacket
		// contains more audio frames to decode.
}
993 
994 
995 /*!	\brief Decodes next chunk of audio frames.
996 
997 	This method handles all the details of loading the input buffer
	(fChunkBuffer) at the right time and of calling FFMPEG often enough until
999 	some audio frames have been decoded.
1000 
1001 	FFMPEG decides how much audio frames belong to a chunk. Because of that
1002 	it is very likely that _DecodeNextAudioFrameChunk has to be called several
1003 	times to decode enough audio frames to please the caller of
1004 	BMediaDecoder::Decode().
1005 
1006 	This function assumes to be called only when the following assumptions
1007 	hold true:
1008 		1. fDecodedDataBufferSize equals zero.
1009 
1010 	After this function returns successfully the caller can safely make the
1011 	following assumptions:
1012 		1. fDecodedDataBufferSize is greater than zero.
1013 		2. fDecodedDataBufferOffset is set to zero.
1014 		3. fDecodedDataBuffer contains audio frames.
1015 
1016 
1017 	\returns B_OK on successfully decoding one audio frame chunk.
1018 	\returns B_LAST_BUFFER_ERROR No more audio frame chunks available. From
1019 		this point on further calls will return this same error.
1020 	\returns B_ERROR Decoding failed
1021 */
1022 status_t
1023 AVCodecDecoder::_DecodeNextAudioFrameChunk()
1024 {
1025 	assert(fDecodedDataBufferSize == 0);
1026 
1027 	while (fDecodedDataBufferSize == 0) {
1028 		status_t loadingChunkStatus
1029 			= _LoadNextChunkIfNeededAndAssignStartTime();
1030 		if (loadingChunkStatus != B_OK)
1031 			return loadingChunkStatus;
1032 
1033 		status_t decodingStatus
1034 			= _DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer();
1035 		if (decodingStatus != B_OK) {
1036 			// Assume the audio decoded until now is broken so replace it with
1037 			// some silence.
1038 			memset(fDecodedData, 0, fRawDecodedAudio->linesize[0]);
1039 
1040 			if (!fAudioDecodeError) {
1041 				// Report failure if not done already
1042 				int32 chunkBufferOffset = fTempPacket->data - fChunkBuffer;
1043 				printf("########### audio decode error, "
1044 					"fTempPacket->size %d, fChunkBuffer data offset %" B_PRId32
1045 					"\n", fTempPacket->size, chunkBufferOffset);
1046 				fAudioDecodeError = true;
1047 			}
1048 
1049 			// Assume that next audio chunk can be decoded so keep decoding.
1050 			continue;
1051 		}
1052 
1053 		fAudioDecodeError = false;
1054 	}
1055 
1056 	return B_OK;
1057 }
1058 
1059 
1060 /*!	\brief Tries to decode at least one audio frame and store it in the
1061 		fDecodedDataBuffer.
1062 
1063 	This function assumes to be called only when the following assumptions
1064 	hold true:
1065 		1. fDecodedDataBufferSize equals zero.
1066 		2. fTempPacket->size is greater than zero.
1067 
1068 	After this function returns successfully the caller can safely make the
1069 	following assumptions:
1070 		1. fDecodedDataBufferSize is greater than zero in the common case.
1071 		   Also see "Note" below.
1072 		2. fTempPacket was updated to exclude the data chunk that was consumed
1073 		   by avcodec_send_packet().
1074 		3. fDecodedDataBufferOffset is set to zero.
1075 
1076 	When this function failed to decode at least one audio frame due to a
1077 	decoding error the caller can safely make the following assumptions:
1078 		1. fDecodedDataBufferSize equals zero.
1079 		2. fTempPacket->size equals zero.
1080 
1081 	Note: It is possible that there wasn't any audio frame decoded into
1082 	fDecodedDataBuffer after calling this function. This is normal and can
1083 	happen when there was either a decoding error or there is some decoding
1084 	delay in FFMPEGs audio decoder. Another call to this method is totally
1085 	safe and is even expected as long as the calling assumptions hold true.
1086 
1087 	\returns B_OK Decoding successful. fDecodedDataBuffer contains decoded
1088 		audio frames only when fDecodedDataBufferSize is greater than zero.
1089 		fDecodedDataBuffer is empty, when avcodec_receive_frame() didn't return
1090 		audio frames due to delayed decoding or incomplete audio frames.
1091 	\returns B_ERROR Decoding failed thus fDecodedDataBuffer contains no audio
1092 		frames.
1093 */
1094 status_t
1095 AVCodecDecoder::_DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer()
1096 {
1097 	assert(fDecodedDataBufferSize == 0);
1098 
1099 	av_frame_unref(fDecodedDataBuffer);
1100 	fDecodedDataBufferOffset = 0;
1101 
1102 	int error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
1103 	if (error == AVERROR_EOF)
1104 		return B_LAST_BUFFER_ERROR;
1105 
1106 	if (error == AVERROR(EAGAIN)) {
1107 		// We need to feed more data into the decoder
1108 		avcodec_send_packet(fCodecContext, fTempPacket);
1109 
1110 		// All the data is always consumed by avcodec_send_packet
1111 		fTempPacket->size = 0;
1112 
1113 		// Try again to see if we can get some decoded audio out now
1114 		error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
1115 	}
1116 
1117 	fDecodedDataBufferSize = fDecodedDataBuffer->nb_samples;
1118 	if (fDecodedDataBufferSize < 0)
1119 		fDecodedDataBufferSize = 0;
1120 
1121 	if (error == 0)
1122 		return B_OK;
1123 	else
1124 		return B_ERROR;
1125 }
1126 
1127 
1128 /*! \brief Updates relevant fields of the class member fHeader with the
1129 		properties of the most recently decoded audio frame.
1130 
1131 	The following fields of fHeader are updated:
1132 		- fHeader.type
1133 		- fHeader.file_pos
1134 		- fHeader.orig_size
1135 		- fHeader.start_time
1136 		- fHeader.size_used
1137 		- fHeader.u.raw_audio.frame_rate
1138 		- fHeader.u.raw_audio.channel_count
1139 
1140 	It is assumed that this function is called only	when the following asserts
1141 	hold true:
1142 		1. We actually got a new audio frame decoded by the audio decoder.
1143 		2. fHeader wasn't updated for the new audio frame yet. You MUST call
1144 		   this method only once per decoded audio frame.
1145 		3. fRawDecodedAudio's fields relate to the first audio frame contained
1146 		   in fDecodedData. Especially the following fields are of importance:
1147 		       - fRawDecodedAudio->pkt_dts: Start time of first audio frame
1148 		       - fRawDecodedAudio->opaque: Contains the following fields for
1149 		         the first audio frame:
1150 			         - channels: Channel count of first audio frame
1151 			         - sample_rate: Frame rate of first audio frame
1152 */
1153 void
1154 AVCodecDecoder::_UpdateMediaHeaderForAudioFrame()
1155 {
1156 	fHeader.type = B_MEDIA_RAW_AUDIO;
1157 	fHeader.file_pos = 0;
1158 	fHeader.orig_size = 0;
1159 	fHeader.start_time = fRawDecodedAudio->pkt_dts;
1160 	fHeader.size_used = fRawDecodedAudio->linesize[0];
1161 
1162 	avformat_codec_context* codecContext
1163 		= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
1164 	fHeader.u.raw_audio.channel_count = codecContext->channels;
1165 	fHeader.u.raw_audio.frame_rate = codecContext->sample_rate;
1166 }
1167 
1168 
1169 /*! \brief Decodes next video frame.
1170 
1171     We decode exactly one video frame into fDecodedData. To achieve this goal,
1172     we might need to request several chunks of encoded data resulting in a
1173     variable execution time of this function.
1174 
1175     The length of the decoded video frame is stored in
1176     fDecodedDataSizeInBytes. If this variable is greater than zero, you can
1177     assert that there is a valid video frame available in fDecodedData.
1178 
1179     The decoded video frame in fDecodedData has color space conversion and
1180     deinterlacing already applied.
1181 
1182     To every decoded video frame there is a media_header populated in
1183     fHeader, containing the corresponding video frame properties.
1184 
1185 	Normally every decoded video frame has a start_time field populated in the
1186 	associated fHeader, that determines the presentation time of the frame.
1187 	This relationship will only hold true, when each data chunk that is
1188 	provided via GetNextChunk() contains data for exactly one encoded video
1189 	frame (one complete frame) - not more and not less.
1190 
1191 	We can decode data chunks that contain partial video frame data, too. In
1192 	that case, you cannot trust the value of the start_time field in fHeader.
1193 	We simply have no logic in place to establish a meaningful relationship
1194 	between an incomplete frame and the start time it should be presented.
1195 	Though this	might change in the future.
1196 
1197 	We can decode data chunks that contain more than one video frame, too. In
1198 	that case, you cannot trust the value of the start_time field in fHeader.
1199 	We simply have no logic in place to track the start_time across multiple
1200 	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
1201 	and the start time it should be presented isn't established at the moment.
1202 	Though this	might change in the future.
1203 
1204 	On first call the member variables fSwsContext / fFormatConversionFunc	are
1205 	initialized.
1206 
1207 	\returns B_OK when we successfully decoded one video frame
1208 	\returns B_LAST_BUFFER_ERROR when there are no more video frames available.
1209 	\returns B_NO_MEMORY when we have no memory left for correct operation.
1210 	\returns Other Errors
1211 */
1212 status_t
1213 AVCodecDecoder::_DecodeNextVideoFrame()
1214 {
1215 	int error;
1216 	int send_error;
1217 
1218 #if DO_PROFILING
1219 	bigtime_t startTime = system_time();
1220 #endif
1221 
1222 	error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1223 
1224 	if (error == AVERROR_EOF)
1225 		return B_LAST_BUFFER_ERROR;
1226 
1227 	if (error == AVERROR(EAGAIN)) {
1228 		do {
1229 			status_t loadingChunkStatus
1230 				= _LoadNextChunkIfNeededAndAssignStartTime();
1231 			if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
1232 				return _FlushOneVideoFrameFromDecoderBuffer();
1233 			if (loadingChunkStatus != B_OK) {
1234 				TRACE("[v] AVCodecDecoder::_DecodeNextVideoFrame(): error from "
1235 					"GetNextChunk(): %s\n", strerror(loadingChunkStatus));
1236 				return loadingChunkStatus;
1237 			}
1238 
1239 			char timestamp[AV_TS_MAX_STRING_SIZE];
1240 			av_ts_make_time_string(timestamp,
1241 				fTempPacket->dts, &fCodecContext->time_base);
1242 			TRACE("[v] Feed %d more bytes (dts %s)\n", fTempPacket->size,
1243 				timestamp);
1244 
1245 			send_error = avcodec_send_packet(fCodecContext, fTempPacket);
1246 			if (send_error < 0 && send_error != AVERROR(EAGAIN)) {
1247 				TRACE("[v] AVCodecDecoder: ignoring error in decoding frame "
1248 				"%lld: %d\n", fFrame, error);
1249 			}
1250 
1251 			// Packet is consumed, clear it
1252 			fTempPacket->data = NULL;
1253 			fTempPacket->size = 0;
1254 
1255 			error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1256 			if (error != 0 && error != AVERROR(EAGAIN)) {
1257 				TRACE("[v] frame %lld - decoding error, error code: %d, "
1258 					"chunk size: %ld\n", fFrame, error, fChunkBufferSize);
1259 			}
1260 
1261 		} while (error != 0);
1262 	}
1263 
1264 #if DO_PROFILING
1265 	bigtime_t formatConversionStart = system_time();
1266 #endif
1267 
1268 	status_t handleStatus = _HandleNewVideoFrameAndUpdateSystemState();
1269 	if (handleStatus != B_OK)
1270 		return handleStatus;
1271 
1272 #if DO_PROFILING
1273 	bigtime_t doneTime = system_time();
1274 	decodingTime += formatConversionStart - startTime;
1275 	conversionTime += doneTime - formatConversionStart;
1276 	profileCounter++;
1277 	if (!(fFrame % 5)) {
1278 		printf("[v] profile: d1 = %lld, d2 = %lld (%lld)\n",
1279 			decodingTime / profileCounter, conversionTime / profileCounter,
1280 			fFrame);
1281 		decodingTime = 0;
1282 		conversionTime = 0;
1283 		profileCounter = 0;
1284 	}
1285 #endif
1286 	return error;
1287 }
1288 
1289 
1290 /*!	\brief Applies all essential video input properties to fCodecContext that were
1291 		passed to AVCodecDecoder when Setup() was called.
1292 
1293 	Note: This function must be called before the AVCodec is opened via
1294 	avcodec_open2(). Otherwise the behaviour of FFMPEG's video decoding
1295 	function avcodec_decode_video2() is undefined.
1296 
1297 	Essential properties applied from fInputFormat.u.encoded_video.output:
1298 		- display.line_width copied to fCodecContext->width
1299 		- display.line_count copied to fCodecContext->height
1300 		- pixel_width_aspect and pixel_height_aspect converted to
1301 		  fCodecContext->sample_aspect_ratio
1302 		- field_rate converted to fCodecContext->time_base and
1303 		  fCodecContext->ticks_per_frame
1304 
1305 	Other essential properties being applied:
1306 		- fExtraData to fCodecContext->extradata
1307 		- fExtraDataSize to fCodecContext->extradata_size
1308 */
1309 void
1310 AVCodecDecoder::_ApplyEssentialVideoContainerPropertiesToContext()
1311 {
1312 	media_raw_video_format containerProperties
1313 		= fInputFormat.u.encoded_video.output;
1314 
1315 	fCodecContext->width = containerProperties.display.line_width;
1316 	fCodecContext->height = containerProperties.display.line_count;
1317 
1318 	if (containerProperties.pixel_width_aspect > 0
1319 		&& containerProperties.pixel_height_aspect > 0) {
1320 		ConvertVideoAspectWidthAndHeightToAVCodecContext(
1321 			containerProperties.pixel_width_aspect,
1322 			containerProperties.pixel_height_aspect, *fCodecContext);
1323 	}
1324 
1325 	if (containerProperties.field_rate > 0.0) {
1326 		ConvertVideoFrameRateToAVCodecContext(containerProperties.field_rate,
1327 			*fCodecContext);
1328 	}
1329 
1330 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
1331 	fCodecContext->extradata_size = fExtraDataSize;
1332 }
1333 
1334 
1335 /*! \brief Loads the next  chunk into fChunkBuffer and assigns it (including
1336 		the start time) to fTempPacket but only if fTempPacket is empty.
1337 
1338 	\returns B_OK
1339 		1. meaning: Next chunk is loaded.
1340 		2. meaning: No need to load and assign anything. Proceed as usual.
1341 	\returns B_LAST_BUFFER_ERROR No more chunks available. fChunkBuffer	and
1342 		fTempPacket are left untouched.
1343 	\returns Other errors Caller should bail out because fChunkBuffer and
1344 		fTempPacket are in unknown states. Normal operation cannot be
1345 		guaranteed.
1346 */
status_t
AVCodecDecoder::_LoadNextChunkIfNeededAndAssignStartTime()
{
	// Nothing to do while the current packet still holds unconsumed data.
	if (fTempPacket->size > 0)
		return B_OK;

	const void* chunkBuffer = NULL;
	size_t chunkBufferSize = 0;
		// In the case that GetNextChunk() returns an error fChunkBufferSize
		// should be left untouched.
	media_header chunkMediaHeader;

	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer, &chunkBufferSize,
		&chunkMediaHeader);
	if (getNextChunkStatus != B_OK)
		return getNextChunkStatus;

	// Copy into our own padded buffer; FFmpeg requires extra zeroed bytes
	// after the input data (see _CopyChunkToChunkBufferAndAddPadding()).
	status_t chunkBufferPaddingStatus
		= _CopyChunkToChunkBufferAndAddPadding(chunkBuffer, chunkBufferSize);
	if (chunkBufferPaddingStatus != B_OK)
		return chunkBufferPaddingStatus;

	fTempPacket->data = fChunkBuffer;
	fTempPacket->size = fChunkBufferSize;
	fTempPacket->dts = chunkMediaHeader.start_time;
		// Let FFMPEG handle the correct relationship between start_time and
		// decoded a/v frame. By doing so we are simply copying the way how it
		// is implemented in ffplay.c for video frames (for audio frames it
		// works, too, but isn't used by ffplay.c).
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502
		//
		// FIXME: Research how to establish a meaningful relationship between
		// start_time and decoded a/v frame when the received chunk buffer
		// contains partial a/v frames. Maybe some data formats do contain time
		// stamps (aka pts / dts fields) that can be evaluated by FFMPEG. But
		// as long as I don't have such video data to test it, it makes no
		// sense trying to implement it.
		//
		// FIXME: Implement tracking start_time of video frames originating in
		// data chunks that encode more than one video frame at a time. In that
		// case on would increment the start_time for each consecutive frame of
		// such a data chunk (like it is done for audio frame decoding). But as
		// long as I don't have such video data to test it, it makes no sense
		// to implement it.

#ifdef LOG_STREAM_TO_FILE
	// Debug aid: dump the first 100 packets of the stream to a file.
	BFile* logFile = fIsAudio ? &sAudioStreamLogFile : &sVideoStreamLogFile;
	if (sDumpedPackets < 100) {
		logFile->Write(chunkBuffer, fChunkBufferSize);
		printf("wrote %ld bytes\n", fChunkBufferSize);
		sDumpedPackets++;
	} else if (sDumpedPackets == 100)
		logFile->Unset();
#endif

	return B_OK;
}
1404 
1405 
1406 /*! \brief Copies a chunk into fChunkBuffer and adds a "safety net" of
1407 		additional memory as required by FFMPEG for input buffers to video
1408 		decoders.
1409 
1410 	This is needed so that some decoders can read safely a predefined number of
1411 	bytes at a time for performance optimization purposes.
1412 
1413 	The additional memory has a size of AV_INPUT_BUFFER_PADDING_SIZE as defined
1414 	in avcodec.h.
1415 
1416 	Ownership of fChunkBuffer memory is with the class so it needs to be freed
1417 	at the right times (on destruction, on seeking).
1418 
1419 	Also update fChunkBufferSize to reflect the size of the contained data
1420 	(leaving out the padding).
1421 
1422 	\param chunk The chunk to copy.
1423 	\param chunkSize Size of the chunk in bytes
1424 
1425 	\returns B_OK Padding was successful. You are responsible for releasing the
1426 		allocated memory. fChunkBufferSize is set to chunkSize.
1427 	\returns B_NO_MEMORY Padding failed.
1428 		fChunkBuffer is set to NULL making it safe to call free() on it.
1429 		fChunkBufferSize is set to 0 to reflect the size of fChunkBuffer.
1430 */
1431 status_t
1432 AVCodecDecoder::_CopyChunkToChunkBufferAndAddPadding(const void* chunk,
1433 	size_t chunkSize)
1434 {
1435 	uint8_t* tmpBuffer = static_cast<uint8_t*>(realloc(fChunkBuffer,
1436 		chunkSize + AV_INPUT_BUFFER_PADDING_SIZE));
1437 	if (tmpBuffer == NULL) {
1438 		free(fChunkBuffer);
1439 		fChunkBuffer = NULL;
1440 		fChunkBufferSize = 0;
1441 		return B_NO_MEMORY;
1442 	} else {
1443 		fChunkBuffer = tmpBuffer;
1444 	}
1445 
1446 	memcpy(fChunkBuffer, chunk, chunkSize);
1447 	memset(fChunkBuffer + chunkSize, 0, AV_INPUT_BUFFER_PADDING_SIZE);
1448 		// Establish safety net, by zero'ing the padding area.
1449 
1450 	fChunkBufferSize = chunkSize;
1451 
1452 	return B_OK;
1453 }
1454 
1455 
1456 /*! \brief Executes all steps needed for a freshly decoded video frame.
1457 
1458 	\see _UpdateMediaHeaderForVideoFrame() and
1459 	\see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to
1460 	call this method.
1461 
1462 	\returns B_OK when video frame was handled successfully
	\returns B_NO_MEMORY when no memory is left for correct operation.
1464 */
1465 status_t
1466 AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState()
1467 {
1468 	_UpdateMediaHeaderForVideoFrame();
1469 	status_t postProcessStatus = _DeinterlaceAndColorConvertVideoFrame();
1470 	if (postProcessStatus != B_OK)
1471 		return postProcessStatus;
1472 
1473 #ifdef DEBUG
1474 	dump_ffframe_video(fRawDecodedPicture, "ffpict");
1475 #endif
1476 
1477 	fFrame++;
1478 
1479 	return B_OK;
1480 }
1481 
1482 
1483 /*! \brief Flushes one video frame - if any - still buffered by the decoder.
1484 
1485 	Some FFMPEG decoder are buffering video frames. To retrieve those buffered
1486 	frames the decoder needs to be told so.
1487 
1488 	The intended use of this method is to call it, once there are no more data
1489 	chunks for decoding left. Reframed in other words: Once GetNextChunk()
1490 	returns with status B_LAST_BUFFER_ERROR it is time to start flushing.
1491 
1492 	\returns B_OK Retrieved one video frame, handled it accordingly and updated
1493 		the system state accordingly.
1494 		There maybe more video frames left. So it is valid for the client of
1495 		AVCodecDecoder to call it one more time.
1496 
1497 	\returns B_LAST_BUFFER_ERROR No video frame left.
1498 		The client of the AVCodecDecoder should stop calling it now.
1499 
1500 	\returns B_NO_MEMORY No memory left for correct operation.
1501 */
1502 status_t
1503 AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer()
1504 {
1505 	// Tell the decoder there is nothing to send anymore
1506 	avcodec_send_packet(fCodecContext, NULL);
1507 
1508 	// Get any remaining frame
1509 	int error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1510 
1511 	if (error != 0 && error != AVERROR(EAGAIN)) {
1512 		// video buffer is flushed successfully
1513 		// (or there is an error, not much we can do about it)
1514 		return B_LAST_BUFFER_ERROR;
1515 	}
1516 
1517 	return _HandleNewVideoFrameAndUpdateSystemState();
1518 }
1519 
1520 
1521 /*! \brief Updates relevant fields of the class member fHeader with the
1522 		properties of the most recently decoded video frame.
1523 
1524 	It is assumed that this function is called only	when the following asserts
1525 	hold true:
1526 		1. We actually got a new picture decoded by the video decoder.
1527 		2. fHeader wasn't updated for the new picture yet. You MUST call this
1528 		   method only once per decoded video frame.
1529 		3. This function MUST be called after
		   _DeinterlaceAndColorConvertVideoFrame() as it relies on an updated
1531 		    fDecodedDataSizeInBytes.
		4. There will be at maximum only one decoded video frame in our cache
1533 		   at any single point in time. Otherwise you couldn't tell to which
1534 		   cached decoded video frame the properties in fHeader relate to.
1535 		5. AVCodecContext is still valid for this video frame (This is the case
1536 		   when this function is called after avcodec_decode_video2() and
1537 		   before the next call to avcodec_decode_video2().
1538 */
1539 void
1540 AVCodecDecoder::_UpdateMediaHeaderForVideoFrame()
1541 {
1542 	fHeader.type = B_MEDIA_RAW_VIDEO;
1543 	fHeader.file_pos = 0;
1544 	fHeader.orig_size = 0;
1545 	fHeader.start_time = fRawDecodedPicture->pkt_dts;
1546 		// The pkt_dts is already in microseconds, even if ffmpeg docs says
1547 		// 'in codec time_base units'
1548 	fHeader.size_used = av_image_get_buffer_size(
1549 		colorspace_to_pixfmt(fOutputColorSpace), fRawDecodedPicture->width,
1550 		fRawDecodedPicture->height, 1);
1551 	fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width;
1552 	fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height;
1553 	fHeader.u.raw_video.bytes_per_row
1554 		= CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace,
1555 			fRawDecodedPicture->width);
1556 	fHeader.u.raw_video.field_gamma = 1.0;
1557 	fHeader.u.raw_video.field_sequence = fFrame;
1558 	fHeader.u.raw_video.field_number = 0;
1559 	fHeader.u.raw_video.pulldown_number = 0;
1560 	fHeader.u.raw_video.first_active_line = 1;
1561 	fHeader.u.raw_video.line_count = fRawDecodedPicture->height;
1562 
1563 	ConvertAVCodecContextToVideoAspectWidthAndHeight(*fCodecContext,
1564 		fHeader.u.raw_video.pixel_width_aspect,
1565 		fHeader.u.raw_video.pixel_height_aspect);
1566 
1567 	char timestamp[AV_TS_MAX_STRING_SIZE];
1568 	av_ts_make_time_string(timestamp,
1569 		fRawDecodedPicture->best_effort_timestamp, &fCodecContext->time_base);
1570 
1571 	TRACE("[v] start_time=%s field_sequence=%lu\n",
1572 		timestamp, fHeader.u.raw_video.field_sequence);
1573 }
1574 
1575 
1576 /*! \brief This function applies deinterlacing (only if needed) and color
1577 	conversion to the video frame in fRawDecodedPicture.
1578 
1579 	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
1580 	converted yet (otherwise this function behaves in unknown manners).
1581 
1582 	This function MUST be called after _UpdateMediaHeaderForVideoFrame() as it
	relies on the fHeader.size_used and fHeader.u.raw_video.bytes_per_row fields
1584 	for correct operation
1585 
1586 	You should only call this function when you	got a new picture decoded by
1587 	the video decoder.
1588 
1589 	When this function finishes the postprocessed video frame will be available
1590 	in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes
1591 	will be set accordingly).
1592 
1593 	\returns B_OK video frame successfully deinterlaced and color converted.
1594 	\returns B_NO_MEMORY Not enough memory available for correct operation.
1595 */
status_t
AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
{
	int displayWidth = fRawDecodedPicture->width;
	int displayHeight = fRawDecodedPicture->height;
	AVFrame deinterlacedPicture;
	bool useDeinterlacedPicture = false;

	// FFmpeg 6.x moved the interlaced flag into AVFrame::flags.
#if LIBAVCODEC_VERSION_MAJOR >= 60
	if (fRawDecodedPicture->flags & AV_FRAME_FLAG_INTERLACED) {
#else
	if (fRawDecodedPicture->interlaced_frame) {
#endif
		// Build a shallow copy of the plane pointers/strides; only
		// data/linesize of rawPicture are ever read below.
		AVFrame rawPicture;
		rawPicture.data[0] = fRawDecodedPicture->data[0];
		rawPicture.data[1] = fRawDecodedPicture->data[1];
		rawPicture.data[2] = fRawDecodedPicture->data[2];
		rawPicture.data[3] = fRawDecodedPicture->data[3];
		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];

		// Allocate a destination image for the deinterlaced result; it is
		// released at the bottom of this function via av_freep().
		if (av_image_alloc(deinterlacedPicture.data,
				deinterlacedPicture.linesize, displayWidth, displayHeight,
				fCodecContext->pix_fmt, 1) < 0)
			return B_NO_MEMORY;

		// deinterlace implemented using avfilter
		// NOTE(review): the return value of _ProcessFilterGraph() is
		// ignored here — on filter failure the stale buffer is still used.
		_ProcessFilterGraph(&deinterlacedPicture, &rawPicture,
			fCodecContext->pix_fmt, displayWidth, displayHeight);
		useDeinterlacedPicture = true;
	}

	// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		fSwsContext = sws_getContext(displayWidth, displayHeight,
			fCodecContext->pix_fmt, displayWidth, displayHeight,
			colorspace_to_pixfmt(fOutputColorSpace),
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	}
#else
	if (fFormatConversionFunc == NULL) {
		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
			fCodecContext->pix_fmt, displayWidth, displayHeight);
	}
#endif

	// _UpdateMediaHeaderForVideoFrame() must have filled in size_used and
	// bytes_per_row before this point.
	fDecodedDataSizeInBytes = fHeader.size_used;

	if (fDecodedData == NULL) {
		// Lazily allocate the output buffer, aligned for the benefit of the
		// color conversion routines.
		const size_t kOptimalAlignmentForColorConversion = 32;
		posix_memalign(reinterpret_cast<void**>(&fDecodedData),
			kOptimalAlignmentForColorConversion, fDecodedDataSizeInBytes);
	}
	if (fDecodedData == NULL)
		return B_NO_MEMORY;

	fPostProcessedDecodedPicture->data[0] = fDecodedData;
	fPostProcessedDecodedPicture->linesize[0]
		= fHeader.u.raw_video.bytes_per_row;

	// Color-convert either the deinterlaced copy or the raw decoded picture
	// into fPostProcessedDecodedPicture (i.e. fDecodedData).
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL) {
#else
	if (fFormatConversionFunc != NULL) {
#endif
		if (useDeinterlacedPicture) {
			AVFrame deinterlacedFrame;
			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
			deinterlacedFrame.linesize[0]
				= deinterlacedPicture.linesize[0];
			deinterlacedFrame.linesize[1]
				= deinterlacedPicture.linesize[1];
			deinterlacedFrame.linesize[2]
				= deinterlacedPicture.linesize[2];
			deinterlacedFrame.linesize[3]
				= deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, deinterlacedFrame.data,
				deinterlacedFrame.linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(&deinterlacedFrame,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, fRawDecodedPicture->data,
				fRawDecodedPicture->linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(fRawDecodedPicture,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		}
	}

	// Release the temporary deinterlace buffer allocated above, if any.
#if LIBAVCODEC_VERSION_MAJOR >= 60
	if (fRawDecodedPicture->flags & AV_FRAME_FLAG_INTERLACED)
#else
	if (fRawDecodedPicture->interlaced_frame)
#endif
		av_freep(&deinterlacedPicture.data[0]);

	return B_OK;
}
1710 
1711 
1712 /*! \brief Init the deinterlace filter graph.
1713 
1714 	\returns B_OK the filter graph could be built.
1715 	\returns B_BAD_VALUE something was wrong with building the graph.
1716 */
1717 status_t
1718 AVCodecDecoder::_InitFilterGraph(enum AVPixelFormat pixfmt, int32 width,
1719 	int32 height)
1720 {
1721 	if (fFilterGraph != NULL) {
1722 		av_frame_free(&fFilterFrame);
1723 		avfilter_graph_free(&fFilterGraph);
1724 	}
1725 
1726 	fFilterGraph = avfilter_graph_alloc();
1727 
1728 	BString arguments;
1729 	arguments.SetToFormat("buffer=video_size=%" B_PRId32 "x%" B_PRId32
1730 		":pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];[in]yadif[out];"
1731 		"[out]buffersink", width, height,
1732 		pixfmt);
1733 	AVFilterInOut* inputs = NULL;
1734 	AVFilterInOut* outputs = NULL;
1735 	TRACE("[v] _InitFilterGraph(): %s\n", arguments.String());
1736 	int ret = avfilter_graph_parse2(fFilterGraph, arguments.String(), &inputs,
1737 		&outputs);
1738 	if (ret < 0) {
1739 		fprintf(stderr, "avfilter_graph_parse2() failed\n");
1740 		return B_BAD_VALUE;
1741 	}
1742 
1743 	ret = avfilter_graph_config(fFilterGraph, NULL);
1744 	if (ret < 0) {
1745 		fprintf(stderr, "avfilter_graph_config() failed\n");
1746 		return B_BAD_VALUE;
1747 	}
1748 
1749 	fBufferSourceContext = avfilter_graph_get_filter(fFilterGraph,
1750 		"Parsed_buffer_0");
1751 	fBufferSinkContext = avfilter_graph_get_filter(fFilterGraph,
1752 		"Parsed_buffersink_2");
1753 	if (fBufferSourceContext == NULL || fBufferSinkContext == NULL) {
1754 		fprintf(stderr, "avfilter_graph_get_filter() failed\n");
1755 		return B_BAD_VALUE;
1756 	}
1757 	fFilterFrame = av_frame_alloc();
1758 	fLastWidth = width;
1759 	fLastHeight = height;
1760 	fLastPixfmt = pixfmt;
1761 
1762 	return B_OK;
1763 }
1764 
1765 
1766 /*! \brief Process an AVPicture with the deinterlace filter graph.
1767 
1768     We decode exactly one video frame into dst.
1769 	Equivalent function for avpicture_deinterlace() from version 2.x.
1770 
1771 	\returns B_OK video frame successfully deinterlaced.
1772 	\returns B_BAD_DATA No frame could be output.
1773 	\returns B_NO_MEMORY Not enough memory available for correct operation.
1774 */
1775 status_t
1776 AVCodecDecoder::_ProcessFilterGraph(AVFrame *dst, const AVFrame *src,
1777 	enum AVPixelFormat pixfmt, int32 width, int32 height)
1778 {
1779 	if (fFilterGraph == NULL || width != fLastWidth
1780 		|| height != fLastHeight || pixfmt != fLastPixfmt) {
1781 
1782 		status_t err = _InitFilterGraph(pixfmt, width, height);
1783 		if (err != B_OK)
1784 			return err;
1785 	}
1786 
1787 	memcpy(fFilterFrame->data, src->data, sizeof(src->data));
1788 	memcpy(fFilterFrame->linesize, src->linesize, sizeof(src->linesize));
1789 	fFilterFrame->width = width;
1790 	fFilterFrame->height = height;
1791 	fFilterFrame->format = pixfmt;
1792 
1793 	int ret = av_buffersrc_add_frame(fBufferSourceContext, fFilterFrame);
1794 	if (ret < 0)
1795 		return B_NO_MEMORY;
1796 
1797 	ret = av_buffersink_get_frame(fBufferSinkContext, fFilterFrame);
1798 	if (ret < 0)
1799 		return B_BAD_DATA;
1800 
1801 	av_image_copy(dst->data, dst->linesize, (const uint8**)fFilterFrame->data,
1802 		fFilterFrame->linesize, pixfmt, width, height);
1803 	av_frame_unref(fFilterFrame);
1804 	return B_OK;
1805 }
1806