xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision 2a2e7ad562841be14b2d1f8ad870780f32be2b1f)
1 /*
2  * Copyright (C) 2001 Carlos Hasan
3  * Copyright (C) 2001 François Revol
4  * Copyright (C) 2001 Axel Dörfler
5  * Copyright (C) 2004 Marcus Overhagen
6  * Copyright (C) 2009 Stephan Amßus <superstippi@gmx.de>
7  * Copyright (C) 2014 Colin Günther <coling@gmx.de>
8  * Copyright (C) 2015 Adrien Destugues <pulkomandy@pulkomandy.tk>
9  *
10  * All rights reserved. Distributed under the terms of the MIT License.
11  */
12 
13 //! libavcodec based decoder for Haiku
14 
15 
16 #include "AVCodecDecoder.h"
17 
18 #include <new>
19 
20 #include <assert.h>
21 #include <string.h>
22 
23 #include <Bitmap.h>
24 #include <Debug.h>
25 #include <String.h>
26 
27 #include "Utilities.h"
28 
29 
#undef TRACE
// Uncomment TRACE_AV_CODEC to enable (very verbose) decoder tracing on
// stdout; audio and video tracing can be silenced independently below.
//#define TRACE_AV_CODEC
#ifdef TRACE_AV_CODEC
#	define TRACE(x...)	printf(x)
#	define TRACE_AUDIO(x...)	printf(x)
#	define TRACE_VIDEO(x...)	printf(x)
#else
#	define TRACE(x...)
#	define TRACE_AUDIO(x...)
#	define TRACE_VIDEO(x...)
#endif
41 
// Uncomment LOG_STREAM_TO_FILE to dump the raw encoded audio/video chunk
// streams to files on the Desktop for offline inspection.
//#define LOG_STREAM_TO_FILE
#ifdef LOG_STREAM_TO_FILE
#	include <File.h>
	static BFile sAudioStreamLogFile(
		"/boot/home/Desktop/AVCodecDebugAudioStream.raw",
		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
	static BFile sVideoStreamLogFile(
		"/boot/home/Desktop/AVCodecDebugVideoStream.raw",
		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
	static int sDumpedPackets = 0;
#endif
53 
// Map FFmpeg's current AVCodecID type onto the legacy CodecID name still
// used in this file.
typedef AVCodecID CodecID;
55 
// On-the-wire layout of the Microsoft WAVEFORMATEX header as delivered in
// B_WAV_FORMAT_FAMILY info buffers; codec-specific extra data follows
// immediately after this packed structure (see Setup()).
struct wave_format_ex {
	uint16 format_tag;			// codec identifier (WAVE format tag)
	uint16 channels;			// number of audio channels
	uint32 frames_per_sec;		// sample rate in frames per second
	uint32 avg_bytes_per_sec;	// average byte rate of the stream
	uint16 block_align;			// bytes per sample block
	uint16 bits_per_sample;		// bits per mono sample
	uint16 extra_size;			// byte count of the trailing extra data
	// extra_data[extra_size]
} _PACKED;
66 
// Minimal snapshot of audio stream properties; a heap instance of this is
// kept in fRawDecodedAudio->opaque to remember the format of the audio
// frames currently accumulated in fDecodedData.
struct avformat_codec_context {
	int sample_rate;	// frame rate of the buffered audio frames
	int channels;		// channel count of the buffered audio frames
};
71 
72 
// profiling related globals
#define DO_PROFILING 0
#if DO_PROFILING
// Accumulated decode/conversion wall time and the number of profiled
// iterations; averages are printed in the destructor.
static bigtime_t decodingTime = 0;
static bigtime_t conversionTime = 0;
static long profileCounter = 0;
#endif
80 
81 
/*!	\brief Constructs the decoder with all members in their "not yet
		negotiated" state.

	Allocates the codec context and the AVFrames used throughout decoding;
	the actual codec is only selected in Setup() and opened in
	NegotiateOutputFormat().
*/
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fCodecContext(avcodec_alloc_context3(NULL)),
	fResampleContext(NULL),
	fDecodedData(NULL),
	fDecodedDataSizeInBytes(0),
	fPostProcessedDecodedPicture(av_frame_alloc()),
	fRawDecodedPicture(av_frame_alloc()),
	fRawDecodedAudio(av_frame_alloc()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fOutputColorSpace(B_NO_COLOR_SPACE),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),
	fInputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fDecodedDataBuffer(av_frame_alloc()),
	fDecodedDataBufferOffset(0),
	fDecodedDataBufferSize(0),
	fTempPacket(NULL),
	fBufferSinkContext(NULL),
	fBufferSourceContext(NULL),
	fFilterGraph(NULL),
	fFilterFrame(NULL)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	system_info info;
	get_system_info(&info);

	// Be lenient with slightly broken streams: detect errors carefully and
	// let FFmpeg conceal them instead of failing hard.
	fCodecContext->err_recognition = AV_EF_CAREFUL;
	fCodecContext->error_concealment = 3;
	// Use one decoding thread per CPU.
	fCodecContext->thread_count = info.cpu_count;
}
137 
138 
/*!	\brief Releases all FFmpeg state and the decoder's own buffers.

	Note: fCodecContext->extradata points to memory owned elsewhere
	(fExtraData, deleted below, or fInputFormat's meta data), so it is
	detached before avcodec_free_context() to keep FFmpeg from freeing it.
*/
AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	// Print average decoding and conversion times collected while running.
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%lld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	swr_free(&fResampleContext);
	free(fChunkBuffer);
	free(fDecodedData);

	av_frame_free(&fPostProcessedDecodedPicture);
	av_frame_free(&fRawDecodedPicture);
	// opaque holds our malloc'ed avformat_codec_context snapshot.
	av_free(fRawDecodedAudio->opaque);
	av_frame_free(&fRawDecodedAudio);
	// Detach the externally owned extradata before freeing the context.
	fCodecContext->extradata = NULL;
	avcodec_free_context(&fCodecContext);
	av_frame_free(&fDecodedDataBuffer);

	av_frame_free(&fFilterFrame);
	avfilter_graph_free(&fFilterGraph);

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
#endif

	delete[] fExtraData;

	av_packet_free(&fTempPacket);
}
175 
176 
177 void
178 AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
179 {
180 	snprintf(mci->short_name, 32, "%s", fCodec->name);
181 	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
182 	mci->id = 0;
183 	mci->sub_id = fCodec->id;
184 }
185 
186 
187 status_t
188 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
189 	size_t infoSize)
190 {
191 	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
192 		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
193 		return B_ERROR;
194 
195 	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
196 	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));
197 
198 #ifdef TRACE_AV_CODEC
199 	char buffer[1024];
200 	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
201 	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
202 	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
203 	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
204 		ioEncodedFormat->user_data_type);
205 	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
206 		ioEncodedFormat->MetaDataSize());
207 #endif
208 
209 	media_format_description description;
210 	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
211 			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
212 		if (description.u.misc.file_format != 'ffmp')
213 			return B_NOT_SUPPORTED;
214 		fCodec = avcodec_find_decoder(static_cast<CodecID>(
215 			description.u.misc.codec));
216 		if (fCodec == NULL) {
217 			TRACE("  unable to find the correct FFmpeg "
218 				"decoder (id = %lu)\n", description.u.misc.codec);
219 			return B_ERROR;
220 		}
221 		TRACE("  found decoder %s\n", fCodec->name);
222 
223 		const void* extraData = infoBuffer;
224 		fExtraDataSize = infoSize;
225 		if (description.family == B_WAV_FORMAT_FAMILY
226 				&& infoSize >= sizeof(wave_format_ex)) {
227 			TRACE("  trying to use wave_format_ex\n");
228 			// Special case extra data in B_WAV_FORMAT_FAMILY
229 			const wave_format_ex* waveFormatData
230 				= (const wave_format_ex*)infoBuffer;
231 
232 			size_t waveFormatSize = infoSize;
233 			if (waveFormatData != NULL && waveFormatSize > 0) {
234 				fBlockAlign = waveFormatData->block_align;
235 				TRACE("  found block align: %d\n", fBlockAlign);
236 				fExtraDataSize = waveFormatData->extra_size;
237 				// skip the wave_format_ex from the extra data.
238 				extraData = waveFormatData + 1;
239 			}
240 		} else {
241 			if (fIsAudio) {
242 				fBlockAlign
243 					= ioEncodedFormat->u.encoded_audio.output.buffer_size;
244 				TRACE("  using buffer_size as block align: %d\n",
245 					fBlockAlign);
246 			}
247 		}
248 		if (extraData != NULL && fExtraDataSize > 0) {
249 			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
250 			delete[] fExtraData;
251 			fExtraData = new(std::nothrow) char[fExtraDataSize];
252 			if (fExtraData != NULL)
253 				memcpy(fExtraData, infoBuffer, fExtraDataSize);
254 			else
255 				fExtraDataSize = 0;
256 		}
257 
258 		fInputFormat = *ioEncodedFormat;
259 		return B_OK;
260 	} else {
261 		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
262 	}
263 
264 	printf("AVCodecDecoder::Setup failed!\n");
265 	return B_ERROR;
266 }
267 
268 
269 status_t
270 AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
271 {
272 	status_t ret = B_OK;
273 	// Reset the FFmpeg codec to flush buffers, so we keep the sync
274 	if (fCodecInitDone) {
275 		avcodec_flush_buffers(fCodecContext);
276 		_ResetTempPacket();
277 	}
278 
279 	// Flush internal buffers as well.
280 	free(fChunkBuffer);
281 	fChunkBuffer = NULL;
282 	fChunkBufferSize = 0;
283 	fDecodedDataBufferOffset = 0;
284 	fDecodedDataBufferSize = 0;
285 	fDecodedDataSizeInBytes = 0;
286 
287 	fFrame = frame;
288 
289 	return ret;
290 }
291 
292 
293 status_t
294 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
295 {
296 	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
297 		fIsAudio?('a'):('v'));
298 
299 #ifdef TRACE_AV_CODEC
300 	char buffer[1024];
301 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
302 	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
303 #endif
304 
305 	// close any previous instance
306 	fCodecContext->extradata = NULL;
307 	avcodec_free_context(&fCodecContext);
308 	fCodecContext = avcodec_alloc_context3(fCodec);
309 	fCodecInitDone = false;
310 
311 	if (fIsAudio)
312 		return _NegotiateAudioOutputFormat(inOutFormat);
313 	else
314 		return _NegotiateVideoOutputFormat(inOutFormat);
315 }
316 
317 
318 status_t
319 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
320 	media_header* mediaHeader, media_decode_info* info)
321 {
322 	if (!fCodecInitDone)
323 		return B_NO_INIT;
324 
325 	status_t ret;
326 	if (fIsAudio)
327 		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
328 	else
329 		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);
330 
331 	return ret;
332 }
333 
334 
335 // #pragma mark -
336 
337 
338 void
339 AVCodecDecoder::_ResetTempPacket()
340 {
341 	if (fTempPacket == NULL)
342 		fTempPacket = av_packet_alloc();
343 	fTempPacket->size = 0;
344 	fTempPacket->data = NULL;
345 }
346 
347 
// Returns the context's channel count in a libavcodec-version-independent
// way: version 60 moved the count into AVChannelLayout (ch_layout) and
// removed the plain channels field.
static int
get_channel_count(AVCodecContext* context)
{
#if LIBAVCODEC_VERSION_MAJOR >= 60
	return context->ch_layout.nb_channels;
#else
	return context->channels;
#endif
}
357 
358 
// Sets the context's channel count; counterpart to get_channel_count()
// covering the libavcodec 60 move to AVChannelLayout.
static void
set_channel_count(AVCodecContext* context, int count)
{
#if LIBAVCODEC_VERSION_MAJOR >= 60
	context->ch_layout.nb_channels = count;
#else
	context->channels = count;
#endif
}
368 
369 
370 status_t
371 AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
372 {
373 	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");
374 
375 	_ApplyEssentialAudioContainerPropertiesToContext();
376 		// This makes audio formats play that encode the audio properties in
377 		// the audio container (e.g. WMA) and not in the audio frames
378 		// themself (e.g. MP3).
379 		// Note: Doing this step unconditionally is OK, because the first call
380 		// to _DecodeNextAudioFrameChunk() will update the essential audio
381 		// format properties accordingly regardless of the settings here.
382 
383 	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
384 		TRACE("avcodec_open() failed to init codec!\n");
385 		return B_ERROR;
386 	}
387 	fCodecInitDone = true;
388 
389 	free(fChunkBuffer);
390 	fChunkBuffer = NULL;
391 	fChunkBufferSize = 0;
392 	fAudioDecodeError = false;
393 	fDecodedDataBufferOffset = 0;
394 	fDecodedDataBufferSize = 0;
395 
396 	_ResetTempPacket();
397 
398 	status_t statusOfDecodingFirstFrameChunk = _DecodeNextAudioFrameChunk();
399 	if (statusOfDecodingFirstFrameChunk != B_OK) {
400 		TRACE("[a] decoding first audio frame chunk failed\n");
401 		return B_ERROR;
402 	}
403 
404 	media_multi_audio_format outputAudioFormat;
405 	outputAudioFormat = media_raw_audio_format::wildcard;
406 	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
407 	outputAudioFormat.frame_rate = fCodecContext->sample_rate;
408 	outputAudioFormat.channel_count = get_channel_count(fCodecContext);
409 	ConvertAVSampleFormatToRawAudioFormat(fCodecContext->sample_fmt,
410 		outputAudioFormat.format);
411 	// Check that format is not still a wild card!
412 	if (outputAudioFormat.format == 0) {
413 		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
414 		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
415 	}
416 	outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size;
417 	// Check that buffer_size has a sane value
418 	size_t sampleSize = outputAudioFormat.format
419 		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
420 	if (outputAudioFormat.buffer_size == 0) {
421 		outputAudioFormat.buffer_size = 512 * sampleSize
422 			* outputAudioFormat.channel_count;
423 	}
424 
425 	inOutFormat->type = B_MEDIA_RAW_AUDIO;
426 	inOutFormat->u.raw_audio = outputAudioFormat;
427 	inOutFormat->require_flags = 0;
428 	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
429 
430 	// Initialize variables needed to manage decoding as much audio frames as
431 	// needed to fill the buffer_size.
432 	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
433 	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
434 	fOutputFrameRate = outputAudioFormat.frame_rate;
435 	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt))
436 		fInputFrameSize = sampleSize;
437 	else
438 		fInputFrameSize = fOutputFrameSize;
439 
440 	fRawDecodedAudio->opaque
441 		= av_realloc(fRawDecodedAudio->opaque, sizeof(avformat_codec_context));
442 	if (fRawDecodedAudio->opaque == NULL)
443 		return B_NO_MEMORY;
444 
445 	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
446 		fResampleContext = NULL;
447 #if LIBAVCODEC_VERSION_MAJOR >= 60
448 		swr_alloc_set_opts2(&fResampleContext,
449 			&fCodecContext->ch_layout,
450 			fCodecContext->request_sample_fmt,
451 			fCodecContext->sample_rate,
452 			&fCodecContext->ch_layout,
453 			fCodecContext->sample_fmt,
454 			fCodecContext->sample_rate,
455 			0, NULL);
456 #else
457 		fResampleContext = swr_alloc_set_opts(NULL,
458 			fCodecContext->channel_layout,
459 			fCodecContext->request_sample_fmt,
460 			fCodecContext->sample_rate,
461 			fCodecContext->channel_layout,
462 			fCodecContext->sample_fmt,
463 			fCodecContext->sample_rate,
464 			0, NULL);
465 #endif
466 		swr_init(fResampleContext);
467 	}
468 
469 	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, "
470 		"output frame size: %d, count: %ld, rate: %.2f\n",
471 		fCodecContext->bit_rate, fCodecContext->sample_rate, fCodecContext->channels,
472 		fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);
473 
474 	return B_OK;
475 }
476 
477 
/*!	\brief Negotiates the raw video output format.

	Opens the codec, decodes the first frame so that fHeader and the codec
	context reflect actual stream data, then fills \a inOutFormat with the
	resulting display properties.

	\returns B_OK on success, B_ERROR when the codec could not be opened or
		the first frame failed to decode.
*/
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	_ApplyEssentialVideoContainerPropertiesToContext();
		// This makes video formats play that encode the video properties in
		// the video container (e.g. WMV) and not in the video frames
		// themself (e.g. MPEG2).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextVideoFrame() will update the essential video format
		// properties accordingly regardless of the settings here.

	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}
	fCodecInitDone = true;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fOutputColorSpace = B_RGB32;
#else
	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;
#endif

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	// Start from a clean slate: no buffered encoded data.
	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;

	_ResetTempPacket();

	// Decode one frame up front so that fHeader and the display properties
	// copied below are based on actual stream data.
	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;
	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active
		= fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect
		= fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect
		= fHeader.u.raw_video.pixel_height_aspect;
	// The framerate in fCodecContext is set to 0 if the codec doesn't know the framerate. Some
	// codecs work only at a fixed framerate, while others allow each frame to have its own
	// timestamp. For example a stream may switch from 50 to 60Hz, depending on how it was
	// constructed. In that case, it's fine to leave the field_rate as 0 as well, the media kit
	// will handle that just fine as long as each frame comes with a correct presentation timestamp.
	// In fact, it seems better to not set the field_rate at all, rather than set it to a wrong
	// value.
	//
	// TODO The field_rate is twice the frame rate for interlaced streams, so we need to determine
	// if we are decoding an interlaced stream, and whether ffmpeg delivers every half-frame or not
	// in that case (since we let ffmpeg do the deinterlacing).
	inOutFormat->u.raw_video.field_rate = av_q2d(fCodecContext->framerate);
	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width
		= fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count
		= fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row
		= fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}
589 
590 
591 /*! \brief Fills the outBuffer with one or more already decoded audio frames.
592 
593 	Besides the main duty described above, this method also fills out the other
594 	output parameters as documented below.
595 
596 	\param outBuffer Pointer to the output buffer to copy the decoded audio
597 		frames to.
598 	\param outFrameCount Pointer to the output variable to assign the number of
599 		copied audio frames (usually several audio frames at once).
600 	\param mediaHeader Pointer to the output media header that contains the
601 		properties of the decoded audio frame being the first in the outBuffer.
602 	\param info Specifies additional decoding parameters. (Note: unused).
603 
604 	\returns B_OK Decoding audio frames succeeded.
605 	\returns B_LAST_BUFFER_ERROR There are no more audio frames available.
606 	\returns Other error codes
607 */
608 status_t
609 AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
610 	media_header* mediaHeader, media_decode_info* info)
611 {
612 	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
613 		mediaHeader->start_time / 1000000.0);
614 
615 	status_t audioDecodingStatus
616 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextAudioFrame();
617 
618 	if (audioDecodingStatus != B_OK)
619 		return audioDecodingStatus;
620 
621 	*outFrameCount = fDecodedDataSizeInBytes / fOutputFrameSize;
622 	*mediaHeader = fHeader;
623 	memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes);
624 
625 	fDecodedDataSizeInBytes = 0;
626 
627 	return B_OK;
628 }
629 
630 
631 /*! \brief Fills the outBuffer with an already decoded video frame.
632 
633 	Besides the main duty described above, this method also fills out the other
634 	output parameters as documented below.
635 
636 	\param outBuffer Pointer to the output buffer to copy the decoded video
637 		frame to.
638 	\param outFrameCount Pointer to the output variable to assign the number of
639 		copied video frames (usually one video frame).
640 	\param mediaHeader Pointer to the output media header that contains the
641 		decoded video frame properties.
642 	\param info Specifies additional decoding parameters. (Note: unused).
643 
644 	\returns B_OK Decoding a video frame succeeded.
645 	\returns B_LAST_BUFFER_ERROR There are no more video frames available.
646 	\returns Other error codes
647 */
648 status_t
649 AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
650 	media_header* mediaHeader, media_decode_info* info)
651 {
652 	status_t videoDecodingStatus
653 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame();
654 
655 	if (videoDecodingStatus != B_OK)
656 		return videoDecodingStatus;
657 
658 	*outFrameCount = 1;
659 	*mediaHeader = fHeader;
660 	memcpy(outBuffer, fDecodedData, mediaHeader->size_used);
661 
662 	fDecodedDataSizeInBytes = 0;
663 
664 	return B_OK;
665 }
666 
667 
668 /*!	\brief Decodes next audio frame.
669 
670 	We decode at least one audio frame into fDecodedData. To achieve this goal,
671     we might need to request several chunks of encoded data resulting in a
672     variable execution time of this function.
673 
674     The length of the decoded audio frame(s) is stored in
675     fDecodedDataSizeInBytes. If this variable is greater than zero you can
676     assert that all audio frames in fDecodedData are valid.
677 
678 	It is assumed that the number of expected audio frames is stored in
679 	fOutputFrameCount. So _DecodeNextAudioFrame() must be called only after
680 	fOutputFrameCount has been set.
681 
682 	Note: fOutputFrameCount contains the maximum number of frames a caller
683 	of BMediaDecoder::Decode() expects to receive. There is a direct
684 	relationship between fOutputFrameCount and the buffer size a caller of
685 	BMediaDecoder::Decode() will provide so we make sure to respect this limit
686 	for fDecodedDataSizeInBytes.
687 
688 	On return with status code B_OK the following conditions hold true:
689 		1. fDecodedData contains as much audio frames as the caller of
690 		   BMediaDecoder::Decode() expects.
		2. fDecodedData contains fewer audio frames than the caller of
692 		   BMediaDecoder::Decode() expects only when one of the following
693 		   conditions hold true:
694 		       i  No more audio frames left. Consecutive calls to
695 		          _DecodeNextAudioFrame() will then result in the return of
696 		          status code B_LAST_BUFFER_ERROR.
697 		       ii TODO: A change in the size of the audio frames.
698 		3. fHeader is populated with the audio frame properties of the first
699 		   audio frame in fDecodedData. Especially the start_time field of
700 		   fHeader relates to that first audio frame. Start times of
701 		   consecutive audio frames in fDecodedData have to be calculated
702 		   manually (using the frame rate and the frame duration) if the
703 		   caller needs them.
704 
705 	TODO: Handle change of channel_count. Such a change results in a change of
706 	the audio frame size and thus has different buffer requirements.
707 	The most sane approach for implementing this is to return the audio frames
708 	that were still decoded with the previous channel_count and inform the
709 	client of BMediaDecoder::Decode() about the change so that it can adapt to
710 	it. Furthermore we need to adapt our fDecodedData to the new buffer size
711 	requirements accordingly.
712 
713 	\returns B_OK when we successfully decoded enough audio frames
714 	\returns B_LAST_BUFFER_ERROR when there are no more audio frames available.
715 	\returns Other Errors
716 */
status_t
AVCodecDecoder::_DecodeNextAudioFrame()
{
	assert(fTempPacket->size >= 0);
	assert(fDecodedDataSizeInBytes == 0);
		// _DecodeNextAudioFrame needs to be called on empty fDecodedData only!
		// If this assert holds wrong we have a bug somewhere.

	// Rewind fRawDecodedAudio so that frames accumulate into fDecodedData
	// from the start (also allocates fDecodedData on first use).
	status_t resetStatus = _ResetRawDecodedAudio();
	if (resetStatus != B_OK)
		return resetStatus;

	// Collect frames until the caller's expected count is reached.
	while (fRawDecodedAudio->nb_samples < fOutputFrameCount) {
		_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow();

		// Drain frames already sitting in fDecodedDataBuffer before
		// decoding any new chunk.
		bool decodedDataBufferHasData = fDecodedDataBufferSize > 0;
		if (decodedDataBufferHasData) {
			_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes();
			continue;
		}

		// At end of stream, deliver whatever was collected so far; only a
		// later call with nothing collected returns B_LAST_BUFFER_ERROR.
		status_t decodeAudioChunkStatus = _DecodeNextAudioFrameChunk();
		if (decodeAudioChunkStatus == B_LAST_BUFFER_ERROR
				&& fRawDecodedAudio->nb_samples > 0)
			break;
		if (decodeAudioChunkStatus != B_OK)
			return decodeAudioChunkStatus;
	}

	fFrame += fRawDecodedAudio->nb_samples;
	// linesize[0] tracks the total byte size of the collected frames.
	fDecodedDataSizeInBytes = fRawDecodedAudio->linesize[0];

	_UpdateMediaHeaderForAudioFrame();

#ifdef DEBUG
	dump_ffframe_audio(fRawDecodedAudio, "ffaudi");
#endif

	TRACE_AUDIO("  frame count: %ld current: %lld\n",
		fRawDecodedAudio->nb_samples, fFrame);

	return B_OK;
}
760 
761 
762 /*!	\brief Applies all essential audio input properties to fCodecContext that were
763 		passed to AVCodecDecoder when Setup() was called.
764 
765 	Note: This function must be called before the AVCodec is opened via
766 	avcodec_open2(). Otherwise the behaviour of FFMPEG's audio decoding
767 	function avcodec_receive_frame() is undefined.
768 
769 	Essential properties applied from fInputFormat.u.encoded_audio:
770 		- bit_rate copied to fCodecContext->bit_rate
771 		- frame_size copied to fCodecContext->frame_size
772 		- output.format converted to fCodecContext->sample_fmt
773 		- output.frame_rate copied to fCodecContext->sample_rate
774 		- output.channel_count copied to fCodecContext->channels
775 
776 	Other essential properties being applied:
777 		- fBlockAlign to fCodecContext->block_align
778 		- fExtraData to fCodecContext->extradata
779 		- fExtraDataSize to fCodecContext->extradata_size
780 
781 	TODO: Either the following documentation section should be removed or this
782 	TODO when it is clear whether fInputFormat.MetaData() and
783 	fInputFormat.MetaDataSize() have to be applied to fCodecContext. See the related
784 	TODO in the method implementation.
785 	Only applied when fInputFormat.MetaDataSize() is greater than zero:
786 		- fInputFormat.MetaData() to fCodecContext->extradata
787 		- fInputFormat.MetaDataSize() to fCodecContext->extradata_size
788 */
789 void
790 AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
791 {
792 	media_encoded_audio_format containerProperties
793 		= fInputFormat.u.encoded_audio;
794 
795 	fCodecContext->bit_rate
796 		= static_cast<int>(containerProperties.bit_rate);
797 	fCodecContext->frame_size
798 		= static_cast<int>(containerProperties.frame_size);
799 	ConvertRawAudioFormatToAVSampleFormat(
800 		containerProperties.output.format, fCodecContext->sample_fmt);
801 	ConvertRawAudioFormatToAVSampleFormat(
802 		containerProperties.output.format, fCodecContext->request_sample_fmt);
803 	fCodecContext->sample_rate
804 		= static_cast<int>(containerProperties.output.frame_rate);
805 	int channel_count = static_cast<int>(containerProperties.output.channel_count);
806 	// Check that channel count is not still a wild card!
807 	if (channel_count == 0) {
808 		TRACE("  channel_count still a wild-card, assuming stereo.\n");
809 		set_channel_count(fCodecContext, 2);
810 	} else
811 		set_channel_count(fCodecContext, channel_count);
812 
813 	fCodecContext->block_align = fBlockAlign;
814 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
815 	fCodecContext->extradata_size = fExtraDataSize;
816 
817 	// TODO: This probably needs to go away, there is some misconception
818 	// about extra data / info buffer and meta data. See
819 	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
820 	// extradata_size into media_format::MetaData(), but used to ignore
821 	// the infoBuffer passed to GetStreamInfo(). I think this may be why
822 	// the code below was added.
823 	if (fInputFormat.MetaDataSize() > 0) {
824 		fCodecContext->extradata = static_cast<uint8_t*>(
825 			const_cast<void*>(fInputFormat.MetaData()));
826 		fCodecContext->extradata_size = fInputFormat.MetaDataSize();
827 	}
828 
829 	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
830 		"extradata_size %d\n",
831 		fCodecContext->bit_rate,
832 		fCodecContext->sample_rate,
833 		fCodecContext->channels,
834 		fCodecContext->block_align,
835 		fCodecContext->extradata_size);
836 }
837 
838 
839 /*!	\brief Resets important fields in fRawDecodedVideo to their default values.
840 
841 	Note: Also initializes fDecodedData if not done already.
842 
843 	\returns B_OK Resetting successfully completed.
844 	\returns B_NO_MEMORY No memory left for correct operation.
845 */
846 status_t
847 AVCodecDecoder::_ResetRawDecodedAudio()
848 {
849 	if (fDecodedData == NULL) {
850 		size_t maximumSizeOfDecodedData = fOutputFrameCount * fOutputFrameSize;
851 		fDecodedData
852 			= static_cast<uint8_t*>(malloc(maximumSizeOfDecodedData));
853 	}
854 	if (fDecodedData == NULL)
855 		return B_NO_MEMORY;
856 
857 	fRawDecodedAudio->data[0] = fDecodedData;
858 	fRawDecodedAudio->linesize[0] = 0;
859 	fRawDecodedAudio->format = AV_SAMPLE_FMT_NONE;
860 	fRawDecodedAudio->pkt_dts = AV_NOPTS_VALUE;
861 	fRawDecodedAudio->nb_samples = 0;
862 	memset(fRawDecodedAudio->opaque, 0, sizeof(avformat_codec_context));
863 
864 	return B_OK;
865 }
866 
867 
868 /*!	\brief Checks fDecodedDataBufferSize and fTempPacket for invalid values,
869 		reports them and assigns valid values.
870 
871 	Note: This method is intended to be called before any code is executed that
872 	deals with moving, loading or decoding any audio frames.
873 */
void
AVCodecDecoder::_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow()
{
	// A negative size means some code consumed more decoded data than the
	// buffer held; report it and clamp so decoding can continue.
	if (fDecodedDataBufferSize < 0) {
		fprintf(stderr, "Decoding read past the end of the decoded data "
			"buffer! %" B_PRId32 "\n", fDecodedDataBufferSize);
		fDecodedDataBufferSize = 0;
	}
	// Same for the temp packet: a negative size means we over-consumed it.
	if (fTempPacket->size < 0) {
		fprintf(stderr, "Decoding read past the end of the temp packet! %d\n",
			fTempPacket->size);
		fTempPacket->size = 0;
	}
}
888 
889 
/*!	\brief Moves audio frames from fDecodedDataBuffer to fRawDecodedAudio (and
		thus to fDecodedData) and updates the start times of fRawDecodedAudio,
		fDecodedDataBuffer and fTempPacket accordingly.

	When moving audio frames to fRawDecodedAudio this method also makes sure
	that the following important fields of fRawDecodedAudio are populated and
	updated with correct values:
		- fRawDecodedAudio->data[0]: Points to first free byte of fDecodedData
		- fRawDecodedAudio->linesize[0]: Total size of frames in fDecodedData
		- fRawDecodedAudio->format: Format of first audio frame
		- fRawDecodedAudio->pkt_dts: Start time of first audio frame
		- fRawDecodedAudio->nb_samples: Number of audio frames
		- fRawDecodedAudio->opaque: Contains the following fields for the first
		  audio frame:
		      - channels: Channel count of first audio frame
		      - sample_rate: Frame rate of first audio frame

	This function assumes to be called only when the following assumptions
	hold true:
		1. There are decoded audio frames available in fDecodedDataBuffer
		   meaning that fDecodedDataBufferSize is greater than zero.
		2. There is space left in fRawDecodedAudio to move some audio frames
		   in. This means that fRawDecodedAudio has lesser audio frames than
		   the maximum allowed (specified by fOutputFrameCount).
		3. The audio frame rate is known so that we can calculate the time
		   range (covered by the moved audio frames) to update the start times
		   accordingly.
		4. The field fRawDecodedAudio->opaque points to a memory block
		   representing a structure of type avformat_codec_context.

	After this function returns the caller can safely make the following
	assumptions:
		1. The number of decoded audio frames in fDecodedDataBuffer is
		   decreased though it may still be greater than zero.
		2. The number of frames in fRawDecodedAudio has increased and all
		   important fields are updated (see listing above).
		3. Start times of fDecodedDataBuffer and fTempPacket were increased
		   with the time range covered by the moved audio frames.

	Note: This function raises an exception (by calling the debugger), when
	fDecodedDataBufferSize is not a multiple of fOutputFrameSize.
*/
void
AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
{
	assert(fDecodedDataBufferSize > 0);
	assert(fRawDecodedAudio->nb_samples < fOutputFrameCount);
	assert(fOutputFrameRate > 0);

	// Move at most as many frames as fit into the remaining space of
	// fRawDecodedAudio, and no more than fDecodedDataBuffer still holds.
	int32 outFrames = fOutputFrameCount - fRawDecodedAudio->nb_samples;
	int32 inFrames = fDecodedDataBufferSize;

	int32 frames = min_c(outFrames, inFrames);
	if (frames == 0)
		debugger("fDecodedDataBufferSize not multiple of frame size!");

	// Some decoders do not support format conversion on themselves, or use
	// "planar" audio (each channel separated instead of interleaved samples).
	// In that case, we use swresample to convert the data
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
#if 0
		const uint8_t* ptr[8];
		for (int i = 0; i < 8; i++) {
			if (fDecodedDataBuffer->data[i] == NULL)
				ptr[i] = NULL;
			else
				ptr[i] = fDecodedDataBuffer->data[i] + fDecodedDataBufferOffset;
		}

		// When there are more input frames than space in the output buffer,
		// we could feed everything to swr and it would buffer the extra data.
		// However, there is no easy way to flush that data without feeding more
		// input, and it makes our timestamp computations fail.
		// So, we feed only as much frames as we can get out, and handle the
		// buffering ourselves.
		// TODO Ideally, we should try to size our output buffer so that it can
		// always hold all the output (swr provides helper functions for this)
		inFrames = frames;
		frames = swr_convert(fResampleContext, fRawDecodedAudio->data,
			outFrames, ptr, inFrames);

		if (frames < 0)
			debugger("resampling failed");
#else
		// interleave planar audio with same format
		// NOTE(review): this assumes fInputFrameSize is the per-channel
		// sample size, so one interleaved output frame occupies
		// channel count * fInputFrameSize bytes — confirm against the
		// initialization of fInputFrameSize/fOutputFrameSize.
		uintptr_t out = (uintptr_t)fRawDecodedAudio->data[0];
		int32 offset = fDecodedDataBufferOffset;
		for (int i = 0; i < frames; i++) {
			for (int j = 0; j < get_channel_count(fCodecContext); j++) {
				memcpy((void*)out, fDecodedDataBuffer->data[j]
					+ offset, fInputFrameSize);
				out += fInputFrameSize;
			}
			offset += fInputFrameSize;
		}
		outFrames = frames;
		inFrames = frames;
#endif
	} else {
		// Packed (interleaved) audio: a single contiguous copy suffices.
		memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
				+ fDecodedDataBufferOffset, frames * fOutputFrameSize);
		outFrames = frames;
		inFrames = frames;
	}

	// Bytes consumed from fDecodedDataBuffer and bytes produced into
	// fDecodedData, respectively.
	size_t remainingSize = inFrames * fInputFrameSize;
	size_t decodedSize = outFrames * fOutputFrameSize;
	fDecodedDataBufferSize -= inFrames;

	// fRawDecodedAudio->data[0] still pointing at the start of fDecodedData
	// means no frames were moved yet, so the "first frame" properties
	// (format, start time, channel layout) need to be captured now.
	bool firstAudioFramesCopiedToRawDecodedAudio
		= fRawDecodedAudio->data[0] != fDecodedData;
	if (!firstAudioFramesCopiedToRawDecodedAudio) {
		fRawDecodedAudio->format = fDecodedDataBuffer->format;
		fRawDecodedAudio->pkt_dts = fDecodedDataBuffer->pkt_dts;

		avformat_codec_context* codecContext
			= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
		codecContext->channels = get_channel_count(fCodecContext);
		codecContext->sample_rate = fCodecContext->sample_rate;
	}

	// Advance the write position / bookkeeping for the frames just moved.
	fRawDecodedAudio->data[0] += decodedSize;
	fRawDecodedAudio->linesize[0] += decodedSize;
	fRawDecodedAudio->nb_samples += outFrames;

	fDecodedDataBufferOffset += remainingSize;

	// Update start times accordingly
	bigtime_t framesTimeInterval = static_cast<bigtime_t>(
		(1000000LL * frames) / fOutputFrameRate);
	fDecodedDataBuffer->pkt_dts += framesTimeInterval;
	// Start time of buffer is updated in case that it contains
	// more audio frames to move.
	fTempPacket->dts += framesTimeInterval;
	// Start time of fTempPacket is updated in case the fTempPacket
	// contains more audio frames to decode.
}
1027 
1028 
1029 /*!	\brief Decodes next chunk of audio frames.
1030 
1031 	This method handles all the details of loading the input buffer
1032 	(fChunkBuffer) at the right time and of calling FFMPEG often engouh until
1033 	some audio frames have been decoded.
1034 
1035 	FFMPEG decides how much audio frames belong to a chunk. Because of that
1036 	it is very likely that _DecodeNextAudioFrameChunk has to be called several
1037 	times to decode enough audio frames to please the caller of
1038 	BMediaDecoder::Decode().
1039 
1040 	This function assumes to be called only when the following assumptions
1041 	hold true:
1042 		1. fDecodedDataBufferSize equals zero.
1043 
1044 	After this function returns successfully the caller can safely make the
1045 	following assumptions:
1046 		1. fDecodedDataBufferSize is greater than zero.
1047 		2. fDecodedDataBufferOffset is set to zero.
1048 		3. fDecodedDataBuffer contains audio frames.
1049 
1050 
1051 	\returns B_OK on successfully decoding one audio frame chunk.
1052 	\returns B_LAST_BUFFER_ERROR No more audio frame chunks available. From
1053 		this point on further calls will return this same error.
1054 	\returns B_ERROR Decoding failed
1055 */
1056 status_t
1057 AVCodecDecoder::_DecodeNextAudioFrameChunk()
1058 {
1059 	assert(fDecodedDataBufferSize == 0);
1060 
1061 	while (fDecodedDataBufferSize == 0) {
1062 		status_t loadingChunkStatus
1063 			= _LoadNextChunkIfNeededAndAssignStartTime();
1064 		if (loadingChunkStatus != B_OK)
1065 			return loadingChunkStatus;
1066 
1067 		status_t decodingStatus
1068 			= _DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer();
1069 		if (decodingStatus != B_OK) {
1070 			// Assume the audio decoded until now is broken so replace it with
1071 			// some silence.
1072 			memset(fDecodedData, 0, fRawDecodedAudio->linesize[0]);
1073 
1074 			if (!fAudioDecodeError) {
1075 				// Report failure if not done already
1076 				int32 chunkBufferOffset = fTempPacket->data - fChunkBuffer;
1077 				printf("########### audio decode error, "
1078 					"fTempPacket->size %d, fChunkBuffer data offset %" B_PRId32
1079 					"\n", fTempPacket->size, chunkBufferOffset);
1080 				fAudioDecodeError = true;
1081 			}
1082 
1083 			// Assume that next audio chunk can be decoded so keep decoding.
1084 			continue;
1085 		}
1086 
1087 		fAudioDecodeError = false;
1088 	}
1089 
1090 	return B_OK;
1091 }
1092 
1093 
1094 /*!	\brief Tries to decode at least one audio frame and store it in the
1095 		fDecodedDataBuffer.
1096 
1097 	This function assumes to be called only when the following assumptions
1098 	hold true:
1099 		1. fDecodedDataBufferSize equals zero.
1100 		2. fTempPacket->size is greater than zero.
1101 
1102 	After this function returns successfully the caller can safely make the
1103 	following assumptions:
1104 		1. fDecodedDataBufferSize is greater than zero in the common case.
1105 		   Also see "Note" below.
1106 		2. fTempPacket was updated to exclude the data chunk that was consumed
1107 		   by avcodec_send_packet().
1108 		3. fDecodedDataBufferOffset is set to zero.
1109 
1110 	When this function failed to decode at least one audio frame due to a
1111 	decoding error the caller can safely make the following assumptions:
1112 		1. fDecodedDataBufferSize equals zero.
1113 		2. fTempPacket->size equals zero.
1114 
1115 	Note: It is possible that there wasn't any audio frame decoded into
1116 	fDecodedDataBuffer after calling this function. This is normal and can
1117 	happen when there was either a decoding error or there is some decoding
1118 	delay in FFMPEGs audio decoder. Another call to this method is totally
1119 	safe and is even expected as long as the calling assumptions hold true.
1120 
1121 	\returns B_OK Decoding successful. fDecodedDataBuffer contains decoded
1122 		audio frames only when fDecodedDataBufferSize is greater than zero.
1123 		fDecodedDataBuffer is empty, when avcodec_receive_frame() didn't return
1124 		audio frames due to delayed decoding or incomplete audio frames.
1125 	\returns B_ERROR Decoding failed thus fDecodedDataBuffer contains no audio
1126 		frames.
1127 */
1128 status_t
1129 AVCodecDecoder::_DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer()
1130 {
1131 	assert(fDecodedDataBufferSize == 0);
1132 
1133 	av_frame_unref(fDecodedDataBuffer);
1134 	fDecodedDataBufferOffset = 0;
1135 
1136 	int error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
1137 	if (error == AVERROR_EOF)
1138 		return B_LAST_BUFFER_ERROR;
1139 
1140 	if (error == AVERROR(EAGAIN)) {
1141 		// We need to feed more data into the decoder
1142 		avcodec_send_packet(fCodecContext, fTempPacket);
1143 
1144 		// All the data is always consumed by avcodec_send_packet
1145 		fTempPacket->size = 0;
1146 
1147 		// Try again to see if we can get some decoded audio out now
1148 		error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
1149 	}
1150 
1151 	fDecodedDataBufferSize = fDecodedDataBuffer->nb_samples;
1152 	if (fDecodedDataBufferSize < 0)
1153 		fDecodedDataBufferSize = 0;
1154 
1155 	if (error == 0)
1156 		return B_OK;
1157 	else
1158 		return B_ERROR;
1159 }
1160 
1161 
1162 /*! \brief Updates relevant fields of the class member fHeader with the
1163 		properties of the most recently decoded audio frame.
1164 
1165 	The following fields of fHeader are updated:
1166 		- fHeader.type
1167 		- fHeader.file_pos
1168 		- fHeader.orig_size
1169 		- fHeader.start_time
1170 		- fHeader.size_used
1171 		- fHeader.u.raw_audio.frame_rate
1172 		- fHeader.u.raw_audio.channel_count
1173 
1174 	It is assumed that this function is called only	when the following asserts
1175 	hold true:
1176 		1. We actually got a new audio frame decoded by the audio decoder.
1177 		2. fHeader wasn't updated for the new audio frame yet. You MUST call
1178 		   this method only once per decoded audio frame.
1179 		3. fRawDecodedAudio's fields relate to the first audio frame contained
1180 		   in fDecodedData. Especially the following fields are of importance:
1181 		       - fRawDecodedAudio->pkt_dts: Start time of first audio frame
1182 		       - fRawDecodedAudio->opaque: Contains the following fields for
1183 		         the first audio frame:
1184 			         - channels: Channel count of first audio frame
1185 			         - sample_rate: Frame rate of first audio frame
1186 */
1187 void
1188 AVCodecDecoder::_UpdateMediaHeaderForAudioFrame()
1189 {
1190 	fHeader.type = B_MEDIA_RAW_AUDIO;
1191 	fHeader.file_pos = 0;
1192 	fHeader.orig_size = 0;
1193 	fHeader.start_time = fRawDecodedAudio->pkt_dts;
1194 	fHeader.size_used = fRawDecodedAudio->linesize[0];
1195 
1196 	avformat_codec_context* codecContext
1197 		= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
1198 	fHeader.u.raw_audio.channel_count = codecContext->channels;
1199 	fHeader.u.raw_audio.frame_rate = codecContext->sample_rate;
1200 }
1201 
1202 
1203 /*! \brief Decodes next video frame.
1204 
1205     We decode exactly one video frame into fDecodedData. To achieve this goal,
1206     we might need to request several chunks of encoded data resulting in a
1207     variable execution time of this function.
1208 
1209     The length of the decoded video frame is stored in
1210     fDecodedDataSizeInBytes. If this variable is greater than zero, you can
1211     assert that there is a valid video frame available in fDecodedData.
1212 
1213     The decoded video frame in fDecodedData has color space conversion and
1214     deinterlacing already applied.
1215 
1216     To every decoded video frame there is a media_header populated in
1217     fHeader, containing the corresponding video frame properties.
1218 
1219 	Normally every decoded video frame has a start_time field populated in the
1220 	associated fHeader, that determines the presentation time of the frame.
1221 	This relationship will only hold true, when each data chunk that is
1222 	provided via GetNextChunk() contains data for exactly one encoded video
1223 	frame (one complete frame) - not more and not less.
1224 
1225 	We can decode data chunks that contain partial video frame data, too. In
1226 	that case, you cannot trust the value of the start_time field in fHeader.
1227 	We simply have no logic in place to establish a meaningful relationship
1228 	between an incomplete frame and the start time it should be presented.
1229 	Though this	might change in the future.
1230 
1231 	We can decode data chunks that contain more than one video frame, too. In
1232 	that case, you cannot trust the value of the start_time field in fHeader.
1233 	We simply have no logic in place to track the start_time across multiple
1234 	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
1235 	and the start time it should be presented isn't established at the moment.
1236 	Though this	might change in the future.
1237 
1238 	On first call the member variables fSwsContext / fFormatConversionFunc	are
1239 	initialized.
1240 
1241 	\returns B_OK when we successfully decoded one video frame
1242 	\returns B_LAST_BUFFER_ERROR when there are no more video frames available.
1243 	\returns B_NO_MEMORY when we have no memory left for correct operation.
1244 	\returns Other Errors
1245 */
1246 status_t
1247 AVCodecDecoder::_DecodeNextVideoFrame()
1248 {
1249 	int error;
1250 	int send_error;
1251 
1252 #if DO_PROFILING
1253 	bigtime_t startTime = system_time();
1254 #endif
1255 
1256 	error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1257 
1258 	if (error == AVERROR_EOF)
1259 		return B_LAST_BUFFER_ERROR;
1260 
1261 	if (error == AVERROR(EAGAIN)) {
1262 		do {
1263 			status_t loadingChunkStatus
1264 				= _LoadNextChunkIfNeededAndAssignStartTime();
1265 			if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
1266 				return _FlushOneVideoFrameFromDecoderBuffer();
1267 			if (loadingChunkStatus != B_OK) {
1268 				TRACE("[v] AVCodecDecoder::_DecodeNextVideoFrame(): error from "
1269 					"GetNextChunk(): %s\n", strerror(loadingChunkStatus));
1270 				return loadingChunkStatus;
1271 			}
1272 
1273 			char timestamp[AV_TS_MAX_STRING_SIZE];
1274 			av_ts_make_time_string(timestamp,
1275 				fTempPacket->dts, &fCodecContext->time_base);
1276 			TRACE("[v] Feed %d more bytes (dts %s)\n", fTempPacket->size,
1277 				timestamp);
1278 
1279 			send_error = avcodec_send_packet(fCodecContext, fTempPacket);
1280 			if (send_error < 0 && send_error != AVERROR(EAGAIN)) {
1281 				TRACE("[v] AVCodecDecoder: ignoring error in decoding frame "
1282 				"%lld: %d\n", fFrame, error);
1283 			}
1284 
1285 			// Packet is consumed, clear it
1286 			fTempPacket->data = NULL;
1287 			fTempPacket->size = 0;
1288 
1289 			error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1290 			if (error != 0 && error != AVERROR(EAGAIN)) {
1291 				TRACE("[v] frame %lld - decoding error, error code: %d, "
1292 					"chunk size: %ld\n", fFrame, error, fChunkBufferSize);
1293 			}
1294 
1295 		} while (error != 0);
1296 	}
1297 
1298 #if DO_PROFILING
1299 	bigtime_t formatConversionStart = system_time();
1300 #endif
1301 
1302 	status_t handleStatus = _HandleNewVideoFrameAndUpdateSystemState();
1303 	if (handleStatus != B_OK)
1304 		return handleStatus;
1305 
1306 #if DO_PROFILING
1307 	bigtime_t doneTime = system_time();
1308 	decodingTime += formatConversionStart - startTime;
1309 	conversionTime += doneTime - formatConversionStart;
1310 	profileCounter++;
1311 	if (!(fFrame % 5)) {
1312 		printf("[v] profile: d1 = %lld, d2 = %lld (%lld)\n",
1313 			decodingTime / profileCounter, conversionTime / profileCounter,
1314 			fFrame);
1315 		decodingTime = 0;
1316 		conversionTime = 0;
1317 		profileCounter = 0;
1318 	}
1319 #endif
1320 	return error;
1321 }
1322 
1323 
1324 /*!	\brief Applies all essential video input properties to fCodecContext that were
1325 		passed to AVCodecDecoder when Setup() was called.
1326 
1327 	Note: This function must be called before the AVCodec is opened via
1328 	avcodec_open2(). Otherwise the behaviour of FFMPEG's video decoding
1329 	function avcodec_decode_video2() is undefined.
1330 
1331 	Essential properties applied from fInputFormat.u.encoded_video.output:
1332 		- display.line_width copied to fCodecContext->width
1333 		- display.line_count copied to fCodecContext->height
1334 		- pixel_width_aspect and pixel_height_aspect converted to
1335 		  fCodecContext->sample_aspect_ratio
1336 		- field_rate converted to fCodecContext->time_base and
1337 		  fCodecContext->ticks_per_frame
1338 
1339 	Other essential properties being applied:
1340 		- fExtraData to fCodecContext->extradata
1341 		- fExtraDataSize to fCodecContext->extradata_size
1342 */
1343 void
1344 AVCodecDecoder::_ApplyEssentialVideoContainerPropertiesToContext()
1345 {
1346 	media_raw_video_format containerProperties
1347 		= fInputFormat.u.encoded_video.output;
1348 
1349 	fCodecContext->width = containerProperties.display.line_width;
1350 	fCodecContext->height = containerProperties.display.line_count;
1351 
1352 	if (containerProperties.pixel_width_aspect > 0
1353 		&& containerProperties.pixel_height_aspect > 0) {
1354 		ConvertVideoAspectWidthAndHeightToAVCodecContext(
1355 			containerProperties.pixel_width_aspect,
1356 			containerProperties.pixel_height_aspect, *fCodecContext);
1357 	}
1358 
1359 	if (containerProperties.field_rate > 0.0) {
1360 		ConvertVideoFrameRateToAVCodecContext(containerProperties.field_rate,
1361 			*fCodecContext);
1362 	}
1363 
1364 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
1365 	fCodecContext->extradata_size = fExtraDataSize;
1366 }
1367 
1368 
1369 /*! \brief Loads the next  chunk into fChunkBuffer and assigns it (including
1370 		the start time) to fTempPacket but only if fTempPacket is empty.
1371 
1372 	\returns B_OK
1373 		1. meaning: Next chunk is loaded.
1374 		2. meaning: No need to load and assign anything. Proceed as usual.
1375 	\returns B_LAST_BUFFER_ERROR No more chunks available. fChunkBuffer	and
1376 		fTempPacket are left untouched.
1377 	\returns Other errors Caller should bail out because fChunkBuffer and
1378 		fTempPacket are in unknown states. Normal operation cannot be
1379 		guaranteed.
1380 */
1381 status_t
1382 AVCodecDecoder::_LoadNextChunkIfNeededAndAssignStartTime()
1383 {
1384 	if (fTempPacket->size > 0)
1385 		return B_OK;
1386 
1387 	const void* chunkBuffer = NULL;
1388 	size_t chunkBufferSize = 0;
1389 		// In the case that GetNextChunk() returns an error fChunkBufferSize
1390 		// should be left untouched.
1391 	media_header chunkMediaHeader;
1392 
1393 	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer, &chunkBufferSize,
1394 		&chunkMediaHeader);
1395 	if (getNextChunkStatus != B_OK)
1396 		return getNextChunkStatus;
1397 
1398 	status_t chunkBufferPaddingStatus
1399 		= _CopyChunkToChunkBufferAndAddPadding(chunkBuffer, chunkBufferSize);
1400 	if (chunkBufferPaddingStatus != B_OK)
1401 		return chunkBufferPaddingStatus;
1402 
1403 	fTempPacket->data = fChunkBuffer;
1404 	fTempPacket->size = fChunkBufferSize;
1405 	fTempPacket->dts = chunkMediaHeader.start_time;
1406 		// Let FFMPEG handle the correct relationship between start_time and
1407 		// decoded a/v frame. By doing so we are simply copying the way how it
1408 		// is implemented in ffplay.c for video frames (for audio frames it
1409 		// works, too, but isn't used by ffplay.c).
1410 		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502
1411 		//
1412 		// FIXME: Research how to establish a meaningful relationship between
1413 		// start_time and decoded a/v frame when the received chunk buffer
1414 		// contains partial a/v frames. Maybe some data formats do contain time
1415 		// stamps (ake pts / dts fields) that can be evaluated by FFMPEG. But
1416 		// as long as I don't have such video data to test it, it makes no
1417 		// sense trying to implement it.
1418 		//
1419 		// FIXME: Implement tracking start_time of video frames originating in
1420 		// data chunks that encode more than one video frame at a time. In that
1421 		// case on would increment the start_time for each consecutive frame of
1422 		// such a data chunk (like it is done for audio frame decoding). But as
1423 		// long as I don't have such video data to test it, it makes no sense
1424 		// to implement it.
1425 
1426 #ifdef LOG_STREAM_TO_FILE
1427 	BFile* logFile = fIsAudio ? &sAudioStreamLogFile : &sVideoStreamLogFile;
1428 	if (sDumpedPackets < 100) {
1429 		logFile->Write(chunkBuffer, fChunkBufferSize);
1430 		printf("wrote %ld bytes\n", fChunkBufferSize);
1431 		sDumpedPackets++;
1432 	} else if (sDumpedPackets == 100)
1433 		logFile->Unset();
1434 #endif
1435 
1436 	return B_OK;
1437 }
1438 
1439 
1440 /*! \brief Copies a chunk into fChunkBuffer and adds a "safety net" of
1441 		additional memory as required by FFMPEG for input buffers to video
1442 		decoders.
1443 
1444 	This is needed so that some decoders can read safely a predefined number of
1445 	bytes at a time for performance optimization purposes.
1446 
1447 	The additional memory has a size of AV_INPUT_BUFFER_PADDING_SIZE as defined
1448 	in avcodec.h.
1449 
1450 	Ownership of fChunkBuffer memory is with the class so it needs to be freed
1451 	at the right times (on destruction, on seeking).
1452 
1453 	Also update fChunkBufferSize to reflect the size of the contained data
1454 	(leaving out the padding).
1455 
1456 	\param chunk The chunk to copy.
1457 	\param chunkSize Size of the chunk in bytes
1458 
1459 	\returns B_OK Padding was successful. You are responsible for releasing the
1460 		allocated memory. fChunkBufferSize is set to chunkSize.
1461 	\returns B_NO_MEMORY Padding failed.
1462 		fChunkBuffer is set to NULL making it safe to call free() on it.
1463 		fChunkBufferSize is set to 0 to reflect the size of fChunkBuffer.
1464 */
1465 status_t
1466 AVCodecDecoder::_CopyChunkToChunkBufferAndAddPadding(const void* chunk,
1467 	size_t chunkSize)
1468 {
1469 	uint8_t* tmpBuffer = static_cast<uint8_t*>(realloc(fChunkBuffer,
1470 		chunkSize + AV_INPUT_BUFFER_PADDING_SIZE));
1471 	if (tmpBuffer == NULL) {
1472 		free(fChunkBuffer);
1473 		fChunkBuffer = NULL;
1474 		fChunkBufferSize = 0;
1475 		return B_NO_MEMORY;
1476 	} else {
1477 		fChunkBuffer = tmpBuffer;
1478 	}
1479 
1480 	memcpy(fChunkBuffer, chunk, chunkSize);
1481 	memset(fChunkBuffer + chunkSize, 0, AV_INPUT_BUFFER_PADDING_SIZE);
1482 		// Establish safety net, by zero'ing the padding area.
1483 
1484 	fChunkBufferSize = chunkSize;
1485 
1486 	return B_OK;
1487 }
1488 
1489 
1490 /*! \brief Executes all steps needed for a freshly decoded video frame.
1491 
1492 	\see _UpdateMediaHeaderForVideoFrame() and
1493 	\see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to
1494 	call this method.
1495 
1496 	\returns B_OK when video frame was handled successfully
1497 	\returnb B_NO_MEMORY when no memory is left for correct operation.
1498 */
1499 status_t
1500 AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState()
1501 {
1502 	_UpdateMediaHeaderForVideoFrame();
1503 	status_t postProcessStatus = _DeinterlaceAndColorConvertVideoFrame();
1504 	if (postProcessStatus != B_OK)
1505 		return postProcessStatus;
1506 
1507 #ifdef DEBUG
1508 	dump_ffframe_video(fRawDecodedPicture, "ffpict");
1509 #endif
1510 
1511 	fFrame++;
1512 
1513 	return B_OK;
1514 }
1515 
1516 
1517 /*! \brief Flushes one video frame - if any - still buffered by the decoder.
1518 
1519 	Some FFMPEG decoder are buffering video frames. To retrieve those buffered
1520 	frames the decoder needs to be told so.
1521 
1522 	The intended use of this method is to call it, once there are no more data
1523 	chunks for decoding left. Reframed in other words: Once GetNextChunk()
1524 	returns with status B_LAST_BUFFER_ERROR it is time to start flushing.
1525 
1526 	\returns B_OK Retrieved one video frame, handled it accordingly and updated
1527 		the system state accordingly.
1528 		There maybe more video frames left. So it is valid for the client of
1529 		AVCodecDecoder to call it one more time.
1530 
1531 	\returns B_LAST_BUFFER_ERROR No video frame left.
1532 		The client of the AVCodecDecoder should stop calling it now.
1533 
1534 	\returns B_NO_MEMORY No memory left for correct operation.
1535 */
1536 status_t
1537 AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer()
1538 {
1539 	// Tell the decoder there is nothing to send anymore
1540 	avcodec_send_packet(fCodecContext, NULL);
1541 
1542 	// Get any remaining frame
1543 	int error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1544 
1545 	if (error != 0 && error != AVERROR(EAGAIN)) {
1546 		// video buffer is flushed successfully
1547 		// (or there is an error, not much we can do about it)
1548 		return B_LAST_BUFFER_ERROR;
1549 	}
1550 
1551 	return _HandleNewVideoFrameAndUpdateSystemState();
1552 }
1553 
1554 
1555 /*! \brief Updates relevant fields of the class member fHeader with the
1556 		properties of the most recently decoded video frame.
1557 
1558 	It is assumed that this function is called only	when the following asserts
1559 	hold true:
1560 		1. We actually got a new picture decoded by the video decoder.
1561 		2. fHeader wasn't updated for the new picture yet. You MUST call this
1562 		   method only once per decoded video frame.
1563 		3. This function MUST be called after
1564 		   _DeinterlaceAndColorConvertVideoFrame() as it relys on an updated
1565 		    fDecodedDataSizeInBytes.
1566 		4. There will be at maximumn only one decoded video frame in our cache
1567 		   at any single point in time. Otherwise you couldn't tell to which
1568 		   cached decoded video frame the properties in fHeader relate to.
1569 		5. AVCodecContext is still valid for this video frame (This is the case
1570 		   when this function is called after avcodec_decode_video2() and
1571 		   before the next call to avcodec_decode_video2().
1572 */
1573 void
1574 AVCodecDecoder::_UpdateMediaHeaderForVideoFrame()
1575 {
1576 	fHeader.type = B_MEDIA_RAW_VIDEO;
1577 	fHeader.file_pos = 0;
1578 	fHeader.orig_size = 0;
1579 	fHeader.start_time = fRawDecodedPicture->pkt_dts;
1580 		// The pkt_dts is already in microseconds, even if ffmpeg docs says
1581 		// 'in codec time_base units'
1582 	fHeader.size_used = av_image_get_buffer_size(
1583 		colorspace_to_pixfmt(fOutputColorSpace), fRawDecodedPicture->width,
1584 		fRawDecodedPicture->height, 1);
1585 	fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width;
1586 	fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height;
1587 	fHeader.u.raw_video.bytes_per_row
1588 		= CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace,
1589 			fRawDecodedPicture->width);
1590 	fHeader.u.raw_video.field_gamma = 1.0;
1591 	fHeader.u.raw_video.field_sequence = fFrame;
1592 	fHeader.u.raw_video.field_number = 0;
1593 	fHeader.u.raw_video.pulldown_number = 0;
1594 	fHeader.u.raw_video.first_active_line = 1;
1595 	fHeader.u.raw_video.line_count = fRawDecodedPicture->height;
1596 
1597 	ConvertAVCodecContextToVideoAspectWidthAndHeight(*fCodecContext,
1598 		fHeader.u.raw_video.pixel_width_aspect,
1599 		fHeader.u.raw_video.pixel_height_aspect);
1600 
1601 	char timestamp[AV_TS_MAX_STRING_SIZE];
1602 	av_ts_make_time_string(timestamp,
1603 		fRawDecodedPicture->best_effort_timestamp, &fCodecContext->time_base);
1604 
1605 	TRACE("[v] start_time=%s field_sequence=%lu\n",
1606 		timestamp, fHeader.u.raw_video.field_sequence);
1607 }
1608 
1609 
1610 /*! \brief This function applies deinterlacing (only if needed) and color
1611 	conversion to the video frame in fRawDecodedPicture.
1612 
1613 	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
1614 	converted yet (otherwise this function behaves in unknown manners).
1615 
1616 	This function MUST be called after _UpdateMediaHeaderForVideoFrame() as it
1617 	relys on the fHeader.size_used and fHeader.u.raw_video.bytes_per_row fields
1618 	for correct operation
1619 
1620 	You should only call this function when you	got a new picture decoded by
1621 	the video decoder.
1622 
1623 	When this function finishes the postprocessed video frame will be available
1624 	in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes
1625 	will be set accordingly).
1626 
1627 	\returns B_OK video frame successfully deinterlaced and color converted.
1628 	\returns B_NO_MEMORY Not enough memory available for correct operation.
1629 */
1630 status_t
1631 AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
1632 {
1633 	int displayWidth = fRawDecodedPicture->width;
1634 	int displayHeight = fRawDecodedPicture->height;
1635 	AVFrame deinterlacedPicture;
1636 	bool useDeinterlacedPicture = false;
1637 
1638 #if LIBAVCODEC_VERSION_MAJOR >= 60
1639 	if (fRawDecodedPicture->flags & AV_FRAME_FLAG_INTERLACED) {
1640 #else
1641 	if (fRawDecodedPicture->interlaced_frame) {
1642 #endif
1643 		AVFrame rawPicture;
1644 		rawPicture.data[0] = fRawDecodedPicture->data[0];
1645 		rawPicture.data[1] = fRawDecodedPicture->data[1];
1646 		rawPicture.data[2] = fRawDecodedPicture->data[2];
1647 		rawPicture.data[3] = fRawDecodedPicture->data[3];
1648 		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
1649 		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
1650 		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
1651 		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];
1652 
1653 		if (av_image_alloc(deinterlacedPicture.data,
1654 				deinterlacedPicture.linesize, displayWidth, displayHeight,
1655 				fCodecContext->pix_fmt, 1) < 0)
1656 			return B_NO_MEMORY;
1657 
1658 		// deinterlace implemented using avfilter
1659 		_ProcessFilterGraph(&deinterlacedPicture, &rawPicture,
1660 			fCodecContext->pix_fmt, displayWidth, displayHeight);
1661 		useDeinterlacedPicture = true;
1662 	}
1663 
1664 	// Some decoders do not set pix_fmt until they have decoded 1 frame
1665 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1666 	if (fSwsContext == NULL) {
1667 		fSwsContext = sws_getContext(displayWidth, displayHeight,
1668 			fCodecContext->pix_fmt, displayWidth, displayHeight,
1669 			colorspace_to_pixfmt(fOutputColorSpace),
1670 			SWS_FAST_BILINEAR, NULL, NULL, NULL);
1671 	}
1672 #else
1673 	if (fFormatConversionFunc == NULL) {
1674 		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
1675 			fCodecContext->pix_fmt, displayWidth, displayHeight);
1676 	}
1677 #endif
1678 
1679 	fDecodedDataSizeInBytes = fHeader.size_used;
1680 
1681 	if (fDecodedData == NULL) {
1682 		const size_t kOptimalAlignmentForColorConversion = 32;
1683 		posix_memalign(reinterpret_cast<void**>(&fDecodedData),
1684 			kOptimalAlignmentForColorConversion, fDecodedDataSizeInBytes);
1685 	}
1686 	if (fDecodedData == NULL)
1687 		return B_NO_MEMORY;
1688 
1689 	fPostProcessedDecodedPicture->data[0] = fDecodedData;
1690 	fPostProcessedDecodedPicture->linesize[0]
1691 		= fHeader.u.raw_video.bytes_per_row;
1692 
1693 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1694 	if (fSwsContext != NULL) {
1695 #else
1696 	if (fFormatConversionFunc != NULL) {
1697 #endif
1698 		if (useDeinterlacedPicture) {
1699 			AVFrame deinterlacedFrame;
1700 			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
1701 			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
1702 			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
1703 			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
1704 			deinterlacedFrame.linesize[0]
1705 				= deinterlacedPicture.linesize[0];
1706 			deinterlacedFrame.linesize[1]
1707 				= deinterlacedPicture.linesize[1];
1708 			deinterlacedFrame.linesize[2]
1709 				= deinterlacedPicture.linesize[2];
1710 			deinterlacedFrame.linesize[3]
1711 				= deinterlacedPicture.linesize[3];
1712 
1713 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1714 			sws_scale(fSwsContext, deinterlacedFrame.data,
1715 				deinterlacedFrame.linesize, 0, displayHeight,
1716 				fPostProcessedDecodedPicture->data,
1717 				fPostProcessedDecodedPicture->linesize);
1718 #else
1719 			(*fFormatConversionFunc)(&deinterlacedFrame,
1720 				fPostProcessedDecodedPicture, displayWidth, displayHeight);
1721 #endif
1722 		} else {
1723 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1724 			sws_scale(fSwsContext, fRawDecodedPicture->data,
1725 				fRawDecodedPicture->linesize, 0, displayHeight,
1726 				fPostProcessedDecodedPicture->data,
1727 				fPostProcessedDecodedPicture->linesize);
1728 #else
1729 			(*fFormatConversionFunc)(fRawDecodedPicture,
1730 				fPostProcessedDecodedPicture, displayWidth, displayHeight);
1731 #endif
1732 		}
1733 	}
1734 
1735 #if LIBAVCODEC_VERSION_MAJOR >= 60
1736 	if (fRawDecodedPicture->flags & AV_FRAME_FLAG_INTERLACED)
1737 #else
1738 	if (fRawDecodedPicture->interlaced_frame)
1739 #endif
1740 		av_freep(&deinterlacedPicture.data[0]);
1741 
1742 	return B_OK;
1743 }
1744 
1745 
1746 /*! \brief Init the deinterlace filter graph.
1747 
1748 	\returns B_OK the filter graph could be built.
1749 	\returns B_BAD_VALUE something was wrong with building the graph.
1750 */
1751 status_t
1752 AVCodecDecoder::_InitFilterGraph(enum AVPixelFormat pixfmt, int32 width,
1753 	int32 height)
1754 {
1755 	if (fFilterGraph != NULL) {
1756 		av_frame_free(&fFilterFrame);
1757 		avfilter_graph_free(&fFilterGraph);
1758 	}
1759 
1760 	fFilterGraph = avfilter_graph_alloc();
1761 
1762 	BString arguments;
1763 	arguments.SetToFormat("buffer=video_size=%" B_PRId32 "x%" B_PRId32
1764 		":pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];[in]yadif[out];"
1765 		"[out]buffersink", width, height,
1766 		pixfmt);
1767 	AVFilterInOut* inputs = NULL;
1768 	AVFilterInOut* outputs = NULL;
1769 	TRACE("[v] _InitFilterGraph(): %s\n", arguments.String());
1770 	int ret = avfilter_graph_parse2(fFilterGraph, arguments.String(), &inputs,
1771 		&outputs);
1772 	if (ret < 0) {
1773 		fprintf(stderr, "avfilter_graph_parse2() failed\n");
1774 		return B_BAD_VALUE;
1775 	}
1776 
1777 	ret = avfilter_graph_config(fFilterGraph, NULL);
1778 	if (ret < 0) {
1779 		fprintf(stderr, "avfilter_graph_config() failed\n");
1780 		return B_BAD_VALUE;
1781 	}
1782 
1783 	fBufferSourceContext = avfilter_graph_get_filter(fFilterGraph,
1784 		"Parsed_buffer_0");
1785 	fBufferSinkContext = avfilter_graph_get_filter(fFilterGraph,
1786 		"Parsed_buffersink_2");
1787 	if (fBufferSourceContext == NULL || fBufferSinkContext == NULL) {
1788 		fprintf(stderr, "avfilter_graph_get_filter() failed\n");
1789 		return B_BAD_VALUE;
1790 	}
1791 	fFilterFrame = av_frame_alloc();
1792 	fLastWidth = width;
1793 	fLastHeight = height;
1794 	fLastPixfmt = pixfmt;
1795 
1796 	return B_OK;
1797 }
1798 
1799 
/*! \brief Process an AVFrame with the deinterlace filter graph.

	Deinterlaces exactly one video frame from src into dst. The filter
	graph is (re)built on demand whenever the picture geometry or pixel
	format changes. Replacement for avpicture_deinterlace(), which was
	removed after FFmpeg version 2.x.

	\param dst Destination frame; its data planes must already be allocated
		large enough for a width x height picture in pixfmt.
	\param src Source frame holding the interlaced picture.
	\param pixfmt Pixel format of both src and dst.
	\param width Picture width in pixels.
	\param height Picture height in pixels.

	\returns B_OK video frame successfully deinterlaced.
	\returns B_BAD_DATA No frame could be output.
	\returns B_NO_MEMORY Not enough memory available for correct operation.
	\returns B_BAD_VALUE the filter graph had to be rebuilt and that failed
		(propagated from _InitFilterGraph()).
*/
status_t
AVCodecDecoder::_ProcessFilterGraph(AVFrame *dst, const AVFrame *src,
	enum AVPixelFormat pixfmt, int32 width, int32 height)
{
	// (Re)build the filter graph when it does not exist yet or when the
	// picture geometry / pixel format changed since the last frame.
	if (fFilterGraph == NULL || width != fLastWidth
		|| height != fLastHeight || pixfmt != fLastPixfmt) {

		status_t err = _InitFilterGraph(pixfmt, width, height);
		if (err != B_OK)
			return err;
	}

	// Shallow copy: only the plane pointers and line sizes are copied into
	// fFilterFrame; the pixel data itself is not duplicated.
	memcpy(fFilterFrame->data, src->data, sizeof(src->data));
	memcpy(fFilterFrame->linesize, src->linesize, sizeof(src->linesize));
	fFilterFrame->width = width;
	fFilterFrame->height = height;
	fFilterFrame->format = pixfmt;

	// NOTE(review): any failure here is reported as B_NO_MEMORY, although
	// av_buffersrc_add_frame() can fail for other reasons as well.
	int ret = av_buffersrc_add_frame(fBufferSourceContext, fFilterFrame);
	if (ret < 0)
		return B_NO_MEMORY;

	// Pull the filtered frame out of the sink, reusing fFilterFrame.
	// Assumes yadif produces (at least) one output frame per input frame
	// in the mode configured by _InitFilterGraph() — a single pull
	// therefore suffices.
	ret = av_buffersink_get_frame(fBufferSinkContext, fFilterFrame);
	if (ret < 0)
		return B_BAD_DATA;

	// Deep-copy the result into the caller-provided destination planes,
	// then drop the reference held by fFilterFrame again.
	av_image_copy(dst->data, dst->linesize, (const uint8**)fFilterFrame->data,
		fFilterFrame->linesize, pixfmt, width, height);
	av_frame_unref(fFilterFrame);
	return B_OK;
}
1840