xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision 82bfaa954dcfd90582fb2c1a0e918971eea57091)
1 /*
2  * Copyright (C) 2001 Carlos Hasan
3  * Copyright (C) 2001 François Revol
4  * Copyright (C) 2001 Axel Dörfler
5  * Copyright (C) 2004 Marcus Overhagen
6  * Copyright (C) 2009 Stephan Amßus <superstippi@gmx.de>
7  * Copyright (C) 2014 Colin Günther <coling@gmx.de>
8  * Copyright (C) 2015 Adrien Destugues <pulkomandy@pulkomandy.tk>
9  *
10  * All rights reserved. Distributed under the terms of the MIT License.
11  */
12 
13 //! libavcodec based decoder for Haiku
14 
15 
16 #include "AVCodecDecoder.h"
17 
18 #include <new>
19 
20 #include <assert.h>
21 #include <string.h>
22 
23 #include <Bitmap.h>
24 #include <Debug.h>
25 #include <String.h>
26 
27 #include "Utilities.h"
28 
29 
30 #undef TRACE
31 //#define TRACE_AV_CODEC
32 #ifdef TRACE_AV_CODEC
33 #	define TRACE(x...)	printf(x)
34 #	define TRACE_AUDIO(x...)	printf(x)
35 #	define TRACE_VIDEO(x...)	printf(x)
36 #else
37 #	define TRACE(x...)
38 #	define TRACE_AUDIO(x...)
39 #	define TRACE_VIDEO(x...)
40 #endif
41 
42 //#define LOG_STREAM_TO_FILE
43 #ifdef LOG_STREAM_TO_FILE
44 #	include <File.h>
45 	static BFile sAudioStreamLogFile(
46 		"/boot/home/Desktop/AVCodecDebugAudioStream.raw",
47 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
48 	static BFile sVideoStreamLogFile(
49 		"/boot/home/Desktop/AVCodecDebugVideoStream.raw",
50 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
51 	static int sDumpedPackets = 0;
52 #endif
53 
54 typedef AVCodecID CodecID;
55 
56 struct wave_format_ex {
57 	uint16 format_tag;
58 	uint16 channels;
59 	uint32 frames_per_sec;
60 	uint32 avg_bytes_per_sec;
61 	uint16 block_align;
62 	uint16 bits_per_sample;
63 	uint16 extra_size;
64 	// extra_data[extra_size]
65 } _PACKED;
66 
67 struct avformat_codec_context {
68 	int sample_rate;
69 	int channels;
70 };
71 
72 
73 // profiling related globals
74 #define DO_PROFILING 0
75 #if DO_PROFILING
76 static bigtime_t decodingTime = 0;
77 static bigtime_t conversionTime = 0;
78 static long profileCounter = 0;
79 #endif
80 
81 
82 AVCodecDecoder::AVCodecDecoder()
83 	:
84 	fHeader(),
85 	fInputFormat(),
86 	fFrame(0),
87 	fIsAudio(false),
88 	fCodec(NULL),
89 	fCodecContext(avcodec_alloc_context3(NULL)),
90 	fResampleContext(NULL),
91 	fDecodedData(NULL),
92 	fDecodedDataSizeInBytes(0),
93 	fPostProcessedDecodedPicture(av_frame_alloc()),
94 	fRawDecodedPicture(av_frame_alloc()),
95 	fRawDecodedAudio(av_frame_alloc()),
96 
97 	fCodecInitDone(false),
98 
99 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
100 	fSwsContext(NULL),
101 #else
102 	fFormatConversionFunc(NULL),
103 #endif
104 
105 	fExtraData(NULL),
106 	fExtraDataSize(0),
107 	fBlockAlign(0),
108 
109 	fOutputColorSpace(B_NO_COLOR_SPACE),
110 	fOutputFrameCount(0),
111 	fOutputFrameRate(1.0),
112 	fOutputFrameSize(0),
113 	fInputFrameSize(0),
114 
115 	fChunkBuffer(NULL),
116 	fChunkBufferSize(0),
117 	fAudioDecodeError(false),
118 
119 	fDecodedDataBuffer(av_frame_alloc()),
120 	fDecodedDataBufferOffset(0),
121 	fDecodedDataBufferSize(0),
122 	fBufferSinkContext(NULL),
123 	fBufferSourceContext(NULL),
124 	fFilterGraph(NULL),
125 	fFilterFrame(NULL)
126 {
127 	TRACE("AVCodecDecoder::AVCodecDecoder()\n");
128 
129 	system_info info;
130 	get_system_info(&info);
131 
132 	fCodecContext->err_recognition = AV_EF_CAREFUL;
133 	fCodecContext->error_concealment = 3;
134 	fCodecContext->thread_count = info.cpu_count;
135 }
136 
137 
138 AVCodecDecoder::~AVCodecDecoder()
139 {
140 	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));
141 
142 #if DO_PROFILING
143 	if (profileCounter > 0) {
144 		printf("[%c] profile: d1 = %lld, d2 = %lld (%lld)\n",
145 			fIsAudio?('a'):('v'), decodingTime / profileCounter,
146 			conversionTime / profileCounter, fFrame);
147 	}
148 #endif
149 
150 	swr_free(&fResampleContext);
151 	free(fChunkBuffer);
152 	free(fDecodedData);
153 
154 	av_frame_free(&fPostProcessedDecodedPicture);
155 	av_frame_free(&fRawDecodedPicture);
156 	av_free(fRawDecodedAudio->opaque);
157 	av_frame_free(&fRawDecodedAudio);
158 	fCodecContext->extradata = NULL;
159 	avcodec_free_context(&fCodecContext);
160 	av_frame_free(&fDecodedDataBuffer);
161 
162 	av_frame_free(&fFilterFrame);
163 	avfilter_graph_free(&fFilterGraph);
164 
165 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
166 	if (fSwsContext != NULL)
167 		sws_freeContext(fSwsContext);
168 #endif
169 
170 	delete[] fExtraData;
171 }
172 
173 
174 void
175 AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
176 {
177 	snprintf(mci->short_name, 32, "%s", fCodec->name);
178 	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
179 	mci->id = 0;
180 	mci->sub_id = fCodec->id;
181 }
182 
183 
184 status_t
185 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
186 	size_t infoSize)
187 {
188 	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
189 		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
190 		return B_ERROR;
191 
192 	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
193 	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));
194 
195 #ifdef TRACE_AV_CODEC
196 	char buffer[1024];
197 	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
198 	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
199 	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
200 	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
201 		ioEncodedFormat->user_data_type);
202 	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
203 		ioEncodedFormat->MetaDataSize());
204 #endif
205 
206 	media_format_description description;
207 	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
208 			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
209 		if (description.u.misc.file_format != 'ffmp')
210 			return B_NOT_SUPPORTED;
211 		fCodec = avcodec_find_decoder(static_cast<CodecID>(
212 			description.u.misc.codec));
213 		if (fCodec == NULL) {
214 			TRACE("  unable to find the correct FFmpeg "
215 				"decoder (id = %lu)\n", description.u.misc.codec);
216 			return B_ERROR;
217 		}
218 		TRACE("  found decoder %s\n", fCodec->name);
219 
220 		const void* extraData = infoBuffer;
221 		fExtraDataSize = infoSize;
222 		if (description.family == B_WAV_FORMAT_FAMILY
223 				&& infoSize >= sizeof(wave_format_ex)) {
224 			TRACE("  trying to use wave_format_ex\n");
225 			// Special case extra data in B_WAV_FORMAT_FAMILY
226 			const wave_format_ex* waveFormatData
227 				= (const wave_format_ex*)infoBuffer;
228 
229 			size_t waveFormatSize = infoSize;
230 			if (waveFormatData != NULL && waveFormatSize > 0) {
231 				fBlockAlign = waveFormatData->block_align;
232 				TRACE("  found block align: %d\n", fBlockAlign);
233 				fExtraDataSize = waveFormatData->extra_size;
234 				// skip the wave_format_ex from the extra data.
235 				extraData = waveFormatData + 1;
236 			}
237 		} else {
238 			if (fIsAudio) {
239 				fBlockAlign
240 					= ioEncodedFormat->u.encoded_audio.output.buffer_size;
241 				TRACE("  using buffer_size as block align: %d\n",
242 					fBlockAlign);
243 			}
244 		}
245 		if (extraData != NULL && fExtraDataSize > 0) {
246 			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
247 			delete[] fExtraData;
248 			fExtraData = new(std::nothrow) char[fExtraDataSize];
249 			if (fExtraData != NULL)
250 				memcpy(fExtraData, infoBuffer, fExtraDataSize);
251 			else
252 				fExtraDataSize = 0;
253 		}
254 
255 		fInputFormat = *ioEncodedFormat;
256 		return B_OK;
257 	} else {
258 		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
259 	}
260 
261 	printf("AVCodecDecoder::Setup failed!\n");
262 	return B_ERROR;
263 }
264 
265 
266 status_t
267 AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
268 {
269 	status_t ret = B_OK;
270 	// Reset the FFmpeg codec to flush buffers, so we keep the sync
271 	if (fCodecInitDone) {
272 		avcodec_flush_buffers(fCodecContext);
273 		_ResetTempPacket();
274 	}
275 
276 	// Flush internal buffers as well.
277 	free(fChunkBuffer);
278 	fChunkBuffer = NULL;
279 	fChunkBufferSize = 0;
280 	fDecodedDataBufferOffset = 0;
281 	fDecodedDataBufferSize = 0;
282 	fDecodedDataSizeInBytes = 0;
283 
284 	fFrame = frame;
285 
286 	return ret;
287 }
288 
289 
290 status_t
291 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
292 {
293 	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
294 		fIsAudio?('a'):('v'));
295 
296 #ifdef TRACE_AV_CODEC
297 	char buffer[1024];
298 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
299 	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
300 #endif
301 
302 	// close any previous instance
303 	fCodecContext->extradata = NULL;
304 	avcodec_free_context(&fCodecContext);
305 	fCodecContext = avcodec_alloc_context3(fCodec);
306 	fCodecInitDone = false;
307 
308 	if (fIsAudio)
309 		return _NegotiateAudioOutputFormat(inOutFormat);
310 	else
311 		return _NegotiateVideoOutputFormat(inOutFormat);
312 }
313 
314 
315 status_t
316 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
317 	media_header* mediaHeader, media_decode_info* info)
318 {
319 	if (!fCodecInitDone)
320 		return B_NO_INIT;
321 
322 	status_t ret;
323 	if (fIsAudio)
324 		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
325 	else
326 		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);
327 
328 	return ret;
329 }
330 
331 
332 // #pragma mark -
333 
334 
335 void
336 AVCodecDecoder::_ResetTempPacket()
337 {
338 	av_init_packet(&fTempPacket);
339 	fTempPacket.size = 0;
340 	fTempPacket.data = NULL;
341 }
342 
343 
344 status_t
345 AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
346 {
347 	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");
348 
349 	_ApplyEssentialAudioContainerPropertiesToContext();
350 		// This makes audio formats play that encode the audio properties in
351 		// the audio container (e.g. WMA) and not in the audio frames
352 		// themself (e.g. MP3).
353 		// Note: Doing this step unconditionally is OK, because the first call
354 		// to _DecodeNextAudioFrameChunk() will update the essential audio
355 		// format properties accordingly regardless of the settings here.
356 
357 	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
358 		TRACE("avcodec_open() failed to init codec!\n");
359 		return B_ERROR;
360 	}
361 	fCodecInitDone = true;
362 
363 	free(fChunkBuffer);
364 	fChunkBuffer = NULL;
365 	fChunkBufferSize = 0;
366 	fAudioDecodeError = false;
367 	fDecodedDataBufferOffset = 0;
368 	fDecodedDataBufferSize = 0;
369 
370 	_ResetTempPacket();
371 
372 	status_t statusOfDecodingFirstFrameChunk = _DecodeNextAudioFrameChunk();
373 	if (statusOfDecodingFirstFrameChunk != B_OK) {
374 		TRACE("[a] decoding first audio frame chunk failed\n");
375 		return B_ERROR;
376 	}
377 
378 	media_multi_audio_format outputAudioFormat;
379 	outputAudioFormat = media_raw_audio_format::wildcard;
380 	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
381 	outputAudioFormat.frame_rate = fCodecContext->sample_rate;
382 	outputAudioFormat.channel_count = fCodecContext->channels;
383 	ConvertAVSampleFormatToRawAudioFormat(fCodecContext->sample_fmt,
384 		outputAudioFormat.format);
385 	// Check that format is not still a wild card!
386 	if (outputAudioFormat.format == 0) {
387 		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
388 		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
389 	}
390 	outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size;
391 	// Check that buffer_size has a sane value
392 	size_t sampleSize = outputAudioFormat.format
393 		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
394 	if (outputAudioFormat.buffer_size == 0) {
395 		outputAudioFormat.buffer_size = 512 * sampleSize
396 			* outputAudioFormat.channel_count;
397 	}
398 
399 	inOutFormat->type = B_MEDIA_RAW_AUDIO;
400 	inOutFormat->u.raw_audio = outputAudioFormat;
401 	inOutFormat->require_flags = 0;
402 	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
403 
404 	// Initialize variables needed to manage decoding as much audio frames as
405 	// needed to fill the buffer_size.
406 	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
407 	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
408 	fOutputFrameRate = outputAudioFormat.frame_rate;
409 	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt))
410 		fInputFrameSize = sampleSize;
411 	else
412 		fInputFrameSize = fOutputFrameSize;
413 
414 	fRawDecodedAudio->opaque
415 		= av_realloc(fRawDecodedAudio->opaque, sizeof(avformat_codec_context));
416 	if (fRawDecodedAudio->opaque == NULL)
417 		return B_NO_MEMORY;
418 
419 	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
420 		fResampleContext = swr_alloc_set_opts(NULL,
421 			fCodecContext->channel_layout,
422 			fCodecContext->request_sample_fmt,
423 			fCodecContext->sample_rate,
424 			fCodecContext->channel_layout,
425 			fCodecContext->sample_fmt,
426 			fCodecContext->sample_rate,
427 			0, NULL);
428 		swr_init(fResampleContext);
429 	}
430 
431 	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, "
432 		"output frame size: %d, count: %ld, rate: %.2f\n",
433 		fCodecContext->bit_rate, fCodecContext->sample_rate, fCodecContext->channels,
434 		fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);
435 
436 	return B_OK;
437 }
438 
439 
440 status_t
441 AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
442 {
443 	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");
444 
445 	TRACE("  requested video format 0x%x\n",
446 		inOutFormat->u.raw_video.display.format);
447 
448 	_ApplyEssentialVideoContainerPropertiesToContext();
449 		// This makes video formats play that encode the video properties in
450 		// the video container (e.g. WMV) and not in the video frames
451 		// themself (e.g. MPEG2).
452 		// Note: Doing this step unconditionally is OK, because the first call
453 		// to _DecodeNextVideoFrame() will update the essential video format
454 		// properties accordingly regardless of the settings here.
455 
456 	bool codecCanHandleIncompleteFrames
457 		= (fCodec->capabilities & AV_CODEC_CAP_TRUNCATED) != 0;
458 	if (codecCanHandleIncompleteFrames) {
459 		// Expect and handle video frames to be splitted across consecutive
460 		// data chunks.
461 		fCodecContext->flags |= AV_CODEC_FLAG_TRUNCATED;
462 	}
463 
464 	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
465 		TRACE("avcodec_open() failed to init codec!\n");
466 		return B_ERROR;
467 	}
468 	fCodecInitDone = true;
469 
470 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
471 	fOutputColorSpace = B_RGB32;
472 #else
473 	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
474 	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
475 	// default colordepth is RGB32).
476 	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
477 		fOutputColorSpace = B_YCbCr422;
478 	else
479 		fOutputColorSpace = B_RGB32;
480 #endif
481 
482 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
483 	if (fSwsContext != NULL)
484 		sws_freeContext(fSwsContext);
485 	fSwsContext = NULL;
486 #else
487 	fFormatConversionFunc = 0;
488 #endif
489 
490 	free(fChunkBuffer);
491 	fChunkBuffer = NULL;
492 	fChunkBufferSize = 0;
493 
494 	_ResetTempPacket();
495 
496 	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
497 	if (statusOfDecodingFirstFrame != B_OK) {
498 		TRACE("[v] decoding first video frame failed\n");
499 		return B_ERROR;
500 	}
501 
502 	// Note: fSwsContext / fFormatConversionFunc should have been initialized
503 	// by first call to _DecodeNextVideoFrame() above.
504 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
505 	if (fSwsContext == NULL) {
506 		TRACE("No SWS Scale context or decoder has not set the pixel format "
507 			"yet!\n");
508 	}
509 #else
510 	if (fFormatConversionFunc == NULL) {
511 		TRACE("no pixel format conversion function found or decoder has "
512 			"not set the pixel format yet!\n");
513 	}
514 #endif
515 
516 	inOutFormat->type = B_MEDIA_RAW_VIDEO;
517 	inOutFormat->require_flags = 0;
518 	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
519 	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;
520 	inOutFormat->u.raw_video.interlace = 1;
521 		// Progressive (non-interlaced) video frames are delivered
522 	inOutFormat->u.raw_video.first_active
523 		= fHeader.u.raw_video.first_active_line;
524 	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
525 	inOutFormat->u.raw_video.pixel_width_aspect
526 		= fHeader.u.raw_video.pixel_width_aspect;
527 	inOutFormat->u.raw_video.pixel_height_aspect
528 		= fHeader.u.raw_video.pixel_height_aspect;
529 #if 0
530 	// This was added by Colin Günther in order to handle streams with a
531 	// variable frame rate. fOutputFrameRate is computed from the stream
532 	// time_base, but it actually assumes a timebase equal to the FPS. As far
533 	// as I can see, a stream with a variable frame rate would have a higher
534 	// resolution time_base and increment the pts (presentation time) of each
535 	// frame by a value bigger than one.
536 	//
537 	// Fixed rate stream:
538 	// time_base = 1/50s, frame PTS = 1, 2, 3... (for 50Hz)
539 	//
540 	// Variable rate stream:
541 	// time_base = 1/300s, frame PTS = 6, 12, 18, ... (for 50Hz)
542 	// time_base = 1/300s, frame PTS = 5, 10, 15, ... (for 60Hz)
543 	//
544 	// The fOutputFrameRate currently does not take this into account and
545 	// ignores the PTS. This results in playing the above sample at 300Hz
546 	// instead of 50 or 60.
547 	//
548 	// However, comparing the PTS for two consecutive implies we have already
549 	// decoded 2 frames, which may not be the case when this method is first
550 	// called.
551 	inOutFormat->u.raw_video.field_rate = fOutputFrameRate;
552 		// Was calculated by first call to _DecodeNextVideoFrame()
553 #endif
554 	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
555 	inOutFormat->u.raw_video.display.line_width
556 		= fHeader.u.raw_video.display_line_width;
557 	inOutFormat->u.raw_video.display.line_count
558 		= fHeader.u.raw_video.display_line_count;
559 	inOutFormat->u.raw_video.display.bytes_per_row
560 		= fHeader.u.raw_video.bytes_per_row;
561 
562 #ifdef TRACE_AV_CODEC
563 	char buffer[1024];
564 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
565 	TRACE("[v]  outFormat = %s\n", buffer);
566 	TRACE("  returned  video format 0x%x\n",
567 		inOutFormat->u.raw_video.display.format);
568 #endif
569 
570 	return B_OK;
571 }
572 
573 
574 /*! \brief Fills the outBuffer with one or more already decoded audio frames.
575 
576 	Besides the main duty described above, this method also fills out the other
577 	output parameters as documented below.
578 
579 	\param outBuffer Pointer to the output buffer to copy the decoded audio
580 		frames to.
581 	\param outFrameCount Pointer to the output variable to assign the number of
582 		copied audio frames (usually several audio frames at once).
583 	\param mediaHeader Pointer to the output media header that contains the
584 		properties of the decoded audio frame being the first in the outBuffer.
585 	\param info Specifies additional decoding parameters. (Note: unused).
586 
587 	\returns B_OK Decoding audio frames succeeded.
588 	\returns B_LAST_BUFFER_ERROR There are no more audio frames available.
589 	\returns Other error codes
590 */
591 status_t
592 AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
593 	media_header* mediaHeader, media_decode_info* info)
594 {
595 	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
596 		mediaHeader->start_time / 1000000.0);
597 
598 	status_t audioDecodingStatus
599 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextAudioFrame();
600 
601 	if (audioDecodingStatus != B_OK)
602 		return audioDecodingStatus;
603 
604 	*outFrameCount = fDecodedDataSizeInBytes / fOutputFrameSize;
605 	*mediaHeader = fHeader;
606 	memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes);
607 
608 	fDecodedDataSizeInBytes = 0;
609 
610 	return B_OK;
611 }
612 
613 
614 /*! \brief Fills the outBuffer with an already decoded video frame.
615 
616 	Besides the main duty described above, this method also fills out the other
617 	output parameters as documented below.
618 
619 	\param outBuffer Pointer to the output buffer to copy the decoded video
620 		frame to.
621 	\param outFrameCount Pointer to the output variable to assign the number of
622 		copied video frames (usually one video frame).
623 	\param mediaHeader Pointer to the output media header that contains the
624 		decoded video frame properties.
625 	\param info Specifies additional decoding parameters. (Note: unused).
626 
627 	\returns B_OK Decoding a video frame succeeded.
628 	\returns B_LAST_BUFFER_ERROR There are no more video frames available.
629 	\returns Other error codes
630 */
631 status_t
632 AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
633 	media_header* mediaHeader, media_decode_info* info)
634 {
635 	status_t videoDecodingStatus
636 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame();
637 
638 	if (videoDecodingStatus != B_OK)
639 		return videoDecodingStatus;
640 
641 	*outFrameCount = 1;
642 	*mediaHeader = fHeader;
643 	memcpy(outBuffer, fDecodedData, mediaHeader->size_used);
644 
645 	fDecodedDataSizeInBytes = 0;
646 
647 	return B_OK;
648 }
649 
650 
651 /*!	\brief Decodes next audio frame.
652 
653 	We decode at least one audio frame into fDecodedData. To achieve this goal,
654     we might need to request several chunks of encoded data resulting in a
655     variable execution time of this function.
656 
657     The length of the decoded audio frame(s) is stored in
658     fDecodedDataSizeInBytes. If this variable is greater than zero you can
659     assert that all audio frames in fDecodedData are valid.
660 
661 	It is assumed that the number of expected audio frames is stored in
662 	fOutputFrameCount. So _DecodeNextAudioFrame() must be called only after
663 	fOutputFrameCount has been set.
664 
665 	Note: fOutputFrameCount contains the maximum number of frames a caller
666 	of BMediaDecoder::Decode() expects to receive. There is a direct
667 	relationship between fOutputFrameCount and the buffer size a caller of
668 	BMediaDecoder::Decode() will provide so we make sure to respect this limit
669 	for fDecodedDataSizeInBytes.
670 
671 	On return with status code B_OK the following conditions hold true:
672 		1. fDecodedData contains as much audio frames as the caller of
673 		   BMediaDecoder::Decode() expects.
674 		2. fDecodedData contains lesser audio frames as the caller of
675 		   BMediaDecoder::Decode() expects only when one of the following
676 		   conditions hold true:
677 		       i  No more audio frames left. Consecutive calls to
678 		          _DecodeNextAudioFrame() will then result in the return of
679 		          status code B_LAST_BUFFER_ERROR.
680 		       ii TODO: A change in the size of the audio frames.
681 		3. fHeader is populated with the audio frame properties of the first
682 		   audio frame in fDecodedData. Especially the start_time field of
683 		   fHeader relates to that first audio frame. Start times of
684 		   consecutive audio frames in fDecodedData have to be calculated
685 		   manually (using the frame rate and the frame duration) if the
686 		   caller needs them.
687 
688 	TODO: Handle change of channel_count. Such a change results in a change of
689 	the audio frame size and thus has different buffer requirements.
690 	The most sane approach for implementing this is to return the audio frames
691 	that were still decoded with the previous channel_count and inform the
692 	client of BMediaDecoder::Decode() about the change so that it can adapt to
693 	it. Furthermore we need to adapt our fDecodedData to the new buffer size
694 	requirements accordingly.
695 
696 	\returns B_OK when we successfully decoded enough audio frames
697 	\returns B_LAST_BUFFER_ERROR when there are no more audio frames available.
698 	\returns Other Errors
699 */
700 status_t
701 AVCodecDecoder::_DecodeNextAudioFrame()
702 {
703 	assert(fTempPacket.size >= 0);
704 	assert(fDecodedDataSizeInBytes == 0);
705 		// _DecodeNextAudioFrame needs to be called on empty fDecodedData only!
706 		// If this assert holds wrong we have a bug somewhere.
707 
708 	status_t resetStatus = _ResetRawDecodedAudio();
709 	if (resetStatus != B_OK)
710 		return resetStatus;
711 
712 	while (fRawDecodedAudio->nb_samples < fOutputFrameCount) {
713 		_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow();
714 
715 		bool decodedDataBufferHasData = fDecodedDataBufferSize > 0;
716 		if (decodedDataBufferHasData) {
717 			_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes();
718 			continue;
719 		}
720 
721 		status_t decodeAudioChunkStatus = _DecodeNextAudioFrameChunk();
722 		if (decodeAudioChunkStatus == B_LAST_BUFFER_ERROR
723 				&& fRawDecodedAudio->nb_samples > 0)
724 			break;
725 		if (decodeAudioChunkStatus != B_OK)
726 			return decodeAudioChunkStatus;
727 	}
728 
729 	fFrame += fRawDecodedAudio->nb_samples;
730 	fDecodedDataSizeInBytes = fRawDecodedAudio->linesize[0];
731 
732 	_UpdateMediaHeaderForAudioFrame();
733 
734 #ifdef DEBUG
735 	dump_ffframe_audio(fRawDecodedAudio, "ffaudi");
736 #endif
737 
738 	TRACE_AUDIO("  frame count: %ld current: %lld\n",
739 		fRawDecodedAudio->nb_samples, fFrame);
740 
741 	return B_OK;
742 }
743 
744 
745 /*!	\brief Applies all essential audio input properties to fCodecContext that were
746 		passed to AVCodecDecoder when Setup() was called.
747 
748 	Note: This function must be called before the AVCodec is opened via
749 	avcodec_open2(). Otherwise the behaviour of FFMPEG's audio decoding
750 	function avcodec_receive_frame() is undefined.
751 
752 	Essential properties applied from fInputFormat.u.encoded_audio:
753 		- bit_rate copied to fCodecContext->bit_rate
754 		- frame_size copied to fCodecContext->frame_size
755 		- output.format converted to fCodecContext->sample_fmt
756 		- output.frame_rate copied to fCodecContext->sample_rate
757 		- output.channel_count copied to fCodecContext->channels
758 
759 	Other essential properties being applied:
760 		- fBlockAlign to fCodecContext->block_align
761 		- fExtraData to fCodecContext->extradata
762 		- fExtraDataSize to fCodecContext->extradata_size
763 
764 	TODO: Either the following documentation section should be removed or this
765 	TODO when it is clear whether fInputFormat.MetaData() and
766 	fInputFormat.MetaDataSize() have to be applied to fCodecContext. See the related
767 	TODO in the method implementation.
768 	Only applied when fInputFormat.MetaDataSize() is greater than zero:
769 		- fInputFormat.MetaData() to fCodecContext->extradata
770 		- fInputFormat.MetaDataSize() to fCodecContext->extradata_size
771 */
772 void
773 AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
774 {
775 	media_encoded_audio_format containerProperties
776 		= fInputFormat.u.encoded_audio;
777 
778 	fCodecContext->bit_rate
779 		= static_cast<int>(containerProperties.bit_rate);
780 	fCodecContext->frame_size
781 		= static_cast<int>(containerProperties.frame_size);
782 	ConvertRawAudioFormatToAVSampleFormat(
783 		containerProperties.output.format, fCodecContext->sample_fmt);
784 	ConvertRawAudioFormatToAVSampleFormat(
785 		containerProperties.output.format, fCodecContext->request_sample_fmt);
786 	fCodecContext->sample_rate
787 		= static_cast<int>(containerProperties.output.frame_rate);
788 	fCodecContext->channels
789 		= static_cast<int>(containerProperties.output.channel_count);
790 	// Check that channel count is not still a wild card!
791 	if (fCodecContext->channels == 0) {
792 		TRACE("  channel_count still a wild-card, assuming stereo.\n");
793 		fCodecContext->channels = 2;
794 	}
795 
796 	fCodecContext->block_align = fBlockAlign;
797 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
798 	fCodecContext->extradata_size = fExtraDataSize;
799 
800 	// TODO: This probably needs to go away, there is some misconception
801 	// about extra data / info buffer and meta data. See
802 	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
803 	// extradata_size into media_format::MetaData(), but used to ignore
804 	// the infoBuffer passed to GetStreamInfo(). I think this may be why
805 	// the code below was added.
806 	if (fInputFormat.MetaDataSize() > 0) {
807 		fCodecContext->extradata = static_cast<uint8_t*>(
808 			const_cast<void*>(fInputFormat.MetaData()));
809 		fCodecContext->extradata_size = fInputFormat.MetaDataSize();
810 	}
811 
812 	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
813 		"extradata_size %d\n",
814 		fCodecContext->bit_rate,
815 		fCodecContext->sample_rate,
816 		fCodecContext->channels,
817 		fCodecContext->block_align,
818 		fCodecContext->extradata_size);
819 }
820 
821 
822 /*!	\brief Resets important fields in fRawDecodedVideo to their default values.
823 
824 	Note: Also initializes fDecodedData if not done already.
825 
826 	\returns B_OK Resetting successfully completed.
827 	\returns B_NO_MEMORY No memory left for correct operation.
828 */
829 status_t
830 AVCodecDecoder::_ResetRawDecodedAudio()
831 {
832 	if (fDecodedData == NULL) {
833 		size_t maximumSizeOfDecodedData = fOutputFrameCount * fOutputFrameSize;
834 		fDecodedData
835 			= static_cast<uint8_t*>(malloc(maximumSizeOfDecodedData));
836 	}
837 	if (fDecodedData == NULL)
838 		return B_NO_MEMORY;
839 
840 	fRawDecodedAudio->data[0] = fDecodedData;
841 	fRawDecodedAudio->linesize[0] = 0;
842 	fRawDecodedAudio->format = AV_SAMPLE_FMT_NONE;
843 	fRawDecodedAudio->pkt_dts = AV_NOPTS_VALUE;
844 	fRawDecodedAudio->nb_samples = 0;
845 	memset(fRawDecodedAudio->opaque, 0, sizeof(avformat_codec_context));
846 
847 	return B_OK;
848 }
849 
850 
851 /*!	\brief Checks fDecodedDataBufferSize and fTempPacket for invalid values,
852 		reports them and assigns valid values.
853 
854 	Note: This method is intended to be called before any code is executed that
855 	deals with moving, loading or decoding any audio frames.
856 */
857 void
858 AVCodecDecoder::_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow()
859 {
860 	if (fDecodedDataBufferSize < 0) {
861 		fprintf(stderr, "Decoding read past the end of the decoded data "
862 			"buffer! %" B_PRId32 "\n", fDecodedDataBufferSize);
863 		fDecodedDataBufferSize = 0;
864 	}
865 	if (fTempPacket.size < 0) {
866 		fprintf(stderr, "Decoding read past the end of the temp packet! %d\n",
867 			fTempPacket.size);
868 		fTempPacket.size = 0;
869 	}
870 }
871 
872 
873 /*!	\brief Moves audio frames from fDecodedDataBuffer to fRawDecodedAudio (and
874 		thus to fDecodedData) and updates the start times of fRawDecodedAudio,
875 		fDecodedDataBuffer and fTempPacket accordingly.
876 
877 	When moving audio frames to fRawDecodedAudio this method also makes sure
878 	that the following important fields of fRawDecodedAudio are populated and
879 	updated with correct values:
880 		- fRawDecodedAudio->data[0]: Points to first free byte of fDecodedData
881 		- fRawDecodedAudio->linesize[0]: Total size of frames in fDecodedData
882 		- fRawDecodedAudio->format: Format of first audio frame
883 		- fRawDecodedAudio->pkt_dts: Start time of first audio frame
884 		- fRawDecodedAudio->nb_samples: Number of audio frames
885 		- fRawDecodedAudio->opaque: Contains the following fields for the first
886 		  audio frame:
887 		      - channels: Channel count of first audio frame
888 		      - sample_rate: Frame rate of first audio frame
889 
890 	This function assumes to be called only when the following assumptions
891 	hold true:
892 		1. There are decoded audio frames available in fDecodedDataBuffer
893 		   meaning that fDecodedDataBufferSize is greater than zero.
894 		2. There is space left in fRawDecodedAudio to move some audio frames
895 		   in. This means that fRawDecodedAudio has lesser audio frames than
896 		   the maximum allowed (specified by fOutputFrameCount).
897 		3. The audio frame rate is known so that we can calculate the time
898 		   range (covered by the moved audio frames) to update the start times
899 		   accordingly.
900 		4. The field fRawDecodedAudio->opaque points to a memory block
901 		   representing a structure of type avformat_codec_context.
902 
903 	After this function returns the caller can safely make the following
904 	assumptions:
905 		1. The number of decoded audio frames in fDecodedDataBuffer is
		   decreased though it may still be greater than zero.
907 		2. The number of frames in fRawDecodedAudio has increased and all
908 		   important fields are updated (see listing above).
909 		3. Start times of fDecodedDataBuffer and fTempPacket were increased
910 		   with the time range covered by the moved audio frames.
911 
912 	Note: This function raises an exception (by calling the debugger), when
913 	fDecodedDataBufferSize is not a multiple of fOutputFrameSize.
914 */
void
AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
{
	assert(fDecodedDataBufferSize > 0);
	assert(fRawDecodedAudio->nb_samples < fOutputFrameCount);
	assert(fOutputFrameRate > 0);

	// Free frame slots left in fRawDecodedAudio vs. frames waiting in
	// fDecodedDataBuffer; we can only move the smaller of the two.
	int32 outFrames = fOutputFrameCount - fRawDecodedAudio->nb_samples;
	int32 inFrames = fDecodedDataBufferSize;

	int32 frames = min_c(outFrames, inFrames);
	if (frames == 0)
		debugger("fDecodedDataBufferSize not multiple of frame size!");

	// Some decoders do not support format conversion on themselves, or use
	// "planar" audio (each channel separated instead of interleaved samples).
	// In that case, we use swresample to convert the data
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
#if 0
		const uint8_t* ptr[8];
		for (int i = 0; i < 8; i++) {
			if (fDecodedDataBuffer->data[i] == NULL)
				ptr[i] = NULL;
			else
				ptr[i] = fDecodedDataBuffer->data[i] + fDecodedDataBufferOffset;
		}

		// When there are more input frames than space in the output buffer,
		// we could feed everything to swr and it would buffer the extra data.
		// However, there is no easy way to flush that data without feeding more
		// input, and it makes our timestamp computations fail.
		// So, we feed only as much frames as we can get out, and handle the
		// buffering ourselves.
		// TODO Ideally, we should try to size our output buffer so that it can
		// always hold all the output (swr provides helper functions for this)
		inFrames = frames;
		frames = swr_convert(fResampleContext, fRawDecodedAudio->data,
			outFrames, ptr, inFrames);

		if (frames < 0)
			debugger("resampling failed");
#else
		// interleave planar audio with same format
		// NOTE(review): the per-frame advance of both "out" and "offset" is
		// fInputFrameSize, so this assumes fInputFrameSize is the size of one
		// sample of a single channel when the source format is planar — TODO
		// confirm against where fInputFrameSize is computed.
		uintptr_t out = (uintptr_t)fRawDecodedAudio->data[0];
		int32 offset = fDecodedDataBufferOffset;
		for (int i = 0; i < frames; i++) {
			for (int j = 0; j < fCodecContext->channels; j++) {
				memcpy((void*)out, fDecodedDataBuffer->data[j]
					+ offset, fInputFrameSize);
				out += fInputFrameSize;
			}
			offset += fInputFrameSize;
		}
		outFrames = frames;
		inFrames = frames;
#endif
	} else {
		// Interleaved source: one contiguous copy moves all channels at once.
		memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
				+ fDecodedDataBufferOffset, frames * fOutputFrameSize);
		outFrames = frames;
		inFrames = frames;
	}

	// Bytes consumed from fDecodedDataBuffer and bytes produced into
	// fDecodedData, respectively.
	size_t remainingSize = inFrames * fInputFrameSize;
	size_t decodedSize = outFrames * fOutputFrameSize;
	fDecodedDataBufferSize -= inFrames;

	// While fRawDecodedAudio->data[0] still points at the very start of
	// fDecodedData, no frames have been moved yet, so record the properties
	// of the first moved audio frame now (see the doxygen comment above).
	bool firstAudioFramesCopiedToRawDecodedAudio
		= fRawDecodedAudio->data[0] != fDecodedData;
	if (!firstAudioFramesCopiedToRawDecodedAudio) {
		fRawDecodedAudio->format = fDecodedDataBuffer->format;
		fRawDecodedAudio->pkt_dts = fDecodedDataBuffer->pkt_dts;

		avformat_codec_context* codecContext
			= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
		codecContext->channels = fCodecContext->channels;
		codecContext->sample_rate = fCodecContext->sample_rate;
	}

	// Advance the write cursor and the bookkeeping of fRawDecodedAudio.
	fRawDecodedAudio->data[0] += decodedSize;
	fRawDecodedAudio->linesize[0] += decodedSize;
	fRawDecodedAudio->nb_samples += outFrames;

	// Advance the read cursor within fDecodedDataBuffer.
	fDecodedDataBufferOffset += remainingSize;

	// Update start times accordingly
	bigtime_t framesTimeInterval = static_cast<bigtime_t>(
		(1000000LL * frames) / fOutputFrameRate);
	fDecodedDataBuffer->pkt_dts += framesTimeInterval;
	// Start time of buffer is updated in case that it contains
	// more audio frames to move.
	fTempPacket.dts += framesTimeInterval;
	// Start time of fTempPacket is updated in case the fTempPacket
	// contains more audio frames to decode.
}
1010 
1011 
1012 /*!	\brief Decodes next chunk of audio frames.
1013 
1014 	This method handles all the details of loading the input buffer
	(fChunkBuffer) at the right time and of calling FFMPEG often enough until
1016 	some audio frames have been decoded.
1017 
1018 	FFMPEG decides how much audio frames belong to a chunk. Because of that
1019 	it is very likely that _DecodeNextAudioFrameChunk has to be called several
1020 	times to decode enough audio frames to please the caller of
1021 	BMediaDecoder::Decode().
1022 
1023 	This function assumes to be called only when the following assumptions
1024 	hold true:
1025 		1. fDecodedDataBufferSize equals zero.
1026 
1027 	After this function returns successfully the caller can safely make the
1028 	following assumptions:
1029 		1. fDecodedDataBufferSize is greater than zero.
1030 		2. fDecodedDataBufferOffset is set to zero.
1031 		3. fDecodedDataBuffer contains audio frames.
1032 
1033 
1034 	\returns B_OK on successfully decoding one audio frame chunk.
1035 	\returns B_LAST_BUFFER_ERROR No more audio frame chunks available. From
1036 		this point on further calls will return this same error.
1037 	\returns B_ERROR Decoding failed
1038 */
1039 status_t
1040 AVCodecDecoder::_DecodeNextAudioFrameChunk()
1041 {
1042 	assert(fDecodedDataBufferSize == 0);
1043 
1044 	while (fDecodedDataBufferSize == 0) {
1045 		status_t loadingChunkStatus
1046 			= _LoadNextChunkIfNeededAndAssignStartTime();
1047 		if (loadingChunkStatus != B_OK)
1048 			return loadingChunkStatus;
1049 
1050 		status_t decodingStatus
1051 			= _DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer();
1052 		if (decodingStatus != B_OK) {
1053 			// Assume the audio decoded until now is broken so replace it with
1054 			// some silence.
1055 			memset(fDecodedData, 0, fRawDecodedAudio->linesize[0]);
1056 
1057 			if (!fAudioDecodeError) {
1058 				// Report failure if not done already
1059 				int32 chunkBufferOffset = fTempPacket.data - fChunkBuffer;
1060 				printf("########### audio decode error, "
1061 					"fTempPacket.size %d, fChunkBuffer data offset %" B_PRId32
1062 					"\n", fTempPacket.size, chunkBufferOffset);
1063 				fAudioDecodeError = true;
1064 			}
1065 
1066 			// Assume that next audio chunk can be decoded so keep decoding.
1067 			continue;
1068 		}
1069 
1070 		fAudioDecodeError = false;
1071 	}
1072 
1073 	return B_OK;
1074 }
1075 
1076 
1077 /*!	\brief Tries to decode at least one audio frame and store it in the
1078 		fDecodedDataBuffer.
1079 
1080 	This function assumes to be called only when the following assumptions
1081 	hold true:
1082 		1. fDecodedDataBufferSize equals zero.
1083 		2. fTempPacket.size is greater than zero.
1084 
1085 	After this function returns successfully the caller can safely make the
1086 	following assumptions:
1087 		1. fDecodedDataBufferSize is greater than zero in the common case.
1088 		   Also see "Note" below.
1089 		2. fTempPacket was updated to exclude the data chunk that was consumed
1090 		   by avcodec_send_packet().
1091 		3. fDecodedDataBufferOffset is set to zero.
1092 
1093 	When this function failed to decode at least one audio frame due to a
1094 	decoding error the caller can safely make the following assumptions:
1095 		1. fDecodedDataBufferSize equals zero.
1096 		2. fTempPacket.size equals zero.
1097 
1098 	Note: It is possible that there wasn't any audio frame decoded into
1099 	fDecodedDataBuffer after calling this function. This is normal and can
1100 	happen when there was either a decoding error or there is some decoding
1101 	delay in FFMPEGs audio decoder. Another call to this method is totally
1102 	safe and is even expected as long as the calling assumptions hold true.
1103 
1104 	\returns B_OK Decoding successful. fDecodedDataBuffer contains decoded
1105 		audio frames only when fDecodedDataBufferSize is greater than zero.
1106 		fDecodedDataBuffer is empty, when avcodec_receive_frame() didn't return
1107 		audio frames due to delayed decoding or incomplete audio frames.
1108 	\returns B_ERROR Decoding failed thus fDecodedDataBuffer contains no audio
1109 		frames.
1110 */
1111 status_t
1112 AVCodecDecoder::_DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer()
1113 {
1114 	assert(fDecodedDataBufferSize == 0);
1115 
1116 	av_frame_unref(fDecodedDataBuffer);
1117 	fDecodedDataBufferOffset = 0;
1118 
1119 	int error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
1120 	if (error == AVERROR_EOF)
1121 		return B_LAST_BUFFER_ERROR;
1122 
1123 	if (error == AVERROR(EAGAIN)) {
1124 		// We need to feed more data into the decoder
1125 		avcodec_send_packet(fCodecContext, &fTempPacket);
1126 
1127 		// All the data is always consumed by avcodec_send_packet
1128 		fTempPacket.size = 0;
1129 
1130 		// Try again to see if we can get some decoded audio out now
1131 		error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
1132 	}
1133 
1134 	fDecodedDataBufferSize = fDecodedDataBuffer->nb_samples;
1135 	if (fDecodedDataBufferSize < 0)
1136 		fDecodedDataBufferSize = 0;
1137 
1138 	if (error == 0)
1139 		return B_OK;
1140 	else
1141 		return B_ERROR;
1142 }
1143 
1144 
1145 /*! \brief Updates relevant fields of the class member fHeader with the
1146 		properties of the most recently decoded audio frame.
1147 
1148 	The following fields of fHeader are updated:
1149 		- fHeader.type
1150 		- fHeader.file_pos
1151 		- fHeader.orig_size
1152 		- fHeader.start_time
1153 		- fHeader.size_used
1154 		- fHeader.u.raw_audio.frame_rate
1155 		- fHeader.u.raw_audio.channel_count
1156 
1157 	It is assumed that this function is called only	when the following asserts
1158 	hold true:
1159 		1. We actually got a new audio frame decoded by the audio decoder.
1160 		2. fHeader wasn't updated for the new audio frame yet. You MUST call
1161 		   this method only once per decoded audio frame.
1162 		3. fRawDecodedAudio's fields relate to the first audio frame contained
1163 		   in fDecodedData. Especially the following fields are of importance:
1164 		       - fRawDecodedAudio->pkt_dts: Start time of first audio frame
1165 		       - fRawDecodedAudio->opaque: Contains the following fields for
1166 		         the first audio frame:
1167 			         - channels: Channel count of first audio frame
1168 			         - sample_rate: Frame rate of first audio frame
1169 */
1170 void
1171 AVCodecDecoder::_UpdateMediaHeaderForAudioFrame()
1172 {
1173 	fHeader.type = B_MEDIA_RAW_AUDIO;
1174 	fHeader.file_pos = 0;
1175 	fHeader.orig_size = 0;
1176 	fHeader.start_time = fRawDecodedAudio->pkt_dts;
1177 	fHeader.size_used = fRawDecodedAudio->linesize[0];
1178 
1179 	avformat_codec_context* codecContext
1180 		= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
1181 	fHeader.u.raw_audio.channel_count = codecContext->channels;
1182 	fHeader.u.raw_audio.frame_rate = codecContext->sample_rate;
1183 }
1184 
1185 
1186 /*! \brief Decodes next video frame.
1187 
1188     We decode exactly one video frame into fDecodedData. To achieve this goal,
1189     we might need to request several chunks of encoded data resulting in a
1190     variable execution time of this function.
1191 
1192     The length of the decoded video frame is stored in
1193     fDecodedDataSizeInBytes. If this variable is greater than zero, you can
1194     assert that there is a valid video frame available in fDecodedData.
1195 
1196     The decoded video frame in fDecodedData has color space conversion and
1197     deinterlacing already applied.
1198 
1199     To every decoded video frame there is a media_header populated in
1200     fHeader, containing the corresponding video frame properties.
1201 
1202 	Normally every decoded video frame has a start_time field populated in the
1203 	associated fHeader, that determines the presentation time of the frame.
1204 	This relationship will only hold true, when each data chunk that is
1205 	provided via GetNextChunk() contains data for exactly one encoded video
1206 	frame (one complete frame) - not more and not less.
1207 
1208 	We can decode data chunks that contain partial video frame data, too. In
1209 	that case, you cannot trust the value of the start_time field in fHeader.
1210 	We simply have no logic in place to establish a meaningful relationship
1211 	between an incomplete frame and the start time it should be presented.
1212 	Though this	might change in the future.
1213 
1214 	We can decode data chunks that contain more than one video frame, too. In
1215 	that case, you cannot trust the value of the start_time field in fHeader.
1216 	We simply have no logic in place to track the start_time across multiple
1217 	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
1218 	and the start time it should be presented isn't established at the moment.
1219 	Though this	might change in the future.
1220 
1221 	More over the fOutputFrameRate variable is updated for every decoded video
1222 	frame.
1223 
1224 	On first call the member variables fSwsContext / fFormatConversionFunc	are
1225 	initialized.
1226 
1227 	\returns B_OK when we successfully decoded one video frame
1228 	\returns B_LAST_BUFFER_ERROR when there are no more video frames available.
1229 	\returns B_NO_MEMORY when we have no memory left for correct operation.
1230 	\returns Other Errors
1231 */
1232 status_t
1233 AVCodecDecoder::_DecodeNextVideoFrame()
1234 {
1235 	int error;
1236 	int send_error;
1237 
1238 #if DO_PROFILING
1239 	bigtime_t startTime = system_time();
1240 #endif
1241 
1242 	error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1243 
1244 	if (error == AVERROR_EOF)
1245 		return B_LAST_BUFFER_ERROR;
1246 
1247 	if (error == AVERROR(EAGAIN)) {
1248 		do {
1249 			status_t loadingChunkStatus
1250 				= _LoadNextChunkIfNeededAndAssignStartTime();
1251 			if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
1252 				return _FlushOneVideoFrameFromDecoderBuffer();
1253 			if (loadingChunkStatus != B_OK) {
1254 				TRACE("[v] AVCodecDecoder::_DecodeNextVideoFrame(): error from "
1255 					"GetNextChunk(): %s\n", strerror(loadingChunkStatus));
1256 				return loadingChunkStatus;
1257 			}
1258 
1259 			char timestamp[AV_TS_MAX_STRING_SIZE];
1260 			av_ts_make_time_string(timestamp,
1261 				fTempPacket.dts, &fCodecContext->time_base);
1262 			TRACE("[v] Feed %d more bytes (dts %s)\n", fTempPacket.size,
1263 				timestamp);
1264 
1265 			send_error = avcodec_send_packet(fCodecContext, &fTempPacket);
1266 			if (send_error < 0 && send_error != AVERROR(EAGAIN)) {
1267 				TRACE("[v] AVCodecDecoder: ignoring error in decoding frame "
1268 				"%lld: %d\n", fFrame, error);
1269 			}
1270 
1271 			// Packet is consumed, clear it
1272 			fTempPacket.data = NULL;
1273 			fTempPacket.size = 0;
1274 
1275 			error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1276 			if (error != 0 && error != AVERROR(EAGAIN)) {
1277 				TRACE("[v] frame %lld - decoding error, error code: %d, "
1278 					"chunk size: %ld\n", fFrame, error, fChunkBufferSize);
1279 			}
1280 
1281 		} while (error != 0);
1282 	}
1283 
1284 #if DO_PROFILING
1285 	bigtime_t formatConversionStart = system_time();
1286 #endif
1287 
1288 	status_t handleStatus = _HandleNewVideoFrameAndUpdateSystemState();
1289 	if (handleStatus != B_OK)
1290 		return handleStatus;
1291 
1292 #if DO_PROFILING
1293 	bigtime_t doneTime = system_time();
1294 	decodingTime += formatConversionStart - startTime;
1295 	conversionTime += doneTime - formatConversionStart;
1296 	profileCounter++;
1297 	if (!(fFrame % 5)) {
1298 		printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required %lld\n",
1299 			decodingTime / profileCounter, conversionTime / profileCounter,
1300 			fFrame, bigtime_t(1000000LL / fOutputFrameRate));
1301 		decodingTime = 0;
1302 		conversionTime = 0;
1303 		profileCounter = 0;
1304 	}
1305 #endif
1306 	return error;
1307 }
1308 
1309 
1310 /*!	\brief Applies all essential video input properties to fCodecContext that were
1311 		passed to AVCodecDecoder when Setup() was called.
1312 
1313 	Note: This function must be called before the AVCodec is opened via
1314 	avcodec_open2(). Otherwise the behaviour of FFMPEG's video decoding
1315 	function avcodec_decode_video2() is undefined.
1316 
1317 	Essential properties applied from fInputFormat.u.encoded_video.output:
1318 		- display.line_width copied to fCodecContext->width
1319 		- display.line_count copied to fCodecContext->height
1320 		- pixel_width_aspect and pixel_height_aspect converted to
1321 		  fCodecContext->sample_aspect_ratio
1322 		- field_rate converted to fCodecContext->time_base and
1323 		  fCodecContext->ticks_per_frame
1324 
1325 	Other essential properties being applied:
1326 		- fExtraData to fCodecContext->extradata
1327 		- fExtraDataSize to fCodecContext->extradata_size
1328 */
1329 void
1330 AVCodecDecoder::_ApplyEssentialVideoContainerPropertiesToContext()
1331 {
1332 	media_raw_video_format containerProperties
1333 		= fInputFormat.u.encoded_video.output;
1334 
1335 	fCodecContext->width = containerProperties.display.line_width;
1336 	fCodecContext->height = containerProperties.display.line_count;
1337 
1338 	if (containerProperties.pixel_width_aspect > 0
1339 		&& containerProperties.pixel_height_aspect > 0) {
1340 		ConvertVideoAspectWidthAndHeightToAVCodecContext(
1341 			containerProperties.pixel_width_aspect,
1342 			containerProperties.pixel_height_aspect, *fCodecContext);
1343 	}
1344 
1345 	if (containerProperties.field_rate > 0.0) {
1346 		ConvertVideoFrameRateToAVCodecContext(containerProperties.field_rate,
1347 			*fCodecContext);
1348 	}
1349 
1350 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
1351 	fCodecContext->extradata_size = fExtraDataSize;
1352 }
1353 
1354 
1355 /*! \brief Loads the next  chunk into fChunkBuffer and assigns it (including
1356 		the start time) to fTempPacket but only if fTempPacket is empty.
1357 
1358 	\returns B_OK
1359 		1. meaning: Next chunk is loaded.
1360 		2. meaning: No need to load and assign anything. Proceed as usual.
1361 	\returns B_LAST_BUFFER_ERROR No more chunks available. fChunkBuffer	and
1362 		fTempPacket are left untouched.
1363 	\returns Other errors Caller should bail out because fChunkBuffer and
1364 		fTempPacket are in unknown states. Normal operation cannot be
1365 		guaranteed.
1366 */
1367 status_t
1368 AVCodecDecoder::_LoadNextChunkIfNeededAndAssignStartTime()
1369 {
1370 	if (fTempPacket.size > 0)
1371 		return B_OK;
1372 
1373 	const void* chunkBuffer = NULL;
1374 	size_t chunkBufferSize = 0;
1375 		// In the case that GetNextChunk() returns an error fChunkBufferSize
1376 		// should be left untouched.
1377 	media_header chunkMediaHeader;
1378 
1379 	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer, &chunkBufferSize,
1380 		&chunkMediaHeader);
1381 	if (getNextChunkStatus != B_OK)
1382 		return getNextChunkStatus;
1383 
1384 	status_t chunkBufferPaddingStatus
1385 		= _CopyChunkToChunkBufferAndAddPadding(chunkBuffer, chunkBufferSize);
1386 	if (chunkBufferPaddingStatus != B_OK)
1387 		return chunkBufferPaddingStatus;
1388 
1389 	fTempPacket.data = fChunkBuffer;
1390 	fTempPacket.size = fChunkBufferSize;
1391 	fTempPacket.dts = chunkMediaHeader.start_time;
1392 		// Let FFMPEG handle the correct relationship between start_time and
1393 		// decoded a/v frame. By doing so we are simply copying the way how it
1394 		// is implemented in ffplay.c for video frames (for audio frames it
1395 		// works, too, but isn't used by ffplay.c).
1396 		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502
1397 		//
1398 		// FIXME: Research how to establish a meaningful relationship between
1399 		// start_time and decoded a/v frame when the received chunk buffer
1400 		// contains partial a/v frames. Maybe some data formats do contain time
1401 		// stamps (ake pts / dts fields) that can be evaluated by FFMPEG. But
1402 		// as long as I don't have such video data to test it, it makes no
1403 		// sense trying to implement it.
1404 		//
1405 		// FIXME: Implement tracking start_time of video frames originating in
1406 		// data chunks that encode more than one video frame at a time. In that
1407 		// case on would increment the start_time for each consecutive frame of
1408 		// such a data chunk (like it is done for audio frame decoding). But as
1409 		// long as I don't have such video data to test it, it makes no sense
1410 		// to implement it.
1411 
1412 #ifdef LOG_STREAM_TO_FILE
1413 	BFile* logFile = fIsAudio ? &sAudioStreamLogFile : &sVideoStreamLogFile;
1414 	if (sDumpedPackets < 100) {
1415 		logFile->Write(chunkBuffer, fChunkBufferSize);
1416 		printf("wrote %ld bytes\n", fChunkBufferSize);
1417 		sDumpedPackets++;
1418 	} else if (sDumpedPackets == 100)
1419 		logFile->Unset();
1420 #endif
1421 
1422 	return B_OK;
1423 }
1424 
1425 
1426 /*! \brief Copies a chunk into fChunkBuffer and adds a "safety net" of
1427 		additional memory as required by FFMPEG for input buffers to video
1428 		decoders.
1429 
1430 	This is needed so that some decoders can read safely a predefined number of
1431 	bytes at a time for performance optimization purposes.
1432 
1433 	The additional memory has a size of AV_INPUT_BUFFER_PADDING_SIZE as defined
1434 	in avcodec.h.
1435 
1436 	Ownership of fChunkBuffer memory is with the class so it needs to be freed
1437 	at the right times (on destruction, on seeking).
1438 
1439 	Also update fChunkBufferSize to reflect the size of the contained data
1440 	(leaving out the padding).
1441 
1442 	\param chunk The chunk to copy.
1443 	\param chunkSize Size of the chunk in bytes
1444 
1445 	\returns B_OK Padding was successful. You are responsible for releasing the
1446 		allocated memory. fChunkBufferSize is set to chunkSize.
1447 	\returns B_NO_MEMORY Padding failed.
1448 		fChunkBuffer is set to NULL making it safe to call free() on it.
1449 		fChunkBufferSize is set to 0 to reflect the size of fChunkBuffer.
1450 */
1451 status_t
1452 AVCodecDecoder::_CopyChunkToChunkBufferAndAddPadding(const void* chunk,
1453 	size_t chunkSize)
1454 {
1455 	uint8_t* tmpBuffer = static_cast<uint8_t*>(realloc(fChunkBuffer,
1456 		chunkSize + AV_INPUT_BUFFER_PADDING_SIZE));
1457 	if (tmpBuffer == NULL) {
1458 		free(fChunkBuffer);
1459 		fChunkBuffer = NULL;
1460 		fChunkBufferSize = 0;
1461 		return B_NO_MEMORY;
1462 	} else {
1463 		fChunkBuffer = tmpBuffer;
1464 	}
1465 
1466 	memcpy(fChunkBuffer, chunk, chunkSize);
1467 	memset(fChunkBuffer + chunkSize, 0, AV_INPUT_BUFFER_PADDING_SIZE);
1468 		// Establish safety net, by zero'ing the padding area.
1469 
1470 	fChunkBufferSize = chunkSize;
1471 
1472 	return B_OK;
1473 }
1474 
1475 
1476 /*! \brief Executes all steps needed for a freshly decoded video frame.
1477 
1478 	\see _UpdateMediaHeaderForVideoFrame() and
1479 	\see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to
1480 	call this method.
1481 
1482 	\returns B_OK when video frame was handled successfully
	\returns B_NO_MEMORY when no memory is left for correct operation.
1484 */
1485 status_t
1486 AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState()
1487 {
1488 	_UpdateMediaHeaderForVideoFrame();
1489 	status_t postProcessStatus = _DeinterlaceAndColorConvertVideoFrame();
1490 	if (postProcessStatus != B_OK)
1491 		return postProcessStatus;
1492 
1493 	ConvertAVCodecContextToVideoFrameRate(*fCodecContext, fOutputFrameRate);
1494 
1495 #ifdef DEBUG
1496 	dump_ffframe_video(fRawDecodedPicture, "ffpict");
1497 #endif
1498 
1499 	fFrame++;
1500 
1501 	return B_OK;
1502 }
1503 
1504 
1505 /*! \brief Flushes one video frame - if any - still buffered by the decoder.
1506 
1507 	Some FFMPEG decoder are buffering video frames. To retrieve those buffered
1508 	frames the decoder needs to be told so.
1509 
1510 	The intended use of this method is to call it, once there are no more data
1511 	chunks for decoding left. Reframed in other words: Once GetNextChunk()
1512 	returns with status B_LAST_BUFFER_ERROR it is time to start flushing.
1513 
1514 	\returns B_OK Retrieved one video frame, handled it accordingly and updated
1515 		the system state accordingly.
1516 		There maybe more video frames left. So it is valid for the client of
1517 		AVCodecDecoder to call it one more time.
1518 
1519 	\returns B_LAST_BUFFER_ERROR No video frame left.
1520 		The client of the AVCodecDecoder should stop calling it now.
1521 
1522 	\returns B_NO_MEMORY No memory left for correct operation.
1523 */
1524 status_t
1525 AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer()
1526 {
1527 	// Tell the decoder there is nothing to send anymore
1528 	avcodec_send_packet(fCodecContext, NULL);
1529 
1530 	// Get any remaining frame
1531 	int error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1532 
1533 	if (error != 0 && error != AVERROR(EAGAIN)) {
1534 		// video buffer is flushed successfully
1535 		// (or there is an error, not much we can do about it)
1536 		return B_LAST_BUFFER_ERROR;
1537 	}
1538 
1539 	return _HandleNewVideoFrameAndUpdateSystemState();
1540 }
1541 
1542 
1543 /*! \brief Updates relevant fields of the class member fHeader with the
1544 		properties of the most recently decoded video frame.
1545 
1546 	It is assumed that this function is called only	when the following asserts
1547 	hold true:
1548 		1. We actually got a new picture decoded by the video decoder.
1549 		2. fHeader wasn't updated for the new picture yet. You MUST call this
1550 		   method only once per decoded video frame.
		3. This function MUST be called after
		   _DeinterlaceAndColorConvertVideoFrame() as it relies on an updated
		    fDecodedDataSizeInBytes.
		4. There will be at maximum only one decoded video frame in our cache
1555 		   at any single point in time. Otherwise you couldn't tell to which
1556 		   cached decoded video frame the properties in fHeader relate to.
1557 		5. AVCodecContext is still valid for this video frame (This is the case
1558 		   when this function is called after avcodec_decode_video2() and
1559 		   before the next call to avcodec_decode_video2().
1560 */
1561 void
1562 AVCodecDecoder::_UpdateMediaHeaderForVideoFrame()
1563 {
1564 	fHeader.type = B_MEDIA_RAW_VIDEO;
1565 	fHeader.file_pos = 0;
1566 	fHeader.orig_size = 0;
1567 	fHeader.start_time = fRawDecodedPicture->pkt_dts;
1568 		// The pkt_dts is already in microseconds, even if ffmpeg docs says
1569 		// 'in codec time_base units'
1570 	fHeader.size_used = av_image_get_buffer_size(
1571 		colorspace_to_pixfmt(fOutputColorSpace), fRawDecodedPicture->width,
1572 		fRawDecodedPicture->height, 1);
1573 	fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width;
1574 	fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height;
1575 	fHeader.u.raw_video.bytes_per_row
1576 		= CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace,
1577 			fRawDecodedPicture->width);
1578 	fHeader.u.raw_video.field_gamma = 1.0;
1579 	fHeader.u.raw_video.field_sequence = fFrame;
1580 	fHeader.u.raw_video.field_number = 0;
1581 	fHeader.u.raw_video.pulldown_number = 0;
1582 	fHeader.u.raw_video.first_active_line = 1;
1583 	fHeader.u.raw_video.line_count = fRawDecodedPicture->height;
1584 
1585 	ConvertAVCodecContextToVideoAspectWidthAndHeight(*fCodecContext,
1586 		fHeader.u.raw_video.pixel_width_aspect,
1587 		fHeader.u.raw_video.pixel_height_aspect);
1588 
1589 	char timestamp[AV_TS_MAX_STRING_SIZE];
1590 	av_ts_make_time_string(timestamp,
1591 		fRawDecodedPicture->best_effort_timestamp, &fCodecContext->time_base);
1592 
1593 	TRACE("[v] start_time=%s field_sequence=%lu\n",
1594 		timestamp, fHeader.u.raw_video.field_sequence);
1595 }
1596 
1597 
1598 /*! \brief This function applies deinterlacing (only if needed) and color
1599 	conversion to the video frame in fRawDecodedPicture.
1600 
1601 	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
1602 	converted yet (otherwise this function behaves in unknown manners).
1603 
1604 	This function MUST be called after _UpdateMediaHeaderForVideoFrame() as it
	relies on the fHeader.size_used and fHeader.u.raw_video.bytes_per_row
	fields for correct operation
1606 	for correct operation
1607 
1608 	You should only call this function when you	got a new picture decoded by
1609 	the video decoder.
1610 
1611 	When this function finishes the postprocessed video frame will be available
1612 	in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes
1613 	will be set accordingly).
1614 
1615 	\returns B_OK video frame successfully deinterlaced and color converted.
1616 	\returns B_NO_MEMORY Not enough memory available for correct operation.
1617 */
1618 status_t
1619 AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
1620 {
1621 	int displayWidth = fRawDecodedPicture->width;
1622 	int displayHeight = fRawDecodedPicture->height;
1623 	AVFrame deinterlacedPicture;
1624 	bool useDeinterlacedPicture = false;
1625 
1626 	if (fRawDecodedPicture->interlaced_frame) {
1627 		AVFrame rawPicture;
1628 		rawPicture.data[0] = fRawDecodedPicture->data[0];
1629 		rawPicture.data[1] = fRawDecodedPicture->data[1];
1630 		rawPicture.data[2] = fRawDecodedPicture->data[2];
1631 		rawPicture.data[3] = fRawDecodedPicture->data[3];
1632 		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
1633 		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
1634 		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
1635 		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];
1636 
1637 		if (av_image_alloc(deinterlacedPicture.data,
1638 				deinterlacedPicture.linesize, displayWidth, displayHeight,
1639 				fCodecContext->pix_fmt, 1) < 0)
1640 			return B_NO_MEMORY;
1641 
1642 		// deinterlace implemented using avfilter
1643 		_ProcessFilterGraph(&deinterlacedPicture, &rawPicture,
1644 			fCodecContext->pix_fmt, displayWidth, displayHeight);
1645 		useDeinterlacedPicture = true;
1646 	}
1647 
1648 	// Some decoders do not set pix_fmt until they have decoded 1 frame
1649 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1650 	if (fSwsContext == NULL) {
1651 		fSwsContext = sws_getContext(displayWidth, displayHeight,
1652 			fCodecContext->pix_fmt, displayWidth, displayHeight,
1653 			colorspace_to_pixfmt(fOutputColorSpace),
1654 			SWS_FAST_BILINEAR, NULL, NULL, NULL);
1655 	}
1656 #else
1657 	if (fFormatConversionFunc == NULL) {
1658 		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
1659 			fCodecContext->pix_fmt, displayWidth, displayHeight);
1660 	}
1661 #endif
1662 
1663 	fDecodedDataSizeInBytes = fHeader.size_used;
1664 
1665 	if (fDecodedData == NULL) {
1666 		const size_t kOptimalAlignmentForColorConversion = 32;
1667 		posix_memalign(reinterpret_cast<void**>(&fDecodedData),
1668 			kOptimalAlignmentForColorConversion, fDecodedDataSizeInBytes);
1669 	}
1670 	if (fDecodedData == NULL)
1671 		return B_NO_MEMORY;
1672 
1673 	fPostProcessedDecodedPicture->data[0] = fDecodedData;
1674 	fPostProcessedDecodedPicture->linesize[0]
1675 		= fHeader.u.raw_video.bytes_per_row;
1676 
1677 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1678 	if (fSwsContext != NULL) {
1679 #else
1680 	if (fFormatConversionFunc != NULL) {
1681 #endif
1682 		if (useDeinterlacedPicture) {
1683 			AVFrame deinterlacedFrame;
1684 			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
1685 			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
1686 			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
1687 			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
1688 			deinterlacedFrame.linesize[0]
1689 				= deinterlacedPicture.linesize[0];
1690 			deinterlacedFrame.linesize[1]
1691 				= deinterlacedPicture.linesize[1];
1692 			deinterlacedFrame.linesize[2]
1693 				= deinterlacedPicture.linesize[2];
1694 			deinterlacedFrame.linesize[3]
1695 				= deinterlacedPicture.linesize[3];
1696 
1697 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1698 			sws_scale(fSwsContext, deinterlacedFrame.data,
1699 				deinterlacedFrame.linesize, 0, displayHeight,
1700 				fPostProcessedDecodedPicture->data,
1701 				fPostProcessedDecodedPicture->linesize);
1702 #else
1703 			(*fFormatConversionFunc)(&deinterlacedFrame,
1704 				fPostProcessedDecodedPicture, displayWidth, displayHeight);
1705 #endif
1706 		} else {
1707 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1708 			sws_scale(fSwsContext, fRawDecodedPicture->data,
1709 				fRawDecodedPicture->linesize, 0, displayHeight,
1710 				fPostProcessedDecodedPicture->data,
1711 				fPostProcessedDecodedPicture->linesize);
1712 #else
1713 			(*fFormatConversionFunc)(fRawDecodedPicture,
1714 				fPostProcessedDecodedPicture, displayWidth, displayHeight);
1715 #endif
1716 		}
1717 	}
1718 
1719 	if (fRawDecodedPicture->interlaced_frame)
1720 		av_freep(&deinterlacedPicture.data[0]);
1721 
1722 	return B_OK;
1723 }
1724 
1725 
/*! \brief Initialize the deinterlace filter graph.

	\returns B_OK if the filter graph could be built.
	\returns B_BAD_VALUE if something went wrong building the graph.
*/
1731 status_t
1732 AVCodecDecoder::_InitFilterGraph(enum AVPixelFormat pixfmt, int32 width,
1733 	int32 height)
1734 {
1735 	if (fFilterGraph != NULL) {
1736 		av_frame_free(&fFilterFrame);
1737 		avfilter_graph_free(&fFilterGraph);
1738 	}
1739 
1740 	fFilterGraph = avfilter_graph_alloc();
1741 
1742 	BString arguments;
1743 	arguments.SetToFormat("buffer=video_size=%" B_PRId32 "x%" B_PRId32
1744 		":pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];[in]yadif[out];"
1745 		"[out]buffersink", width, height,
1746 		pixfmt);
1747 	AVFilterInOut* inputs = NULL;
1748 	AVFilterInOut* outputs = NULL;
1749 	TRACE("[v] _InitFilterGraph(): %s\n", arguments.String());
1750 	int ret = avfilter_graph_parse2(fFilterGraph, arguments.String(), &inputs,
1751 		&outputs);
1752 	if (ret < 0) {
1753 		fprintf(stderr, "avfilter_graph_parse2() failed\n");
1754 		return B_BAD_VALUE;
1755 	}
1756 
1757 	ret = avfilter_graph_config(fFilterGraph, NULL);
1758 	if (ret < 0) {
1759 		fprintf(stderr, "avfilter_graph_config() failed\n");
1760 		return B_BAD_VALUE;
1761 	}
1762 
1763 	fBufferSourceContext = avfilter_graph_get_filter(fFilterGraph,
1764 		"Parsed_buffer_0");
1765 	fBufferSinkContext = avfilter_graph_get_filter(fFilterGraph,
1766 		"Parsed_buffersink_2");
1767 	if (fBufferSourceContext == NULL || fBufferSinkContext == NULL) {
1768 		fprintf(stderr, "avfilter_graph_get_filter() failed\n");
1769 		return B_BAD_VALUE;
1770 	}
1771 	fFilterFrame = av_frame_alloc();
1772 	fLastWidth = width;
1773 	fLastHeight = height;
1774 	fLastPixfmt = pixfmt;
1775 
1776 	return B_OK;
1777 }
1778 
1779 
/*! \brief Process an AVFrame with the deinterlace filter graph.

	We deinterlace exactly one video frame from src into dst.
	Equivalent function for avpicture_deinterlace() from version 2.x.

	\returns B_OK video frame successfully deinterlaced.
	\returns B_BAD_DATA No frame could be output.
	\returns B_NO_MEMORY Not enough memory available for correct operation.
*/
1789 status_t
1790 AVCodecDecoder::_ProcessFilterGraph(AVFrame *dst, const AVFrame *src,
1791 	enum AVPixelFormat pixfmt, int32 width, int32 height)
1792 {
1793 	if (fFilterGraph == NULL || width != fLastWidth
1794 		|| height != fLastHeight || pixfmt != fLastPixfmt) {
1795 
1796 		status_t err = _InitFilterGraph(pixfmt, width, height);
1797 		if (err != B_OK)
1798 			return err;
1799 	}
1800 
1801 	memcpy(fFilterFrame->data, src->data, sizeof(src->data));
1802 	memcpy(fFilterFrame->linesize, src->linesize, sizeof(src->linesize));
1803 	fFilterFrame->width = width;
1804 	fFilterFrame->height = height;
1805 	fFilterFrame->format = pixfmt;
1806 
1807 	int ret = av_buffersrc_add_frame(fBufferSourceContext, fFilterFrame);
1808 	if (ret < 0)
1809 		return B_NO_MEMORY;
1810 
1811 	ret = av_buffersink_get_frame(fBufferSinkContext, fFilterFrame);
1812 	if (ret < 0)
1813 		return B_BAD_DATA;
1814 
1815 	av_image_copy(dst->data, dst->linesize, (const uint8**)fFilterFrame->data,
1816 		fFilterFrame->linesize, pixfmt, width, height);
1817 	av_frame_unref(fFilterFrame);
1818 	return B_OK;
1819 }
1820