xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision 3634f142352af2428aed187781fc9d75075e9140)
1 /*
2  * Copyright (C) 2001 Carlos Hasan
3  * Copyright (C) 2001 François Revol
4  * Copyright (C) 2001 Axel Dörfler
5  * Copyright (C) 2004 Marcus Overhagen
6  * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
7  * Copyright (C) 2014 Colin Günther <coling@gmx.de>
8  * Copyright (C) 2015 Adrien Destugues <pulkomandy@pulkomandy.tk>
9  *
10  * All rights reserved. Distributed under the terms of the MIT License.
11  */
12 
13 //! libavcodec based decoder for Haiku
14 
15 
16 #include "AVCodecDecoder.h"
17 
18 #include <new>
19 
20 #include <assert.h>
21 #include <string.h>
22 
23 #include <Bitmap.h>
24 #include <Debug.h>
25 #include <String.h>
26 
27 #include "Utilities.h"
28 
29 
30 #undef TRACE
31 //#define TRACE_AV_CODEC
32 #ifdef TRACE_AV_CODEC
33 #	define TRACE(x...)	printf(x)
34 #	define TRACE_AUDIO(x...)	printf(x)
35 #	define TRACE_VIDEO(x...)	printf(x)
36 #else
37 #	define TRACE(x...)
38 #	define TRACE_AUDIO(x...)
39 #	define TRACE_VIDEO(x...)
40 #endif
41 
42 //#define LOG_STREAM_TO_FILE
43 #ifdef LOG_STREAM_TO_FILE
44 #	include <File.h>
45 	static BFile sAudioStreamLogFile(
46 		"/boot/home/Desktop/AVCodecDebugAudioStream.raw",
47 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
48 	static BFile sVideoStreamLogFile(
49 		"/boot/home/Desktop/AVCodecDebugVideoStream.raw",
50 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
51 	static int sDumpedPackets = 0;
52 #endif
53 
54 typedef AVCodecID CodecID;
55 
// On-the-wire layout of the wave format header that precedes codec specific
// extra data in B_WAV_FORMAT_FAMILY info buffers (field names match
// Microsoft's WAVEFORMATEX; see the B_WAV_FORMAT_FAMILY branch in Setup()).
// _PACKED keeps the compiler from inserting padding between fields.
struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;	// number of extra-data bytes following this header
	// extra_data[extra_size]
} _PACKED;
66 
// Minimal snapshot of audio stream properties, stored behind
// fRawDecodedAudio->opaque so the properties of the first decoded audio
// frame travel along with the frame (see the documentation of
// _MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()).
struct avformat_codec_context {
	int sample_rate;
	int channels;
};
71 
72 
73 // profiling related globals
74 #define DO_PROFILING 0
75 #if DO_PROFILING
76 static bigtime_t decodingTime = 0;
77 static bigtime_t conversionTime = 0;
78 static long profileCounter = 0;
79 #endif
80 
81 
/*!	\brief Sets up an empty decoder instance.

	Allocates the FFmpeg codec context and the AVFrame work buffers up
	front; the actual codec is selected later in Setup() and opened in
	_Negotiate*OutputFormat().
*/
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fCodecContext(avcodec_alloc_context3(NULL)),
	fResampleContext(NULL),
	fDecodedData(NULL),
	fDecodedDataSizeInBytes(0),
	fPostProcessedDecodedPicture(av_frame_alloc()),
	fRawDecodedPicture(av_frame_alloc()),
	fRawDecodedAudio(av_frame_alloc()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fOutputColorSpace(B_NO_COLOR_SPACE),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),
	fInputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fDecodedDataBuffer(av_frame_alloc()),
	fDecodedDataBufferOffset(0),
	fDecodedDataBufferSize(0),
	fBufferSinkContext(NULL),
	fBufferSourceContext(NULL),
	fFilterGraph(NULL),
	fFilterFrame(NULL)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	system_info info;
	get_system_info(&info);

	// Be careful (but not paranoid) about stream errors, try to conceal
	// decode errors, and use one decoding thread per CPU.
	fCodecContext->err_recognition = AV_EF_CAREFUL;
	fCodecContext->error_concealment = 3;
	fCodecContext->thread_count = info.cpu_count;
}
136 
137 
/*!	\brief Releases all FFmpeg state and internal buffers.

	Closes the codec if it was opened, then frees the resample context,
	the chunk/decoded data buffers, all AVFrames (including the opaque
	avformat_codec_context attached to fRawDecodedAudio), the codec
	context, the filter graph and the copied extra data.
*/
AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%lld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	if (fCodecInitDone)
		avcodec_close(fCodecContext);

	swr_free(&fResampleContext);
	free(fChunkBuffer);
	free(fDecodedData);

	av_frame_free(&fPostProcessedDecodedPicture);
	av_frame_free(&fRawDecodedPicture);
	// The opaque pointer was allocated with av_realloc() in
	// _NegotiateAudioOutputFormat() and is not owned by the frame itself.
	av_free(fRawDecodedAudio->opaque);
	av_frame_free(&fRawDecodedAudio);
	avcodec_free_context(&fCodecContext);
	av_frame_free(&fDecodedDataBuffer);

	av_frame_free(&fFilterFrame);
	avfilter_graph_free(&fFilterGraph);

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
#endif

	delete[] fExtraData;
}
174 
175 
176 void
177 AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
178 {
179 	snprintf(mci->short_name, 32, "%s", fCodec->name);
180 	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
181 	mci->id = 0;
182 	mci->sub_id = fCodec->id;
183 }
184 
185 
186 status_t
187 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
188 	size_t infoSize)
189 {
190 	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
191 		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
192 		return B_ERROR;
193 
194 	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
195 	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));
196 
197 #ifdef TRACE_AV_CODEC
198 	char buffer[1024];
199 	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
200 	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
201 	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
202 	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
203 		ioEncodedFormat->user_data_type);
204 	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
205 		ioEncodedFormat->MetaDataSize());
206 #endif
207 
208 	media_format_description description;
209 	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
210 			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
211 		if (description.u.misc.file_format != 'ffmp')
212 			return B_NOT_SUPPORTED;
213 		fCodec = avcodec_find_decoder(static_cast<CodecID>(
214 			description.u.misc.codec));
215 		if (fCodec == NULL) {
216 			TRACE("  unable to find the correct FFmpeg "
217 				"decoder (id = %lu)\n", description.u.misc.codec);
218 			return B_ERROR;
219 		}
220 		TRACE("  found decoder %s\n", fCodec->name);
221 
222 		const void* extraData = infoBuffer;
223 		fExtraDataSize = infoSize;
224 		if (description.family == B_WAV_FORMAT_FAMILY
225 				&& infoSize >= sizeof(wave_format_ex)) {
226 			TRACE("  trying to use wave_format_ex\n");
227 			// Special case extra data in B_WAV_FORMAT_FAMILY
228 			const wave_format_ex* waveFormatData
229 				= (const wave_format_ex*)infoBuffer;
230 
231 			size_t waveFormatSize = infoSize;
232 			if (waveFormatData != NULL && waveFormatSize > 0) {
233 				fBlockAlign = waveFormatData->block_align;
234 				TRACE("  found block align: %d\n", fBlockAlign);
235 				fExtraDataSize = waveFormatData->extra_size;
236 				// skip the wave_format_ex from the extra data.
237 				extraData = waveFormatData + 1;
238 			}
239 		} else {
240 			if (fIsAudio) {
241 				fBlockAlign
242 					= ioEncodedFormat->u.encoded_audio.output.buffer_size;
243 				TRACE("  using buffer_size as block align: %d\n",
244 					fBlockAlign);
245 			}
246 		}
247 		if (extraData != NULL && fExtraDataSize > 0) {
248 			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
249 			delete[] fExtraData;
250 			fExtraData = new(std::nothrow) char[fExtraDataSize];
251 			if (fExtraData != NULL)
252 				memcpy(fExtraData, infoBuffer, fExtraDataSize);
253 			else
254 				fExtraDataSize = 0;
255 		}
256 
257 		fInputFormat = *ioEncodedFormat;
258 		return B_OK;
259 	} else {
260 		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
261 	}
262 
263 	printf("AVCodecDecoder::Setup failed!\n");
264 	return B_ERROR;
265 }
266 
267 
268 status_t
269 AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
270 {
271 	status_t ret = B_OK;
272 	// Reset the FFmpeg codec to flush buffers, so we keep the sync
273 	if (fCodecInitDone) {
274 		avcodec_flush_buffers(fCodecContext);
275 		_ResetTempPacket();
276 	}
277 
278 	// Flush internal buffers as well.
279 	free(fChunkBuffer);
280 	fChunkBuffer = NULL;
281 	fChunkBufferSize = 0;
282 	fDecodedDataBufferOffset = 0;
283 	fDecodedDataBufferSize = 0;
284 	fDecodedDataSizeInBytes = 0;
285 
286 	fFrame = frame;
287 
288 	return ret;
289 }
290 
291 
292 status_t
293 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
294 {
295 	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
296 		fIsAudio?('a'):('v'));
297 
298 #ifdef TRACE_AV_CODEC
299 	char buffer[1024];
300 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
301 	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
302 #endif
303 
304 	if (fIsAudio)
305 		return _NegotiateAudioOutputFormat(inOutFormat);
306 	else
307 		return _NegotiateVideoOutputFormat(inOutFormat);
308 }
309 
310 
311 status_t
312 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
313 	media_header* mediaHeader, media_decode_info* info)
314 {
315 	if (!fCodecInitDone)
316 		return B_NO_INIT;
317 
318 	status_t ret;
319 	if (fIsAudio)
320 		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
321 	else
322 		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);
323 
324 	return ret;
325 }
326 
327 
328 // #pragma mark -
329 
330 
/*!	\brief Resets fTempPacket to an empty, drained state.

	Clearing size and data makes the decode loops request a fresh chunk of
	encoded data before feeding the packet to FFmpeg again.
*/
void
AVCodecDecoder::_ResetTempPacket()
{
	av_init_packet(&fTempPacket);
	fTempPacket.size = 0;
	fTempPacket.data = NULL;
}
338 
339 
/*!	\brief Negotiates the raw audio output format.

	Opens the codec, decodes a first chunk so fCodecContext carries the
	actual stream properties, and derives the output frame size/count/rate
	from them. On success inOutFormat describes B_MEDIA_RAW_AUDIO output.

	\returns B_OK on success, B_ERROR when the codec could not be opened or
		the first chunk failed to decode, B_NO_MEMORY on allocation failure.
*/
status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	_ApplyEssentialAudioContainerPropertiesToContext();
		// This makes audio formats play that encode the audio properties in
		// the audio container (e.g. WMA) and not in the audio frames
		// themselves (e.g. MP3).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextAudioFrameChunk() will update the essential audio
		// format properties accordingly regardless of the settings here.

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fCodecContext);
	}

	if (avcodec_open2(fCodecContext, fCodec, NULL) >= 0)
		fCodecInitDone = true;
	else {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

	// Discard any state left over from a previous negotiation.
	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fDecodedDataBufferOffset = 0;
	fDecodedDataBufferSize = 0;

	_ResetTempPacket();

	// Decode one chunk up front so the sample format, rate and channel
	// count read from fCodecContext below reflect the actual stream.
	status_t statusOfDecodingFirstFrameChunk = _DecodeNextAudioFrameChunk();
	if (statusOfDecodingFirstFrameChunk != B_OK) {
		TRACE("[a] decoding first audio frame chunk failed\n");
		return B_ERROR;
	}

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate = fCodecContext->sample_rate;
	outputAudioFormat.channel_count = fCodecContext->channels;
	ConvertAVSampleFormatToRawAudioFormat(fCodecContext->sample_fmt,
		outputAudioFormat.format);
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size;
	// Check that buffer_size has a sane value
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	if (outputAudioFormat.buffer_size == 0) {
		outputAudioFormat.buffer_size = 512 * sampleSize
			* outputAudioFormat.channel_count;
	}

	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	// Initialize variables needed to manage decoding as many audio frames
	// as needed to fill the buffer_size.
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;
	// For planar sample formats each channel is stored separately, so one
	// input "frame" covers a single channel's sample only.
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt))
		fInputFrameSize = sampleSize;
	else
		fInputFrameSize = fOutputFrameSize;

	fRawDecodedAudio->opaque
		= av_realloc(fRawDecodedAudio->opaque, sizeof(avformat_codec_context));
	if (fRawDecodedAudio->opaque == NULL)
		return B_NO_MEMORY;

	// Planar audio is converted (interleaved) via swresample; set up the
	// conversion context here.
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
		fResampleContext = swr_alloc_set_opts(NULL,
			fCodecContext->channel_layout,
			fCodecContext->request_sample_fmt,
			fCodecContext->sample_rate,
			fCodecContext->channel_layout,
			fCodecContext->sample_fmt,
			fCodecContext->sample_rate,
			0, NULL);
		swr_init(fResampleContext);
	}

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fCodecContext->bit_rate, fCodecContext->sample_rate, fCodecContext->channels,
		fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	return B_OK;
}
441 
442 
/*!	\brief Negotiates the raw video output format.

	Opens the codec, decodes a first frame so fHeader carries the actual
	video properties (dimensions, aspect, bytes per row), and fills
	inOutFormat with a B_MEDIA_RAW_VIDEO description based on them.

	\returns B_OK on success, B_ERROR when the codec could not be opened or
		the first frame failed to decode.
*/
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	_ApplyEssentialVideoContainerPropertiesToContext();
		// This makes video formats play that encode the video properties in
		// the video container (e.g. WMV) and not in the video frames
		// themselves (e.g. MPEG2).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextVideoFrame() will update the essential video format
		// properties accordingly regardless of the settings here.

	bool codecCanHandleIncompleteFrames
		= (fCodec->capabilities & AV_CODEC_CAP_TRUNCATED) != 0;
	if (codecCanHandleIncompleteFrames) {
		// Expect and handle video frames to be split across consecutive
		// data chunks.
		fCodecContext->flags |= AV_CODEC_FLAG_TRUNCATED;
	}

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fCodecContext);
	}

	if (avcodec_open2(fCodecContext, fCodec, NULL) >= 0)
		fCodecInitDone = true;
	else {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fOutputColorSpace = B_RGB32;
#else
	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;
#endif

	// Drop any previously created color conversion state; it is recreated
	// when the first frame is decoded.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;

	_ResetTempPacket();

	// Decode one frame up front so fHeader contains the actual video
	// properties used below.
	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;
	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active
		= fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect
		= fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect
		= fHeader.u.raw_video.pixel_height_aspect;
#if 0
	// This was added by Colin Günther in order to handle streams with a
	// variable frame rate. fOutputFrameRate is computed from the stream
	// time_base, but it actually assumes a timebase equal to the FPS. As far
	// as I can see, a stream with a variable frame rate would have a higher
	// resolution time_base and increment the pts (presentation time) of each
	// frame by a value bigger than one.
	//
	// Fixed rate stream:
	// time_base = 1/50s, frame PTS = 1, 2, 3... (for 50Hz)
	//
	// Variable rate stream:
	// time_base = 1/300s, frame PTS = 6, 12, 18, ... (for 50Hz)
	// time_base = 1/300s, frame PTS = 5, 10, 15, ... (for 60Hz)
	//
	// The fOutputFrameRate currently does not take this into account and
	// ignores the PTS. This results in playing the above sample at 300Hz
	// instead of 50 or 60.
	//
	// However, comparing the PTS for two consecutive frames implies we have
	// already decoded 2 frames, which may not be the case when this method
	// is first called.
	inOutFormat->u.raw_video.field_rate = fOutputFrameRate;
		// Was calculated by first call to _DecodeNextVideoFrame()
#endif
	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width
		= fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count
		= fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row
		= fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}
582 
583 
584 /*! \brief Fills the outBuffer with one or more already decoded audio frames.
585 
586 	Besides the main duty described above, this method also fills out the other
587 	output parameters as documented below.
588 
589 	\param outBuffer Pointer to the output buffer to copy the decoded audio
590 		frames to.
591 	\param outFrameCount Pointer to the output variable to assign the number of
592 		copied audio frames (usually several audio frames at once).
593 	\param mediaHeader Pointer to the output media header that contains the
594 		properties of the decoded audio frame being the first in the outBuffer.
595 	\param info Specifies additional decoding parameters. (Note: unused).
596 
597 	\returns B_OK Decoding audio frames succeeded.
598 	\returns B_LAST_BUFFER_ERROR There are no more audio frames available.
599 	\returns Other error codes
600 */
601 status_t
602 AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
603 	media_header* mediaHeader, media_decode_info* info)
604 {
605 	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
606 		mediaHeader->start_time / 1000000.0);
607 
608 	status_t audioDecodingStatus
609 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextAudioFrame();
610 
611 	if (audioDecodingStatus != B_OK)
612 		return audioDecodingStatus;
613 
614 	*outFrameCount = fDecodedDataSizeInBytes / fOutputFrameSize;
615 	*mediaHeader = fHeader;
616 	memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes);
617 
618 	fDecodedDataSizeInBytes = 0;
619 
620 	return B_OK;
621 }
622 
623 
624 /*! \brief Fills the outBuffer with an already decoded video frame.
625 
626 	Besides the main duty described above, this method also fills out the other
627 	output parameters as documented below.
628 
629 	\param outBuffer Pointer to the output buffer to copy the decoded video
630 		frame to.
631 	\param outFrameCount Pointer to the output variable to assign the number of
632 		copied video frames (usually one video frame).
633 	\param mediaHeader Pointer to the output media header that contains the
634 		decoded video frame properties.
635 	\param info Specifies additional decoding parameters. (Note: unused).
636 
637 	\returns B_OK Decoding a video frame succeeded.
638 	\returns B_LAST_BUFFER_ERROR There are no more video frames available.
639 	\returns Other error codes
640 */
641 status_t
642 AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
643 	media_header* mediaHeader, media_decode_info* info)
644 {
645 	status_t videoDecodingStatus
646 		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame();
647 
648 	if (videoDecodingStatus != B_OK)
649 		return videoDecodingStatus;
650 
651 	*outFrameCount = 1;
652 	*mediaHeader = fHeader;
653 	memcpy(outBuffer, fDecodedData, mediaHeader->size_used);
654 
655 	fDecodedDataSizeInBytes = 0;
656 
657 	return B_OK;
658 }
659 
660 
661 /*!	\brief Decodes next audio frame.
662 
663 	We decode at least one audio frame into fDecodedData. To achieve this goal,
664     we might need to request several chunks of encoded data resulting in a
665     variable execution time of this function.
666 
667     The length of the decoded audio frame(s) is stored in
668     fDecodedDataSizeInBytes. If this variable is greater than zero you can
669     assert that all audio frames in fDecodedData are valid.
670 
671 	It is assumed that the number of expected audio frames is stored in
672 	fOutputFrameCount. So _DecodeNextAudioFrame() must be called only after
673 	fOutputFrameCount has been set.
674 
675 	Note: fOutputFrameCount contains the maximum number of frames a caller
676 	of BMediaDecoder::Decode() expects to receive. There is a direct
677 	relationship between fOutputFrameCount and the buffer size a caller of
678 	BMediaDecoder::Decode() will provide so we make sure to respect this limit
679 	for fDecodedDataSizeInBytes.
680 
681 	On return with status code B_OK the following conditions hold true:
682 		1. fDecodedData contains as many audio frames as the caller of
683 		   BMediaDecoder::Decode() expects.
684 		2. fDecodedData contains fewer audio frames than the caller of
685 		   BMediaDecoder::Decode() expects only when one of the following
686 		   conditions holds true:
687 		       i  No more audio frames left. Consecutive calls to
688 		          _DecodeNextAudioFrame() will then result in the return of
689 		          status code B_LAST_BUFFER_ERROR.
690 		       ii TODO: A change in the size of the audio frames.
691 		3. fHeader is populated with the audio frame properties of the first
692 		   audio frame in fDecodedData. Especially the start_time field of
693 		   fHeader relates to that first audio frame. Start times of
694 		   consecutive audio frames in fDecodedData have to be calculated
695 		   manually (using the frame rate and the frame duration) if the
696 		   caller needs them.
697 
698 	TODO: Handle change of channel_count. Such a change results in a change of
699 	the audio frame size and thus has different buffer requirements.
700 	The most sane approach for implementing this is to return the audio frames
701 	that were still decoded with the previous channel_count and inform the
702 	client of BMediaDecoder::Decode() about the change so that it can adapt to
703 	it. Furthermore we need to adapt our fDecodedData to the new buffer size
704 	requirements accordingly.
705 
706 	\returns B_OK when we successfully decoded enough audio frames
707 	\returns B_LAST_BUFFER_ERROR when there are no more audio frames available.
708 	\returns Other Errors
709 */
status_t
AVCodecDecoder::_DecodeNextAudioFrame()
{
	assert(fTempPacket.size >= 0);
	assert(fDecodedDataSizeInBytes == 0);
		// _DecodeNextAudioFrame needs to be called on empty fDecodedData only!
		// If this assert holds wrong we have a bug somewhere.

	status_t resetStatus = _ResetRawDecodedAudio();
	if (resetStatus != B_OK)
		return resetStatus;

	// Accumulate audio frames until the client's buffer (fOutputFrameCount
	// frames) is full, or until the stream ends.
	while (fRawDecodedAudio->nb_samples < fOutputFrameCount) {
		_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow();

		// Drain frames decoded on a previous iteration (or call) first
		// before asking the codec for more.
		bool decodedDataBufferHasData = fDecodedDataBufferSize > 0;
		if (decodedDataBufferHasData) {
			_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes();
			continue;
		}

		// End of stream is not an error if we already gathered some frames;
		// deliver the partial buffer in that case.
		status_t decodeAudioChunkStatus = _DecodeNextAudioFrameChunk();
		if (decodeAudioChunkStatus == B_LAST_BUFFER_ERROR
				&& fRawDecodedAudio->nb_samples > 0)
			break;
		if (decodeAudioChunkStatus != B_OK)
			return decodeAudioChunkStatus;
	}

	fFrame += fRawDecodedAudio->nb_samples;
	// linesize[0] tracks the total byte size of the frames gathered above.
	fDecodedDataSizeInBytes = fRawDecodedAudio->linesize[0];

	_UpdateMediaHeaderForAudioFrame();

#ifdef DEBUG
	dump_ffframe_audio(fRawDecodedAudio, "ffaudi");
#endif

	TRACE_AUDIO("  frame count: %ld current: %lld\n",
		fRawDecodedAudio->nb_samples, fFrame);

	return B_OK;
}
753 
754 
755 /*!	\brief Applies all essential audio input properties to fCodecContext that were
756 		passed to AVCodecDecoder when Setup() was called.
757 
758 	Note: This function must be called before the AVCodec is opened via
759 	avcodec_open2(). Otherwise the behaviour of FFMPEG's audio decoding
760 	function avcodec_receive_frame() is undefined.
761 
762 	Essential properties applied from fInputFormat.u.encoded_audio:
763 		- bit_rate copied to fCodecContext->bit_rate
764 		- frame_size copied to fCodecContext->frame_size
765 		- output.format converted to fCodecContext->sample_fmt
766 		- output.frame_rate copied to fCodecContext->sample_rate
767 		- output.channel_count copied to fCodecContext->channels
768 
769 	Other essential properties being applied:
770 		- fBlockAlign to fCodecContext->block_align
771 		- fExtraData to fCodecContext->extradata
772 		- fExtraDataSize to fCodecContext->extradata_size
773 
774 	TODO: Either the following documentation section should be removed or this
775 	TODO when it is clear whether fInputFormat.MetaData() and
776 	fInputFormat.MetaDataSize() have to be applied to fCodecContext. See the related
777 	TODO in the method implementation.
778 	Only applied when fInputFormat.MetaDataSize() is greater than zero:
779 		- fInputFormat.MetaData() to fCodecContext->extradata
780 		- fInputFormat.MetaDataSize() to fCodecContext->extradata_size
781 */
782 void
783 AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
784 {
785 	media_encoded_audio_format containerProperties
786 		= fInputFormat.u.encoded_audio;
787 
788 	fCodecContext->bit_rate
789 		= static_cast<int>(containerProperties.bit_rate);
790 	fCodecContext->frame_size
791 		= static_cast<int>(containerProperties.frame_size);
792 	ConvertRawAudioFormatToAVSampleFormat(
793 		containerProperties.output.format, fCodecContext->sample_fmt);
794 	ConvertRawAudioFormatToAVSampleFormat(
795 		containerProperties.output.format, fCodecContext->request_sample_fmt);
796 	fCodecContext->sample_rate
797 		= static_cast<int>(containerProperties.output.frame_rate);
798 	fCodecContext->channels
799 		= static_cast<int>(containerProperties.output.channel_count);
800 	// Check that channel count is not still a wild card!
801 	if (fCodecContext->channels == 0) {
802 		TRACE("  channel_count still a wild-card, assuming stereo.\n");
803 		fCodecContext->channels = 2;
804 	}
805 
806 	fCodecContext->block_align = fBlockAlign;
807 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
808 	fCodecContext->extradata_size = fExtraDataSize;
809 
810 	// TODO: This probably needs to go away, there is some misconception
811 	// about extra data / info buffer and meta data. See
812 	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
813 	// extradata_size into media_format::MetaData(), but used to ignore
814 	// the infoBuffer passed to GetStreamInfo(). I think this may be why
815 	// the code below was added.
816 	if (fInputFormat.MetaDataSize() > 0) {
817 		fCodecContext->extradata = static_cast<uint8_t*>(
818 			const_cast<void*>(fInputFormat.MetaData()));
819 		fCodecContext->extradata_size = fInputFormat.MetaDataSize();
820 	}
821 
822 	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
823 		"extradata_size %d\n",
824 		fCodecContext->bit_rate,
825 		fCodecContext->sample_rate,
826 		fCodecContext->channels,
827 		fCodecContext->block_align,
828 		fCodecContext->extradata_size);
829 }
830 
831 
832 /*!	\brief Resets important fields in fRawDecodedVideo to their default values.
833 
834 	Note: Also initializes fDecodedData if not done already.
835 
836 	\returns B_OK Resetting successfully completed.
837 	\returns B_NO_MEMORY No memory left for correct operation.
838 */
839 status_t
840 AVCodecDecoder::_ResetRawDecodedAudio()
841 {
842 	if (fDecodedData == NULL) {
843 		size_t maximumSizeOfDecodedData = fOutputFrameCount * fOutputFrameSize;
844 		fDecodedData
845 			= static_cast<uint8_t*>(malloc(maximumSizeOfDecodedData));
846 	}
847 	if (fDecodedData == NULL)
848 		return B_NO_MEMORY;
849 
850 	fRawDecodedAudio->data[0] = fDecodedData;
851 	fRawDecodedAudio->linesize[0] = 0;
852 	fRawDecodedAudio->format = AV_SAMPLE_FMT_NONE;
853 	fRawDecodedAudio->pkt_dts = AV_NOPTS_VALUE;
854 	fRawDecodedAudio->nb_samples = 0;
855 	memset(fRawDecodedAudio->opaque, 0, sizeof(avformat_codec_context));
856 
857 	return B_OK;
858 }
859 
860 
861 /*!	\brief Checks fDecodedDataBufferSize and fTempPacket for invalid values,
862 		reports them and assigns valid values.
863 
864 	Note: This method is intended to be called before any code is executed that
865 	deals with moving, loading or decoding any audio frames.
866 */
void
AVCodecDecoder::_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow()
{
	// A negative size means some code consumed more decoded bytes than
	// were available; report it and clamp to a safe value.
	if (fDecodedDataBufferSize < 0) {
		fprintf(stderr, "Decoding read past the end of the decoded data "
			"buffer! %" B_PRId32 "\n", fDecodedDataBufferSize);
		fDecodedDataBufferSize = 0;
	}
	// Same check for the packet of encoded data currently being decoded.
	if (fTempPacket.size < 0) {
		fprintf(stderr, "Decoding read past the end of the temp packet! %d\n",
			fTempPacket.size);
		fTempPacket.size = 0;
	}
}
881 
882 
/*!	\brief Moves audio frames from fDecodedDataBuffer to fRawDecodedAudio (and
		thus to fDecodedData) and updates the start times of fRawDecodedAudio,
		fDecodedDataBuffer and fTempPacket accordingly.

	When moving audio frames to fRawDecodedAudio this method also makes sure
	that the following important fields of fRawDecodedAudio are populated and
	updated with correct values:
		- fRawDecodedAudio->data[0]: Points to first free byte of fDecodedData
		- fRawDecodedAudio->linesize[0]: Total size of frames in fDecodedData
		- fRawDecodedAudio->format: Format of first audio frame
		- fRawDecodedAudio->pkt_dts: Start time of first audio frame
		- fRawDecodedAudio->nb_samples: Number of audio frames
		- fRawDecodedAudio->opaque: Contains the following fields for the first
		  audio frame:
		      - channels: Channel count of first audio frame
		      - sample_rate: Frame rate of first audio frame

	This function assumes to be called only when the following assumptions
	hold true:
		1. There are decoded audio frames available in fDecodedDataBuffer
		   meaning that fDecodedDataBufferSize is greater than zero.
		2. There is space left in fRawDecodedAudio to move some audio frames
		   in. This means that fRawDecodedAudio has lesser audio frames than
		   the maximum allowed (specified by fOutputFrameCount).
		3. The audio frame rate is known so that we can calculate the time
		   range (covered by the moved audio frames) to update the start times
		   accordingly.
		4. The field fRawDecodedAudio->opaque points to a memory block
		   representing a structure of type avformat_codec_context.

	After this function returns the caller can safely make the following
	assumptions:
		1. The number of decoded audio frames in fDecodedDataBuffer is
		   decreased though it may still be greater than zero.
		2. The number of frames in fRawDecodedAudio has increased and all
		   important fields are updated (see listing above).
		3. Start times of fDecodedDataBuffer and fTempPacket were increased
		   with the time range covered by the moved audio frames.

	Note: This function raises an exception (by calling the debugger), when
	fDecodedDataBufferSize is not a multiple of fOutputFrameSize.
*/
void
AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
{
	assert(fDecodedDataBufferSize > 0);
	assert(fRawDecodedAudio->nb_samples < fOutputFrameCount);
	assert(fOutputFrameRate > 0);

	// Room left in fRawDecodedAudio vs. frames waiting in fDecodedDataBuffer;
	// only the smaller of the two amounts can be moved in this call.
	int32 outFrames = fOutputFrameCount - fRawDecodedAudio->nb_samples;
	int32 inFrames = fDecodedDataBufferSize;

	int32 frames = min_c(outFrames, inFrames);
	if (frames == 0)
		debugger("fDecodedDataBufferSize not multiple of frame size!");

	// Some decoders do not support format conversion on themselves, or use
	// "planar" audio (each channel separated instead of interleaved samples).
	// In that case, we use swresample to convert the data
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
#if 0
		const uint8_t* ptr[8];
		for (int i = 0; i < 8; i++) {
			if (fDecodedDataBuffer->data[i] == NULL)
				ptr[i] = NULL;
			else
				ptr[i] = fDecodedDataBuffer->data[i] + fDecodedDataBufferOffset;
		}

		// When there are more input frames than space in the output buffer,
		// we could feed everything to swr and it would buffer the extra data.
		// However, there is no easy way to flush that data without feeding more
		// input, and it makes our timestamp computations fail.
		// So, we feed only as much frames as we can get out, and handle the
		// buffering ourselves.
		// TODO Ideally, we should try to size our output buffer so that it can
		// always hold all the output (swr provides helper functions for this)
		inFrames = frames;
		frames = swr_convert(fResampleContext, fRawDecodedAudio->data,
			outFrames, ptr, inFrames);

		if (frames < 0)
			debugger("resampling failed");
#else
		// interleave planar audio with same format
		// For each frame, fInputFrameSize bytes are copied from every channel
		// plane in turn; "out" advances per copy, "offset" advances once per
		// frame within each source plane.
		// NOTE(review): this assumes fInputFrameSize equals the per-plane
		// bytes per frame (i.e. one sample's size) — confirm against where
		// fInputFrameSize is computed.
		uintptr_t out = (uintptr_t)fRawDecodedAudio->data[0];
		int32 offset = fDecodedDataBufferOffset;
		for (int i = 0; i < frames; i++) {
			for (int j = 0; j < fCodecContext->channels; j++) {
				memcpy((void*)out, fDecodedDataBuffer->data[j]
					+ offset, fInputFrameSize);
				out += fInputFrameSize;
			}
			offset += fInputFrameSize;
		}
		outFrames = frames;
		inFrames = frames;
#endif
	} else {
		// Packed (interleaved) audio: a single contiguous copy suffices.
		memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
				+ fDecodedDataBufferOffset, frames * fOutputFrameSize);
		outFrames = frames;
		inFrames = frames;
	}

	// Bytes consumed from fDecodedDataBuffer vs. bytes produced in
	// fDecodedData (may differ when input and output frame sizes differ).
	size_t remainingSize = inFrames * fInputFrameSize;
	size_t decodedSize = outFrames * fOutputFrameSize;
	fDecodedDataBufferSize -= inFrames;

	// fRawDecodedAudio->data[0] still equals fDecodedData only before the
	// very first move; use that to populate the first-frame properties
	// exactly once per output buffer.
	bool firstAudioFramesCopiedToRawDecodedAudio
		= fRawDecodedAudio->data[0] != fDecodedData;
	if (!firstAudioFramesCopiedToRawDecodedAudio) {
		fRawDecodedAudio->format = fDecodedDataBuffer->format;
		fRawDecodedAudio->pkt_dts = fDecodedDataBuffer->pkt_dts;

		avformat_codec_context* codecContext
			= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
		codecContext->channels = fCodecContext->channels;
		codecContext->sample_rate = fCodecContext->sample_rate;
	}

	// Advance the write position and accounting of fRawDecodedAudio.
	fRawDecodedAudio->data[0] += decodedSize;
	fRawDecodedAudio->linesize[0] += decodedSize;
	fRawDecodedAudio->nb_samples += outFrames;

	fDecodedDataBufferOffset += remainingSize;

	// Update start times accordingly
	bigtime_t framesTimeInterval = static_cast<bigtime_t>(
		(1000000LL * frames) / fOutputFrameRate);
	fDecodedDataBuffer->pkt_dts += framesTimeInterval;
	// Start time of buffer is updated in case that it contains
	// more audio frames to move.
	fTempPacket.dts += framesTimeInterval;
	// Start time of fTempPacket is updated in case the fTempPacket
	// contains more audio frames to decode.
}
1020 
1021 
1022 /*!	\brief Decodes next chunk of audio frames.
1023 
1024 	This method handles all the details of loading the input buffer
1025 	(fChunkBuffer) at the right time and of calling FFMPEG often engouh until
1026 	some audio frames have been decoded.
1027 
1028 	FFMPEG decides how much audio frames belong to a chunk. Because of that
1029 	it is very likely that _DecodeNextAudioFrameChunk has to be called several
1030 	times to decode enough audio frames to please the caller of
1031 	BMediaDecoder::Decode().
1032 
1033 	This function assumes to be called only when the following assumptions
1034 	hold true:
1035 		1. fDecodedDataBufferSize equals zero.
1036 
1037 	After this function returns successfully the caller can safely make the
1038 	following assumptions:
1039 		1. fDecodedDataBufferSize is greater than zero.
1040 		2. fDecodedDataBufferOffset is set to zero.
1041 		3. fDecodedDataBuffer contains audio frames.
1042 
1043 
1044 	\returns B_OK on successfully decoding one audio frame chunk.
1045 	\returns B_LAST_BUFFER_ERROR No more audio frame chunks available. From
1046 		this point on further calls will return this same error.
1047 	\returns B_ERROR Decoding failed
1048 */
1049 status_t
1050 AVCodecDecoder::_DecodeNextAudioFrameChunk()
1051 {
1052 	assert(fDecodedDataBufferSize == 0);
1053 
1054 	while (fDecodedDataBufferSize == 0) {
1055 		status_t loadingChunkStatus
1056 			= _LoadNextChunkIfNeededAndAssignStartTime();
1057 		if (loadingChunkStatus != B_OK)
1058 			return loadingChunkStatus;
1059 
1060 		status_t decodingStatus
1061 			= _DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer();
1062 		if (decodingStatus != B_OK) {
1063 			// Assume the audio decoded until now is broken so replace it with
1064 			// some silence.
1065 			memset(fDecodedData, 0, fRawDecodedAudio->linesize[0]);
1066 
1067 			if (!fAudioDecodeError) {
1068 				// Report failure if not done already
1069 				int32 chunkBufferOffset = fTempPacket.data - fChunkBuffer;
1070 				printf("########### audio decode error, "
1071 					"fTempPacket.size %d, fChunkBuffer data offset %" B_PRId32
1072 					"\n", fTempPacket.size, chunkBufferOffset);
1073 				fAudioDecodeError = true;
1074 			}
1075 
1076 			// Assume that next audio chunk can be decoded so keep decoding.
1077 			continue;
1078 		}
1079 
1080 		fAudioDecodeError = false;
1081 	}
1082 
1083 	return B_OK;
1084 }
1085 
1086 
/*!	\brief Tries to decode at least one audio frame and store it in the
		fDecodedDataBuffer.

	This function assumes to be called only when the following assumptions
	hold true:
		1. fDecodedDataBufferSize equals zero.
		2. fTempPacket.size is greater than zero.

	After this function returns successfully the caller can safely make the
	following assumptions:
		1. fDecodedDataBufferSize is greater than zero in the common case.
		   Also see "Note" below.
		2. fTempPacket was updated to exclude the data chunk that was consumed
		   by avcodec_send_packet().
		3. fDecodedDataBufferOffset is set to zero.

	When this function failed to decode at least one audio frame due to a
	decoding error the caller can safely make the following assumptions:
		1. fDecodedDataBufferSize equals zero.
		2. fTempPacket.size equals zero.

	Note: It is possible that there wasn't any audio frame decoded into
	fDecodedDataBuffer after calling this function. This is normal and can
	happen when there was either a decoding error or there is some decoding
	delay in FFMPEGs audio decoder. Another call to this method is totally
	safe and is even expected as long as the calling assumptions hold true.

	\returns B_OK Decoding successful. fDecodedDataBuffer contains decoded
		audio frames only when fDecodedDataBufferSize is greater than zero.
		fDecodedDataBuffer is empty, when avcodec_receive_frame() didn't return
		audio frames due to delayed decoding or incomplete audio frames.
	\returns B_ERROR Decoding failed thus fDecodedDataBuffer contains no audio
		frames.
*/
status_t
AVCodecDecoder::_DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer()
{
	assert(fDecodedDataBufferSize == 0);

	// Drop any previous frame data and restart reading at the beginning of
	// the (soon to be refilled) decoded data buffer.
	av_frame_unref(fDecodedDataBuffer);
	fDecodedDataBufferOffset = 0;

	int error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
	if (error == AVERROR_EOF)
		return B_LAST_BUFFER_ERROR;

	if (error == AVERROR(EAGAIN)) {
		// We need to feed more data into the decoder
		// NOTE(review): the return value of avcodec_send_packet() is ignored
		// here — a rejected packet would go unnoticed; confirm this is
		// acceptable for all codecs used.
		avcodec_send_packet(fCodecContext, &fTempPacket);

		// All the data is always consumed by avcodec_send_packet
		fTempPacket.size = 0;

		// Try again to see if we can get some decoded audio out now
		error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
	}

	// nb_samples is the authoritative frame count; clamp a (theoretical)
	// negative value to zero so downstream size math stays sane.
	fDecodedDataBufferSize = fDecodedDataBuffer->nb_samples;
	if (fDecodedDataBufferSize < 0)
		fDecodedDataBufferSize = 0;

	// NOTE(review): a second AVERROR(EAGAIN) (decoder still buffering) ends
	// up as B_ERROR here, while the doc above suggests B_OK for delayed
	// decoding. The caller treats both the same way (retries), but the
	// contract should be confirmed.
	if (error == 0)
		return B_OK;
	else
		return B_ERROR;
}
1153 
1154 
1155 /*! \brief Updates relevant fields of the class member fHeader with the
1156 		properties of the most recently decoded audio frame.
1157 
1158 	The following fields of fHeader are updated:
1159 		- fHeader.type
1160 		- fHeader.file_pos
1161 		- fHeader.orig_size
1162 		- fHeader.start_time
1163 		- fHeader.size_used
1164 		- fHeader.u.raw_audio.frame_rate
1165 		- fHeader.u.raw_audio.channel_count
1166 
1167 	It is assumed that this function is called only	when the following asserts
1168 	hold true:
1169 		1. We actually got a new audio frame decoded by the audio decoder.
1170 		2. fHeader wasn't updated for the new audio frame yet. You MUST call
1171 		   this method only once per decoded audio frame.
1172 		3. fRawDecodedAudio's fields relate to the first audio frame contained
1173 		   in fDecodedData. Especially the following fields are of importance:
1174 		       - fRawDecodedAudio->pkt_dts: Start time of first audio frame
1175 		       - fRawDecodedAudio->opaque: Contains the following fields for
1176 		         the first audio frame:
1177 			         - channels: Channel count of first audio frame
1178 			         - sample_rate: Frame rate of first audio frame
1179 */
1180 void
1181 AVCodecDecoder::_UpdateMediaHeaderForAudioFrame()
1182 {
1183 	fHeader.type = B_MEDIA_RAW_AUDIO;
1184 	fHeader.file_pos = 0;
1185 	fHeader.orig_size = 0;
1186 	fHeader.start_time = fRawDecodedAudio->pkt_dts;
1187 	fHeader.size_used = fRawDecodedAudio->linesize[0];
1188 
1189 	avformat_codec_context* codecContext
1190 		= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
1191 	fHeader.u.raw_audio.channel_count = codecContext->channels;
1192 	fHeader.u.raw_audio.frame_rate = codecContext->sample_rate;
1193 }
1194 
1195 
1196 /*! \brief Decodes next video frame.
1197 
1198     We decode exactly one video frame into fDecodedData. To achieve this goal,
1199     we might need to request several chunks of encoded data resulting in a
1200     variable execution time of this function.
1201 
1202     The length of the decoded video frame is stored in
1203     fDecodedDataSizeInBytes. If this variable is greater than zero, you can
1204     assert that there is a valid video frame available in fDecodedData.
1205 
1206     The decoded video frame in fDecodedData has color space conversion and
1207     deinterlacing already applied.
1208 
1209     To every decoded video frame there is a media_header populated in
1210     fHeader, containing the corresponding video frame properties.
1211 
1212 	Normally every decoded video frame has a start_time field populated in the
1213 	associated fHeader, that determines the presentation time of the frame.
1214 	This relationship will only hold true, when each data chunk that is
1215 	provided via GetNextChunk() contains data for exactly one encoded video
1216 	frame (one complete frame) - not more and not less.
1217 
1218 	We can decode data chunks that contain partial video frame data, too. In
1219 	that case, you cannot trust the value of the start_time field in fHeader.
1220 	We simply have no logic in place to establish a meaningful relationship
1221 	between an incomplete frame and the start time it should be presented.
1222 	Though this	might change in the future.
1223 
1224 	We can decode data chunks that contain more than one video frame, too. In
1225 	that case, you cannot trust the value of the start_time field in fHeader.
1226 	We simply have no logic in place to track the start_time across multiple
1227 	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
1228 	and the start time it should be presented isn't established at the moment.
1229 	Though this	might change in the future.
1230 
1231 	More over the fOutputFrameRate variable is updated for every decoded video
1232 	frame.
1233 
1234 	On first call the member variables fSwsContext / fFormatConversionFunc	are
1235 	initialized.
1236 
1237 	\returns B_OK when we successfully decoded one video frame
1238 	\returns B_LAST_BUFFER_ERROR when there are no more video frames available.
1239 	\returns B_NO_MEMORY when we have no memory left for correct operation.
1240 	\returns Other Errors
1241 */
1242 status_t
1243 AVCodecDecoder::_DecodeNextVideoFrame()
1244 {
1245 	int error;
1246 	int send_error;
1247 
1248 #if DO_PROFILING
1249 	bigtime_t startTime = system_time();
1250 #endif
1251 
1252 	error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1253 
1254 	if (error == AVERROR_EOF)
1255 		return B_LAST_BUFFER_ERROR;
1256 
1257 	if (error == AVERROR(EAGAIN)) {
1258 		do {
1259 			status_t loadingChunkStatus
1260 				= _LoadNextChunkIfNeededAndAssignStartTime();
1261 			if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
1262 				return _FlushOneVideoFrameFromDecoderBuffer();
1263 			if (loadingChunkStatus != B_OK) {
1264 				TRACE("[v] AVCodecDecoder::_DecodeNextVideoFrame(): error from "
1265 					"GetNextChunk(): %s\n", strerror(loadingChunkStatus));
1266 				return loadingChunkStatus;
1267 			}
1268 
1269 			char timestamp[AV_TS_MAX_STRING_SIZE];
1270 			av_ts_make_time_string(timestamp,
1271 				fTempPacket.dts, &fCodecContext->time_base);
1272 			TRACE("[v] Feed %d more bytes (dts %s)\n", fTempPacket.size,
1273 				timestamp);
1274 
1275 			send_error = avcodec_send_packet(fCodecContext, &fTempPacket);
1276 			if (send_error < 0 && send_error != AVERROR(EAGAIN)) {
1277 				TRACE("[v] AVCodecDecoder: ignoring error in decoding frame "
1278 				"%lld: %d\n", fFrame, error);
1279 			}
1280 
1281 			// Packet is consumed, clear it
1282 			fTempPacket.data = NULL;
1283 			fTempPacket.size = 0;
1284 
1285 			error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1286 			if (error != 0 && error != AVERROR(EAGAIN)) {
1287 				TRACE("[v] frame %lld - decoding error, error code: %d, "
1288 					"chunk size: %ld\n", fFrame, error, fChunkBufferSize);
1289 			}
1290 
1291 		} while (error != 0);
1292 	}
1293 
1294 #if DO_PROFILING
1295 	bigtime_t formatConversionStart = system_time();
1296 #endif
1297 
1298 	status_t handleStatus = _HandleNewVideoFrameAndUpdateSystemState();
1299 	if (handleStatus != B_OK)
1300 		return handleStatus;
1301 
1302 #if DO_PROFILING
1303 	bigtime_t doneTime = system_time();
1304 	decodingTime += formatConversionStart - startTime;
1305 	conversionTime += doneTime - formatConversionStart;
1306 	profileCounter++;
1307 	if (!(fFrame % 5)) {
1308 		printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required %lld\n",
1309 			decodingTime / profileCounter, conversionTime / profileCounter,
1310 			fFrame, bigtime_t(1000000LL / fOutputFrameRate));
1311 		decodingTime = 0;
1312 		conversionTime = 0;
1313 		profileCounter = 0;
1314 	}
1315 #endif
1316 	return error;
1317 }
1318 
1319 
1320 /*!	\brief Applies all essential video input properties to fCodecContext that were
1321 		passed to AVCodecDecoder when Setup() was called.
1322 
1323 	Note: This function must be called before the AVCodec is opened via
1324 	avcodec_open2(). Otherwise the behaviour of FFMPEG's video decoding
1325 	function avcodec_decode_video2() is undefined.
1326 
1327 	Essential properties applied from fInputFormat.u.encoded_video.output:
1328 		- display.line_width copied to fCodecContext->width
1329 		- display.line_count copied to fCodecContext->height
1330 		- pixel_width_aspect and pixel_height_aspect converted to
1331 		  fCodecContext->sample_aspect_ratio
1332 		- field_rate converted to fCodecContext->time_base and
1333 		  fCodecContext->ticks_per_frame
1334 
1335 	Other essential properties being applied:
1336 		- fExtraData to fCodecContext->extradata
1337 		- fExtraDataSize to fCodecContext->extradata_size
1338 */
1339 void
1340 AVCodecDecoder::_ApplyEssentialVideoContainerPropertiesToContext()
1341 {
1342 	media_raw_video_format containerProperties
1343 		= fInputFormat.u.encoded_video.output;
1344 
1345 	fCodecContext->width = containerProperties.display.line_width;
1346 	fCodecContext->height = containerProperties.display.line_count;
1347 
1348 	if (containerProperties.pixel_width_aspect > 0
1349 		&& containerProperties.pixel_height_aspect > 0) {
1350 		ConvertVideoAspectWidthAndHeightToAVCodecContext(
1351 			containerProperties.pixel_width_aspect,
1352 			containerProperties.pixel_height_aspect, *fCodecContext);
1353 	}
1354 
1355 	if (containerProperties.field_rate > 0.0) {
1356 		ConvertVideoFrameRateToAVCodecContext(containerProperties.field_rate,
1357 			*fCodecContext);
1358 	}
1359 
1360 	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
1361 	fCodecContext->extradata_size = fExtraDataSize;
1362 }
1363 
1364 
1365 /*! \brief Loads the next  chunk into fChunkBuffer and assigns it (including
1366 		the start time) to fTempPacket but only if fTempPacket is empty.
1367 
1368 	\returns B_OK
1369 		1. meaning: Next chunk is loaded.
1370 		2. meaning: No need to load and assign anything. Proceed as usual.
1371 	\returns B_LAST_BUFFER_ERROR No more chunks available. fChunkBuffer	and
1372 		fTempPacket are left untouched.
1373 	\returns Other errors Caller should bail out because fChunkBuffer and
1374 		fTempPacket are in unknown states. Normal operation cannot be
1375 		guaranteed.
1376 */
1377 status_t
1378 AVCodecDecoder::_LoadNextChunkIfNeededAndAssignStartTime()
1379 {
1380 	if (fTempPacket.size > 0)
1381 		return B_OK;
1382 
1383 	const void* chunkBuffer = NULL;
1384 	size_t chunkBufferSize = 0;
1385 		// In the case that GetNextChunk() returns an error fChunkBufferSize
1386 		// should be left untouched.
1387 	media_header chunkMediaHeader;
1388 
1389 	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer, &chunkBufferSize,
1390 		&chunkMediaHeader);
1391 	if (getNextChunkStatus != B_OK)
1392 		return getNextChunkStatus;
1393 
1394 	status_t chunkBufferPaddingStatus
1395 		= _CopyChunkToChunkBufferAndAddPadding(chunkBuffer, chunkBufferSize);
1396 	if (chunkBufferPaddingStatus != B_OK)
1397 		return chunkBufferPaddingStatus;
1398 
1399 	fTempPacket.data = fChunkBuffer;
1400 	fTempPacket.size = fChunkBufferSize;
1401 	fTempPacket.dts = chunkMediaHeader.start_time;
1402 		// Let FFMPEG handle the correct relationship between start_time and
1403 		// decoded a/v frame. By doing so we are simply copying the way how it
1404 		// is implemented in ffplay.c for video frames (for audio frames it
1405 		// works, too, but isn't used by ffplay.c).
1406 		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502
1407 		//
1408 		// FIXME: Research how to establish a meaningful relationship between
1409 		// start_time and decoded a/v frame when the received chunk buffer
1410 		// contains partial a/v frames. Maybe some data formats do contain time
1411 		// stamps (ake pts / dts fields) that can be evaluated by FFMPEG. But
1412 		// as long as I don't have such video data to test it, it makes no
1413 		// sense trying to implement it.
1414 		//
1415 		// FIXME: Implement tracking start_time of video frames originating in
1416 		// data chunks that encode more than one video frame at a time. In that
1417 		// case on would increment the start_time for each consecutive frame of
1418 		// such a data chunk (like it is done for audio frame decoding). But as
1419 		// long as I don't have such video data to test it, it makes no sense
1420 		// to implement it.
1421 
1422 #ifdef LOG_STREAM_TO_FILE
1423 	BFile* logFile = fIsAudio ? &sAudioStreamLogFile : &sVideoStreamLogFile;
1424 	if (sDumpedPackets < 100) {
1425 		logFile->Write(chunkBuffer, fChunkBufferSize);
1426 		printf("wrote %ld bytes\n", fChunkBufferSize);
1427 		sDumpedPackets++;
1428 	} else if (sDumpedPackets == 100)
1429 		logFile->Unset();
1430 #endif
1431 
1432 	return B_OK;
1433 }
1434 
1435 
1436 /*! \brief Copies a chunk into fChunkBuffer and adds a "safety net" of
1437 		additional memory as required by FFMPEG for input buffers to video
1438 		decoders.
1439 
1440 	This is needed so that some decoders can read safely a predefined number of
1441 	bytes at a time for performance optimization purposes.
1442 
1443 	The additional memory has a size of AV_INPUT_BUFFER_PADDING_SIZE as defined
1444 	in avcodec.h.
1445 
1446 	Ownership of fChunkBuffer memory is with the class so it needs to be freed
1447 	at the right times (on destruction, on seeking).
1448 
1449 	Also update fChunkBufferSize to reflect the size of the contained data
1450 	(leaving out the padding).
1451 
1452 	\param chunk The chunk to copy.
1453 	\param chunkSize Size of the chunk in bytes
1454 
1455 	\returns B_OK Padding was successful. You are responsible for releasing the
1456 		allocated memory. fChunkBufferSize is set to chunkSize.
1457 	\returns B_NO_MEMORY Padding failed.
1458 		fChunkBuffer is set to NULL making it safe to call free() on it.
1459 		fChunkBufferSize is set to 0 to reflect the size of fChunkBuffer.
1460 */
1461 status_t
1462 AVCodecDecoder::_CopyChunkToChunkBufferAndAddPadding(const void* chunk,
1463 	size_t chunkSize)
1464 {
1465 	uint8_t* tmpBuffer = static_cast<uint8_t*>(realloc(fChunkBuffer,
1466 		chunkSize + AV_INPUT_BUFFER_PADDING_SIZE));
1467 	if (tmpBuffer == NULL) {
1468 		free(fChunkBuffer);
1469 		fChunkBuffer = NULL;
1470 		fChunkBufferSize = 0;
1471 		return B_NO_MEMORY;
1472 	} else {
1473 		fChunkBuffer = tmpBuffer;
1474 	}
1475 
1476 	memcpy(fChunkBuffer, chunk, chunkSize);
1477 	memset(fChunkBuffer + chunkSize, 0, AV_INPUT_BUFFER_PADDING_SIZE);
1478 		// Establish safety net, by zero'ing the padding area.
1479 
1480 	fChunkBufferSize = chunkSize;
1481 
1482 	return B_OK;
1483 }
1484 
1485 
1486 /*! \brief Executes all steps needed for a freshly decoded video frame.
1487 
1488 	\see _UpdateMediaHeaderForVideoFrame() and
1489 	\see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to
1490 	call this method.
1491 
1492 	\returns B_OK when video frame was handled successfully
1493 	\returnb B_NO_MEMORY when no memory is left for correct operation.
1494 */
1495 status_t
1496 AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState()
1497 {
1498 	_UpdateMediaHeaderForVideoFrame();
1499 	status_t postProcessStatus = _DeinterlaceAndColorConvertVideoFrame();
1500 	if (postProcessStatus != B_OK)
1501 		return postProcessStatus;
1502 
1503 	ConvertAVCodecContextToVideoFrameRate(*fCodecContext, fOutputFrameRate);
1504 
1505 #ifdef DEBUG
1506 	dump_ffframe_video(fRawDecodedPicture, "ffpict");
1507 #endif
1508 
1509 	fFrame++;
1510 
1511 	return B_OK;
1512 }
1513 
1514 
1515 /*! \brief Flushes one video frame - if any - still buffered by the decoder.
1516 
1517 	Some FFMPEG decoder are buffering video frames. To retrieve those buffered
1518 	frames the decoder needs to be told so.
1519 
1520 	The intended use of this method is to call it, once there are no more data
1521 	chunks for decoding left. Reframed in other words: Once GetNextChunk()
1522 	returns with status B_LAST_BUFFER_ERROR it is time to start flushing.
1523 
1524 	\returns B_OK Retrieved one video frame, handled it accordingly and updated
1525 		the system state accordingly.
1526 		There maybe more video frames left. So it is valid for the client of
1527 		AVCodecDecoder to call it one more time.
1528 
1529 	\returns B_LAST_BUFFER_ERROR No video frame left.
1530 		The client of the AVCodecDecoder should stop calling it now.
1531 
1532 	\returns B_NO_MEMORY No memory left for correct operation.
1533 */
1534 status_t
1535 AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer()
1536 {
1537 	// Tell the decoder there is nothing to send anymore
1538 	avcodec_send_packet(fCodecContext, NULL);
1539 
1540 	// Get any remaining frame
1541 	int error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
1542 
1543 	if (error != 0 && error != AVERROR(EAGAIN)) {
1544 		// video buffer is flushed successfully
1545 		// (or there is an error, not much we can do about it)
1546 		return B_LAST_BUFFER_ERROR;
1547 	}
1548 
1549 	return _HandleNewVideoFrameAndUpdateSystemState();
1550 }
1551 
1552 
1553 /*! \brief Updates relevant fields of the class member fHeader with the
1554 		properties of the most recently decoded video frame.
1555 
1556 	It is assumed that this function is called only	when the following asserts
1557 	hold true:
1558 		1. We actually got a new picture decoded by the video decoder.
1559 		2. fHeader wasn't updated for the new picture yet. You MUST call this
1560 		   method only once per decoded video frame.
1561 		3. This function MUST be called after
1562 		   _DeinterlaceAndColorConvertVideoFrame() as it relys on an updated
1563 		    fDecodedDataSizeInBytes.
1564 		4. There will be at maximumn only one decoded video frame in our cache
1565 		   at any single point in time. Otherwise you couldn't tell to which
1566 		   cached decoded video frame the properties in fHeader relate to.
1567 		5. AVCodecContext is still valid for this video frame (This is the case
1568 		   when this function is called after avcodec_decode_video2() and
1569 		   before the next call to avcodec_decode_video2().
1570 */
1571 void
1572 AVCodecDecoder::_UpdateMediaHeaderForVideoFrame()
1573 {
1574 	fHeader.type = B_MEDIA_RAW_VIDEO;
1575 	fHeader.file_pos = 0;
1576 	fHeader.orig_size = 0;
1577 	fHeader.start_time = fRawDecodedPicture->pkt_dts;
1578 		// The pkt_dts is already in microseconds, even if ffmpeg docs says
1579 		// 'in codec time_base units'
1580 	fHeader.size_used = av_image_get_buffer_size(
1581 		colorspace_to_pixfmt(fOutputColorSpace), fRawDecodedPicture->width,
1582 		fRawDecodedPicture->height, 1);
1583 	fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width;
1584 	fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height;
1585 	fHeader.u.raw_video.bytes_per_row
1586 		= CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace,
1587 			fRawDecodedPicture->width);
1588 	fHeader.u.raw_video.field_gamma = 1.0;
1589 	fHeader.u.raw_video.field_sequence = fFrame;
1590 	fHeader.u.raw_video.field_number = 0;
1591 	fHeader.u.raw_video.pulldown_number = 0;
1592 	fHeader.u.raw_video.first_active_line = 1;
1593 	fHeader.u.raw_video.line_count = fRawDecodedPicture->height;
1594 
1595 	ConvertAVCodecContextToVideoAspectWidthAndHeight(*fCodecContext,
1596 		fHeader.u.raw_video.pixel_width_aspect,
1597 		fHeader.u.raw_video.pixel_height_aspect);
1598 
1599 	char timestamp[AV_TS_MAX_STRING_SIZE];
1600 	av_ts_make_time_string(timestamp,
1601 		fRawDecodedPicture->best_effort_timestamp, &fCodecContext->time_base);
1602 
1603 	TRACE("[v] start_time=%s field_sequence=%lu\n",
1604 		timestamp, fHeader.u.raw_video.field_sequence);
1605 }
1606 
1607 
1608 /*! \brief This function applies deinterlacing (only if needed) and color
1609 	conversion to the video frame in fRawDecodedPicture.
1610 
1611 	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
1612 	converted yet (otherwise this function behaves in unknown manners).
1613 
1614 	This function MUST be called after _UpdateMediaHeaderForVideoFrame() as it
1615 	relys on the fHeader.size_used and fHeader.u.raw_video.bytes_per_row fields
1616 	for correct operation
1617 
1618 	You should only call this function when you	got a new picture decoded by
1619 	the video decoder.
1620 
1621 	When this function finishes the postprocessed video frame will be available
1622 	in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes
1623 	will be set accordingly).
1624 
1625 	\returns B_OK video frame successfully deinterlaced and color converted.
1626 	\returns B_NO_MEMORY Not enough memory available for correct operation.
1627 */
1628 status_t
1629 AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
1630 {
1631 	int displayWidth = fRawDecodedPicture->width;
1632 	int displayHeight = fRawDecodedPicture->height;
1633 	AVFrame deinterlacedPicture;
1634 	bool useDeinterlacedPicture = false;
1635 
1636 	if (fRawDecodedPicture->interlaced_frame) {
1637 		AVFrame rawPicture;
1638 		rawPicture.data[0] = fRawDecodedPicture->data[0];
1639 		rawPicture.data[1] = fRawDecodedPicture->data[1];
1640 		rawPicture.data[2] = fRawDecodedPicture->data[2];
1641 		rawPicture.data[3] = fRawDecodedPicture->data[3];
1642 		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
1643 		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
1644 		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
1645 		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];
1646 
1647 		if (av_image_alloc(deinterlacedPicture.data,
1648 				deinterlacedPicture.linesize, displayWidth, displayHeight,
1649 				fCodecContext->pix_fmt, 1) < 0)
1650 			return B_NO_MEMORY;
1651 
1652 		// deinterlace implemented using avfilter
1653 		_ProcessFilterGraph(&deinterlacedPicture, &rawPicture,
1654 			fCodecContext->pix_fmt, displayWidth, displayHeight);
1655 		useDeinterlacedPicture = true;
1656 	}
1657 
1658 	// Some decoders do not set pix_fmt until they have decoded 1 frame
1659 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1660 	if (fSwsContext == NULL) {
1661 		fSwsContext = sws_getContext(displayWidth, displayHeight,
1662 			fCodecContext->pix_fmt, displayWidth, displayHeight,
1663 			colorspace_to_pixfmt(fOutputColorSpace),
1664 			SWS_FAST_BILINEAR, NULL, NULL, NULL);
1665 	}
1666 #else
1667 	if (fFormatConversionFunc == NULL) {
1668 		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
1669 			fCodecContext->pix_fmt, displayWidth, displayHeight);
1670 	}
1671 #endif
1672 
1673 	fDecodedDataSizeInBytes = fHeader.size_used;
1674 
1675 	if (fDecodedData == NULL) {
1676 		const size_t kOptimalAlignmentForColorConversion = 32;
1677 		posix_memalign(reinterpret_cast<void**>(&fDecodedData),
1678 			kOptimalAlignmentForColorConversion, fDecodedDataSizeInBytes);
1679 	}
1680 	if (fDecodedData == NULL)
1681 		return B_NO_MEMORY;
1682 
1683 	fPostProcessedDecodedPicture->data[0] = fDecodedData;
1684 	fPostProcessedDecodedPicture->linesize[0]
1685 		= fHeader.u.raw_video.bytes_per_row;
1686 
1687 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1688 	if (fSwsContext != NULL) {
1689 #else
1690 	if (fFormatConversionFunc != NULL) {
1691 #endif
1692 		if (useDeinterlacedPicture) {
1693 			AVFrame deinterlacedFrame;
1694 			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
1695 			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
1696 			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
1697 			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
1698 			deinterlacedFrame.linesize[0]
1699 				= deinterlacedPicture.linesize[0];
1700 			deinterlacedFrame.linesize[1]
1701 				= deinterlacedPicture.linesize[1];
1702 			deinterlacedFrame.linesize[2]
1703 				= deinterlacedPicture.linesize[2];
1704 			deinterlacedFrame.linesize[3]
1705 				= deinterlacedPicture.linesize[3];
1706 
1707 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1708 			sws_scale(fSwsContext, deinterlacedFrame.data,
1709 				deinterlacedFrame.linesize, 0, displayHeight,
1710 				fPostProcessedDecodedPicture->data,
1711 				fPostProcessedDecodedPicture->linesize);
1712 #else
1713 			(*fFormatConversionFunc)(&deinterlacedFrame,
1714 				fPostProcessedDecodedPicture, displayWidth, displayHeight);
1715 #endif
1716 		} else {
1717 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
1718 			sws_scale(fSwsContext, fRawDecodedPicture->data,
1719 				fRawDecodedPicture->linesize, 0, displayHeight,
1720 				fPostProcessedDecodedPicture->data,
1721 				fPostProcessedDecodedPicture->linesize);
1722 #else
1723 			(*fFormatConversionFunc)(fRawDecodedPicture,
1724 				fPostProcessedDecodedPicture, displayWidth, displayHeight);
1725 #endif
1726 		}
1727 	}
1728 
1729 	if (fRawDecodedPicture->interlaced_frame)
1730 		av_freep(&deinterlacedPicture.data[0]);
1731 
1732 	return B_OK;
1733 }
1734 
1735 
1736 /*! \brief Init the deinterlace filter graph.
1737 
1738 	\returns B_OK the filter graph could be built.
1739 	\returns B_BAD_VALUE something was wrong with building the graph.
1740 */
1741 status_t
1742 AVCodecDecoder::_InitFilterGraph(enum AVPixelFormat pixfmt, int32 width,
1743 	int32 height)
1744 {
1745 	if (fFilterGraph != NULL) {
1746 		av_frame_free(&fFilterFrame);
1747 		avfilter_graph_free(&fFilterGraph);
1748 	}
1749 
1750 	fFilterGraph = avfilter_graph_alloc();
1751 
1752 	BString arguments;
1753 	arguments.SetToFormat("buffer=video_size=%" B_PRId32 "x%" B_PRId32
1754 		":pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];[in]yadif[out];"
1755 		"[out]buffersink", width, height,
1756 		pixfmt);
1757 	AVFilterInOut* inputs = NULL;
1758 	AVFilterInOut* outputs = NULL;
1759 	TRACE("[v] _InitFilterGraph(): %s\n", arguments.String());
1760 	int ret = avfilter_graph_parse2(fFilterGraph, arguments.String(), &inputs,
1761 		&outputs);
1762 	if (ret < 0) {
1763 		fprintf(stderr, "avfilter_graph_parse2() failed\n");
1764 		return B_BAD_VALUE;
1765 	}
1766 
1767 	ret = avfilter_graph_config(fFilterGraph, NULL);
1768 	if (ret < 0) {
1769 		fprintf(stderr, "avfilter_graph_config() failed\n");
1770 		return B_BAD_VALUE;
1771 	}
1772 
1773 	fBufferSourceContext = avfilter_graph_get_filter(fFilterGraph,
1774 		"Parsed_buffer_0");
1775 	fBufferSinkContext = avfilter_graph_get_filter(fFilterGraph,
1776 		"Parsed_buffersink_2");
1777 	if (fBufferSourceContext == NULL || fBufferSinkContext == NULL) {
1778 		fprintf(stderr, "avfilter_graph_get_filter() failed\n");
1779 		return B_BAD_VALUE;
1780 	}
1781 	fFilterFrame = av_frame_alloc();
1782 	fLastWidth = width;
1783 	fLastHeight = height;
1784 	fLastPixfmt = pixfmt;
1785 
1786 	return B_OK;
1787 }
1788 
1789 
1790 /*! \brief Process an AVPicture with the deinterlace filter graph.
1791 
1792     We decode exactly one video frame into dst.
1793 	Equivalent function for avpicture_deinterlace() from version 2.x.
1794 
1795 	\returns B_OK video frame successfully deinterlaced.
1796 	\returns B_BAD_DATA No frame could be output.
1797 	\returns B_NO_MEMORY Not enough memory available for correct operation.
1798 */
1799 status_t
1800 AVCodecDecoder::_ProcessFilterGraph(AVFrame *dst, const AVFrame *src,
1801 	enum AVPixelFormat pixfmt, int32 width, int32 height)
1802 {
1803 	if (fFilterGraph == NULL || width != fLastWidth
1804 		|| height != fLastHeight || pixfmt != fLastPixfmt) {
1805 
1806 		status_t err = _InitFilterGraph(pixfmt, width, height);
1807 		if (err != B_OK)
1808 			return err;
1809 	}
1810 
1811 	memcpy(fFilterFrame->data, src->data, sizeof(src->data));
1812 	memcpy(fFilterFrame->linesize, src->linesize, sizeof(src->linesize));
1813 	fFilterFrame->width = width;
1814 	fFilterFrame->height = height;
1815 	fFilterFrame->format = pixfmt;
1816 
1817 	int ret = av_buffersrc_add_frame(fBufferSourceContext, fFilterFrame);
1818 	if (ret < 0)
1819 		return B_NO_MEMORY;
1820 
1821 	ret = av_buffersink_get_frame(fBufferSinkContext, fFilterFrame);
1822 	if (ret < 0)
1823 		return B_BAD_DATA;
1824 
1825 	av_image_copy(dst->data, dst->linesize, (const uint8**)fFilterFrame->data,
1826 		fFilterFrame->linesize, pixfmt, width, height);
1827 	av_frame_unref(fFilterFrame);
1828 	return B_OK;
1829 }
1830