/*
 * Copyright (C) 2001 Carlos Hasan
 * Copyright (C) 2001 François Revol
 * Copyright (C) 2001 Axel Dörfler
 * Copyright (C) 2004 Marcus Overhagen
 * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
 * Copyright (C) 2014 Colin Günther <coling@gmx.de>
 *
 * All rights reserved. Distributed under the terms of the MIT License.
 */

//! libavcodec based decoder for Haiku

#include "AVCodecDecoder.h"

#include <new>

#include <assert.h>
#include <string.h>

#include <Bitmap.h>
#include <Debug.h>

#include "Utilities.h"


#undef TRACE
//#define TRACE_AV_CODEC
#ifdef TRACE_AV_CODEC
#	define TRACE(x...)	printf(x)
#	define TRACE_AUDIO(x...)	printf(x)
#	define TRACE_VIDEO(x...)	printf(x)
#else
#	define TRACE(x...)
#	define TRACE_AUDIO(x...)
#	define TRACE_VIDEO(x...)
#endif

//#define LOG_STREAM_TO_FILE
#ifdef LOG_STREAM_TO_FILE
#	include <File.h>
	static BFile sStreamLogFile("/boot/home/Desktop/AVCodecDebugStream.raw",
		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
	static int sDumpedPackets = 0;
#endif

#ifdef __x86_64
#define USE_SWS_FOR_COLOR_SPACE_CONVERSION 1
#else
#define USE_SWS_FOR_COLOR_SPACE_CONVERSION 0
// NOTE: David's color space conversion is much faster than the FFmpeg
// version. Perhaps the SWS code can be used for unsupported conversions?
// Otherwise the alternative code could simply be removed from this file.
#endif


struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;
	// extra_data[extra_size]
} _PACKED;
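
/*
	The layout above mirrors Microsoft's WAVEFORMATEX: extra_size bytes of
	codec specific data immediately follow the fixed-size part. A minimal
	sketch of how such a buffer is walked (illustrative only; this is what
	Setup() below does):

		const wave_format_ex* header = (const wave_format_ex*)infoBuffer;
		const void* extraData = header + 1;
			// first byte after the fixed-size struct
		size_t extraSize = header->extra_size;
*/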


// profiling related globals
#define DO_PROFILING 0

static bigtime_t decodingTime = 0;
static bigtime_t conversionTime = 0;
static long profileCounter = 0;


AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fContext(avcodec_alloc_context3(NULL)),
	fDecodedData(NULL),
	fDecodedDataSizeInBytes(0),
	fPostProcessedDecodedPicture(avcodec_alloc_frame()),
	fRawDecodedPicture(avcodec_alloc_frame()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fStartTime(0),
	fOutputColorSpace(B_NO_COLOR_SPACE),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),

	fChunkBuffer(NULL),
	fVideoChunkBuffer(NULL),
	fChunkBufferOffset(0),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fOutputFrame(avcodec_alloc_frame()),
	fOutputBufferOffset(0),
	fOutputBufferSize(0)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	system_info info;
	get_system_info(&info);

	fContext->err_recognition = AV_EF_CAREFUL;
	fContext->error_concealment = 3;
	fContext->thread_count = info.cpu_count;
}
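
/*
	For orientation: an AVCodecDecoder instance is driven by Haiku's media
	kit roughly in the following order (a sketch, not a verbatim caller;
	the names encodedFormat, rawFormat etc. are made up for illustration):

		AVCodecDecoder decoder;
		decoder.Setup(&encodedFormat, infoBuffer, infoSize);
		decoder.NegotiateOutputFormat(&rawFormat);
			// opens the FFmpeg codec
		while (decoder.Decode(buffer, &frameCount, &header, &info) == B_OK) {
			// consume frameCount decoded frames from buffer
		}
		decoder.SeekedTo(frame, time);
			// called after the extractor seeked in the stream
*/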


AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%Ld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	if (fCodecInitDone)
		avcodec_close(fContext);

	free(fVideoChunkBuffer);
		// TODO: Replace with fChunkBuffer, once audio path is
		// responsible for freeing the chunk buffer, too.
	free(fDecodedData);

	av_free(fPostProcessedDecodedPicture);
	av_free(fRawDecodedPicture);
	av_free(fContext);
	av_free(fOutputFrame);

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
#endif

	delete[] fExtraData;
}


void
AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
{
	snprintf(mci->short_name, 32, "%s", fCodec->name);
	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
	mci->id = 0;
	mci->sub_id = fCodec->id;
}


status_t
AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
	size_t infoSize)
{
	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
		return B_ERROR;

	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
		ioEncodedFormat->user_data_type);
	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
		ioEncodedFormat->MetaDataSize());
#endif

	media_format_description description;
	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
		if (description.u.misc.file_format != 'ffmp')
			return B_NOT_SUPPORTED;
		fCodec = avcodec_find_decoder(static_cast<CodecID>(
			description.u.misc.codec));
		if (fCodec == NULL) {
			TRACE("  unable to find the correct FFmpeg "
				"decoder (id = %lu)\n", description.u.misc.codec);
			return B_ERROR;
		}
		TRACE("  found decoder %s\n", fCodec->name);

		const void* extraData = infoBuffer;
		fExtraDataSize = infoSize;
		if (description.family == B_WAV_FORMAT_FAMILY
				&& infoSize >= sizeof(wave_format_ex)) {
			TRACE("  trying to use wave_format_ex\n");
			// Special case extra data in B_WAV_FORMAT_FAMILY
			const wave_format_ex* waveFormatData
				= (const wave_format_ex*)infoBuffer;

			size_t waveFormatSize = infoSize;
			if (waveFormatData != NULL && waveFormatSize > 0) {
				fBlockAlign = waveFormatData->block_align;
				TRACE("  found block align: %d\n", fBlockAlign);
				fExtraDataSize = waveFormatData->extra_size;
				// skip the wave_format_ex from the extra data.
				extraData = waveFormatData + 1;
			}
		} else {
			if (fIsAudio) {
				fBlockAlign
					= ioEncodedFormat->u.encoded_audio.output
						.buffer_size;
				TRACE("  using buffer_size as block align: %d\n",
					fBlockAlign);
			}
		}
		if (extraData != NULL && fExtraDataSize > 0) {
			TRACE("AVCodecDecoder: extra data size %ld\n", fExtraDataSize);
			delete[] fExtraData;
			fExtraData = new(std::nothrow) char[fExtraDataSize];
			if (fExtraData != NULL) {
				// Copy from extraData rather than infoBuffer, since
				// extraData may have been advanced past the wave_format_ex
				// header above.
				memcpy(fExtraData, extraData, fExtraDataSize);
			} else
				fExtraDataSize = 0;
		}

		fInputFormat = *ioEncodedFormat;
		return B_OK;
	} else {
		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
	}

	printf("AVCodecDecoder::Setup failed!\n");
	return B_ERROR;
}


status_t
AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
{
	status_t ret = B_OK;
	// Reset the FFmpeg codec to flush buffers, so we keep the sync
	if (fCodecInitDone) {
		avcodec_flush_buffers(fContext);
		_ResetTempPacket();
	}

	// Flush internal buffers as well.
	free(fVideoChunkBuffer);
		// TODO: Replace with fChunkBuffer, once audio path is
		// responsible for freeing the chunk buffer, too.
	fVideoChunkBuffer = NULL;
	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;
	fDecodedDataSizeInBytes = 0;

	fFrame = frame;
	fStartTime = time;

	return ret;
}


status_t
AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
		fIsAudio?('a'):('v'));

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
#endif

	if (fIsAudio)
		return _NegotiateAudioOutputFormat(inOutFormat);
	else
		return _NegotiateVideoOutputFormat(inOutFormat);
}


status_t
AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	if (!fCodecInitDone)
		return B_NO_INIT;

//	TRACE("[%c] AVCodecDecoder::Decode() for time %Ld\n", fIsAudio?('a'):('v'),
//		fStartTime);

	mediaHeader->start_time = fStartTime;

	status_t ret;
	if (fIsAudio)
		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
	else
		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);

	return ret;
}


// #pragma mark -


void
AVCodecDecoder::_ResetTempPacket()
{
	av_init_packet(&fTempPacket);
	fTempPacket.size = 0;
	fTempPacket.data = NULL;
}


status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate
		= fInputFormat.u.encoded_audio.output.frame_rate;
	outputAudioFormat.channel_count
		= fInputFormat.u.encoded_audio.output.channel_count;
	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
	outputAudioFormat.buffer_size
		= inOutFormat->u.raw_audio.buffer_size;
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	// Check that channel count is not still a wild card!
	if (outputAudioFormat.channel_count == 0) {
		TRACE("  channel_count still a wild-card, assuming stereo.\n");
		outputAudioFormat.channel_count = 2;
	}

	if (outputAudioFormat.buffer_size == 0) {
		outputAudioFormat.buffer_size = 512
			* sampleSize * outputAudioFormat.channel_count;
	}
	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;

	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
	fContext->frame_size = (int)fInputFormat.u.encoded_audio.frame_size;
	fContext->sample_rate
		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
	fContext->channels = outputAudioFormat.channel_count;
	fContext->block_align = fBlockAlign;
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
		fContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
		fContext->channels, fContext->block_align, fContext->extradata_size);

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	// open new
	int result = avcodec_open2(fContext, fCodec, NULL);
	fCodecInitDone = (result >= 0);

	fStartTime = 0;
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fContext->bit_rate, fContext->sample_rate, fContext->channels,
		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	_ResetTempPacket();

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	if (!fCodecInitDone) {
		TRACE("avcodec_open2() failed!\n");
		return B_ERROR;
	}

	return B_OK;
}
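
/*
	Worked example for the bookkeeping above, assuming B_AUDIO_SHORT
	(2 bytes per sample), stereo and a 2048 byte buffer:
	fOutputFrameSize = 2 * 2 = 4 bytes, and
	fOutputFrameCount = 2048 / 4 = 512 frames per Decode() call.
*/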


status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	bool codecCanHandleIncompleteFrames
		= (fCodec->capabilities & CODEC_CAP_TRUNCATED) != 0;
	if (codecCanHandleIncompleteFrames) {
		// Expect and handle video frames to be split across consecutive
		// data chunks.
		fContext->flags |= CODEC_FLAG_TRUNCATED;
	}

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	if (avcodec_open2(fContext, fCodec, NULL) >= 0)
		fCodecInitDone = true;
	else {
		TRACE("avcodec_open2() failed to init codec!\n");
		return B_ERROR;
	}

	_ResetTempPacket();

	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;

	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active = fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect = fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect = fHeader.u.raw_video.pixel_height_aspect;
	inOutFormat->u.raw_video.field_rate = fOutputFrameRate;
		// Was calculated by first call to _DecodeNextVideoFrame()

	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width = fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count = fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row = fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}


status_t
AVCodecDecoder::_DecodeAudio(void* _buffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
		mediaHeader->start_time / 1000000.0);

	*outFrameCount = 0;

	uint8* buffer = reinterpret_cast<uint8*>(_buffer);
	while (*outFrameCount < fOutputFrameCount) {
		// Check conditions which would hint at broken code below.
		if (fOutputBufferSize < 0) {
			fprintf(stderr, "Decoding read past the end of the output buffer! "
				"%ld\n", fOutputBufferSize);
			fOutputBufferSize = 0;
		}
		if (fChunkBufferSize < 0) {
			fprintf(stderr, "Decoding read past the end of the chunk buffer! "
				"%ld\n", fChunkBufferSize);
			fChunkBufferSize = 0;
		}

		if (fOutputBufferSize > 0) {
			// We still have decoded audio frames from the last
			// invocation, which start at fOutputBufferOffset
			// and are of fOutputBufferSize. Copy those into the buffer,
			// but not more than it can hold.
			int32 frames = min_c(fOutputFrameCount - *outFrameCount,
				fOutputBufferSize / fOutputFrameSize);
			if (frames == 0)
				debugger("fOutputBufferSize not multiple of frame size!");
			size_t remainingSize = frames * fOutputFrameSize;
			memcpy(buffer, fOutputFrame->data[0] + fOutputBufferOffset,
				remainingSize);
			fOutputBufferOffset += remainingSize;
			fOutputBufferSize -= remainingSize;
			buffer += remainingSize;
			*outFrameCount += frames;
			fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
			continue;
		}
		if (fChunkBufferSize == 0) {
			// Time to read the next chunk buffer. We use a separate
			// media_header, since the chunk header may not belong to
			// the start of the decoded audio frames we return. For
			// example we may have used frames from a previous invocation,
			// or we may have to read several chunks until we fill up the
			// output buffer.
			media_header chunkMediaHeader;
			status_t err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize,
				&chunkMediaHeader);
			if (err == B_LAST_BUFFER_ERROR) {
				TRACE_AUDIO("  Last Chunk with chunk size %ld\n",
					fChunkBufferSize);
				fChunkBufferSize = 0;
				return err;
			}
			if (err != B_OK || fChunkBufferSize < 0) {
				printf("GetNextChunk error %ld\n", fChunkBufferSize);
				fChunkBufferSize = 0;
				break;
			}
			fChunkBufferOffset = 0;
			fStartTime = chunkMediaHeader.start_time;
		}

		fTempPacket.data = (uint8_t*)fChunkBuffer + fChunkBufferOffset;
		fTempPacket.size = fChunkBufferSize;

		avcodec_get_frame_defaults(fOutputFrame);
		int gotFrame = 0;
		int usedBytes = avcodec_decode_audio4(fContext,
			fOutputFrame, &gotFrame, &fTempPacket);
		if (usedBytes < 0 && !fAudioDecodeError) {
			// Report failure if not done already
			printf("########### audio decode error, "
				"fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
				fChunkBufferSize, fChunkBufferOffset);
			fAudioDecodeError = true;
		}
		if (usedBytes <= 0) {
			// Error or failure to produce decompressed output.
			// Skip the chunk buffer data entirely.
			usedBytes = fChunkBufferSize;
			fOutputBufferSize = 0;
			// Assume the audio decoded until now is broken.
			memset(_buffer, 0, buffer - (uint8*)_buffer);
		} else {
			// Success
			fAudioDecodeError = false;
			if (gotFrame == 1) {
				fOutputBufferSize = av_samples_get_buffer_size(NULL,
					fContext->channels, fOutputFrame->nb_samples,
					fContext->sample_fmt, 1);
				if (fOutputBufferSize < 0)
					fOutputBufferSize = 0;
			} else
				fOutputBufferSize = 0;
		}
//printf("  chunk size: %d, decoded: %d, used: %d\n",
//fTempPacket.size, fOutputBufferSize, usedBytes);

		fChunkBufferOffset += usedBytes;
		fChunkBufferSize -= usedBytes;
		fOutputBufferOffset = 0;
	}
	fFrame += *outFrameCount;
	TRACE_AUDIO("  frame count: %lld current: %lld\n", *outFrameCount, fFrame);

	return B_OK;
}
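
/*
	The loop above follows FFmpeg's consume-as-you-go contract for
	avcodec_decode_audio4(): one packet may yield several frames, and a
	single call may consume only part of the packet. The canonical pattern
	(a sketch, stripped of the bookkeeping done above):

		while (packet.size > 0) {
			int gotFrame = 0;
			int used = avcodec_decode_audio4(context, frame, &gotFrame,
				&packet);
			if (used < 0)
				break;	// decode error
			packet.data += used;
			packet.size -= used;
			if (gotFrame) {
				// frame->nb_samples decoded samples are now available
			}
		}
*/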


/*! \brief Fills the outBuffer with an already decoded video frame.

	Besides the main duty described above, this method also fills out the other
	output parameters as documented below.

	\param outBuffer Pointer to the output buffer to copy the decoded video
		frame to.
	\param outFrameCount Pointer to the output variable to assign the number of
		copied video frames (usually one video frame).
	\param mediaHeader Pointer to the output media header that contains the
		decoded video frame properties.
	\param info TODO (not used at the moment)

	\returns B_OK Decoding a video frame succeeded.
	\returns B_LAST_BUFFER_ERROR There are no more video frames available.
	\returns other error codes
*/
status_t
AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	status_t videoDecodingStatus
		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame();

	if (videoDecodingStatus != B_OK)
		return videoDecodingStatus;

	*outFrameCount = 1;
	*mediaHeader = fHeader;
	memcpy(outBuffer, fDecodedData, mediaHeader->size_used);

	fDecodedDataSizeInBytes = 0;

	return B_OK;
}


/*! \brief Decodes next video frame.

	We decode exactly one video frame into fDecodedData. To achieve this goal,
	we might need to request several chunks of encoded data, resulting in a
	variable execution time of this function.

	The length of the decoded video frame is stored in
	fDecodedDataSizeInBytes. If this variable is greater than zero, you can
	assert that there is a valid video frame available in fDecodedData.

	The decoded video frame in fDecodedData has color space conversion and
	deinterlacing already applied.

	For every decoded video frame there is a media_header populated in
	fHeader, containing the corresponding video frame properties.

	Normally every decoded video frame has a start_time field populated in the
	associated fHeader, which determines the presentation time of the frame.
	This relationship will only hold true when each data chunk that is
	provided via GetNextChunk() contains data for exactly one encoded video
	frame (one complete frame) - not more and not less.

	We can decode data chunks that contain partial video frame data, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to establish a meaningful relationship
	between an incomplete frame and the start time it should be presented.
	Though this might change in the future.

	We can decode data chunks that contain more than one video frame, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to track the start_time across multiple
	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
	and the start time it should be presented isn't established at the moment.
	Though this might change in the future.

	Moreover, the fOutputFrameRate variable is updated for every decoded video
	frame.

	On first call the member variables fSwsContext / fFormatConversionFunc are
	initialized.

	\returns B_OK when we successfully decoded one video frame
	\returns B_LAST_BUFFER_ERROR when there are no more video frames available.
	\returns B_NO_MEMORY when we have no memory left for correct operation.
	\returns Other Errors
*/
status_t
AVCodecDecoder::_DecodeNextVideoFrame()
{
	assert(fTempPacket.size >= 0);

	while (true) {
		status_t loadingChunkStatus
			= _LoadNextVideoChunkIfNeededAndAssignStartTime();

		if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
			return _FlushOneVideoFrameFromDecoderBuffer();

		if (loadingChunkStatus != B_OK) {
			TRACE("AVCodecDecoder::_DecodeNextVideoFrame(): error from "
				"GetNextChunk(): %s\n", strerror(loadingChunkStatus));
			return loadingChunkStatus;
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFMPEG 0.10.2 code example decoding_encoding.c, the
		// length returned by avcodec_decode_video2() is used to update the
		// packet buffer size (here it is fTempPacket.size). This way the
		// packet buffer is allowed to contain incomplete frames, so we are
		// required to buffer the packets between different calls to
		// _DecodeNextVideoFrame().
		int gotVideoFrame = 0;
		int decodedDataSizeInBytes = avcodec_decode_video2(fContext,
			fRawDecodedPicture, &gotVideoFrame, &fTempPacket);
		if (decodedDataSizeInBytes < 0) {
			TRACE("[v] AVCodecDecoder: ignoring error in decoding frame %lld:"
				" %d\n", fFrame, decodedDataSizeInBytes);
			// NOTE: An error from avcodec_decode_video2() is ignored by the
			// FFMPEG 0.10.2 example decoding_encoding.c. Only the packet
			// buffers are flushed accordingly
			fTempPacket.data = NULL;
			fTempPacket.size = 0;
			continue;
		}

		fTempPacket.size -= decodedDataSizeInBytes;
		fTempPacket.data += decodedDataSizeInBytes;

		bool gotNoVideoFrame = gotVideoFrame == 0;
		if (gotNoVideoFrame) {
			TRACE("frame %lld - no picture yet, decodedDataSizeInBytes: %d, "
				"chunk size: %ld\n", fFrame, decodedDataSizeInBytes,
				fChunkBufferSize);
			continue;
		}

#if DO_PROFILING
		bigtime_t formatConversionStart = system_time();
#endif

		_HandleNewVideoFrameAndUpdateSystemState();

#if DO_PROFILING
		bigtime_t doneTime = system_time();
		decodingTime += formatConversionStart - startTime;
		conversionTime += doneTime - formatConversionStart;
		profileCounter++;
		if (!(fFrame % 5)) {
			if (info) {
				printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
					"%Ld\n",
					decodingTime / profileCounter,
					conversionTime / profileCounter,
					fFrame, info->time_to_decode);
			} else {
				printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
					"%Ld\n",
					decodingTime / profileCounter,
					conversionTime / profileCounter,
					fFrame, bigtime_t(1000000LL / fOutputFrameRate));
			}
			decodingTime = 0;
			conversionTime = 0;
			profileCounter = 0;
		}
#endif
		return B_OK;
	}
}
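
/*
	For reference, the FFMPEG 0.10.2 decoding_encoding.c pattern that the
	NOTE above alludes to looks roughly like this (a sketch, not part of
	the build):

		while (packet.size > 0) {
			int gotPicture = 0;
			int used = avcodec_decode_video2(context, picture, &gotPicture,
				&packet);
			if (used < 0)
				break;	// error: drop the rest of the packet
			packet.data += used;
			packet.size -= used;
			if (gotPicture) {
				// one complete frame was decoded
			}
		}
*/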


/*! \brief Loads the next video chunk into fVideoChunkBuffer and assigns it
		(including the start time) to fTempPacket accordingly only if
		fTempPacket is empty.

	\returns B_OK
		1. meaning: Next video chunk is loaded.
		2. meaning: No need to load and assign anything. Proceed as usual.
	\returns B_LAST_BUFFER_ERROR No more video chunks available.
		fVideoChunkBuffer and fTempPacket are left untouched.
	\returns Other errors Caller should bail out because fVideoChunkBuffer and
		fTempPacket are in unknown states. Normal operation cannot be
		guaranteed.
*/
status_t
AVCodecDecoder::_LoadNextVideoChunkIfNeededAndAssignStartTime()
{
	// TODO: Rename fVideoChunkBuffer to fChunkBuffer, once the audio path is
	// responsible for releasing the chunk buffer, too.

	if (fTempPacket.size > 0)
		return B_OK;

	const void* chunkBuffer = NULL;
	size_t chunkBufferSize = 0;
		// In the case that GetNextChunk() returns an error fChunkBufferSize
		// should be left untouched.
	media_header chunkMediaHeader;

	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer,
		&chunkBufferSize, &chunkMediaHeader);
	if (getNextChunkStatus != B_OK)
		return getNextChunkStatus;

	status_t chunkBufferPaddingStatus
		= _CopyChunkToVideoChunkBufferAndAddPadding(chunkBuffer,
		chunkBufferSize);
	if (chunkBufferPaddingStatus != B_OK)
		return chunkBufferPaddingStatus;

	fTempPacket.data = fVideoChunkBuffer;
	fTempPacket.size = fChunkBufferSize;
	fTempPacket.dts = chunkMediaHeader.start_time;
		// Let FFMPEG handle the correct relationship between start_time and
		// decoded video frame. By doing so we simply copy the way it is
		// implemented in ffplay.c
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502
		//
		// FIXME: Research how to establish a meaningful relationship
		// between start_time and decoded video frame when the received
		// chunk buffer contains partial video frames. Maybe some data
		// formats contain time stamps (aka pts / dts fields) that can
		// be evaluated by FFMPEG. But as long as I don't have such
		// video data to test it, it makes no sense to implement it.
		//
		// FIXME: Implement tracking start_time of video frames
		// originating in data chunks that encode more than one video
		// frame at a time. In that case one would increment the
		// start_time for each consecutive frame of such a data chunk
		// (like it is done for audio frame decoding). But as long as
		// I don't have such video data to test it, it makes no sense
		// to implement it.

#ifdef LOG_STREAM_TO_FILE
	if (sDumpedPackets < 100) {
		sStreamLogFile.Write(chunkBuffer, fChunkBufferSize);
		printf("wrote %ld bytes\n", fChunkBufferSize);
		sDumpedPackets++;
	} else if (sDumpedPackets == 100)
		sStreamLogFile.Unset();
#endif

	return B_OK;
}


/*! \brief Copies a chunk into fVideoChunkBuffer and adds a "safety net" of
		additional memory as required by FFMPEG for input buffers to video
		decoders.

	This is needed so that some decoders can safely read a fixed number of
	bytes at a time for performance optimization purposes.

	The additional memory has a size of FF_INPUT_BUFFER_PADDING_SIZE as defined
	in avcodec.h.

	Ownership of fVideoChunkBuffer memory is with the class so it needs to be
	freed at the right times (on destruction, on seeking).

	Also update fChunkBufferSize to reflect the size of the contained video
	data (leaving out the padding).

	\param chunk The chunk to copy.
	\param chunkSize Size of the chunk in bytes

	\returns B_OK Padding was successful. You are responsible for releasing the
		allocated memory. fChunkBufferSize is set to chunkSize.
	\returns B_NO_MEMORY Padding failed.
		fVideoChunkBuffer is set to NULL making it safe to call free() on it.
		fChunkBufferSize is set to 0 to reflect the size of fVideoChunkBuffer.
*/
status_t
AVCodecDecoder::_CopyChunkToVideoChunkBufferAndAddPadding(const void* chunk,
	size_t chunkSize)
{
	// TODO: Rename fVideoChunkBuffer to fChunkBuffer, once the audio path is
	// responsible for releasing the chunk buffer, too.

	uint8_t* chunkBuffer = static_cast<uint8_t*>(realloc(fVideoChunkBuffer,
		chunkSize + FF_INPUT_BUFFER_PADDING_SIZE));
	if (chunkBuffer == NULL) {
		// On realloc() failure the old buffer survives; release it so that
		// fVideoChunkBuffer is NULL as documented (and nothing leaks).
		free(fVideoChunkBuffer);
		fVideoChunkBuffer = NULL;
		fChunkBufferSize = 0;
		return B_NO_MEMORY;
	}
	fVideoChunkBuffer = chunkBuffer;

	memcpy(fVideoChunkBuffer, chunk, chunkSize);
	memset(fVideoChunkBuffer + chunkSize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
		// Establish the safety net by zeroing the padding area.

	fChunkBufferSize = chunkSize;

	return B_OK;
}
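
/*
	Resulting buffer layout (illustrative): FFmpeg's optimized bitstream
	readers may read 32 or 64 bits at once and thus run past the end of the
	actual input, which the zeroed padding absorbs.

		[0 .. chunkSize - 1]                     video chunk data
		[chunkSize .. chunkSize
			+ FF_INPUT_BUFFER_PADDING_SIZE - 1]  zeroed padding for
		                                         over-reading decoders
*/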


/*! \brief Executes all steps needed for a freshly decoded video frame.

	\see _UpdateMediaHeaderForVideoFrame() and
	\see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to
	call this method.
*/
void
AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState()
{
	_DeinterlaceAndColorConvertVideoFrame();
	_UpdateMediaHeaderForVideoFrame();

	ConvertAVCodecContextToVideoFrameRate(*fContext, fOutputFrameRate);

#ifdef DEBUG
	dump_ffframe(fRawDecodedPicture, "ffpict");
//	dump_ffframe(fPostProcessedDecodedPicture, "opict");
#endif
	fFrame++;
}

/*! \brief Flushes one video frame - if any - still buffered by the decoder.

	Some FFMPEG decoders buffer video frames. To retrieve those buffered
	frames the decoder needs to be told to flush them.

	The intended use of this method is to call it once there are no more data
	chunks left to decode. In other words: once GetNextChunk() returns with
	status B_LAST_BUFFER_ERROR it is time to start flushing.

	\returns B_OK Retrieved one video frame, handled it and updated the
		system state accordingly.
		There may be more video frames left. So it is valid for the client of
		AVCodecDecoder to call it one more time.

	\returns B_LAST_BUFFER_ERROR No video frame left.
		The client of the AVCodecDecoder should stop calling it now.
*/
status_t
AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer()
{
	// Create empty fTempPacket to tell the video decoder it is time to flush
	fTempPacket.data = NULL;
	fTempPacket.size = 0;

	int gotVideoFrame = 0;
	avcodec_decode_video2(fContext, fRawDecodedPicture, &gotVideoFrame,
		&fTempPacket);
		// We are only interested in complete frames now, so ignore the return
		// value.

	bool gotNoVideoFrame = gotVideoFrame == 0;
	if (gotNoVideoFrame) {
		// video buffer is flushed successfully
		return B_LAST_BUFFER_ERROR;
	}

	_HandleNewVideoFrameAndUpdateSystemState();

	return B_OK;
}
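
/*
	Together with _DecodeNextVideoFrame() this implements FFmpeg's draining
	convention: once GetNextChunk() reports B_LAST_BUFFER_ERROR, the caller
	effectively runs (a sketch):

		status_t status;
		do {
			status = _FlushOneVideoFrameFromDecoderBuffer();
			// each B_OK iteration leaves one more frame in fDecodedData
		} while (status == B_OK);
		// now B_LAST_BUFFER_ERROR: the decoder holds no more frames
*/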


/*! \brief Updates relevant fields of the class member fHeader with the
		properties of the most recently decoded video frame.

	It is assumed that this function is called only when the following asserts
	hold true:
		1. We actually got a new picture decoded by the video decoder.
		2. fHeader wasn't updated for the new picture yet. You MUST call this
		   method only once per decoded video frame.
		3. This function MUST be called after
		   _DeinterlaceAndColorConvertVideoFrame() as it relies on an updated
		   fDecodedDataSizeInBytes.
		4. There will be at maximum only one decoded video frame in our cache
		   at any single point in time. Otherwise you couldn't tell to which
		   cached decoded video frame the properties in fHeader relate.
		5. AVCodecContext is still valid for this video frame (this is the
		   case when this function is called after avcodec_decode_video2()
		   and before the next call to avcodec_decode_video2()).
*/
void
AVCodecDecoder::_UpdateMediaHeaderForVideoFrame()
{
	fHeader.type = B_MEDIA_RAW_VIDEO;
	fHeader.file_pos = 0;
	fHeader.orig_size = 0;
	fHeader.start_time = fRawDecodedPicture->pkt_dts;
	fHeader.size_used = fDecodedDataSizeInBytes;
	fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width;
	fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height;
	fHeader.u.raw_video.bytes_per_row
		= CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace,
			fRawDecodedPicture->width);
	fHeader.u.raw_video.field_gamma = 1.0;
	fHeader.u.raw_video.field_sequence = fFrame;
	fHeader.u.raw_video.field_number = 0;
	fHeader.u.raw_video.pulldown_number = 0;
	fHeader.u.raw_video.first_active_line = 1;
	fHeader.u.raw_video.line_count = fRawDecodedPicture->height;

	ConvertAVCodecContextToVideoAspectWidthAndHeight(*fContext,
		fHeader.u.raw_video.pixel_width_aspect,
		fHeader.u.raw_video.pixel_height_aspect);

	TRACE("[v] start_time=%02d:%02d.%02d field_sequence=%lu\n",
		int((fHeader.start_time / 60000000) % 60),
		int((fHeader.start_time / 1000000) % 60),
		int((fHeader.start_time / 10000) % 100),
		fHeader.u.raw_video.field_sequence);
}


/*! \brief This function applies deinterlacing (only if needed) and color
	conversion to the video frame in fRawDecodedPicture.

	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
	converted yet (otherwise the behavior of this function is undefined).

	You should only call this function when you got a new picture decoded by
	the video decoder.

	When this function finishes the postprocessed video frame will be available
	in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes
	will be set accordingly).
*/
void
AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
{
	int displayWidth = fRawDecodedPicture->width;
	int displayHeight = fRawDecodedPicture->height;
	AVPicture deinterlacedPicture;
	bool useDeinterlacedPicture = false;

	if (fRawDecodedPicture->interlaced_frame) {
		AVPicture rawPicture;
		rawPicture.data[0] = fRawDecodedPicture->data[0];
		rawPicture.data[1] = fRawDecodedPicture->data[1];
		rawPicture.data[2] = fRawDecodedPicture->data[2];
		rawPicture.data[3] = fRawDecodedPicture->data[3];
		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];

		avpicture_alloc(&deinterlacedPicture, fContext->pix_fmt, displayWidth,
			displayHeight);

		if (avpicture_deinterlace(&deinterlacedPicture, &rawPicture,
				fContext->pix_fmt, displayWidth, displayHeight) < 0) {
			TRACE("[v] avpicture_deinterlace() - error\n");
		} else
			useDeinterlacedPicture = true;
	}

	// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		fSwsContext = sws_getContext(displayWidth, displayHeight,
			fContext->pix_fmt, displayWidth, displayHeight,
			colorspace_to_pixfmt(fOutputColorSpace),
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	}
#else
	if (fFormatConversionFunc == NULL) {
		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
			fContext->pix_fmt, displayWidth, displayHeight);
	}
#endif

	fDecodedDataSizeInBytes = avpicture_get_size(
		colorspace_to_pixfmt(fOutputColorSpace), displayWidth, displayHeight);

	fDecodedData = static_cast<uint8_t*>(realloc(fDecodedData,
		fDecodedDataSizeInBytes));
		// realloc() instead of a one-time malloc(), so the buffer keeps up
		// with a changing frame size.

	fPostProcessedDecodedPicture->data[0] = fDecodedData;
	fPostProcessedDecodedPicture->linesize[0]
		= CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace,
			displayWidth);
		// Computed directly: fHeader.u.raw_video.bytes_per_row is only
		// updated in _UpdateMediaHeaderForVideoFrame(), which runs after
		// this function.

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL) {
#else
	if (fFormatConversionFunc != NULL) {
#endif
		if (useDeinterlacedPicture) {
			AVFrame deinterlacedFrame;
			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
			deinterlacedFrame.linesize[0]
				= deinterlacedPicture.linesize[0];
			deinterlacedFrame.linesize[1]
				= deinterlacedPicture.linesize[1];
			deinterlacedFrame.linesize[2]
				= deinterlacedPicture.linesize[2];
			deinterlacedFrame.linesize[3]
				= deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, deinterlacedFrame.data,
				deinterlacedFrame.linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(&deinterlacedFrame,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, fRawDecodedPicture->data,
				fRawDecodedPicture->linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(fRawDecodedPicture,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		}
	}

	if (fRawDecodedPicture->interlaced_frame)
		avpicture_free(&deinterlacedPicture);
}
1176