xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision 9760dcae2038d47442f4658c2575844c6cf92c40)
1 /*
2  * Copyright (C) 2001 Carlos Hasan
3  * Copyright (C) 2001 François Revol
4  * Copyright (C) 2001 Axel Dörfler
5  * Copyright (C) 2004 Marcus Overhagen
6  * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
7  *
8  * All rights reserved. Distributed under the terms of the MIT License.
9  */
10 
11 //! libavcodec based decoder for Haiku
12 
13 #include "AVCodecDecoder.h"
14 
15 #include <new>
16 
17 #include <string.h>
18 
19 #include <Bitmap.h>
20 #include <Debug.h>
21 
22 
23 #undef TRACE
24 //#define TRACE_AV_CODEC
25 #ifdef TRACE_AV_CODEC
26 #	define TRACE(x...)	printf(x)
27 #else
28 #	define TRACE(x...)
29 #endif
30 
31 //#define LOG_STREAM_TO_FILE
32 #ifdef LOG_STREAM_TO_FILE
33 #	include <File.h>
34 	static BFile sStreamLogFile("/boot/home/Desktop/AVCodecDebugStream.raw",
35 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
36 	static int sDumpedPackets = 0;
37 #endif
38 
39 #define USE_SWS_FOR_COLOR_SPACE_CONVERSION 0
40 
41 
// Mirrors the on-disk layout of the Microsoft WAVEFORMATEX header that
// precedes codec-specific extra data in B_WAV_FORMAT_FAMILY info buffers.
// Must stay packed so the fields line up with the raw bytes.
struct wave_format_ex {
	uint16 format_tag;			// codec identifier (WAV format tag)
	uint16 channels;			// number of audio channels
	uint32 frames_per_sec;		// sample rate in Hz
	uint32 avg_bytes_per_sec;	// average byte rate of the stream
	uint16 block_align;			// size in bytes of one coded block
	uint16 bits_per_sample;		// bits per decoded sample
	uint16 extra_size;			// byte count of codec data following this header
	// extra_data[extra_size]
} _PACKED;
52 
53 
54 // profiling related globals
55 #define DO_PROFILING 0
56 
57 static bigtime_t decodingTime = 0;
58 static bigtime_t conversionTime = 0;
59 static long profileCounter = 0;
60 
61 
/*!	Sets every member to a safe idle value. The FFmpeg context and the
	input/output frame structures are allocated immediately; the actual
	codec is looked up later in Setup() and opened during format
	negotiation.
*/
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fOutputVideoFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodecIndexInTable(-1),
		// -1: no entry of gCodecTable has been matched yet
	fCodec(NULL),
	fContext(avcodec_alloc_context()),
	fInputPicture(avcodec_alloc_frame()),
	fOutputPicture(avcodec_alloc_frame()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	// codec-specific side data, filled in by Setup()
	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fStartTime(0),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),

	// fChunkBuffer holds the current encoded chunk from GetNextChunk()
	fChunkBuffer(NULL),
	fChunkBufferOffset(0),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	// fOutputBuffer holds decoded audio not yet handed to the caller
	fOutputBuffer(NULL),
	fOutputBufferOffset(0),
	fOutputBufferSize(0)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");
}
103 
104 
105 AVCodecDecoder::~AVCodecDecoder()
106 {
107 	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));
108 
109 #ifdef DO_PROFILING
110 	if (profileCounter > 0) {
111 			printf("[%c] profile: d1 = %lld, d2 = %lld (%Ld)\n",
112 				fIsAudio?('a'):('v'), decodingTime / profileCounter, conversionTime / profileCounter,
113 				fFrame);
114 	}
115 #endif
116 
117 	if (fCodecInitDone)
118 		avcodec_close(fContext);
119 
120 	free(fOutputPicture);
121 	free(fInputPicture);
122 	free(fContext);
123 
124 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
125 	if (fSwsContext != NULL)
126 		sws_freeContext(fSwsContext);
127 #endif
128 
129 	delete[] fExtraData;
130 	delete[] fOutputBuffer;
131 }
132 
133 
134 void
135 AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
136 {
137 	sprintf(mci->short_name, "ff:%s", fCodec->name);
138 	sprintf(mci->pretty_name, "%s (libavcodec %s)",
139 		gCodecTable[fCodecIndexInTable].prettyname, fCodec->name);
140 	mci->id = 0;
141 	mci->sub_id = gCodecTable[fCodecIndexInTable].id;
142 }
143 
144 
145 status_t
146 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
147 	size_t infoSize)
148 {
149 	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
150 		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
151 		return B_ERROR;
152 
153 	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
154 	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));
155 
156 	if (fIsAudio && !fOutputBuffer)
157 		fOutputBuffer = new char[AVCODEC_MAX_AUDIO_FRAME_SIZE];
158 
159 #ifdef TRACE_AV_CODEC
160 	char buffer[1024];
161 	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
162 	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
163 	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
164 	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
165 		ioEncodedFormat->user_data_type);
166 	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
167 		ioEncodedFormat->MetaDataSize());
168 #endif
169 
170 	media_format_description descr;
171 	for (int32 i = 0; gCodecTable[i].id; i++) {
172 		fCodecIndexInTable = i;
173 		uint64 cid;
174 
175 		if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
176 				gCodecTable[i].family, &descr) == B_OK
177 		    && gCodecTable[i].type == ioEncodedFormat->type) {
178 			switch(gCodecTable[i].family) {
179 				case B_WAV_FORMAT_FAMILY:
180 					cid = descr.u.wav.codec;
181 					break;
182 				case B_AIFF_FORMAT_FAMILY:
183 					cid = descr.u.aiff.codec;
184 					break;
185 				case B_AVI_FORMAT_FAMILY:
186 					cid = descr.u.avi.codec;
187 					break;
188 				case B_MPEG_FORMAT_FAMILY:
189 					cid = descr.u.mpeg.id;
190 					break;
191 				case B_QUICKTIME_FORMAT_FAMILY:
192 					cid = descr.u.quicktime.codec;
193 					break;
194 				case B_MISC_FORMAT_FAMILY:
195 					cid = (((uint64)descr.u.misc.file_format) << 32)
196 						| descr.u.misc.codec;
197 					break;
198 				default:
199 					puts("ERR family");
200 					return B_ERROR;
201 			}
202 
203 			if (gCodecTable[i].family == descr.family
204 				&& gCodecTable[i].fourcc == cid) {
205 
206 				TRACE("  0x%04lx codec id = \"%c%c%c%c\"\n", uint32(cid),
207 					(char)((cid >> 24) & 0xff), (char)((cid >> 16) & 0xff),
208 					(char)((cid >> 8) & 0xff), (char)(cid & 0xff));
209 
210 				fCodec = avcodec_find_decoder(gCodecTable[i].id);
211 				if (fCodec == NULL) {
212 					TRACE("  unable to find the correct FFmpeg "
213 						"decoder (id = %d)\n", gCodecTable[i].id);
214 					return B_ERROR;
215 				}
216 				TRACE("  found decoder %s\n", fCodec->name);
217 
218 				const void* extraData = infoBuffer;
219 				fExtraDataSize = infoSize;
220 				if (gCodecTable[i].family == B_WAV_FORMAT_FAMILY
221 						&& infoSize >= sizeof(wave_format_ex)) {
222 					TRACE("  trying to use wave_format_ex\n");
223 					// Special case extra data in B_WAV_FORMAT_FAMILY
224 					const wave_format_ex* waveFormatData
225 						= (const wave_format_ex*)infoBuffer;
226 
227 					size_t waveFormatSize = infoSize;
228 					if (waveFormatData != NULL && waveFormatSize > 0) {
229 						fBlockAlign = waveFormatData->block_align;
230 						TRACE("  found block align: %d\n", fBlockAlign);
231 						fExtraDataSize = waveFormatData->extra_size;
232 						// skip the wave_format_ex from the extra data.
233 						extraData = waveFormatData + 1;
234 					}
235 				} else {
236 					if (fIsAudio) {
237 						fBlockAlign
238 							= ioEncodedFormat->u.encoded_audio.output
239 								.buffer_size;
240 						TRACE("  using buffer_size as block align: %d\n",
241 							fBlockAlign);
242 					}
243 				}
244 				if (extraData != NULL && fExtraDataSize > 0) {
245 					TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
246 					fExtraData = new(std::nothrow) char[fExtraDataSize];
247 					if (fExtraData != NULL)
248 						memcpy(fExtraData, infoBuffer, fExtraDataSize);
249 					else
250 						fExtraDataSize = 0;
251 				}
252 
253 				fInputFormat = *ioEncodedFormat;
254 				return B_OK;
255 			}
256 		}
257 	}
258 	printf("AVCodecDecoder::Setup failed!\n");
259 	return B_ERROR;
260 }
261 
262 
263 status_t
264 AVCodecDecoder::Seek(uint32 seekTo, int64 seekFrame, int64* frame,
265 	bigtime_t seekTime, bigtime_t* time)
266 {
267 	// Reset the FFmpeg codec to flush buffers, so we keep the sync
268 #if 1
269 	if (fCodecInitDone) {
270 		fCodecInitDone = false;
271 		avcodec_close(fContext);
272 		fCodecInitDone = (avcodec_open(fContext, fCodec) >= 0);
273 	}
274 #else
275 	// For example, this doesn't work on the H.264 codec. :-/
276 	if (fCodecInitDone)
277 		avcodec_flush_buffers(fContext);
278 #endif
279 
280 	if (seekTo == B_MEDIA_SEEK_TO_TIME) {
281 		TRACE("AVCodecDecoder::Seek by time ");
282 		TRACE("from frame %Ld and time %.6f TO Required Time %.6f. ",
283 			fFrame, fStartTime / 1000000.0, seekTime / 1000000.0);
284 
285 		*frame = (int64)(seekTime * fOutputFrameRate / 1000000LL);
286 		*time = seekTime;
287 	} else if (seekTo == B_MEDIA_SEEK_TO_FRAME) {
288 		TRACE("AVCodecDecoder::Seek by Frame ");
289 		TRACE("from time %.6f and frame %Ld TO Required Frame %Ld. ",
290 			fStartTime / 1000000.0, fFrame, seekFrame);
291 
292 		*time = (bigtime_t)(seekFrame * 1000000LL / fOutputFrameRate);
293 		*frame = seekFrame;
294 	} else
295 		return B_BAD_VALUE;
296 
297 	fFrame = *frame;
298 	fStartTime = *time;
299 	TRACE("so new frame is %Ld at time %.6f\n", *frame, *time / 1000000.0);
300 	return B_OK;
301 }
302 
303 
304 status_t
305 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
306 {
307 	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
308 		fIsAudio?('a'):('v'));
309 
310 #ifdef TRACE_AV_CODEC
311 	char buffer[1024];
312 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
313 	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
314 #endif
315 
316 	if (fIsAudio)
317 		return _NegotiateAudioOutputFormat(inOutFormat);
318 	else
319 		return _NegotiateVideoOutputFormat(inOutFormat);
320 }
321 
322 
323 status_t
324 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
325 	media_header* mediaHeader, media_decode_info* info)
326 {
327 	if (!fCodecInitDone)
328 		return B_NO_INIT;
329 
330 //	TRACE("[%c] AVCodecDecoder::Decode() for time %Ld\n", fIsAudio?('a'):('v'),
331 //		fStartTime);
332 
333 	mediaHeader->start_time = fStartTime;
334 
335 	status_t ret;
336 	if (fIsAudio)
337 		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
338 	else
339 		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);
340 
341 	fStartTime = (bigtime_t)(1000000LL * fFrame / fOutputFrameRate);
342 
343 	return ret;
344 }
345 
346 
347 // #pragma mark -
348 
349 
/*!	Negotiates the raw audio output format, configures the FFmpeg context
	from the input format (bit rate, sample rate, channels, block align,
	extra data) and opens the codec. Also resets all chunk/output buffer
	bookkeeping used by _DecodeAudio().
	\return B_OK on success, B_ERROR if avcodec_open() failed.
*/
status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	// Start from the wildcard format and pin down what we know from the
	// encoded input format.
	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate
		= fInputFormat.u.encoded_audio.output.frame_rate;
	outputAudioFormat.channel_count
		= fInputFormat.u.encoded_audio.output.channel_count;
	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0)
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;

	// 1024 frames per buffer (per channel).
	outputAudioFormat.buffer_size
		= 1024 * fInputFormat.u.encoded_audio.output.channel_count;
	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;

	// Hand the stream parameters collected in Setup() to FFmpeg.
	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
	fContext->sample_rate
		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
	fContext->channels = fInputFormat.u.encoded_audio.output.channel_count;
	fContext->block_align = fBlockAlign;
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
		fContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
		fContext->channels, fContext->block_align, fContext->extradata_size);

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	// open new
	int result = avcodec_open(fContext, fCodec);
	fCodecInitDone = (result >= 0);

	// Derive the per-frame byte size and per-buffer frame count used by
	// _DecodeAudio() when copying decoded samples out.
	fStartTime = 0;
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fContext->bit_rate, fContext->sample_rate, fContext->channels,
		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	// Reset decode-loop state; any previously buffered data is stale now.
	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	if (!fCodecInitDone) {
		TRACE("avcodec_open() failed!\n");
		return B_ERROR;
	}

	return B_OK;
}
434 
435 
/*!	Negotiates the raw video output format (B_YCbCr422 if requested,
	B_RGB32 otherwise), opens the codec and sets up the color space
	conversion path (swscale context or conversion function, depending on
	USE_SWS_FOR_COLOR_SPACE_CONVERSION).
	\return B_OK on success, B_ERROR if the codec could not be opened.
*/
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	fOutputVideoFormat = fInputFormat.u.encoded_video.output;

	fContext->width = fOutputVideoFormat.display.line_width;
	fContext->height = fOutputVideoFormat.display.line_count;
//	fContext->frame_rate = (int)(fOutputVideoFormat.field_rate
//		* fContext->frame_rate_base);

	fOutputFrameRate = fOutputVideoFormat.field_rate;

	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputVideoFormat.display.format = B_YCbCr422;
	else
		fOutputVideoFormat.display.format = B_RGB32;

	// Search for a pixel-format the codec handles
	// TODO: We should try this a couple of times until it succeeds, each
	// time using another pixel-format that is supported by the decoder.
	// But libavcodec doesn't seem to offer any way to tell the decoder
	// which format it should use.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif
	// Iterate over supported codec formats
	// NOTE: Currently a single iteration (see TODO above); the loop shape
	// is kept for the intended multi-format retry.
	for (int i = 0; i < 1; i++) {
		// close any previous instance
		if (fCodecInitDone) {
			fCodecInitDone = false;
			avcodec_close(fContext);
		}
		// TODO: Set n-th fContext->pix_fmt here
		if (avcodec_open(fContext, fCodec) >= 0) {
			fCodecInitDone = true;

			// NOTE: The closing brace of this "if" appears in BOTH
			// preprocessor branches below -- take care when editing.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			fSwsContext = sws_getContext(fContext->width, fContext->height,
				fContext->pix_fmt, fContext->width, fContext->height,
				colorspace_to_pixfmt(fOutputVideoFormat.display.format),
				SWS_FAST_BILINEAR, NULL, NULL, NULL);
		}
#else
			fFormatConversionFunc = resolve_colorspace(
				fOutputVideoFormat.display.format, fContext->pix_fmt,
				fContext->width, fContext->height);
		}
		if (fFormatConversionFunc != NULL)
			break;
#endif
	}

	if (!fCodecInitDone) {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

	// A missing conversion context/function here is not fatal: some
	// decoders only report their pixel format after the first decoded
	// frame, and _DecodeVideo() retries the lookup then.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	// 2 bytes per pixel for YCbCr 4:2:2, 4 bytes for RGB32.
	if (fOutputVideoFormat.display.format == B_YCbCr422) {
		fOutputVideoFormat.display.bytes_per_row
			= 2 * fOutputVideoFormat.display.line_width;
	} else {
		fOutputVideoFormat.display.bytes_per_row
			= 4 * fOutputVideoFormat.display.line_width;
	}

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->u.raw_video = fOutputVideoFormat;

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}
542 
543 
/*!	Fills \a outBuffer with up to fOutputFrameCount decoded audio frames.

	Works as a small state machine over two buffers:
	- fOutputBuffer holds already-decoded samples not yet copied out
	  (fOutputBufferOffset/fOutputBufferSize track the unread part);
	- fChunkBuffer holds the current encoded chunk from GetNextChunk()
	  (fChunkBufferOffset/fChunkBufferSize track the unconsumed part).
	Each loop iteration either copies decoded samples out, fetches a new
	chunk, or decodes more of the current chunk.

	\return B_LAST_BUFFER_ERROR at end of stream, B_OK otherwise (decode
		errors are logged once and skipped, not propagated).
*/
status_t
AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
//	TRACE("audio start_time %.6f\n", mediaHeader->start_time / 1000000.0);

	char* output_buffer = (char*)outBuffer;
	*outFrameCount = 0;
	while (*outFrameCount < fOutputFrameCount) {
		// Defensive clamping: the sizes should never go negative, but a
		// bad decoder return could make them; reset instead of looping
		// with corrupt state.
		if (fOutputBufferSize < 0) {
			TRACE("############ fOutputBufferSize %ld\n",
				fOutputBufferSize);
			fOutputBufferSize = 0;
		}
		if (fChunkBufferSize < 0) {
			TRACE("############ fChunkBufferSize %ld\n",
				fChunkBufferSize);
			fChunkBufferSize = 0;
		}

		// State 1: decoded samples are pending -- copy as many whole
		// frames as fit into the caller's buffer.
		if (fOutputBufferSize > 0) {
			int32 frames = min_c(fOutputFrameCount - *outFrameCount,
				fOutputBufferSize / fOutputFrameSize);
			memcpy(output_buffer, fOutputBuffer + fOutputBufferOffset,
				frames * fOutputFrameSize);
			fOutputBufferOffset += frames * fOutputFrameSize;
			fOutputBufferSize -= frames * fOutputFrameSize;
			output_buffer += frames * fOutputFrameSize;
			*outFrameCount += frames;
			fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
			continue;
		}
		// State 2: current chunk fully consumed -- fetch the next one.
		if (fChunkBufferSize == 0) {
			media_header chunkMediaHeader;
			status_t err;
			err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize, &chunkMediaHeader);
			if (err == B_LAST_BUFFER_ERROR) {
				TRACE("Last Chunk with chunk size %ld\n",fChunkBufferSize);
				fChunkBufferSize = 0;
				return err;
			}
			if (err != B_OK || fChunkBufferSize < 0) {
				printf("GetNextChunk error %ld\n",fChunkBufferSize);
				fChunkBufferSize = 0;
				break;
			}
			fChunkBufferOffset = 0;
			fStartTime = chunkMediaHeader.start_time;
			// Only the first chunk of this call defines the buffer's
			// reported start time.
			if (*outFrameCount == 0)
				mediaHeader->start_time = chunkMediaHeader.start_time;
			continue;
		}
		// State 3: no decoded samples buffered -- decode more of the
		// current chunk into fOutputBuffer.
		if (fOutputBufferSize == 0) {
			int len;
			int out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			len = avcodec_decode_audio2(fContext, (short *)fOutputBuffer,
				&out_size, (uint8_t*)fChunkBuffer + fChunkBufferOffset,
				fChunkBufferSize);
			if (len < 0) {
				// Log the first error only, then drop the rest of the
				// chunk and continue with the next one.
				if (!fAudioDecodeError) {
					TRACE("########### audio decode error, "
						"fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
						fChunkBufferSize, fChunkBufferOffset);
					fAudioDecodeError = true;
				}
				out_size = 0;
				len = 0;
				fChunkBufferOffset = 0;
				fChunkBufferSize = 0;
			} else
				fAudioDecodeError = false;

			// Advance past the consumed input; expose the decoded bytes.
			fChunkBufferOffset += len;
			fChunkBufferSize -= len;
			fOutputBufferOffset = 0;
			fOutputBufferSize = out_size;
		}
	}
	fFrame += *outFrameCount;

//	TRACE("Played %Ld frames at time %Ld\n",*outFrameCount, mediaHeader->start_time);
	return B_OK;
}
627 
628 
/*!	Decodes encoded chunks until one complete video frame is produced in
	\a outBuffer, then returns. Pipeline per frame: fetch chunk, decode
	into fInputPicture, optionally deinterlace, then color-convert into
	the caller's buffer via swscale or the resolved conversion function.
	\return B_OK once a frame was produced, or the error from
		GetNextChunk() (e.g. B_LAST_BUFFER_ERROR at end of stream).
*/
status_t
AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	bool firstRun = true;
	// Loop until the decoder emits a picture; several input chunks may be
	// needed before the first frame comes out.
	while (true) {
		const void* data;
		size_t size;
		media_header chunkMediaHeader;
		status_t err = GetNextChunk(&data, &size, &chunkMediaHeader);
		if (err != B_OK) {
			TRACE("AVCodecDecoder::_DecodeVideo(): error from "
				"GetNextChunk(): %s\n", strerror(err));
			return err;
		}
#ifdef LOG_STREAM_TO_FILE
		if (sDumpedPackets < 100) {
			sStreamLogFile.Write(data, size);
			printf("wrote %ld bytes\n", size);
			sDumpedPackets++;
		} else if (sDumpedPackets == 100)
			sStreamLogFile.Unset();
#endif

		// Fill in the media header once, from the first chunk of this
		// call.
		if (firstRun) {
			firstRun = false;

			mediaHeader->type = B_MEDIA_RAW_VIDEO;
//			mediaHeader->start_time = chunkMediaHeader.start_time;
			mediaHeader->file_pos = 0;
			mediaHeader->orig_size = 0;
			mediaHeader->u.raw_video.field_gamma = 1.0;
			mediaHeader->u.raw_video.field_sequence = fFrame;
			mediaHeader->u.raw_video.field_number = 0;
			mediaHeader->u.raw_video.pulldown_number = 0;
			mediaHeader->u.raw_video.first_active_line = 1;
			mediaHeader->u.raw_video.line_count
				= fOutputVideoFormat.display.line_count;

			TRACE("[v] start_time=%02d:%02d.%02d field_sequence=%lu\n",
				int((mediaHeader->start_time / 60000000) % 60),
				int((mediaHeader->start_time / 1000000) % 60),
				int((mediaHeader->start_time / 10000) % 100),
				mediaHeader->u.raw_video.field_sequence);
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFmpeg code example I've read, the length returned by
		// avcodec_decode_video() is completely ignored. Furthermore, the
		// packet buffers are supposed to contain complete frames only so we
		// don't seem to be required to buffer any packets because not the
		// complete packet has been read.
		int gotPicture = 0;
		int len = avcodec_decode_video(fContext, fInputPicture, &gotPicture,
			(uint8_t*)data, size);
		if (len < 0) {
			TRACE("[v] AVCodecDecoder: error in decoding frame %lld: %d\n",
				fFrame, len);
			// NOTE: An error from avcodec_decode_video() seems to be ignored
			// in the ffplay sample code.
//			return B_ERROR;
		}


//TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//	"fContext->frame_rate = %ld\n", (int)(fContext->pts / (60*60*1000000)),
//	(int)(fContext->pts / (60*1000000)), (int)(fContext->pts / (1000000)),
//	(int)(fContext->pts % 1000000), fContext->frame_number,
//	fContext->frame_rate);
//TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//	"fContext->frame_rate = %ld\n",
//	(int)(fInputPicture->pts / (60*60*1000000)),
//	(int)(fInputPicture->pts / (60*1000000)),
//	(int)(fInputPicture->pts / (1000000)),
//	(int)(fInputPicture->pts % 1000000), fContext->frame_number,
//	fContext->frame_rate);

		if (gotPicture) {
			int width = fOutputVideoFormat.display.line_width;
			int height = fOutputVideoFormat.display.line_count;
			AVPicture deinterlacedPicture;
			bool useDeinterlacedPicture = false;

			// Interlaced input: deinterlace into a temporary picture
			// first; fall back to the raw picture if that fails.
			if (fInputPicture->interlaced_frame) {
				AVPicture source;
				source.data[0] = fInputPicture->data[0];
				source.data[1] = fInputPicture->data[1];
				source.data[2] = fInputPicture->data[2];
				source.data[3] = fInputPicture->data[3];
				source.linesize[0] = fInputPicture->linesize[0];
				source.linesize[1] = fInputPicture->linesize[1];
				source.linesize[2] = fInputPicture->linesize[2];
				source.linesize[3] = fInputPicture->linesize[3];

				avpicture_alloc(&deinterlacedPicture,
					fContext->pix_fmt, width, height);

				if (avpicture_deinterlace(&deinterlacedPicture, &source,
						fContext->pix_fmt, width, height) < 0) {
					TRACE("[v] avpicture_deinterlace() - error\n");
				} else
					useDeinterlacedPicture = true;
			}

#if DO_PROFILING
			bigtime_t formatConversionStart = system_time();
#endif
//			TRACE("ONE FRAME OUT !! len=%d size=%ld (%s)\n", len, size,
//				pixfmt_to_string(fContext->pix_fmt));

			// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			if (fSwsContext == NULL) {
				fSwsContext = sws_getContext(fContext->width, fContext->height,
					fContext->pix_fmt, fContext->width, fContext->height,
					colorspace_to_pixfmt(fOutputVideoFormat.display.format),
					SWS_FAST_BILINEAR, NULL, NULL, NULL);
			}
#else
			if (fFormatConversionFunc == NULL) {
				fFormatConversionFunc = resolve_colorspace(
					fOutputVideoFormat.display.format, fContext->pix_fmt,
					fContext->width, fContext->height);
			}
#endif

			// Convert directly into the caller's buffer.
			fOutputPicture->data[0] = (uint8_t*)outBuffer;
			fOutputPicture->linesize[0]
				= fOutputVideoFormat.display.bytes_per_row;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			if (fSwsContext != NULL) {
#else
			if (fFormatConversionFunc != NULL) {
#endif
				if (useDeinterlacedPicture) {
					AVFrame inputFrame;
					inputFrame.data[0] = deinterlacedPicture.data[0];
					inputFrame.data[1] = deinterlacedPicture.data[1];
					inputFrame.data[2] = deinterlacedPicture.data[2];
					inputFrame.data[3] = deinterlacedPicture.data[3];
					inputFrame.linesize[0] = deinterlacedPicture.linesize[0];
					inputFrame.linesize[1] = deinterlacedPicture.linesize[1];
					inputFrame.linesize[2] = deinterlacedPicture.linesize[2];
					inputFrame.linesize[3] = deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
					sws_scale(fSwsContext, inputFrame.data,
						inputFrame.linesize, 0, fContext->height,
						fOutputPicture->data, fOutputPicture->linesize);
#else
					(*fFormatConversionFunc)(&inputFrame,
						fOutputPicture, width, height);
#endif
				} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
					sws_scale(fSwsContext, fInputPicture->data,
						fInputPicture->linesize, 0, fContext->height,
						fOutputPicture->data, fOutputPicture->linesize);
#else
					(*fFormatConversionFunc)(fInputPicture, fOutputPicture,
						width, height);
#endif
				}
			}
			// Temporary picture was allocated above exactly when the
			// frame was interlaced.
			if (fInputPicture->interlaced_frame)
				avpicture_free(&deinterlacedPicture);
#ifdef DEBUG
			dump_ffframe(fInputPicture, "ffpict");
//			dump_ffframe(fOutputPicture, "opict");
#endif
			*outFrameCount = 1;
			fFrame++;

#if DO_PROFILING
			bigtime_t doneTime = system_time();
			decodingTime += formatConversionStart - startTime;
			conversionTime += doneTime - formatConversionStart;
			profileCounter++;
			if (!(fFrame % 10)) {
				if (info) {
					printf("[v] profile: d1 = %lld, d2 = %lld (%Ld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, info->time_to_decode);
				} else {
					printf("[v] profile: d1 = %lld, d2 = %lld (%Ld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, bigtime_t(1000000LL / fOutputFrameRate));
				}
			}
#endif
			return B_OK;
		} else {
			TRACE("frame %lld - no picture yet, len: %d, chunk size: %ld\n",
				fFrame, len, size);
		}
	}
}
834 
835 
836