xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision 1345706a9ff6ad0dc041339a02d4259998b0765d)
1 /*
2  * Copyright (C) 2001 Carlos Hasan
3  * Copyright (C) 2001 François Revol
4  * Copyright (C) 2001 Axel Dörfler
5  * Copyright (C) 2004 Marcus Overhagen
6  * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
7  *
8  * All rights reserved. Distributed under the terms of the MIT License.
9  */
10 
11 //! libavcodec-based decoder for Haiku
12 
13 #include "AVCodecDecoder.h"
14 
15 #include <new>
16 
17 #include <string.h>
18 
19 #include <Bitmap.h>
20 #include <Debug.h>
21 
22 
23 #undef TRACE
24 //#define TRACE_AV_CODEC
25 #ifdef TRACE_AV_CODEC
26 #	define TRACE(x...)	printf(x)
27 #	define TRACE_AUDIO(x...)	printf(x)
28 #	define TRACE_VIDEO(x...)	printf(x)
29 #else
30 #	define TRACE(x...)
31 #	define TRACE_AUDIO(x...)
32 #	define TRACE_VIDEO(x...)
33 #endif
34 
35 //#define LOG_STREAM_TO_FILE
36 #ifdef LOG_STREAM_TO_FILE
37 #	include <File.h>
38 	static BFile sStreamLogFile("/boot/home/Desktop/AVCodecDebugStream.raw",
39 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
40 	static int sDumpedPackets = 0;
41 #endif
42 
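// Chooses between color space conversion via libswscale (1) and the plugin's
// own conversion routines obtained from resolve_colorspace() (0).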
43 #define USE_SWS_FOR_COLOR_SPACE_CONVERSION 0
44 
45 
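// Mirrors the WAVEFORMATEX header that B_WAV_FORMAT_FAMILY info buffers put
// in front of the codec specific extra data (see Setup()).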
46 struct wave_format_ex {
47 	uint16 format_tag;
48 	uint16 channels;
49 	uint32 frames_per_sec;
50 	uint32 avg_bytes_per_sec;
51 	uint16 block_align;
52 	uint16 bits_per_sample;
53 	uint16 extra_size;
54 	// extra_data[extra_size]
55 } _PACKED;
56 
57 
58 // profiling related globals
59 #define DO_PROFILING 0
60 
61 static bigtime_t decodingTime = 0;
62 static bigtime_t conversionTime = 0;
63 static long profileCounter = 0;
64 
65 
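// The decoder starts out without a codec selected; Setup() picks the codec
// and one of the _Negotiate*OutputFormat() methods opens it. The codec
// context and the input/output frames are allocated here and released again
// in the destructor.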
66 AVCodecDecoder::AVCodecDecoder()
67 	:
68 	fHeader(),
69 	fInputFormat(),
70 	fOutputVideoFormat(),
71 	fFrame(0),
72 	fIsAudio(false),
73 	fCodecIndexInTable(-1),
74 	fCodec(NULL),
75 	fContext(avcodec_alloc_context()),
76 	fInputPicture(avcodec_alloc_frame()),
77 	fOutputPicture(avcodec_alloc_frame()),
78 
79 	fCodecInitDone(false),
80 
81 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
82 	fSwsContext(NULL),
83 #else
84 	fFormatConversionFunc(NULL),
85 #endif
86 
87 	fExtraData(NULL),
88 	fExtraDataSize(0),
89 	fBlockAlign(0),
90 
91 	fStartTime(0),
92 	fOutputFrameCount(0),
93 	fOutputFrameRate(1.0),
94 	fOutputFrameSize(0),
95 
96 	fChunkBuffer(NULL),
97 	fChunkBufferOffset(0),
98 	fChunkBufferSize(0),
99 	fAudioDecodeError(false),
100 
101 	fOutputBuffer(NULL),
102 	fOutputBufferOffset(0),
103 	fOutputBufferSize(0)
104 {
105 	TRACE("AVCodecDecoder::AVCodecDecoder()\n");
106 }
107 
108 
109 AVCodecDecoder::~AVCodecDecoder()
110 {
111 	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));
112 
113 #if DO_PROFILING
114 	if (profileCounter > 0) {
115 		printf("[%c] profile: d1 = %lld, d2 = %lld (%Ld)\n",
116 			fIsAudio?('a'):('v'), decodingTime / profileCounter,
117 			conversionTime / profileCounter, fFrame);
118 	}
119 #endif
120 
121 	if (fCodecInitDone)
122 		avcodec_close(fContext);
123 
124 	av_free(fOutputPicture);
125 	av_free(fInputPicture);
126 	av_free(fContext);
127 
128 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
129 	if (fSwsContext != NULL)
130 		sws_freeContext(fSwsContext);
131 #endif
132 
133 	delete[] fExtraData;
134 	delete[] fOutputBuffer;
135 }
136 
137 
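// Reports the codec selected in Setup(). Note that fCodec and
// fCodecIndexInTable are used unchecked here, so this is only valid after
// Setup() has returned B_OK.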
138 void
139 AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
140 {
141 	sprintf(mci->short_name, "ff:%s", fCodec->name);
142 	sprintf(mci->pretty_name, "%s (libavcodec %s)",
143 		gCodecTable[fCodecIndexInTable].prettyname, fCodec->name);
144 	mci->id = 0;
145 	mci->sub_id = gCodecTable[fCodecIndexInTable].id;
146 }
147 
148 
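// Matches the encoded format against gCodecTable, looks up the corresponding
// FFmpeg decoder and stores the codec private data. For B_WAV_FORMAT_FAMILY
// the extra data follows a wave_format_ex header, which is skipped; for audio
// in other families the output buffer_size is used as block alignment. The
// codec itself is opened later in _Negotiate*OutputFormat().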
149 status_t
150 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
151 	size_t infoSize)
152 {
153 	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
154 		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
155 		return B_ERROR;
156 
157 	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
158 	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));
159 
160 	if (fIsAudio && !fOutputBuffer)
161 		fOutputBuffer = new char[AVCODEC_MAX_AUDIO_FRAME_SIZE];
162 
163 #ifdef TRACE_AV_CODEC
164 	char buffer[1024];
165 	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
166 	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
167 	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
168 	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
169 		ioEncodedFormat->user_data_type);
170 	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
171 		ioEncodedFormat->MetaDataSize());
172 #endif
173 
174 	media_format_description descr;
175 	for (int32 i = 0; gCodecTable[i].id; i++) {
176 		fCodecIndexInTable = i;
177 		uint64 cid;
178 
179 		if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
180 				gCodecTable[i].family, &descr) == B_OK
181 		    && gCodecTable[i].type == ioEncodedFormat->type) {
182 			switch(gCodecTable[i].family) {
183 				case B_WAV_FORMAT_FAMILY:
184 					cid = descr.u.wav.codec;
185 					break;
186 				case B_AIFF_FORMAT_FAMILY:
187 					cid = descr.u.aiff.codec;
188 					break;
189 				case B_AVI_FORMAT_FAMILY:
190 					cid = descr.u.avi.codec;
191 					break;
192 				case B_MPEG_FORMAT_FAMILY:
193 					cid = descr.u.mpeg.id;
194 					break;
195 				case B_QUICKTIME_FORMAT_FAMILY:
196 					cid = descr.u.quicktime.codec;
197 					break;
198 				case B_MISC_FORMAT_FAMILY:
199 					cid = (((uint64)descr.u.misc.file_format) << 32)
200 						| descr.u.misc.codec;
201 					break;
202 				default:
203 					puts("ERR family");
204 					return B_ERROR;
205 			}
206 
207 			if (gCodecTable[i].family == descr.family
208 				&& gCodecTable[i].fourcc == cid) {
209 
210 				TRACE("  0x%04lx codec id = \"%c%c%c%c\"\n", uint32(cid),
211 					(char)((cid >> 24) & 0xff), (char)((cid >> 16) & 0xff),
212 					(char)((cid >> 8) & 0xff), (char)(cid & 0xff));
213 
214 				fCodec = avcodec_find_decoder(gCodecTable[i].id);
215 				if (fCodec == NULL) {
216 					TRACE("  unable to find the correct FFmpeg "
217 						"decoder (id = %d)\n", gCodecTable[i].id);
218 					return B_ERROR;
219 				}
220 				TRACE("  found decoder %s\n", fCodec->name);
221 
222 				const void* extraData = infoBuffer;
223 				fExtraDataSize = infoSize;
224 				if (gCodecTable[i].family == B_WAV_FORMAT_FAMILY
225 						&& infoSize >= sizeof(wave_format_ex)) {
226 					TRACE("  trying to use wave_format_ex\n");
227 					// Special case extra data in B_WAV_FORMAT_FAMILY
228 					const wave_format_ex* waveFormatData
229 						= (const wave_format_ex*)infoBuffer;
230 
231 					size_t waveFormatSize = infoSize;
232 					if (waveFormatData != NULL && waveFormatSize > 0) {
233 						fBlockAlign = waveFormatData->block_align;
234 						TRACE("  found block align: %d\n", fBlockAlign);
235 						fExtraDataSize = waveFormatData->extra_size;
236 						// skip the wave_format_ex from the extra data.
237 						extraData = waveFormatData + 1;
238 					}
239 				} else {
240 					if (fIsAudio) {
241 						fBlockAlign
242 							= ioEncodedFormat->u.encoded_audio.output
243 								.buffer_size;
244 						TRACE("  using buffer_size as block align: %d\n",
245 							fBlockAlign);
246 					}
247 				}
248 				if (extraData != NULL && fExtraDataSize > 0) {
249 					TRACE("AVCodecDecoder: extra data size %ld\n", fExtraDataSize);
250 					fExtraData = new(std::nothrow) char[fExtraDataSize];
251 					if (fExtraData != NULL)
252 						memcpy(fExtraData, extraData, fExtraDataSize);
253 					else
254 						fExtraDataSize = 0;
255 				}
256 
257 				fInputFormat = *ioEncodedFormat;
258 				return B_OK;
259 			}
260 		}
261 	}
262 	printf("AVCodecDecoder::Setup failed!\n");
263 	return B_ERROR;
264 }
265 
266 
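// Converts between frame index and media time via fOutputFrameRate and
// re-opens the codec so no stale reference frames survive the seek; simply
// calling avcodec_flush_buffers() is not reliable for all decoders (see the
// disabled branch below).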
267 status_t
268 AVCodecDecoder::Seek(uint32 seekTo, int64 seekFrame, int64* frame,
269 	bigtime_t seekTime, bigtime_t* time)
270 {
271 	// Reset the FFmpeg codec to flush its buffers, so we stay in sync.
272 #if 1
273 	if (fCodecInitDone) {
274 		fCodecInitDone = false;
275 		avcodec_close(fContext);
276 		fCodecInitDone = (avcodec_open(fContext, fCodec) >= 0);
277 	}
278 #else
279 	// For example, this doesn't work on the H.264 codec. :-/
280 	if (fCodecInitDone)
281 		avcodec_flush_buffers(fContext);
282 #endif
283 
284 	if (seekTo == B_MEDIA_SEEK_TO_TIME) {
285 		TRACE("AVCodecDecoder::Seek by time ");
286 		TRACE("from frame %Ld and time %.6f TO Required Time %.6f. ",
287 			fFrame, fStartTime / 1000000.0, seekTime / 1000000.0);
288 
289 		*frame = (int64)(seekTime * fOutputFrameRate / 1000000LL);
290 		*time = seekTime;
291 	} else if (seekTo == B_MEDIA_SEEK_TO_FRAME) {
292 		TRACE("AVCodecDecoder::Seek by Frame ");
293 		TRACE("from time %.6f and frame %Ld TO Required Frame %Ld. ",
294 			fStartTime / 1000000.0, fFrame, seekFrame);
295 
296 		*time = (bigtime_t)(seekFrame * 1000000LL / fOutputFrameRate);
297 		*frame = seekFrame;
298 	} else
299 		return B_BAD_VALUE;
300 
301 	fFrame = *frame;
302 	fStartTime = *time;
303 	TRACE("so new frame is %Ld at time %.6f\n", *frame, *time / 1000000.0);
304 	return B_OK;
305 }
306 
307 
308 status_t
309 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
310 {
311 	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
312 		fIsAudio?('a'):('v'));
313 
314 #ifdef TRACE_AV_CODEC
315 	char buffer[1024];
316 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
317 	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
318 #endif
319 
320 	if (fIsAudio)
321 		return _NegotiateAudioOutputFormat(inOutFormat);
322 	else
323 		return _NegotiateVideoOutputFormat(inOutFormat);
324 }
325 
326 
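// Entry point for decoding one buffer; dispatches to the audio or video path
// and afterwards recomputes fStartTime from the running frame counter.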
327 status_t
328 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
329 	media_header* mediaHeader, media_decode_info* info)
330 {
331 	if (!fCodecInitDone)
332 		return B_NO_INIT;
333 
334 //	TRACE("[%c] AVCodecDecoder::Decode() for time %Ld\n", fIsAudio?('a'):('v'),
335 //		fStartTime);
336 
337 	mediaHeader->start_time = fStartTime;
338 
339 	status_t ret;
340 	if (fIsAudio)
341 		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
342 	else
343 		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);
344 
345 	fStartTime = (bigtime_t)(1000000LL * fFrame / fOutputFrameRate);
346 
347 	return ret;
348 }
349 
350 
351 // #pragma mark -
352 
353 
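// Builds a raw audio format from the encoded input format (falling back to
// host endian 16 bit samples where the format is a wildcard), transfers the
// relevant parameters to fContext and (re-)opens the decoder.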
354 status_t
355 AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
356 {
357 	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");
358 
359 	media_multi_audio_format outputAudioFormat;
360 	outputAudioFormat = media_raw_audio_format::wildcard;
361 	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
362 	outputAudioFormat.frame_rate
363 		= fInputFormat.u.encoded_audio.output.frame_rate;
364 	outputAudioFormat.channel_count
365 		= fInputFormat.u.encoded_audio.output.channel_count;
366 	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
367 	// If the format is still a wildcard, fall back to 16 bit samples.
368 	if (outputAudioFormat.format == 0)
369 		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
370 
371 	outputAudioFormat.buffer_size
372 		= 1024 * fInputFormat.u.encoded_audio.output.channel_count;
373 	inOutFormat->type = B_MEDIA_RAW_AUDIO;
374 	inOutFormat->u.raw_audio = outputAudioFormat;
375 
376 	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
377 	fContext->frame_size = (int)fInputFormat.u.encoded_audio.frame_size;
378 	fContext->sample_rate
379 		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
380 	fContext->channels = fInputFormat.u.encoded_audio.output.channel_count;
381 	fContext->block_align = fBlockAlign;
382 	fContext->extradata = (uint8_t*)fExtraData;
383 	fContext->extradata_size = fExtraDataSize;
384 
385 	// TODO: This probably needs to go away, there is some misconception
386 	// about extra data / info buffer and meta data. See
387 	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
388 	// extradata_size into media_format::MetaData(), but used to ignore
389 	// the infoBuffer passed to GetStreamInfo(). I think this may be why
390 	// the code below was added.
391 	if (fInputFormat.MetaDataSize() > 0) {
392 		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
393 		fContext->extradata_size = fInputFormat.MetaDataSize();
394 	}
395 
396 	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
397 		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
398 		fContext->channels, fContext->block_align, fContext->extradata_size);
399 
400 	// close any previous instance
401 	if (fCodecInitDone) {
402 		fCodecInitDone = false;
403 		avcodec_close(fContext);
404 	}
405 
406 	// open new
407 	int result = avcodec_open(fContext, fCodec);
408 	fCodecInitDone = (result >= 0);
409 
410 	fStartTime = 0;
411 	size_t sampleSize = outputAudioFormat.format
412 		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
413 	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
414 	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
415 	fOutputFrameRate = outputAudioFormat.frame_rate;
416 
417 	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
418 		"output frame size: %d, count: %ld, rate: %.2f\n",
419 		fContext->bit_rate, fContext->sample_rate, fContext->channels,
420 		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);
421 
422 	fChunkBuffer = NULL;
423 	fChunkBufferOffset = 0;
424 	fChunkBufferSize = 0;
425 	fAudioDecodeError = false;
426 	fOutputBufferOffset = 0;
427 	fOutputBufferSize = 0;
428 
429 	inOutFormat->require_flags = 0;
430 	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
431 
432 	if (!fCodecInitDone) {
433 		TRACE("avcodec_open() failed!\n");
434 		return B_ERROR;
435 	}
436 
437 	return B_OK;
438 }
439 
440 
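// Sets up the raw video output format (B_RGB32 unless B_YCbCr422 was
// requested), opens the decoder and tries to prepare the color space
// conversion; if the decoder does not report a pixel format before the first
// frame, the conversion setup is repeated in _DecodeVideo().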
441 status_t
442 AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
443 {
444 	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");
445 
446 	fOutputVideoFormat = fInputFormat.u.encoded_video.output;
447 
448 	fContext->width = fOutputVideoFormat.display.line_width;
449 	fContext->height = fOutputVideoFormat.display.line_count;
450 //	fContext->frame_rate = (int)(fOutputVideoFormat.field_rate
451 //		* fContext->frame_rate_base);
452 
453 	fOutputFrameRate = fOutputVideoFormat.field_rate;
454 
455 	fContext->extradata = (uint8_t*)fExtraData;
456 	fContext->extradata_size = fExtraDataSize;
457 
458 	TRACE("  requested video format 0x%x\n",
459 		inOutFormat->u.raw_video.display.format);
460 
461 	// Make MediaPlayer happy: when the screen is not in RGB32 depth and no
462 	// overlay is available, it only asks for YCbCr, which DrawBitmap() cannot
463 	// handle, so RGB32 is used as the default color depth.
464 	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
465 		fOutputVideoFormat.display.format = B_YCbCr422;
466 	else
467 		fOutputVideoFormat.display.format = B_RGB32;
468 
469 	// Search for a pixel-format the codec handles
470 	// TODO: We should try this a couple of times until it succeeds, each
471 	// time using another pixel-format that is supported by the decoder.
472 	// But libavcodec doesn't seem to offer any way to tell the decoder
473 	// which format it should use.
474 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
475 	fSwsContext = NULL;
476 #else
477 	fFormatConversionFunc = 0;
478 #endif
479 	// Iterate over supported codec formats
480 	for (int i = 0; i < 1; i++) {
481 		// close any previous instance
482 		if (fCodecInitDone) {
483 			fCodecInitDone = false;
484 			avcodec_close(fContext);
485 		}
486 		// TODO: Set n-th fContext->pix_fmt here
487 		if (avcodec_open(fContext, fCodec) >= 0) {
488 			fCodecInitDone = true;
489 
490 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
491 			fSwsContext = sws_getContext(fContext->width, fContext->height,
492 				fContext->pix_fmt, fContext->width, fContext->height,
493 				colorspace_to_pixfmt(fOutputVideoFormat.display.format),
494 				SWS_FAST_BILINEAR, NULL, NULL, NULL);
495 		}
496 #else
497 			fFormatConversionFunc = resolve_colorspace(
498 				fOutputVideoFormat.display.format, fContext->pix_fmt,
499 				fContext->width, fContext->height);
500 		}
501 		if (fFormatConversionFunc != NULL)
502 			break;
503 #endif
504 	}
505 
506 	if (!fCodecInitDone) {
507 		TRACE("avcodec_open() failed to init codec!\n");
508 		return B_ERROR;
509 	}
510 
511 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
512 	if (fSwsContext == NULL) {
513 		TRACE("No SWS Scale context or decoder has not set the pixel format "
514 			"yet!\n");
515 	}
516 #else
517 	if (fFormatConversionFunc == NULL) {
518 		TRACE("no pixel format conversion function found or decoder has "
519 			"not set the pixel format yet!\n");
520 	}
521 #endif
522 
523 	if (fOutputVideoFormat.display.format == B_YCbCr422) {
524 		fOutputVideoFormat.display.bytes_per_row
525 			= 2 * fOutputVideoFormat.display.line_width;
526 	} else {
527 		fOutputVideoFormat.display.bytes_per_row
528 			= 4 * fOutputVideoFormat.display.line_width;
529 	}
530 
531 	inOutFormat->type = B_MEDIA_RAW_VIDEO;
532 	inOutFormat->u.raw_video = fOutputVideoFormat;
533 
534 	inOutFormat->require_flags = 0;
535 	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
536 
537 #ifdef TRACE_AV_CODEC
538 	char buffer[1024];
539 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
540 	TRACE("[v]  outFormat = %s\n", buffer);
541 	TRACE("  returned  video format 0x%x\n",
542 		inOutFormat->u.raw_video.display.format);
543 #endif
544 
545 	return B_OK;
546 }
547 
548 
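// Fills the caller's buffer with up to fOutputFrameCount frames. Three
// buffers are juggled in the loop below: decoded samples are drained from
// fOutputBuffer into outBuffer, new encoded data is fetched into
// fChunkBuffer via GetNextChunk(), and the decoder refills fOutputBuffer
// from the current chunk.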
549 status_t
550 AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
551 	media_header* mediaHeader, media_decode_info* info)
552 {
553 	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio()\n");
554 //	TRACE_AUDIO("  audio start_time %.6f\n",
555 //		mediaHeader->start_time / 1000000.0);
556 
557 	char* output_buffer = (char*)outBuffer;
558 	*outFrameCount = 0;
559 	while (*outFrameCount < fOutputFrameCount) {
560 		if (fOutputBufferSize < 0) {
561 			TRACE_AUDIO("  ############ fOutputBufferSize %ld\n",
562 				fOutputBufferSize);
563 			fOutputBufferSize = 0;
564 		}
565 		if (fChunkBufferSize < 0) {
566 			TRACE_AUDIO("  ############ fChunkBufferSize %ld\n",
567 				fChunkBufferSize);
568 			fChunkBufferSize = 0;
569 		}
570 
571 		if (fOutputBufferSize > 0) {
572 			int32 frames = min_c(fOutputFrameCount - *outFrameCount,
573 				fOutputBufferSize / fOutputFrameSize);
574 			memcpy(output_buffer, fOutputBuffer + fOutputBufferOffset,
575 				frames * fOutputFrameSize);
576 			fOutputBufferOffset += frames * fOutputFrameSize;
577 			fOutputBufferSize -= frames * fOutputFrameSize;
578 			output_buffer += frames * fOutputFrameSize;
579 			*outFrameCount += frames;
580 			fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
581 			continue;
582 		}
583 		if (fChunkBufferSize == 0) {
584 			media_header chunkMediaHeader;
585 			status_t err;
586 			err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize,
587 				&chunkMediaHeader);
588 			if (err == B_LAST_BUFFER_ERROR) {
589 				TRACE_AUDIO("  Last Chunk with chunk size %ld\n",
590 					fChunkBufferSize);
591 				fChunkBufferSize = 0;
592 				return err;
593 			}
594 			if (err != B_OK || fChunkBufferSize < 0) {
595 				printf("GetNextChunk error %s, fChunkBufferSize %ld\n",
					strerror(err), fChunkBufferSize);
596 				fChunkBufferSize = 0;
597 				break;
598 			}
599 			fChunkBufferOffset = 0;
600 			fStartTime = chunkMediaHeader.start_time;
601 			if (*outFrameCount == 0)
602 				mediaHeader->start_time = chunkMediaHeader.start_time;
603 			continue;
604 		}
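		// Decode more of the current chunk. With the old
		// avcodec_decode_audio2() API the return value is the number of
		// bytes consumed from the input and out_size returns the number of
		// bytes written to fOutputBuffer.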
605 		if (fOutputBufferSize == 0) {
606 			int len;
607 			int out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
608 			len = avcodec_decode_audio2(fContext, (short *)fOutputBuffer,
609 				&out_size, (uint8_t*)fChunkBuffer + fChunkBufferOffset,
610 				fChunkBufferSize);
611 			if (len < 0) {
612 				if (!fAudioDecodeError) {
613 					printf("########### audio decode error, "
614 						"fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
615 						fChunkBufferSize, fChunkBufferOffset);
616 					fAudioDecodeError = true;
617 				}
618 				out_size = 0;
619 				len = 0;
620 				fChunkBufferOffset = 0;
621 				fChunkBufferSize = 0;
622 			} else
623 				fAudioDecodeError = false;
624 
625 			fChunkBufferOffset += len;
626 			fChunkBufferSize -= len;
627 			fOutputBufferOffset = 0;
628 			fOutputBufferSize = out_size;
629 		}
630 	}
631 	TRACE_AUDIO("  frame count: %lld\n", *outFrameCount);
632 	fFrame += *outFrameCount;
633 
634 //	TRACE("Played %Ld frames at time %Ld\n",*outFrameCount, mediaHeader->start_time);
635 	return B_OK;
636 }
637 
638 
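// Fetches chunks from the reader and feeds them to avcodec_decode_video()
// until a complete picture is available, optionally deinterlaces it and then
// converts it into the negotiated color space directly into outBuffer.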
639 status_t
640 AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
641 	media_header* mediaHeader, media_decode_info* info)
642 {
643 	bool firstRun = true;
644 	while (true) {
645 		const void* data;
646 		size_t size;
647 		media_header chunkMediaHeader;
648 		status_t err = GetNextChunk(&data, &size, &chunkMediaHeader);
649 		if (err != B_OK) {
650 			TRACE("AVCodecDecoder::_DecodeVideo(): error from "
651 				"GetNextChunk(): %s\n", strerror(err));
652 			return err;
653 		}
654 #ifdef LOG_STREAM_TO_FILE
655 		if (sDumpedPackets < 100) {
656 			sStreamLogFile.Write(data, size);
657 			printf("wrote %ld bytes\n", size);
658 			sDumpedPackets++;
659 		} else if (sDumpedPackets == 100)
660 			sStreamLogFile.Unset();
661 #endif
662 
663 		if (firstRun) {
664 			firstRun = false;
665 
666 			mediaHeader->type = B_MEDIA_RAW_VIDEO;
667 //			mediaHeader->start_time = chunkMediaHeader.start_time;
668 			mediaHeader->file_pos = 0;
669 			mediaHeader->orig_size = 0;
670 			mediaHeader->u.raw_video.field_gamma = 1.0;
671 			mediaHeader->u.raw_video.field_sequence = fFrame;
672 			mediaHeader->u.raw_video.field_number = 0;
673 			mediaHeader->u.raw_video.pulldown_number = 0;
674 			mediaHeader->u.raw_video.first_active_line = 1;
675 			mediaHeader->u.raw_video.line_count
676 				= fOutputVideoFormat.display.line_count;
677 
678 			TRACE("[v] start_time=%02d:%02d.%02d field_sequence=%lu\n",
679 				int((mediaHeader->start_time / 60000000) % 60),
680 				int((mediaHeader->start_time / 1000000) % 60),
681 				int((mediaHeader->start_time / 10000) % 100),
682 				mediaHeader->u.raw_video.field_sequence);
683 		}
684 
685 #if DO_PROFILING
686 		bigtime_t startTime = system_time();
687 #endif
688 
689 		// NOTE: In the FFmpeg code example I've read, the length returned by
690 		// avcodec_decode_video() is completely ignored. Furthermore, the
691 		// packet buffers are supposed to contain complete frames only, so
692 		// there seems to be no need to buffer packets in case only part of
693 		// a frame has been read.
694 		int gotPicture = 0;
695 		int len = avcodec_decode_video(fContext, fInputPicture, &gotPicture,
696 			(uint8_t*)data, size);
697 		if (len < 0) {
698 			TRACE("[v] AVCodecDecoder: error in decoding frame %lld: %d\n",
699 				fFrame, len);
700 			// NOTE: An error from avcodec_decode_video() seems to be ignored
701 			// in the ffplay sample code.
702 //			return B_ERROR;
703 		}
704 
705 
706 //TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
707 //	"fContext->frame_rate = %ld\n", (int)(fContext->pts / (60*60*1000000)),
708 //	(int)(fContext->pts / (60*1000000)), (int)(fContext->pts / (1000000)),
709 //	(int)(fContext->pts % 1000000), fContext->frame_number,
710 //	fContext->frame_rate);
711 //TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
712 //	"fContext->frame_rate = %ld\n",
713 //	(int)(fInputPicture->pts / (60*60*1000000)),
714 //	(int)(fInputPicture->pts / (60*1000000)),
715 //	(int)(fInputPicture->pts / (1000000)),
716 //	(int)(fInputPicture->pts % 1000000), fContext->frame_number,
717 //	fContext->frame_rate);
718 
719 		if (gotPicture) {
720 			int width = fOutputVideoFormat.display.line_width;
721 			int height = fOutputVideoFormat.display.line_count;
722 			AVPicture deinterlacedPicture;
723 			bool useDeinterlacedPicture = false;
724 
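			// For interlaced frames, deinterlace into a temporary picture;
			// the color space conversion below then reads from that picture
			// instead of fInputPicture.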
725 			if (fInputPicture->interlaced_frame) {
726 				AVPicture source;
727 				source.data[0] = fInputPicture->data[0];
728 				source.data[1] = fInputPicture->data[1];
729 				source.data[2] = fInputPicture->data[2];
730 				source.data[3] = fInputPicture->data[3];
731 				source.linesize[0] = fInputPicture->linesize[0];
732 				source.linesize[1] = fInputPicture->linesize[1];
733 				source.linesize[2] = fInputPicture->linesize[2];
734 				source.linesize[3] = fInputPicture->linesize[3];
735 
736 				avpicture_alloc(&deinterlacedPicture,
737 					fContext->pix_fmt, width, height);
738 
739 				if (avpicture_deinterlace(&deinterlacedPicture, &source,
740 						fContext->pix_fmt, width, height) < 0) {
741 					TRACE("[v] avpicture_deinterlace() - error\n");
742 				} else
743 					useDeinterlacedPicture = true;
744 			}
745 
746 #if DO_PROFILING
747 			bigtime_t formatConversionStart = system_time();
748 #endif
749 //			TRACE("ONE FRAME OUT !! len=%d size=%ld (%s)\n", len, size,
750 //				pixfmt_to_string(fContext->pix_fmt));
751 
752 			// Some decoders do not set pix_fmt until they have decoded 1 frame
753 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
754 			if (fSwsContext == NULL) {
755 				fSwsContext = sws_getContext(fContext->width, fContext->height,
756 					fContext->pix_fmt, fContext->width, fContext->height,
757 					colorspace_to_pixfmt(fOutputVideoFormat.display.format),
758 					SWS_FAST_BILINEAR, NULL, NULL, NULL);
759 			}
760 #else
761 			if (fFormatConversionFunc == NULL) {
762 				fFormatConversionFunc = resolve_colorspace(
763 					fOutputVideoFormat.display.format, fContext->pix_fmt,
764 					fContext->width, fContext->height);
765 			}
766 #endif
767 
768 			fOutputPicture->data[0] = (uint8_t*)outBuffer;
769 			fOutputPicture->linesize[0]
770 				= fOutputVideoFormat.display.bytes_per_row;
771 
772 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
773 			if (fSwsContext != NULL) {
774 #else
775 			if (fFormatConversionFunc != NULL) {
776 #endif
777 				if (useDeinterlacedPicture) {
778 					AVFrame inputFrame;
779 					inputFrame.data[0] = deinterlacedPicture.data[0];
780 					inputFrame.data[1] = deinterlacedPicture.data[1];
781 					inputFrame.data[2] = deinterlacedPicture.data[2];
782 					inputFrame.data[3] = deinterlacedPicture.data[3];
783 					inputFrame.linesize[0] = deinterlacedPicture.linesize[0];
784 					inputFrame.linesize[1] = deinterlacedPicture.linesize[1];
785 					inputFrame.linesize[2] = deinterlacedPicture.linesize[2];
786 					inputFrame.linesize[3] = deinterlacedPicture.linesize[3];
787 
788 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
789 					sws_scale(fSwsContext, inputFrame.data,
790 						inputFrame.linesize, 0, fContext->height,
791 						fOutputPicture->data, fOutputPicture->linesize);
792 #else
793 					(*fFormatConversionFunc)(&inputFrame,
794 						fOutputPicture, width, height);
795 #endif
796 				} else {
797 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
798 					sws_scale(fSwsContext, fInputPicture->data,
799 						fInputPicture->linesize, 0, fContext->height,
800 						fOutputPicture->data, fOutputPicture->linesize);
801 #else
802 					(*fFormatConversionFunc)(fInputPicture, fOutputPicture,
803 						width, height);
804 #endif
805 				}
806 			}
807 			if (fInputPicture->interlaced_frame)
808 				avpicture_free(&deinterlacedPicture);
809 #ifdef DEBUG
810 			dump_ffframe(fInputPicture, "ffpict");
811 //			dump_ffframe(fOutputPicture, "opict");
812 #endif
813 			*outFrameCount = 1;
814 			fFrame++;
815 
816 #if DO_PROFILING
817 			bigtime_t doneTime = system_time();
818 			decodingTime += formatConversionStart - startTime;
819 			conversionTime += doneTime - formatConversionStart;
820 			profileCounter++;
821 			if (!(fFrame % 10)) {
822 				if (info) {
823 					printf("[v] profile: d1 = %lld, d2 = %lld (%Ld) required "
824 						"%Ld\n",
825 						decodingTime / profileCounter,
826 						conversionTime / profileCounter,
827 						fFrame, info->time_to_decode);
828 				} else {
829 					printf("[v] profile: d1 = %lld, d2 = %lld (%Ld) required "
830 						"%Ld\n",
831 						decodingTime / profileCounter,
832 						conversionTime / profileCounter,
833 						fFrame, bigtime_t(1000000LL / fOutputFrameRate));
834 				}
835 			}
836 #endif
837 			return B_OK;
838 		} else {
839 			TRACE("frame %lld - no picture yet, len: %d, chunk size: %ld\n",
840 				fFrame, len, size);
841 		}
842 	}
843 }
844 
845 
846