xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision 5b6fca06392320a9c32d542b519d8f4fe876aef2)
1 /*
2  * Copyright (C) 2001 Carlos Hasan
3  * Copyright (C) 2001 François Revol
4  * Copyright (C) 2001 Axel Dörfler
5  * Copyright (C) 2004 Marcus Overhagen
6  * Copyright (C) 2009 Stephan Amßus <superstippi@gmx.de>
7  *
8  * All rights reserved. Distributed under the terms of the MIT License.
9  */
10 
11 //! libavcodec based decoder for Haiku
12 
13 #include "AVCodecDecoder.h"
14 
15 #include <new>
16 
17 #include <string.h>
18 
19 #include <Bitmap.h>
20 #include <Debug.h>
21 
22 
23 #undef TRACE
24 //#define TRACE_AV_CODEC
25 #ifdef TRACE_AV_CODEC
26 #	define TRACE(x...)	printf(x)
27 #	define TRACE_AUDIO(x...)	printf(x)
28 #	define TRACE_VIDEO(x...)	printf(x)
29 #else
30 #	define TRACE(x...)
31 #	define TRACE_AUDIO(x...)
32 #	define TRACE_VIDEO(x...)
33 #endif
34 
35 //#define LOG_STREAM_TO_FILE
36 #ifdef LOG_STREAM_TO_FILE
37 #	include <File.h>
38 	static BFile sStreamLogFile("/boot/home/Desktop/AVCodecDebugStream.raw",
39 		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
40 	static int sDumpedPackets = 0;
41 #endif
42 
43 #define USE_SWS_FOR_COLOR_SPACE_CONVERSION 0
44 // NOTE: David's color space conversion is much faster than the FFmpeg
45 // version. Perhaps the SWS code can be used for unsupported conversions?
46 // Otherwise the alternative code could simply be removed from this file.
47 
48 
// Mirrors the layout of the Win32 WAVEFORMATEX header as it appears in
// B_WAV_FORMAT_FAMILY info buffers. Declared _PACKED so it can be overlaid
// directly onto the raw bytes handed to Setup() without padding issues.
struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;
	// extra_data[extra_size]
	// Codec-specific extra data of extra_size bytes immediately follows
	// this struct in memory (see the "waveFormatData + 1" use in Setup()).
} _PACKED;
59 
60 
// profiling related globals
// DO_PROFILING is always defined (0 = off, 1 = on), so it must be tested
// with "#if", not "#ifdef".
#define DO_PROFILING 0

static bigtime_t decodingTime = 0;		// accumulated time spent decoding
static bigtime_t conversionTime = 0;	// accumulated time spent converting color space
static long profileCounter = 0;			// frames accumulated since last report
67 
68 
/*!	Creates the decoder with all members in a neutral state. The FFmpeg
	context and frame structures are allocated right away via the (legacy)
	libavcodec allocators; the actual codec is only selected in Setup() and
	opened during output-format negotiation.
*/
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fOutputVideoFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fContext(avcodec_alloc_context()),
	fInputPicture(avcodec_alloc_frame()),
	fOutputPicture(avcodec_alloc_frame()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fStartTime(0),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferOffset(0),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fOutputBuffer(NULL),
	fOutputBufferOffset(0),
	fOutputBufferSize(0)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	// Be tolerant towards slightly broken streams: careful error
	// recognition combined with aggressive error concealment.
	fContext->error_recognition = FF_ER_CAREFUL;
	fContext->error_concealment = 3;
	// Single decoding thread — NOTE(review): presumably to keep decoding
	// deterministic/simple; confirm before enabling more threads.
	avcodec_thread_init(fContext, 1);
}
113 
114 
115 AVCodecDecoder::~AVCodecDecoder()
116 {
117 	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));
118 
119 #ifdef DO_PROFILING
120 	if (profileCounter > 0) {
121 		printf("[%c] profile: d1 = %lld, d2 = %lld (%Ld)\n",
122 			fIsAudio?('a'):('v'), decodingTime / profileCounter,
123 			conversionTime / profileCounter, fFrame);
124 	}
125 #endif
126 
127 	if (fCodecInitDone)
128 		avcodec_close(fContext);
129 
130 	free(fOutputPicture);
131 	free(fInputPicture);
132 	free(fContext);
133 
134 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION
135 	if (fSwsContext != NULL)
136 		sws_freeContext(fSwsContext);
137 #endif
138 
139 	delete[] fExtraData;
140 	delete[] fOutputBuffer;
141 }
142 
143 
144 void
145 AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
146 {
147 	snprintf(mci->short_name, 32, "%s", fCodec->name);
148 	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
149 	mci->id = 0;
150 	mci->sub_id = fCodec->id;
151 }
152 
153 
154 status_t
155 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
156 	size_t infoSize)
157 {
158 	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
159 		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
160 		return B_ERROR;
161 
162 	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
163 	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));
164 
165 	if (fIsAudio && fOutputBuffer == NULL) {
166 		fOutputBuffer = new(std::nothrow) char[AVCODEC_MAX_AUDIO_FRAME_SIZE];
167 		if (fOutputBuffer == NULL)
168 			return B_NO_MEMORY;
169 	}
170 
171 #ifdef TRACE_AV_CODEC
172 	char buffer[1024];
173 	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
174 	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
175 	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
176 	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
177 		ioEncodedFormat->user_data_type);
178 	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
179 		ioEncodedFormat->MetaDataSize());
180 #endif
181 
182 	media_format_description description;
183 	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
184 			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
185 		if (description.u.misc.file_format != 'ffmp')
186 			return B_NOT_SUPPORTED;
187 		fCodec = avcodec_find_decoder(static_cast<CodecID>(
188 			description.u.misc.codec));
189 		if (fCodec == NULL) {
190 			TRACE("  unable to find the correct FFmpeg "
191 				"decoder (id = %lu)\n", description.u.misc.codec);
192 			return B_ERROR;
193 		}
194 		TRACE("  found decoder %s\n", fCodec->name);
195 
196 		const void* extraData = infoBuffer;
197 		fExtraDataSize = infoSize;
198 		if (description.family == B_WAV_FORMAT_FAMILY
199 				&& infoSize >= sizeof(wave_format_ex)) {
200 			TRACE("  trying to use wave_format_ex\n");
201 			// Special case extra data in B_WAV_FORMAT_FAMILY
202 			const wave_format_ex* waveFormatData
203 				= (const wave_format_ex*)infoBuffer;
204 
205 			size_t waveFormatSize = infoSize;
206 			if (waveFormatData != NULL && waveFormatSize > 0) {
207 				fBlockAlign = waveFormatData->block_align;
208 				TRACE("  found block align: %d\n", fBlockAlign);
209 				fExtraDataSize = waveFormatData->extra_size;
210 				// skip the wave_format_ex from the extra data.
211 				extraData = waveFormatData + 1;
212 			}
213 		} else {
214 			if (fIsAudio) {
215 				fBlockAlign
216 					= ioEncodedFormat->u.encoded_audio.output
217 						.buffer_size;
218 				TRACE("  using buffer_size as block align: %d\n",
219 					fBlockAlign);
220 			}
221 		}
222 		if (extraData != NULL && fExtraDataSize > 0) {
223 			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
224 			delete[] fExtraData;
225 			fExtraData = new(std::nothrow) char[fExtraDataSize];
226 			if (fExtraData != NULL)
227 				memcpy(fExtraData, infoBuffer, fExtraDataSize);
228 			else
229 				fExtraDataSize = 0;
230 		}
231 
232 		fInputFormat = *ioEncodedFormat;
233 		return B_OK;
234 	} else {
235 		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
236 	}
237 
238 	printf("AVCodecDecoder::Setup failed!\n");
239 	return B_ERROR;
240 }
241 
242 
243 status_t
244 AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
245 {
246 	status_t ret = B_OK;
247 	// Reset the FFmpeg codec to flush buffers, so we keep the sync
248 	if (fCodecInitDone)
249 		avcodec_flush_buffers(fContext);
250 
251 	// Flush internal buffers as well.
252 	fChunkBuffer = NULL;
253 	fChunkBufferOffset = 0;
254 	fChunkBufferSize = 0;
255 	fOutputBufferOffset = 0;
256 	fOutputBufferSize = 0;
257 
258 	fFrame = frame;
259 	fStartTime = time;
260 
261 	return ret;
262 }
263 
264 
265 status_t
266 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
267 {
268 	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
269 		fIsAudio?('a'):('v'));
270 
271 #ifdef TRACE_AV_CODEC
272 	char buffer[1024];
273 	string_for_format(*inOutFormat, buffer, sizeof(buffer));
274 	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
275 #endif
276 
277 	if (fIsAudio)
278 		return _NegotiateAudioOutputFormat(inOutFormat);
279 	else
280 		return _NegotiateVideoOutputFormat(inOutFormat);
281 }
282 
283 
284 status_t
285 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
286 	media_header* mediaHeader, media_decode_info* info)
287 {
288 	if (!fCodecInitDone)
289 		return B_NO_INIT;
290 
291 //	TRACE("[%c] AVCodecDecoder::Decode() for time %Ld\n", fIsAudio?('a'):('v'),
292 //		fStartTime);
293 
294 	mediaHeader->start_time = fStartTime;
295 
296 	status_t ret;
297 	if (fIsAudio)
298 		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
299 	else
300 		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);
301 
302 	return ret;
303 }
304 
305 
306 // #pragma mark -
307 
308 
/*!	Negotiates the raw audio output format, configures the codec context
	from the encoded input format and (re)opens the codec.

	Wildcard fields in the requested format are resolved to defaults
	(B_AUDIO_SHORT, stereo, 512-frame buffer). On success the derived
	per-frame bookkeeping (fOutputFrameSize/Count/Rate) is set up and all
	chunk/output buffers are reset.

	\return B_OK on success, B_ERROR if avcodec_open() failed.
*/
status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	// Start from a wildcard format and fill in what the input stream
	// dictates, keeping the caller's requested buffer size.
	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate
		= fInputFormat.u.encoded_audio.output.frame_rate;
	outputAudioFormat.channel_count
		= fInputFormat.u.encoded_audio.output.channel_count;
	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
	outputAudioFormat.buffer_size
		= inOutFormat->u.raw_audio.buffer_size;
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	// The low bits of the format constant encode the sample size in bytes.
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	// Check that channel count is not still a wild card!
	if (outputAudioFormat.channel_count == 0) {
		TRACE("  channel_count still a wild-card, assuming stereo.\n");
		outputAudioFormat.channel_count = 2;
	}

	if (outputAudioFormat.buffer_size == 0) {
		// No size requested: default to 512 frames per buffer.
		outputAudioFormat.buffer_size = 512
			* sampleSize * outputAudioFormat.channel_count;
	}
	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;

	// Mirror the negotiated parameters into the codec context before
	// opening the codec.
	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
	fContext->frame_size = (int)fInputFormat.u.encoded_audio.frame_size;
	fContext->sample_rate
		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
	fContext->channels = outputAudioFormat.channel_count;
	fContext->block_align = fBlockAlign;
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
		fContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
		fContext->channels, fContext->block_align, fContext->extradata_size);

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	// open new
	int result = avcodec_open(fContext, fCodec);
	fCodecInitDone = (result >= 0);

	// Derive the per-frame bookkeeping used by _DecodeAudio().
	fStartTime = 0;
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fContext->bit_rate, fContext->sample_rate, fContext->channels,
		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	// Reset all buffered state; any previously buffered data belongs to
	// the old format.
	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	av_init_packet(&fTempPacket);

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	if (!fCodecInitDone) {
		TRACE("avcodec_open() failed!\n");
		return B_ERROR;
	}

	return B_OK;
}
407 
408 
/*!	Negotiates the raw video output format, opens the codec and sets up the
	color space conversion (either an SWS context or one of our own
	conversion functions, depending on USE_SWS_FOR_COLOR_SPACE_CONVERSION).

	The output color space is B_YCbCr422 if that is what the caller asked
	for, otherwise B_RGB32 (see the comment below about MediaPlayer).

	\return B_OK on success, B_ERROR if avcodec_open() failed.

	NOTE: the loop below contains "#if"-interleaved closing braces; the
	code compiles to a well-formed loop in either configuration, but edit
	with care.
*/
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	fOutputVideoFormat = fInputFormat.u.encoded_video.output;

	fContext->width = fOutputVideoFormat.display.line_width;
	fContext->height = fOutputVideoFormat.display.line_count;
//	fContext->frame_rate = (int)(fOutputVideoFormat.field_rate
//		* fContext->frame_rate_base);

	fOutputFrameRate = fOutputVideoFormat.field_rate;

	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputVideoFormat.display.format = B_YCbCr422;
	else
		fOutputVideoFormat.display.format = B_RGB32;

	// Search for a pixel-format the codec handles
	// TODO: We should try this a couple of times until it succeeds, each
	// time using another pixel-format that is supported by the decoder.
	// But libavcodec doesn't seem to offer any way to tell the decoder
	// which format it should use.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif
	// Iterate over supported codec formats
	for (int i = 0; i < 1; i++) {
		// close any previous instance
		if (fCodecInitDone) {
			fCodecInitDone = false;
			avcodec_close(fContext);
		}
		// TODO: Set n-th fContext->pix_fmt here
		if (avcodec_open(fContext, fCodec) >= 0) {
			fCodecInitDone = true;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			fSwsContext = sws_getContext(fContext->width, fContext->height,
				fContext->pix_fmt, fContext->width, fContext->height,
				colorspace_to_pixfmt(fOutputVideoFormat.display.format),
				SWS_FAST_BILINEAR, NULL, NULL, NULL);
		}
#else
			fFormatConversionFunc = resolve_colorspace(
				fOutputVideoFormat.display.format, fContext->pix_fmt,
				fContext->width, fContext->height);
		}
		if (fFormatConversionFunc != NULL)
			break;
#endif
	}

	if (!fCodecInitDone) {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

	// A missing conversion setup here is not fatal: some decoders only
	// report their pixel format after the first decoded frame, and
	// _DecodeVideo() retries the lookup then.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	// Bytes per row: 2 bytes/pixel for YCbCr422, 4 for RGB32.
	if (fOutputVideoFormat.display.format == B_YCbCr422) {
		fOutputVideoFormat.display.bytes_per_row
			= 2 * fOutputVideoFormat.display.line_width;
	} else {
		fOutputVideoFormat.display.bytes_per_row
			= 4 * fOutputVideoFormat.display.line_width;
	}

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->u.raw_video = fOutputVideoFormat;

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}
517 
518 
/*!	Fills \a _buffer with up to fOutputFrameCount decoded audio frames.

	Decoded data is staged in fOutputBuffer and drained into the caller's
	buffer; encoded data is pulled chunk-wise via GetNextChunk() and fed to
	avcodec_decode_audio3(). Leftover decoded frames and partially consumed
	chunks are carried over to the next invocation via the
	fOutputBuffer*/fChunkBuffer* members.

	\param _buffer destination for fOutputFrameCount * fOutputFrameSize
		bytes of raw audio.
	\param outFrameCount on return, the number of frames actually written.
	\return B_OK normally, B_LAST_BUFFER_ERROR at end of stream.
*/
status_t
AVCodecDecoder::_DecodeAudio(void* _buffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
		mediaHeader->start_time / 1000000.0);

	*outFrameCount = 0;

	uint8* buffer = reinterpret_cast<uint8*>(_buffer);
	while (*outFrameCount < fOutputFrameCount) {
		// Check conditions which would hint at broken code below.
		if (fOutputBufferSize < 0) {
			debugger("Decoding read past the end of the output buffer!");
			fOutputBufferSize = 0;
		}
		if (fChunkBufferSize < 0) {
			debugger("Decoding read past the end of the chunk buffer!");
			fChunkBufferSize = 0;
		}

		if (fOutputBufferSize > 0) {
			// We still have decoded audio frames from the last
			// invokation, which start at fOutputBuffer + fOutputBufferOffset
			// and are of fOutputBufferSize. Copy those into the buffer,
			// but not more than it can hold.
			int32 frames = min_c(fOutputFrameCount - *outFrameCount,
				fOutputBufferSize / fOutputFrameSize);
			if (frames == 0)
				debugger("fOutputBufferSize not multiple of frame size!");
			size_t remainingSize = frames * fOutputFrameSize;
			memcpy(buffer, fOutputBuffer + fOutputBufferOffset, remainingSize);
			fOutputBufferOffset += remainingSize;
			fOutputBufferSize -= remainingSize;
			buffer += remainingSize;
			*outFrameCount += frames;
			// Advance the running start time by the duration of the
			// copied frames.
			fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
			continue;
		}
		if (fChunkBufferSize == 0) {
			// Time to read the next chunk buffer. We use a separate
			// media_header, since the chunk header may not belong to
			// the start of the decoded audio frames we return. For
			// example we may have used frames from a previous invokation,
			// or we may have to read several chunks until we fill up the
			// output buffer.
			media_header chunkMediaHeader;
			status_t err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize,
				&chunkMediaHeader);
			if (err == B_LAST_BUFFER_ERROR) {
				TRACE_AUDIO("  Last Chunk with chunk size %ld\n",
					fChunkBufferSize);
				fChunkBufferSize = 0;
				return err;
			}
			if (err != B_OK || fChunkBufferSize < 0) {
				printf("GetNextChunk error %ld\n",fChunkBufferSize);
				fChunkBufferSize = 0;
				break;
			}
			fChunkBufferOffset = 0;
			fStartTime = chunkMediaHeader.start_time;
		}

		// Feed the unconsumed part of the chunk to the decoder.
		fTempPacket.data = (uint8_t*)fChunkBuffer + fChunkBufferOffset;
		fTempPacket.size = fChunkBufferSize;
		// Initialize decodedBytes to the output buffer size.
		int decodedBytes = AVCODEC_MAX_AUDIO_FRAME_SIZE;
		int usedBytes = avcodec_decode_audio3(fContext,
			(int16*)fOutputBuffer, &decodedBytes, &fTempPacket);
		if (usedBytes < 0 && !fAudioDecodeError) {
			// Report failure if not done already
			printf("########### audio decode error, "
				"fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
				fChunkBufferSize, fChunkBufferOffset);
			fAudioDecodeError = true;
		}
		if (usedBytes <= 0) {
			// Error or failure to produce decompressed output.
			// Skip the chunk buffer data entirely.
			usedBytes = fChunkBufferSize;
			decodedBytes = 0;
			// Assume the audio decoded until now is broken.
			memset(_buffer, 0, buffer - (uint8*)_buffer);
		} else {
			// Success
			fAudioDecodeError = false;
		}
//printf("  chunk size: %d, decoded: %d, used: %d\n",
//fTempPacket.size, decodedBytes, usedBytes);

		// Account for the consumed input and stage the decoded output for
		// the copy at the top of the loop.
		fChunkBufferOffset += usedBytes;
		fChunkBufferSize -= usedBytes;
		fOutputBufferOffset = 0;
		fOutputBufferSize = decodedBytes;
	}
	fFrame += *outFrameCount;
	TRACE_AUDIO("  frame count: %lld current: %lld\n", *outFrameCount, fFrame);

	return B_OK;
}
620 
621 
/*!	Decodes encoded video chunks until one complete picture is produced and
	converts it into \a outBuffer.

	Loops over GetNextChunk()/avcodec_decode_video2() until the decoder
	reports a complete frame. Interlaced frames are deinterlaced into a
	temporary picture first. The decoded picture is then converted into the
	negotiated output color space, either via swscale or via one of our own
	conversion functions (USE_SWS_FOR_COLOR_SPACE_CONVERSION).

	\param outBuffer destination bitmap buffer in the negotiated format.
	\param outFrameCount on success always set to 1.
	\param mediaHeader filled with the raw-video header of the returned
		frame (start time, field sequence, line count, ...).
	\return B_OK once a frame was emitted, or the error from GetNextChunk()
		(e.g. B_LAST_BUFFER_ERROR at end of stream).
*/
status_t
AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	bool firstRun = true;
	while (true) {
		const void* data;
		size_t size;
		media_header chunkMediaHeader;
		status_t err = GetNextChunk(&data, &size, &chunkMediaHeader);
		if (err != B_OK) {
			TRACE("AVCodecDecoder::_DecodeVideo(): error from "
				"GetNextChunk(): %s\n", strerror(err));
			return err;
		}
#ifdef LOG_STREAM_TO_FILE
		if (sDumpedPackets < 100) {
			sStreamLogFile.Write(data, size);
			printf("wrote %ld bytes\n", size);
			sDumpedPackets++;
		} else if (sDumpedPackets == 100)
			sStreamLogFile.Unset();
#endif

		if (firstRun) {
			// The first chunk of this call defines the header data of the
			// frame we will return.
			firstRun = false;

			mediaHeader->type = B_MEDIA_RAW_VIDEO;
			mediaHeader->start_time = chunkMediaHeader.start_time;
			fStartTime = chunkMediaHeader.start_time;
			mediaHeader->file_pos = 0;
			mediaHeader->orig_size = 0;
			mediaHeader->u.raw_video.field_gamma = 1.0;
			mediaHeader->u.raw_video.field_sequence = fFrame;
			mediaHeader->u.raw_video.field_number = 0;
			mediaHeader->u.raw_video.pulldown_number = 0;
			mediaHeader->u.raw_video.first_active_line = 1;
			mediaHeader->u.raw_video.line_count
				= fOutputVideoFormat.display.line_count;

			TRACE("[v] start_time=%02d:%02d.%02d field_sequence=%lu\n",
				int((mediaHeader->start_time / 60000000) % 60),
				int((mediaHeader->start_time / 1000000) % 60),
				int((mediaHeader->start_time / 10000) % 100),
				mediaHeader->u.raw_video.field_sequence);
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFmpeg code example I've read, the length returned by
		// avcodec_decode_video() is completely ignored. Furthermore, the
		// packet buffers are supposed to contain complete frames only so we
		// don't seem to be required to buffer any packets because not the
		// complete packet has been read.
		fTempPacket.data = (uint8_t*)data;
		fTempPacket.size = size;
		int gotPicture = 0;
		int len = avcodec_decode_video2(fContext, fInputPicture, &gotPicture,
			&fTempPacket);
		if (len < 0) {
			TRACE("[v] AVCodecDecoder: error in decoding frame %lld: %d\n",
				fFrame, len);
			// NOTE: An error from avcodec_decode_video() seems to be ignored
			// in the ffplay sample code.
//			return B_ERROR;
		}


//TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//	"fContext->frame_rate = %ld\n", (int)(fContext->pts / (60*60*1000000)),
//	(int)(fContext->pts / (60*1000000)), (int)(fContext->pts / (1000000)),
//	(int)(fContext->pts % 1000000), fContext->frame_number,
//	fContext->frame_rate);
//TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//	"fContext->frame_rate = %ld\n",
//	(int)(fInputPicture->pts / (60*60*1000000)),
//	(int)(fInputPicture->pts / (60*1000000)),
//	(int)(fInputPicture->pts / (1000000)),
//	(int)(fInputPicture->pts % 1000000), fContext->frame_number,
//	fContext->frame_rate);

		if (gotPicture) {
			int width = fOutputVideoFormat.display.line_width;
			int height = fOutputVideoFormat.display.line_count;
			AVPicture deinterlacedPicture;
			bool useDeinterlacedPicture = false;

			if (fInputPicture->interlaced_frame) {
				// Copy the plane/stride pointers into an AVPicture so we
				// can run the deinterlacer on it.
				AVPicture source;
				source.data[0] = fInputPicture->data[0];
				source.data[1] = fInputPicture->data[1];
				source.data[2] = fInputPicture->data[2];
				source.data[3] = fInputPicture->data[3];
				source.linesize[0] = fInputPicture->linesize[0];
				source.linesize[1] = fInputPicture->linesize[1];
				source.linesize[2] = fInputPicture->linesize[2];
				source.linesize[3] = fInputPicture->linesize[3];

				avpicture_alloc(&deinterlacedPicture,
					fContext->pix_fmt, width, height);

				if (avpicture_deinterlace(&deinterlacedPicture, &source,
						fContext->pix_fmt, width, height) < 0) {
					TRACE("[v] avpicture_deinterlace() - error\n");
				} else
					useDeinterlacedPicture = true;
			}

#if DO_PROFILING
			bigtime_t formatConversionStart = system_time();
#endif
//			TRACE("ONE FRAME OUT !! len=%d size=%ld (%s)\n", len, size,
//				pixfmt_to_string(fContext->pix_fmt));

			// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			if (fSwsContext == NULL) {
				fSwsContext = sws_getContext(fContext->width, fContext->height,
					fContext->pix_fmt, fContext->width, fContext->height,
					colorspace_to_pixfmt(fOutputVideoFormat.display.format),
					SWS_FAST_BILINEAR, NULL, NULL, NULL);
			}
#else
			if (fFormatConversionFunc == NULL) {
				fFormatConversionFunc = resolve_colorspace(
					fOutputVideoFormat.display.format, fContext->pix_fmt,
					fContext->width, fContext->height);
			}
#endif

			// Point the output picture directly at the caller's buffer so
			// the conversion writes in place.
			fOutputPicture->data[0] = (uint8_t*)outBuffer;
			fOutputPicture->linesize[0]
				= fOutputVideoFormat.display.bytes_per_row;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			if (fSwsContext != NULL) {
#else
			if (fFormatConversionFunc != NULL) {
#endif
				if (useDeinterlacedPicture) {
					AVFrame inputFrame;
					inputFrame.data[0] = deinterlacedPicture.data[0];
					inputFrame.data[1] = deinterlacedPicture.data[1];
					inputFrame.data[2] = deinterlacedPicture.data[2];
					inputFrame.data[3] = deinterlacedPicture.data[3];
					inputFrame.linesize[0] = deinterlacedPicture.linesize[0];
					inputFrame.linesize[1] = deinterlacedPicture.linesize[1];
					inputFrame.linesize[2] = deinterlacedPicture.linesize[2];
					inputFrame.linesize[3] = deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
					sws_scale(fSwsContext, inputFrame.data,
						inputFrame.linesize, 0, fContext->height,
						fOutputPicture->data, fOutputPicture->linesize);
#else
					(*fFormatConversionFunc)(&inputFrame,
						fOutputPicture, width, height);
#endif
				} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
					sws_scale(fSwsContext, fInputPicture->data,
						fInputPicture->linesize, 0, fContext->height,
						fOutputPicture->data, fOutputPicture->linesize);
#else
					(*fFormatConversionFunc)(fInputPicture, fOutputPicture,
						width, height);
#endif
				}
			}
			// Release the temporary picture allocated for deinterlacing.
			if (fInputPicture->interlaced_frame)
				avpicture_free(&deinterlacedPicture);
#ifdef DEBUG
			dump_ffframe(fInputPicture, "ffpict");
//			dump_ffframe(fOutputPicture, "opict");
#endif
			*outFrameCount = 1;
			fFrame++;

#if DO_PROFILING
			bigtime_t doneTime = system_time();
			decodingTime += formatConversionStart - startTime;
			conversionTime += doneTime - formatConversionStart;
			profileCounter++;
			if (!(fFrame % 5)) {
				if (info) {
					printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, info->time_to_decode);
				} else {
					printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, bigtime_t(1000000LL / fOutputFrameRate));
				}
				decodingTime = 0;
				conversionTime = 0;
				profileCounter = 0;
			}
#endif
			return B_OK;
		} else {
			// No complete picture yet — keep feeding chunks.
			TRACE("frame %lld - no picture yet, len: %d, chunk size: %ld\n",
				fFrame, len, size);
		}
	}
}
833 
834 
835