xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecDecoder.cpp (revision a629567a9001547736cfe892cdf992be16868fed)
/*
 * Copyright (C) 2001 Carlos Hasan
 * Copyright (C) 2001 François Revol
 * Copyright (C) 2001 Axel Dörfler
 * Copyright (C) 2004 Marcus Overhagen
 * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
 *
 * All rights reserved. Distributed under the terms of the MIT License.
 */

//! libavcodec based decoder for Haiku

#include "AVCodecDecoder.h"

#include <new>

#include <string.h>

#include <Bitmap.h>
#include <Debug.h>


#undef TRACE
//#define TRACE_AV_CODEC
#ifdef TRACE_AV_CODEC
#	define TRACE(x...)	printf(x)
#	define TRACE_AUDIO(x...)	printf(x)
#	define TRACE_VIDEO(x...)	printf(x)
#else
#	define TRACE(x...)
#	define TRACE_AUDIO(x...)
#	define TRACE_VIDEO(x...)
#endif

//#define LOG_STREAM_TO_FILE
#ifdef LOG_STREAM_TO_FILE
#	include <File.h>
	static BFile sStreamLogFile("/boot/home/Desktop/AVCodecDebugStream.raw",
		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
	static int sDumpedPackets = 0;
#endif

#ifdef __x86_64
#define USE_SWS_FOR_COLOR_SPACE_CONVERSION 1
#else
#define USE_SWS_FOR_COLOR_SPACE_CONVERSION 0
// NOTE: David's color space conversion is much faster than the FFmpeg
// version. Perhaps the SWS code can be used for unsupported conversions?
// Otherwise the alternative code could simply be removed from this file.
#endif


struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;
	// extra_data[extra_size]
} _PACKED;


// profiling related globals
#define DO_PROFILING 0

static bigtime_t decodingTime = 0;
static bigtime_t conversionTime = 0;
static long profileCounter = 0;


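// The decoder starts out unconfigured: the AVCodecContext and the AVFrame
// buffers are allocated here, while the actual codec is selected in Setup()
// and opened during format negotiation. Error resilience settings and a
// thread per CPU are applied to the context right away.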
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fOutputVideoFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fContext(avcodec_alloc_context3(NULL)),
	fInputPicture(avcodec_alloc_frame()),
	fOutputPicture(avcodec_alloc_frame()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fStartTime(0),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferOffset(0),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fOutputFrame(avcodec_alloc_frame()),
	fOutputBufferOffset(0),
	fOutputBufferSize(0)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	system_info info;
	get_system_info(&info);

	fContext->err_recognition = AV_EF_CAREFUL;
	fContext->error_concealment = 3;
	fContext->thread_count = info.cpu_count;
}


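// Closes the codec if it was opened and releases everything allocated in the
// constructor, the SWS context (if any) and the copied codec extradata.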
AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%Ld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	if (fCodecInitDone)
		avcodec_close(fContext);

	av_free(fOutputPicture);
	av_free(fInputPicture);
	av_free(fContext);
	av_free(fOutputFrame);

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
#endif

	delete[] fExtraData;
}


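// Reports the selected FFmpeg codec to the caller. This reads fCodec and
// therefore only returns meaningful values after Setup() has succeeded.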
void
AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
{
	snprintf(mci->short_name, 32, "%s", fCodec->name);
	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
	mci->id = 0;
	mci->sub_id = fCodec->id;
}


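// Picks the FFmpeg decoder for the encoded format. The Reader is expected to
// have registered the stream under the 'ffmp' misc format family with the
// FFmpeg codec ID in misc.codec. Codec extradata and the block alignment are
// extracted here so they can later be handed to the codec context.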
status_t
AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
	size_t infoSize)
{
	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
		return B_ERROR;

	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
	TRACE("[%c]   input_format = %s\n", fIsAudio?('a'):('v'), buffer);
	TRACE("[%c]   infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
	TRACE("[%c]   user_data_type = %08lx\n", fIsAudio?('a'):('v'),
		ioEncodedFormat->user_data_type);
	TRACE("[%c]   meta_data_size = %ld\n", fIsAudio?('a'):('v'),
		ioEncodedFormat->MetaDataSize());
#endif

	media_format_description description;
	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
		if (description.u.misc.file_format != 'ffmp')
			return B_NOT_SUPPORTED;
		fCodec = avcodec_find_decoder(static_cast<CodecID>(
			description.u.misc.codec));
		if (fCodec == NULL) {
			TRACE("  unable to find the correct FFmpeg "
				"decoder (id = %lu)\n", description.u.misc.codec);
			return B_ERROR;
		}
		TRACE("  found decoder %s\n", fCodec->name);

		const void* extraData = infoBuffer;
		fExtraDataSize = infoSize;
		if (description.family == B_WAV_FORMAT_FAMILY
				&& infoSize >= sizeof(wave_format_ex)) {
			TRACE("  trying to use wave_format_ex\n");
			// Special case extra data in B_WAV_FORMAT_FAMILY
			const wave_format_ex* waveFormatData
				= (const wave_format_ex*)infoBuffer;

			size_t waveFormatSize = infoSize;
			if (waveFormatData != NULL && waveFormatSize > 0) {
				fBlockAlign = waveFormatData->block_align;
				TRACE("  found block align: %d\n", fBlockAlign);
				fExtraDataSize = waveFormatData->extra_size;
				// skip the wave_format_ex from the extra data.
				extraData = waveFormatData + 1;
			}
		} else {
			if (fIsAudio) {
				fBlockAlign
					= ioEncodedFormat->u.encoded_audio.output
						.buffer_size;
				TRACE("  using buffer_size as block align: %d\n",
					fBlockAlign);
			}
		}
		if (extraData != NULL && fExtraDataSize > 0) {
			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
			delete[] fExtraData;
			fExtraData = new(std::nothrow) char[fExtraDataSize];
			if (fExtraData != NULL)
				memcpy(fExtraData, extraData, fExtraDataSize);
			else
				fExtraDataSize = 0;
		}

		fInputFormat = *ioEncodedFormat;
		return B_OK;
	} else {
		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
	}

	printf("AVCodecDecoder::Setup failed!\n");
	return B_ERROR;
}


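// Called after the Reader performed a seek. Flushing the libavcodec buffers
// and dropping any partially consumed chunk keeps the decoder in sync with
// the new stream position.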
status_t
AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
{
	status_t ret = B_OK;
	// Reset the FFmpeg codec to flush buffers, so that we stay in sync
	if (fCodecInitDone)
		avcodec_flush_buffers(fContext);

	// Flush internal buffers as well.
	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	fFrame = frame;
	fStartTime = time;

	return ret;
}


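// Dispatches format negotiation to the audio or video specific
// implementation below.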
status_t
AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
		fIsAudio?('a'):('v'));

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
#endif

	if (fIsAudio)
		return _NegotiateAudioOutputFormat(inOutFormat);
	else
		return _NegotiateVideoOutputFormat(inOutFormat);
}


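// Fills one output buffer. The reported start time defaults to the current
// fStartTime; the audio and video helpers update it as they consume chunks
// from the Reader.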
status_t
AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	if (!fCodecInitDone)
		return B_NO_INIT;

//	TRACE("[%c] AVCodecDecoder::Decode() for time %Ld\n", fIsAudio?('a'):('v'),
//		fStartTime);

	mediaHeader->start_time = fStartTime;

	status_t ret;
	if (fIsAudio)
		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
	else
		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);

	return ret;
}


// #pragma mark -


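// Resolves any wildcards in the requested raw audio format, configures the
// codec context from the encoded input format (bit rate, sample rate,
// channel count, block alignment and extradata) and opens the decoder. Also
// resets the chunk and output buffer bookkeeping used by _DecodeAudio().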
status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate
		= fInputFormat.u.encoded_audio.output.frame_rate;
	outputAudioFormat.channel_count
		= fInputFormat.u.encoded_audio.output.channel_count;
	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
	outputAudioFormat.buffer_size
		= inOutFormat->u.raw_audio.buffer_size;
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	// Check that channel count is not still a wild card!
	if (outputAudioFormat.channel_count == 0) {
		TRACE("  channel_count still a wild-card, assuming stereo.\n");
		outputAudioFormat.channel_count = 2;
	}

	if (outputAudioFormat.buffer_size == 0) {
		outputAudioFormat.buffer_size = 512
			* sampleSize * outputAudioFormat.channel_count;
	}
	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;

	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
	fContext->frame_size = (int)fInputFormat.u.encoded_audio.frame_size;
	fContext->sample_rate
		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
	fContext->channels = outputAudioFormat.channel_count;
	fContext->block_align = fBlockAlign;
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
		fContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
		fContext->channels, fContext->block_align, fContext->extradata_size);

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	// open new
	int result = avcodec_open2(fContext, fCodec, NULL);
	fCodecInitDone = (result >= 0);

	fStartTime = 0;
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fContext->bit_rate, fContext->sample_rate, fContext->channels,
		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	av_init_packet(&fTempPacket);

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	if (!fCodecInitDone) {
		TRACE("avcodec_open2() failed!\n");
		return B_ERROR;
	}

	return B_OK;
}


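// Sets up the raw video output format (B_RGB32 unless B_YCbCr422 was
// explicitly requested), opens the video decoder and prepares the color
// space conversion path: either an SWS scale context or one of the
// hand-optimized conversion routines, depending on
// USE_SWS_FOR_COLOR_SPACE_CONVERSION.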
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	fOutputVideoFormat = fInputFormat.u.encoded_video.output;

	fContext->width = fOutputVideoFormat.display.line_width;
	fContext->height = fOutputVideoFormat.display.line_count;
//	fContext->frame_rate = (int)(fOutputVideoFormat.field_rate
//		* fContext->frame_rate_base);

	fOutputFrameRate = fOutputVideoFormat.field_rate;

	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	// Make MediaPlayer happy: if the screen depth is not RGB32 and no
	// overlay is available, it will only ask for YCbCr, which DrawBitmap()
	// doesn't handle, so the default color depth is RGB32.
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputVideoFormat.display.format = B_YCbCr422;
	else
		fOutputVideoFormat.display.format = B_RGB32;

	// Search for a pixel-format the codec handles
	// TODO: We should try this a couple of times until it succeeds, each
	// time using another pixel-format that is supported by the decoder.
	// But libavcodec doesn't seem to offer any way to tell the decoder
	// which format it should use.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif
	// Iterate over supported codec formats
	for (int i = 0; i < 1; i++) {
		// close any previous instance
		if (fCodecInitDone) {
			fCodecInitDone = false;
			avcodec_close(fContext);
		}
		// TODO: Set n-th fContext->pix_fmt here
		if (avcodec_open2(fContext, fCodec, NULL) >= 0) {
			fCodecInitDone = true;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			fSwsContext = sws_getContext(fContext->width, fContext->height,
				fContext->pix_fmt, fContext->width, fContext->height,
				colorspace_to_pixfmt(fOutputVideoFormat.display.format),
				SWS_FAST_BILINEAR, NULL, NULL, NULL);
		}
#else
			fFormatConversionFunc = resolve_colorspace(
				fOutputVideoFormat.display.format, fContext->pix_fmt,
				fContext->width, fContext->height);
		}
		if (fFormatConversionFunc != NULL)
			break;
#endif
	}

	if (!fCodecInitDone) {
		TRACE("avcodec_open2() failed to init codec!\n");
		return B_ERROR;
	}

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	if (fOutputVideoFormat.display.format == B_YCbCr422) {
		fOutputVideoFormat.display.bytes_per_row
			= 2 * fOutputVideoFormat.display.line_width;
	} else {
		fOutputVideoFormat.display.bytes_per_row
			= 4 * fOutputVideoFormat.display.line_width;
	}

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->u.raw_video = fOutputVideoFormat;

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}


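// Produces at most fOutputFrameCount raw audio frames. Frames left over from
// the previous call are copied out first; after that, chunks are pulled from
// the Reader and fed to avcodec_decode_audio4() until the output buffer is
// full or the stream ends.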
status_t
AVCodecDecoder::_DecodeAudio(void* _buffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
		mediaHeader->start_time / 1000000.0);

	*outFrameCount = 0;

	uint8* buffer = reinterpret_cast<uint8*>(_buffer);
	while (*outFrameCount < fOutputFrameCount) {
		// Check conditions which would hint at broken code below.
		if (fOutputBufferSize < 0) {
			fprintf(stderr, "Decoding read past the end of the output buffer! "
				"%ld\n", fOutputBufferSize);
			fOutputBufferSize = 0;
		}
		if (fChunkBufferSize < 0) {
			fprintf(stderr, "Decoding read past the end of the chunk buffer! "
				"%ld\n", fChunkBufferSize);
			fChunkBufferSize = 0;
		}

		if (fOutputBufferSize > 0) {
			// We still have decoded audio frames from the last
			// invocation, which start at fOutputBufferOffset and are
			// fOutputBufferSize bytes long. Copy those into the buffer,
			// but not more than it can hold.
			int32 frames = min_c(fOutputFrameCount - *outFrameCount,
				fOutputBufferSize / fOutputFrameSize);
			if (frames == 0)
				debugger("fOutputBufferSize not multiple of frame size!");
			size_t remainingSize = frames * fOutputFrameSize;
			memcpy(buffer, fOutputFrame->data[0] + fOutputBufferOffset,
				remainingSize);
			fOutputBufferOffset += remainingSize;
			fOutputBufferSize -= remainingSize;
			buffer += remainingSize;
			*outFrameCount += frames;
			fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
			continue;
		}
		if (fChunkBufferSize == 0) {
			// Time to read the next chunk buffer. We use a separate
			// media_header, since the chunk header may not belong to
			// the start of the decoded audio frames we return. For
			// example, we may have used frames from a previous invocation,
			// or we may have to read several chunks until we fill up the
			// output buffer.
			media_header chunkMediaHeader;
			status_t err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize,
				&chunkMediaHeader);
			if (err == B_LAST_BUFFER_ERROR) {
				TRACE_AUDIO("  Last Chunk with chunk size %ld\n",
					fChunkBufferSize);
				fChunkBufferSize = 0;
				return err;
			}
			if (err != B_OK || fChunkBufferSize < 0) {
				printf("GetNextChunk error %ld\n", fChunkBufferSize);
				fChunkBufferSize = 0;
				break;
			}
			fChunkBufferOffset = 0;
			fStartTime = chunkMediaHeader.start_time;
		}

		fTempPacket.data = (uint8_t*)fChunkBuffer + fChunkBufferOffset;
		fTempPacket.size = fChunkBufferSize;

		avcodec_get_frame_defaults(fOutputFrame);
		int gotFrame = 0;
		int usedBytes = avcodec_decode_audio4(fContext,
			fOutputFrame, &gotFrame, &fTempPacket);
		if (usedBytes < 0 && !fAudioDecodeError) {
			// Report failure if not done already
			printf("########### audio decode error, "
				"fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
				fChunkBufferSize, fChunkBufferOffset);
			fAudioDecodeError = true;
		}
		if (usedBytes <= 0) {
			// Error or failure to produce decompressed output.
			// Skip the chunk buffer data entirely.
			usedBytes = fChunkBufferSize;
			fOutputBufferSize = 0;
			// Assume the audio decoded until now is broken.
			memset(_buffer, 0, buffer - (uint8*)_buffer);
		} else {
			// Success
			fAudioDecodeError = false;
			if (gotFrame == 1) {
				fOutputBufferSize = av_samples_get_buffer_size(NULL,
					fContext->channels, fOutputFrame->nb_samples,
					fContext->sample_fmt, 1);
				if (fOutputBufferSize < 0)
					fOutputBufferSize = 0;
			} else
				fOutputBufferSize = 0;
		}
//printf("  chunk size: %d, decoded: %d, used: %d\n",
//fTempPacket.size, decodedBytes, usedBytes);

		fChunkBufferOffset += usedBytes;
		fChunkBufferSize -= usedBytes;
		fOutputBufferOffset = 0;
	}
	fFrame += *outFrameCount;
	TRACE_AUDIO("  frame count: %lld current: %lld\n", *outFrameCount, fFrame);

	return B_OK;
}


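// Pulls and decodes chunks until libavcodec delivers a complete picture,
// optionally deinterlaces it, and converts it into the negotiated color
// space directly into the output buffer. Returns after exactly one decoded
// frame.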
status_t
AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	bool firstRun = true;
	while (true) {
		const void* data;
		size_t size;
		media_header chunkMediaHeader;
		status_t err = GetNextChunk(&data, &size, &chunkMediaHeader);
		if (err != B_OK) {
			TRACE("AVCodecDecoder::_DecodeVideo(): error from "
				"GetNextChunk(): %s\n", strerror(err));
			return err;
		}
#ifdef LOG_STREAM_TO_FILE
		if (sDumpedPackets < 100) {
			sStreamLogFile.Write(data, size);
			printf("wrote %ld bytes\n", size);
			sDumpedPackets++;
		} else if (sDumpedPackets == 100)
			sStreamLogFile.Unset();
#endif

		if (firstRun) {
			firstRun = false;

			mediaHeader->type = B_MEDIA_RAW_VIDEO;
			mediaHeader->start_time = chunkMediaHeader.start_time;
			fStartTime = chunkMediaHeader.start_time;
			mediaHeader->file_pos = 0;
			mediaHeader->orig_size = 0;
			mediaHeader->u.raw_video.field_gamma = 1.0;
			mediaHeader->u.raw_video.field_sequence = fFrame;
			mediaHeader->u.raw_video.field_number = 0;
			mediaHeader->u.raw_video.pulldown_number = 0;
			mediaHeader->u.raw_video.first_active_line = 1;
			mediaHeader->u.raw_video.line_count
				= fOutputVideoFormat.display.line_count;

			TRACE("[v] start_time=%02d:%02d.%02d field_sequence=%lu\n",
				int((mediaHeader->start_time / 60000000) % 60),
				int((mediaHeader->start_time / 1000000) % 60),
				int((mediaHeader->start_time / 10000) % 100),
				mediaHeader->u.raw_video.field_sequence);
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFmpeg example code I've read, the length returned by
		// avcodec_decode_video() is completely ignored. Furthermore, the
		// packet buffers are supposed to contain complete frames only, so we
		// don't seem to be required to buffer packets just because only part
		// of a packet has been consumed.
		fTempPacket.data = (uint8_t*)data;
		fTempPacket.size = size;
		int gotPicture = 0;
		int len = avcodec_decode_video2(fContext, fInputPicture, &gotPicture,
			&fTempPacket);
		if (len < 0) {
			TRACE("[v] AVCodecDecoder: error in decoding frame %lld: %d\n",
				fFrame, len);
			// NOTE: An error from avcodec_decode_video() seems to be ignored
			// in the ffplay sample code.
//			return B_ERROR;
		}


//TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//	"fContext->frame_rate = %ld\n", (int)(fContext->pts / (60*60*1000000)),
//	(int)(fContext->pts / (60*1000000)), (int)(fContext->pts / (1000000)),
//	(int)(fContext->pts % 1000000), fContext->frame_number,
//	fContext->frame_rate);
//TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//	"fContext->frame_rate = %ld\n",
//	(int)(fInputPicture->pts / (60*60*1000000)),
//	(int)(fInputPicture->pts / (60*1000000)),
//	(int)(fInputPicture->pts / (1000000)),
//	(int)(fInputPicture->pts % 1000000), fContext->frame_number,
//	fContext->frame_rate);

		if (gotPicture) {
			int width = fOutputVideoFormat.display.line_width;
			int height = fOutputVideoFormat.display.line_count;
			AVPicture deinterlacedPicture;
			bool useDeinterlacedPicture = false;

			if (fInputPicture->interlaced_frame) {
				AVPicture source;
				source.data[0] = fInputPicture->data[0];
				source.data[1] = fInputPicture->data[1];
				source.data[2] = fInputPicture->data[2];
				source.data[3] = fInputPicture->data[3];
				source.linesize[0] = fInputPicture->linesize[0];
				source.linesize[1] = fInputPicture->linesize[1];
				source.linesize[2] = fInputPicture->linesize[2];
				source.linesize[3] = fInputPicture->linesize[3];

				avpicture_alloc(&deinterlacedPicture,
					fContext->pix_fmt, width, height);

				if (avpicture_deinterlace(&deinterlacedPicture, &source,
						fContext->pix_fmt, width, height) < 0) {
					TRACE("[v] avpicture_deinterlace() - error\n");
				} else
					useDeinterlacedPicture = true;
			}

#if DO_PROFILING
			bigtime_t formatConversionStart = system_time();
#endif
//			TRACE("ONE FRAME OUT !! len=%d size=%ld (%s)\n", len, size,
//				pixfmt_to_string(fContext->pix_fmt));

			// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			if (fSwsContext == NULL) {
				fSwsContext = sws_getContext(fContext->width, fContext->height,
					fContext->pix_fmt, fContext->width, fContext->height,
					colorspace_to_pixfmt(fOutputVideoFormat.display.format),
					SWS_FAST_BILINEAR, NULL, NULL, NULL);
			}
#else
			if (fFormatConversionFunc == NULL) {
				fFormatConversionFunc = resolve_colorspace(
					fOutputVideoFormat.display.format, fContext->pix_fmt,
					fContext->width, fContext->height);
			}
#endif

			fOutputPicture->data[0] = (uint8_t*)outBuffer;
			fOutputPicture->linesize[0]
				= fOutputVideoFormat.display.bytes_per_row;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			if (fSwsContext != NULL) {
#else
			if (fFormatConversionFunc != NULL) {
#endif
				if (useDeinterlacedPicture) {
					AVFrame inputFrame;
					inputFrame.data[0] = deinterlacedPicture.data[0];
					inputFrame.data[1] = deinterlacedPicture.data[1];
					inputFrame.data[2] = deinterlacedPicture.data[2];
					inputFrame.data[3] = deinterlacedPicture.data[3];
					inputFrame.linesize[0] = deinterlacedPicture.linesize[0];
					inputFrame.linesize[1] = deinterlacedPicture.linesize[1];
					inputFrame.linesize[2] = deinterlacedPicture.linesize[2];
					inputFrame.linesize[3] = deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
					sws_scale(fSwsContext, inputFrame.data,
						inputFrame.linesize, 0, fContext->height,
						fOutputPicture->data, fOutputPicture->linesize);
#else
					(*fFormatConversionFunc)(&inputFrame,
						fOutputPicture, width, height);
#endif
				} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
					sws_scale(fSwsContext, fInputPicture->data,
						fInputPicture->linesize, 0, fContext->height,
						fOutputPicture->data, fOutputPicture->linesize);
#else
					(*fFormatConversionFunc)(fInputPicture, fOutputPicture,
						width, height);
#endif
				}
			}
			if (fInputPicture->interlaced_frame)
				avpicture_free(&deinterlacedPicture);
#ifdef DEBUG
			dump_ffframe(fInputPicture, "ffpict");
//			dump_ffframe(fOutputPicture, "opict");
#endif
			*outFrameCount = 1;
			fFrame++;

#if DO_PROFILING
			bigtime_t doneTime = system_time();
			decodingTime += formatConversionStart - startTime;
			conversionTime += doneTime - formatConversionStart;
			profileCounter++;
			if (!(fFrame % 5)) {
				if (info) {
					printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, info->time_to_decode);
				} else {
					printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, bigtime_t(1000000LL / fOutputFrameRate));
				}
				decodingTime = 0;
				conversionTime = 0;
				profileCounter = 0;
			}
#endif
			return B_OK;
		} else {
			TRACE("frame %lld - no picture yet, len: %d, chunk size: %ld\n",
				fFrame, len, size);
		}
	}
}