/*
 * Copyright (C) 2001 Carlos Hasan
 * Copyright (C) 2001 François Revol
 * Copyright (C) 2001 Axel Dörfler
 * Copyright (C) 2004 Marcus Overhagen
 * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
 *
 * All rights reserved. Distributed under the terms of the MIT License.
 */

//! libavcodec based decoder for Haiku

#include "AVCodecDecoder.h"

#include <new>

#include <string.h>

#include <Bitmap.h>
#include <Debug.h>


#undef TRACE
//#define TRACE_AV_CODEC
#ifdef TRACE_AV_CODEC
#	define TRACE(x...) printf(x)
#	define TRACE_AUDIO(x...) printf(x)
#	define TRACE_VIDEO(x...) printf(x)
#else
#	define TRACE(x...)
#	define TRACE_AUDIO(x...)
#	define TRACE_VIDEO(x...)
#endif

//#define LOG_STREAM_TO_FILE
#ifdef LOG_STREAM_TO_FILE
#	include <File.h>
static BFile sStreamLogFile("/boot/home/Desktop/AVCodecDebugStream.raw",
	B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
static int sDumpedPackets = 0;
#endif

#define USE_SWS_FOR_COLOR_SPACE_CONVERSION 0
	// NOTE: David's color space conversion is much faster than the FFmpeg
	// version. Perhaps the SWS code can be used for unsupported conversions?
	// Otherwise the alternative code could simply be removed from this file.


struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;
	// extra_data[extra_size]
} _PACKED;


// profiling related globals
#define DO_PROFILING 0

static bigtime_t decodingTime = 0;
static bigtime_t conversionTime = 0;
static long profileCounter = 0;


AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fOutputVideoFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fContext(avcodec_alloc_context3(NULL)),
	fInputPicture(avcodec_alloc_frame()),
	fOutputPicture(avcodec_alloc_frame()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fStartTime(0),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferOffset(0),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fOutputFrame(avcodec_alloc_frame()),
	fOutputBufferOffset(0),
	fOutputBufferSize(0)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	system_info info;
	get_system_info(&info);

	fContext->err_recognition = AV_EF_CAREFUL;
	fContext->error_concealment = 3;
	fContext->thread_count = info.cpu_count;
}


AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%Ld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	if (fCodecInitDone)
		avcodec_close(fContext);

	av_free(fOutputPicture);
	av_free(fInputPicture);
	av_free(fContext);
	av_free(fOutputFrame);

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
#endif

	delete[] fExtraData;
}


void
AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
{
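	// Report the codec's short and long names as provided by libavcodec,
	// and expose its libavcodec codec id as sub_id.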
	snprintf(mci->short_name, 32, "%s", fCodec->name);
	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
	mci->id = 0;
	mci->sub_id = fCodec->id;
}


status_t
AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
	size_t infoSize)
{
	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
		return B_ERROR;

	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
	TRACE("[%c] input_format = %s\n", fIsAudio?('a'):('v'), buffer);
	TRACE("[%c] infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
	TRACE("[%c] user_data_type = %08lx\n", fIsAudio?('a'):('v'),
		ioEncodedFormat->user_data_type);
	TRACE("[%c] meta_data_size = %ld\n", fIsAudio?('a'):('v'),
		ioEncodedFormat->MetaDataSize());
#endif

	media_format_description description;
	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
		if (description.u.misc.file_format != 'ffmp')
			return B_NOT_SUPPORTED;
		fCodec = avcodec_find_decoder(static_cast<CodecID>(
			description.u.misc.codec));
		if (fCodec == NULL) {
			TRACE(" unable to find the correct FFmpeg "
				"decoder (id = %lu)\n", description.u.misc.codec);
			return B_ERROR;
		}
		TRACE(" found decoder %s\n", fCodec->name);

		const void* extraData = infoBuffer;
		fExtraDataSize = infoSize;
		if (description.family == B_WAV_FORMAT_FAMILY
				&& infoSize >= sizeof(wave_format_ex)) {
			TRACE(" trying to use wave_format_ex\n");
			// Special case extra data in B_WAV_FORMAT_FAMILY
			const wave_format_ex* waveFormatData
				= (const wave_format_ex*)infoBuffer;

			size_t waveFormatSize = infoSize;
			if (waveFormatData != NULL && waveFormatSize > 0) {
				fBlockAlign = waveFormatData->block_align;
				TRACE(" found block align: %d\n", fBlockAlign);
				fExtraDataSize = waveFormatData->extra_size;
				// skip the wave_format_ex from the extra data.
				extraData = waveFormatData + 1;
			}
		} else {
			if (fIsAudio) {
				fBlockAlign = ioEncodedFormat->u.encoded_audio.output
					.buffer_size;
				TRACE(" using buffer_size as block align: %d\n",
					fBlockAlign);
			}
		}
		if (extraData != NULL && fExtraDataSize > 0) {
			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
			delete[] fExtraData;
			fExtraData = new(std::nothrow) char[fExtraDataSize];
			if (fExtraData != NULL)
				memcpy(fExtraData, extraData, fExtraDataSize);
			else
				fExtraDataSize = 0;
		}

		fInputFormat = *ioEncodedFormat;
		return B_OK;
	} else {
		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
	}

	printf("AVCodecDecoder::Setup failed!\n");
	return B_ERROR;
}


status_t
AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
{
	status_t ret = B_OK;
	// Reset the FFmpeg codec to flush its buffers, so we keep the sync.
	if (fCodecInitDone)
		avcodec_flush_buffers(fContext);

	// Flush internal buffers as well.
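	// Any leftover chunk data or decoded-but-unreturned output still refers
	// to the old position in the stream.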
	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	fFrame = frame;
	fStartTime = time;

	return ret;
}


status_t
AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
		fIsAudio?('a'):('v'));

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE(" [%c] requested format = %s\n", fIsAudio?('a'):('v'), buffer);
#endif

	if (fIsAudio)
		return _NegotiateAudioOutputFormat(inOutFormat);
	else
		return _NegotiateVideoOutputFormat(inOutFormat);
}


status_t
AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	if (!fCodecInitDone)
		return B_NO_INIT;

//	TRACE("[%c] AVCodecDecoder::Decode() for time %Ld\n", fIsAudio?('a'):('v'),
//		fStartTime);

	mediaHeader->start_time = fStartTime;

	status_t ret;
	if (fIsAudio)
		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
	else
		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);

	return ret;
}


// #pragma mark -


status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate
		= fInputFormat.u.encoded_audio.output.frame_rate;
	outputAudioFormat.channel_count
		= fInputFormat.u.encoded_audio.output.channel_count;
	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
	outputAudioFormat.buffer_size
		= inOutFormat->u.raw_audio.buffer_size;
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE(" format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	// Check that channel count is not still a wild card!
	if (outputAudioFormat.channel_count == 0) {
		TRACE(" channel_count still a wild-card, assuming stereo.\n");
		outputAudioFormat.channel_count = 2;
	}

	if (outputAudioFormat.buffer_size == 0) {
		outputAudioFormat.buffer_size = 512
			* sampleSize * outputAudioFormat.channel_count;
	}
	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;

	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
	fContext->frame_size = (int)fInputFormat.u.encoded_audio.frame_size;
	fContext->sample_rate
		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
	fContext->channels = outputAudioFormat.channel_count;
	fContext->block_align = fBlockAlign;
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo().
	// I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
		fContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE(" bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
		fContext->channels, fContext->block_align, fContext->extradata_size);

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	// open new
	int result = avcodec_open2(fContext, fCodec, NULL);
	fCodecInitDone = (result >= 0);

	fStartTime = 0;
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;

	TRACE(" bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fContext->bit_rate, fContext->sample_rate, fContext->channels,
		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	av_init_packet(&fTempPacket);

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	if (!fCodecInitDone) {
		TRACE("avcodec_open2() failed!\n");
		return B_ERROR;
	}

	return B_OK;
}


status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	fOutputVideoFormat = fInputFormat.u.encoded_video.output;

	fContext->width = fOutputVideoFormat.display.line_width;
	fContext->height = fOutputVideoFormat.display.line_count;
//	fContext->frame_rate = (int)(fOutputVideoFormat.field_rate
//		* fContext->frame_rate_base);

	fOutputFrameRate = fOutputVideoFormat.field_rate;

	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	TRACE(" requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	// Make MediaPlayer happy (if not in RGB32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default color depth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputVideoFormat.display.format = B_YCbCr422;
	else
		fOutputVideoFormat.display.format = B_RGB32;

	// Search for a pixel format the codec handles.
	// TODO: We should try this a couple of times until it succeeds, each
	// time using another pixel format that is supported by the decoder.
	// But libavcodec doesn't seem to offer any way to tell the decoder
	// which format it should use.
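	// Throw away any conversion state from a previous negotiation; it will
	// be recreated below for the (re)opened codec.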
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = NULL;
#endif
	// Iterate over supported codec formats
	for (int i = 0; i < 1; i++) {
		// close any previous instance
		if (fCodecInitDone) {
			fCodecInitDone = false;
			avcodec_close(fContext);
		}
		// TODO: Set n-th fContext->pix_fmt here
		if (avcodec_open2(fContext, fCodec, NULL) >= 0) {
			fCodecInitDone = true;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			fSwsContext = sws_getContext(fContext->width, fContext->height,
				fContext->pix_fmt, fContext->width, fContext->height,
				colorspace_to_pixfmt(fOutputVideoFormat.display.format),
				SWS_FAST_BILINEAR, NULL, NULL, NULL);
		}
#else
			fFormatConversionFunc = resolve_colorspace(
				fOutputVideoFormat.display.format, fContext->pix_fmt,
				fContext->width, fContext->height);
		}
		if (fFormatConversionFunc != NULL)
			break;
#endif
	}

	if (!fCodecInitDone) {
		TRACE("avcodec_open2() failed to init codec!\n");
		return B_ERROR;
	}

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	if (fOutputVideoFormat.display.format == B_YCbCr422) {
		fOutputVideoFormat.display.bytes_per_row
			= 2 * fOutputVideoFormat.display.line_width;
	} else {
		fOutputVideoFormat.display.bytes_per_row
			= 4 * fOutputVideoFormat.display.line_width;
	}

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->u.raw_video = fOutputVideoFormat;

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v] outFormat = %s\n", buffer);
	TRACE(" returned video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}


status_t
AVCodecDecoder::_DecodeAudio(void* _buffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
		mediaHeader->start_time / 1000000.0);

	*outFrameCount = 0;

	uint8* buffer = reinterpret_cast<uint8*>(_buffer);
	while (*outFrameCount < fOutputFrameCount) {
		// Check conditions which would hint at broken code below.
		if (fOutputBufferSize < 0) {
			fprintf(stderr, "Decoding read past the end of the output buffer! "
				"%ld\n", fOutputBufferSize);
			fOutputBufferSize = 0;
		}
		if (fChunkBufferSize < 0) {
			fprintf(stderr, "Decoding read past the end of the chunk buffer! "
				"%ld\n", fChunkBufferSize);
			fChunkBufferSize = 0;
		}

		if (fOutputBufferSize > 0) {
			// We still have decoded audio frames from the last
			// invocation, which start at fOutputBufferOffset
			// and are of fOutputBufferSize. Copy those into the buffer,
			// but not more than it can hold.
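			// For example (illustrative numbers only): with 16 bit stereo
			// output, fOutputFrameSize is 4 bytes, so 2048 leftover bytes
			// yield up to 512 frames before another chunk must be decoded.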
			int32 frames = min_c(fOutputFrameCount - *outFrameCount,
				fOutputBufferSize / fOutputFrameSize);
			if (frames == 0)
				debugger("fOutputBufferSize not multiple of frame size!");
			size_t remainingSize = frames * fOutputFrameSize;
			memcpy(buffer, fOutputFrame->data[0] + fOutputBufferOffset,
				remainingSize);
			fOutputBufferOffset += remainingSize;
			fOutputBufferSize -= remainingSize;
			buffer += remainingSize;
			*outFrameCount += frames;
			fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
			continue;
		}
		if (fChunkBufferSize == 0) {
			// Time to read the next chunk buffer. We use a separate
			// media_header, since the chunk header may not belong to
			// the start of the decoded audio frames we return. For
			// example we may have used frames from a previous invocation,
			// or we may have to read several chunks until we fill up the
			// output buffer.
			media_header chunkMediaHeader;
			status_t err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize,
				&chunkMediaHeader);
			if (err == B_LAST_BUFFER_ERROR) {
				TRACE_AUDIO(" Last Chunk with chunk size %ld\n",
					fChunkBufferSize);
				fChunkBufferSize = 0;
				return err;
			}
			if (err != B_OK || fChunkBufferSize < 0) {
				printf("GetNextChunk error %ld\n", fChunkBufferSize);
				fChunkBufferSize = 0;
				break;
			}
			fChunkBufferOffset = 0;
			fStartTime = chunkMediaHeader.start_time;
		}

		fTempPacket.data = (uint8_t*)fChunkBuffer + fChunkBufferOffset;
		fTempPacket.size = fChunkBufferSize;

		avcodec_get_frame_defaults(fOutputFrame);
		int gotFrame = 0;
		int usedBytes = avcodec_decode_audio4(fContext,
			fOutputFrame, &gotFrame, &fTempPacket);
		if (usedBytes < 0 && !fAudioDecodeError) {
			// Report failure if not done already
			printf("########### audio decode error, "
				"fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
				fChunkBufferSize, fChunkBufferOffset);
			fAudioDecodeError = true;
		}
		if (usedBytes <= 0) {
			// Error or failure to produce decompressed output.
			// Skip the chunk buffer data entirely.
			usedBytes = fChunkBufferSize;
			fOutputBufferSize = 0;
			// Assume the audio decoded until now is broken.
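			// Zero everything written to the output buffer so far, so the
			// caller gets silence instead of partially decoded data.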
			memset(_buffer, 0, buffer - (uint8*)_buffer);
		} else {
			// Success
			fAudioDecodeError = false;
			if (gotFrame == 1) {
				fOutputBufferSize = av_samples_get_buffer_size(NULL,
					fContext->channels, fOutputFrame->nb_samples,
					fContext->sample_fmt, 1);
				if (fOutputBufferSize < 0)
					fOutputBufferSize = 0;
			} else
				fOutputBufferSize = 0;
		}
//printf(" chunk size: %d, decoded: %d, used: %d\n",
//fTempPacket.size, decodedBytes, usedBytes);

		fChunkBufferOffset += usedBytes;
		fChunkBufferSize -= usedBytes;
		fOutputBufferOffset = 0;
	}
	fFrame += *outFrameCount;
	TRACE_AUDIO(" frame count: %lld current: %lld\n", *outFrameCount, fFrame);

	return B_OK;
}


status_t
AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	bool firstRun = true;
	while (true) {
		const void* data;
		size_t size;
		media_header chunkMediaHeader;
		status_t err = GetNextChunk(&data, &size, &chunkMediaHeader);
		if (err != B_OK) {
			TRACE("AVCodecDecoder::_DecodeVideo(): error from "
				"GetNextChunk(): %s\n", strerror(err));
			return err;
		}
#ifdef LOG_STREAM_TO_FILE
		if (sDumpedPackets < 100) {
			sStreamLogFile.Write(data, size);
			printf("wrote %ld bytes\n", size);
			sDumpedPackets++;
		} else if (sDumpedPackets == 100)
			sStreamLogFile.Unset();
#endif

		if (firstRun) {
			firstRun = false;

			mediaHeader->type = B_MEDIA_RAW_VIDEO;
			mediaHeader->start_time = chunkMediaHeader.start_time;
			fStartTime = chunkMediaHeader.start_time;
			mediaHeader->file_pos = 0;
			mediaHeader->orig_size = 0;
			mediaHeader->u.raw_video.field_gamma = 1.0;
			mediaHeader->u.raw_video.field_sequence = fFrame;
			mediaHeader->u.raw_video.field_number = 0;
			mediaHeader->u.raw_video.pulldown_number = 0;
			mediaHeader->u.raw_video.first_active_line = 1;
			mediaHeader->u.raw_video.line_count
				= fOutputVideoFormat.display.line_count;

			TRACE("[v] start_time=%02d:%02d.%02d field_sequence=%lu\n",
				int((mediaHeader->start_time / 60000000) % 60),
				int((mediaHeader->start_time / 1000000) % 60),
				int((mediaHeader->start_time / 10000) % 100),
				mediaHeader->u.raw_video.field_sequence);
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFmpeg code example I've read, the length returned by
		// avcodec_decode_video() is completely ignored. Furthermore, the
		// packet buffers are supposed to contain complete frames only, so we
		// don't seem to be required to buffer packets that have only been
		// read partially.
		fTempPacket.data = (uint8_t*)data;
		fTempPacket.size = size;
		int gotPicture = 0;
		int len = avcodec_decode_video2(fContext, fInputPicture, &gotPicture,
			&fTempPacket);
		if (len < 0) {
			TRACE("[v] AVCodecDecoder: error in decoding frame %lld: %d\n",
				fFrame, len);
			// NOTE: An error from avcodec_decode_video() seems to be ignored
			// in the ffplay sample code. Instead of bailing out, decoding
			// simply continues with the next chunk.
//			return B_ERROR;
		}


//TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//	"fContext->frame_rate = %ld\n", (int)(fContext->pts / (60*60*1000000)),
//	(int)(fContext->pts / (60*1000000)), (int)(fContext->pts / (1000000)),
//	(int)(fContext->pts % 1000000), fContext->frame_number,
//	fContext->frame_rate);
//TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//	"fContext->frame_rate = %ld\n",
//	(int)(fInputPicture->pts / (60*60*1000000)),
//	(int)(fInputPicture->pts / (60*1000000)),
//	(int)(fInputPicture->pts / (1000000)),
//	(int)(fInputPicture->pts % 1000000), fContext->frame_number,
//	fContext->frame_rate);

		if (gotPicture) {
			int width = fOutputVideoFormat.display.line_width;
			int height = fOutputVideoFormat.display.line_count;
			AVPicture deinterlacedPicture;
			bool useDeinterlacedPicture = false;

			if (fInputPicture->interlaced_frame) {
				AVPicture source;
				source.data[0] = fInputPicture->data[0];
				source.data[1] = fInputPicture->data[1];
				source.data[2] = fInputPicture->data[2];
				source.data[3] = fInputPicture->data[3];
				source.linesize[0] = fInputPicture->linesize[0];
				source.linesize[1] = fInputPicture->linesize[1];
				source.linesize[2] = fInputPicture->linesize[2];
				source.linesize[3] = fInputPicture->linesize[3];

				avpicture_alloc(&deinterlacedPicture,
					fContext->pix_fmt, width, height);

				if (avpicture_deinterlace(&deinterlacedPicture, &source,
						fContext->pix_fmt, width, height) < 0) {
					TRACE("[v] avpicture_deinterlace() - error\n");
				} else
					useDeinterlacedPicture = true;
			}

#if DO_PROFILING
			bigtime_t formatConversionStart = system_time();
#endif
//			TRACE("ONE FRAME OUT !! len=%d size=%ld (%s)\n", len, size,
//				pixfmt_to_string(fContext->pix_fmt));

			// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			if (fSwsContext == NULL) {
				fSwsContext = sws_getContext(fContext->width, fContext->height,
					fContext->pix_fmt, fContext->width, fContext->height,
					colorspace_to_pixfmt(fOutputVideoFormat.display.format),
					SWS_FAST_BILINEAR, NULL, NULL, NULL);
			}
#else
			if (fFormatConversionFunc == NULL) {
				fFormatConversionFunc = resolve_colorspace(
					fOutputVideoFormat.display.format, fContext->pix_fmt,
					fContext->width, fContext->height);
			}
#endif

			fOutputPicture->data[0] = (uint8_t*)outBuffer;
			fOutputPicture->linesize[0]
				= fOutputVideoFormat.display.bytes_per_row;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			if (fSwsContext != NULL) {
#else
			if (fFormatConversionFunc != NULL) {
#endif
				if (useDeinterlacedPicture) {
					AVFrame inputFrame;
					inputFrame.data[0] = deinterlacedPicture.data[0];
					inputFrame.data[1] = deinterlacedPicture.data[1];
					inputFrame.data[2] = deinterlacedPicture.data[2];
					inputFrame.data[3] = deinterlacedPicture.data[3];
					inputFrame.linesize[0] = deinterlacedPicture.linesize[0];
					inputFrame.linesize[1] = deinterlacedPicture.linesize[1];
					inputFrame.linesize[2] = deinterlacedPicture.linesize[2];
					inputFrame.linesize[3] = deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
					sws_scale(fSwsContext, inputFrame.data,
						inputFrame.linesize, 0, fContext->height,
						fOutputPicture->data, fOutputPicture->linesize);
#else
					(*fFormatConversionFunc)(&inputFrame,
						fOutputPicture, width, height);
#endif
				} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
					sws_scale(fSwsContext, fInputPicture->data,
						fInputPicture->linesize, 0, fContext->height,
						fOutputPicture->data, fOutputPicture->linesize);
#else
					(*fFormatConversionFunc)(fInputPicture, fOutputPicture,
						width, height);
#endif
				}
			}
			if (fInputPicture->interlaced_frame)
				avpicture_free(&deinterlacedPicture);
#ifdef DEBUG
			dump_ffframe(fInputPicture, "ffpict");
//			dump_ffframe(fOutputPicture, "opict");
#endif
			*outFrameCount = 1;
			fFrame++;

#if DO_PROFILING
			bigtime_t doneTime = system_time();
			decodingTime += formatConversionStart - startTime;
			conversionTime += doneTime - formatConversionStart;
			profileCounter++;
			if (!(fFrame % 5)) {
				if (info) {
					printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, info->time_to_decode);
				} else {
					printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, bigtime_t(1000000LL / fOutputFrameRate));
				}
				decodingTime = 0;
				conversionTime = 0;
				profileCounter = 0;
			}
#endif
			return B_OK;
		} else {
			TRACE("frame %lld - no picture yet, len: %d, chunk size: %ld\n",
				fFrame, len, size);
		}
	}
}