/*
 * Copyright (C) 2001 Carlos Hasan
 * Copyright (C) 2001 François Revol
 * Copyright (C) 2001 Axel Dörfler
 * Copyright (C) 2004 Marcus Overhagen
 * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
 *
 * All rights reserved. Distributed under the terms of the MIT License.
 */

//! libavcodec based decoder for Haiku

#include "AVCodecDecoder.h"

#include <new>

#include <string.h>

#include <Bitmap.h>
#include <Debug.h>


#undef TRACE
//#define TRACE_AV_CODEC
#ifdef TRACE_AV_CODEC
#	define TRACE(x...)	printf(x)
#else
#	define TRACE(x...)
#endif

//#define LOG_STREAM_TO_FILE
#ifdef LOG_STREAM_TO_FILE
#	include <File.h>
	static BFile sStreamLogFile("/boot/home/Desktop/AVCodecDebugStream.raw",
		B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
	static int sDumpedPackets = 0;
#endif


struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;
	// extra_data[extra_size]
} _PACKED;


// profiling related globals
#define DO_PROFILING 0

static bigtime_t decodingTime = 0;
static bigtime_t conversionTime = 0;
static long profileCounter = 0;


AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fOutputVideoFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodecIndexInTable(-1),
	fCodec(NULL),
	fContext(avcodec_alloc_context()),
	fInputPicture(avcodec_alloc_frame()),
	fOutputPicture(avcodec_alloc_frame()),

	fCodecInitDone(false),

	fFormatConversionFunc(NULL),

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fStartTime(0),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferOffset(0),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fOutputBuffer(NULL),
	fOutputBufferOffset(0),
	fOutputBufferSize(0)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");
}


AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%Ld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	if (fCodecInitDone)
		avcodec_close(fContext);

	free(fOutputPicture);
	free(fInputPicture);
	free(fContext);

	delete[] fExtraData;
	delete[] fOutputBuffer;
}


void
AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
{
	sprintf(mci->short_name, "ff:%s", fCodec->name);
	sprintf(mci->pretty_name, "%s (libavcodec %s)",
		gCodecTable[fCodecIndexInTable].prettyname, fCodec->name);
}


status_t
AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
	size_t infoSize)
{
	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
		return B_ERROR;

	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v'));

	if (fIsAudio && !fOutputBuffer)
		fOutputBuffer = new char[AVCODEC_MAX_AUDIO_FRAME_SIZE];

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
	TRACE("[%c] input_format = %s\n", fIsAudio?('a'):('v'),
		buffer);
	TRACE("[%c] infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize);
	TRACE("[%c] user_data_type = %08lx\n", fIsAudio?('a'):('v'),
		ioEncodedFormat->user_data_type);
	TRACE("[%c] meta_data_size = %ld\n", fIsAudio?('a'):('v'),
		ioEncodedFormat->MetaDataSize());
#endif

	media_format_description descr;
	for (int32 i = 0; gCodecTable[i].id; i++) {
		fCodecIndexInTable = i;
		uint64 cid;

		if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
				gCodecTable[i].family, &descr) == B_OK
			&& gCodecTable[i].type == ioEncodedFormat->type) {
			switch (gCodecTable[i].family) {
				case B_WAV_FORMAT_FAMILY:
					cid = descr.u.wav.codec;
					break;
				case B_AIFF_FORMAT_FAMILY:
					cid = descr.u.aiff.codec;
					break;
				case B_AVI_FORMAT_FAMILY:
					cid = descr.u.avi.codec;
					break;
				case B_MPEG_FORMAT_FAMILY:
					cid = descr.u.mpeg.id;
					break;
				case B_QUICKTIME_FORMAT_FAMILY:
					cid = descr.u.quicktime.codec;
					break;
				case B_MISC_FORMAT_FAMILY:
					cid = (((uint64)descr.u.misc.file_format) << 32)
						| descr.u.misc.codec;
					break;
				default:
					puts("ERR family");
					return B_ERROR;
			}

			if (gCodecTable[i].family == descr.family
				&& gCodecTable[i].fourcc == cid) {

				TRACE("  0x%04lx codec id = \"%c%c%c%c\"\n", uint32(cid),
					(char)((cid >> 24) & 0xff), (char)((cid >> 16) & 0xff),
					(char)((cid >> 8) & 0xff), (char)(cid & 0xff));

				fCodec = avcodec_find_decoder(gCodecTable[i].id);
				if (fCodec == NULL) {
					TRACE("  unable to find the correct FFmpeg "
						"decoder (id = %d)\n", gCodecTable[i].id);
					return B_ERROR;
				}
				TRACE("  found decoder %s\n", fCodec->name);

				const void* extraData = infoBuffer;
				fExtraDataSize = infoSize;
				if (gCodecTable[i].family == B_WAV_FORMAT_FAMILY
					&& infoSize >= sizeof(wave_format_ex)) {
					TRACE("  trying to use wave_format_ex\n");
					// Special case extra data in B_WAV_FORMAT_FAMILY
					const wave_format_ex* waveFormatData
						= (const wave_format_ex*)infoBuffer;

					size_t waveFormatSize = infoSize;
					if (waveFormatData != NULL && waveFormatSize > 0) {
						fBlockAlign = waveFormatData->block_align;
						TRACE("  found block align: %d\n", fBlockAlign);
						fExtraDataSize = waveFormatData->extra_size;
						// skip the wave_format_ex from the extra data.
						extraData = waveFormatData + 1;
					}
				} else {
					if (fIsAudio) {
						fBlockAlign
							= ioEncodedFormat->u.encoded_audio.output
								.buffer_size;
						TRACE("  using buffer_size as block align: %d\n",
							fBlockAlign);
					}
				}
				if (extraData != NULL && fExtraDataSize > 0) {
					TRACE("AVCodecDecoder: extra data size %ld\n",
						fExtraDataSize);
					fExtraData = new(std::nothrow) char[fExtraDataSize];
					if (fExtraData != NULL)
						memcpy(fExtraData, extraData, fExtraDataSize);
					else
						fExtraDataSize = 0;
				}

				fInputFormat = *ioEncodedFormat;
				return B_OK;
			}
		}
	}
	printf("AVCodecDecoder::Setup failed!\n");
	return B_ERROR;
}


status_t
AVCodecDecoder::Seek(uint32 seekTo, int64 seekFrame, int64* frame,
	bigtime_t seekTime, bigtime_t* time)
{
	// Reset the FFmpeg codec to flush buffers, so we keep the sync
#if 1
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
		fCodecInitDone = (avcodec_open(fContext, fCodec) >= 0);
	}
#else
	// For example, this doesn't work on the H.264 codec. :-/
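	// (avcodec_flush_buffers() only discards frames buffered inside the
	// decoder; it does not reinitialize the codec the way the close/reopen
	// path above does, which is apparently not enough for some decoders.)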
	if (fCodecInitDone)
		avcodec_flush_buffers(fContext);
#endif

	if (seekTo == B_MEDIA_SEEK_TO_TIME) {
		TRACE("AVCodecDecoder::Seek by time ");
		TRACE("from frame %Ld and time %.6f TO Required Time %.6f. ",
			fFrame, fStartTime / 1000000.0, seekTime / 1000000.0);

		*frame = (int64)(seekTime * fOutputFrameRate / 1000000LL);
		*time = seekTime;
	} else if (seekTo == B_MEDIA_SEEK_TO_FRAME) {
		TRACE("AVCodecDecoder::Seek by Frame ");
		TRACE("from time %.6f and frame %Ld TO Required Frame %Ld. ",
			fStartTime / 1000000.0, fFrame, seekFrame);

		*time = (bigtime_t)(seekFrame * 1000000LL / fOutputFrameRate);
		*frame = seekFrame;
	} else
		return B_BAD_VALUE;

	fFrame = *frame;
	fStartTime = *time;
	TRACE("so new frame is %Ld at time %.6f\n", *frame, *time / 1000000.0);
	return B_OK;
}


status_t
AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
		fIsAudio?('a'):('v'));

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("  [%c] requested format = %s\n", fIsAudio?('a'):('v'), buffer);
#endif

	if (fIsAudio)
		return _NegotiateAudioOutputFormat(inOutFormat);
	else
		return _NegotiateVideoOutputFormat(inOutFormat);
}


status_t
AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	if (!fCodecInitDone)
		return B_NO_INIT;

//	TRACE("[%c] AVCodecDecoder::Decode() for time %Ld\n", fIsAudio?('a'):('v'),
//		fStartTime);

	mediaHeader->start_time = fStartTime;

	status_t ret;
	if (fIsAudio)
		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
	else
		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);

	fStartTime = (bigtime_t)(1000000LL * fFrame / fOutputFrameRate);

	return ret;
}


// #pragma mark -


status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate
		= fInputFormat.u.encoded_audio.output.frame_rate;
	outputAudioFormat.channel_count
		= fInputFormat.u.encoded_audio.output.channel_count;
	outputAudioFormat.format = fInputFormat.u.encoded_audio.output.format;
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0)
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;

	outputAudioFormat.buffer_size
		= 1024 * fInputFormat.u.encoded_audio.output.channel_count;
	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;

	fContext->bit_rate = (int)fInputFormat.u.encoded_audio.bit_rate;
	fContext->sample_rate
		= (int)fInputFormat.u.encoded_audio.output.frame_rate;
	fContext->channels = fInputFormat.u.encoded_audio.output.channel_count;
	fContext->block_align = fBlockAlign;
	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fContext->extradata = (uint8_t*)fInputFormat.MetaData();
		fContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n", fContext->bit_rate, fContext->sample_rate,
		fContext->channels, fContext->block_align, fContext->extradata_size);

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	// open new
	int result = avcodec_open(fContext, fCodec);
	fCodecInitDone = (result >= 0);

	fStartTime = 0;
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, init = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fContext->bit_rate, fContext->sample_rate, fContext->channels,
		result, fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	fChunkBuffer = NULL;
	fChunkBufferOffset = 0;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fOutputBufferOffset = 0;
	fOutputBufferSize = 0;

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	if (!fCodecInitDone) {
		TRACE("avcodec_open() failed!\n");
		return B_ERROR;
	}

	return B_OK;
}


status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	fOutputVideoFormat = fInputFormat.u.encoded_video.output;

	fContext->width = fOutputVideoFormat.display.line_width;
	fContext->height = fOutputVideoFormat.display.line_count;
//	fContext->frame_rate = (int)(fOutputVideoFormat.field_rate
//		* fContext->frame_rate_base);

	fOutputFrameRate = fOutputVideoFormat.field_rate;

	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputVideoFormat.display.format = B_YCbCr422;
	else
		fOutputVideoFormat.display.format = B_RGB32;

	// Search for a pixel-format the codec handles
	// TODO: We should try this a couple of times until it succeeds, each
	// time using another pixel-format that is supported by the decoder.
	// But libavcodec doesn't seem to offer any way to tell the decoder
	// which format it should use.
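	// For now the loop below runs only a single iteration: open the codec
	// and ask resolve_colorspace() for a routine that converts from the
	// decoder's native pix_fmt to the negotiated display format.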
	fFormatConversionFunc = 0;
	// Iterate over supported codec formats
	for (int i = 0; i < 1; i++) {
		// close any previous instance
		if (fCodecInitDone) {
			fCodecInitDone = false;
			avcodec_close(fContext);
		}
		// TODO: Set n-th fContext->pix_fmt here
		if (avcodec_open(fContext, fCodec) >= 0) {
			fCodecInitDone = true;

			fFormatConversionFunc = resolve_colorspace(
				fOutputVideoFormat.display.format, fContext->pix_fmt);
		}
		if (fFormatConversionFunc != NULL)
			break;
	}

	if (!fCodecInitDone) {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}

	if (fOutputVideoFormat.display.format == B_YCbCr422) {
		fOutputVideoFormat.display.bytes_per_row
			= 2 * fOutputVideoFormat.display.line_width;
	} else {
		fOutputVideoFormat.display.bytes_per_row
			= 4 * fOutputVideoFormat.display.line_width;
	}

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->u.raw_video = fOutputVideoFormat;

	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v] outFormat = %s\n", buffer);
	TRACE("  returned video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}


status_t
AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
//	TRACE("audio start_time %.6f\n", mediaHeader->start_time / 1000000.0);

	char* output_buffer = (char*)outBuffer;
	*outFrameCount = 0;
	while (*outFrameCount < fOutputFrameCount) {
		if (fOutputBufferSize < 0) {
			TRACE("############ fOutputBufferSize %ld\n",
				fOutputBufferSize);
			fOutputBufferSize = 0;
		}
		if (fChunkBufferSize < 0) {
			TRACE("############ fChunkBufferSize %ld\n",
				fChunkBufferSize);
			fChunkBufferSize = 0;
		}

		if (fOutputBufferSize > 0) {
			// Copy out whatever is left over from the last decode call.
			int32 frames = min_c(fOutputFrameCount - *outFrameCount,
				fOutputBufferSize / fOutputFrameSize);
			memcpy(output_buffer, fOutputBuffer + fOutputBufferOffset,
				frames * fOutputFrameSize);
			fOutputBufferOffset += frames * fOutputFrameSize;
			fOutputBufferSize -= frames * fOutputFrameSize;
			output_buffer += frames * fOutputFrameSize;
			*outFrameCount += frames;
			fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
			continue;
		}
		if (fChunkBufferSize == 0) {
			// Request the next chunk of encoded data from the reader.
			media_header chunkMediaHeader;
			status_t err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize,
				&chunkMediaHeader);
			if (err == B_LAST_BUFFER_ERROR) {
				TRACE("Last Chunk with chunk size %ld\n", fChunkBufferSize);
				fChunkBufferSize = 0;
				return err;
			}
			if (err != B_OK || fChunkBufferSize < 0) {
				printf("GetNextChunk(): error %s (chunk size %ld)\n",
					strerror(err), fChunkBufferSize);
				fChunkBufferSize = 0;
				break;
			}
			fChunkBufferOffset = 0;
			fStartTime = chunkMediaHeader.start_time;
			if (*outFrameCount == 0)
				mediaHeader->start_time = chunkMediaHeader.start_time;
			continue;
		}
		if (fOutputBufferSize == 0) {
			// Decode the next portion of the current chunk.
			int len;
			int out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			len = avcodec_decode_audio2(fContext, (short*)fOutputBuffer,
				&out_size, (uint8_t*)fChunkBuffer + fChunkBufferOffset,
				fChunkBufferSize);
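			// avcodec_decode_audio2() returns the number of bytes it
			// consumed from the chunk buffer (len) and stores the number
			// of bytes of raw PCM written to fOutputBuffer in out_size.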
			if (len < 0) {
				if (!fAudioDecodeError) {
					TRACE("########### audio decode error, "
						"fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
						fChunkBufferSize, fChunkBufferOffset);
					fAudioDecodeError = true;
				}
				out_size = 0;
				len = 0;
				fChunkBufferOffset = 0;
				fChunkBufferSize = 0;
			} else
				fAudioDecodeError = false;

			fChunkBufferOffset += len;
			fChunkBufferSize -= len;
			fOutputBufferOffset = 0;
			fOutputBufferSize = out_size;
		}
	}
	fFrame += *outFrameCount;

//	TRACE("Played %Ld frames at time %Ld\n", *outFrameCount,
//		mediaHeader->start_time);
	return B_OK;
}


status_t
AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	bool firstRun = true;
	while (true) {
		const void* data;
		size_t size;
		media_header chunkMediaHeader;
		status_t err = GetNextChunk(&data, &size, &chunkMediaHeader);
		if (err != B_OK) {
			TRACE("AVCodecDecoder::_DecodeVideo(): error from "
				"GetNextChunk(): %s\n", strerror(err));
			return err;
		}
#ifdef LOG_STREAM_TO_FILE
		if (sDumpedPackets < 100) {
			sStreamLogFile.Write(data, size);
			printf("wrote %ld bytes\n", size);
			sDumpedPackets++;
		} else if (sDumpedPackets == 100)
			sStreamLogFile.Unset();
#endif

		if (firstRun) {
			firstRun = false;

			mediaHeader->type = B_MEDIA_RAW_VIDEO;
//			mediaHeader->start_time = chunkMediaHeader.start_time;
			mediaHeader->file_pos = 0;
			mediaHeader->orig_size = 0;
			mediaHeader->u.raw_video.field_gamma = 1.0;
			mediaHeader->u.raw_video.field_sequence = fFrame;
			mediaHeader->u.raw_video.field_number = 0;
			mediaHeader->u.raw_video.pulldown_number = 0;
			mediaHeader->u.raw_video.first_active_line = 1;
			mediaHeader->u.raw_video.line_count
				= fOutputVideoFormat.display.line_count;

			TRACE("[v] start_time=%02d:%02d.%02d field_sequence=%lu\n",
				int((mediaHeader->start_time / 60000000) % 60),
				int((mediaHeader->start_time / 1000000) % 60),
				int((mediaHeader->start_time / 10000) % 100),
				mediaHeader->u.raw_video.field_sequence);
		}

#if DO_PROFILING
		bigtime_t startTime = system_time();
#endif

		// NOTE: In the FFmpeg code example I've read, the length returned by
		// avcodec_decode_video() is completely ignored. Furthermore, the
		// packet buffers are supposed to contain complete frames only, so we
		// don't seem to be required to buffer packets in case the decoder
		// did not consume a complete packet.
		int gotPicture = 0;
		int len = avcodec_decode_video(fContext, fInputPicture, &gotPicture,
			(uint8_t*)data, size);
		if (len < 0) {
			TRACE("[v] AVCodecDecoder: error in decoding frame %lld: %d\n",
				fFrame, len);
			// NOTE: An error from avcodec_decode_video() seems to be ignored
			// in the ffplay sample code.
//			return B_ERROR;
		}


//		TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//			"fContext->frame_rate = %ld\n", (int)(fContext->pts / (60*60*1000000)),
//			(int)(fContext->pts / (60*1000000)), (int)(fContext->pts / (1000000)),
//			(int)(fContext->pts % 1000000), fContext->frame_number,
//			fContext->frame_rate);
//		TRACE("FFDEC: PTS = %d:%d:%d.%d - fContext->frame_number = %ld "
//			"fContext->frame_rate = %ld\n",
//			(int)(fInputPicture->pts / (60*60*1000000)),
//			(int)(fInputPicture->pts / (60*1000000)),
//			(int)(fInputPicture->pts / (1000000)),
//			(int)(fInputPicture->pts % 1000000), fContext->frame_number,
//			fContext->frame_rate);

		if (gotPicture) {
			int width = fOutputVideoFormat.display.line_width;
			int height = fOutputVideoFormat.display.line_count;
			AVPicture deinterlacedPicture;
			bool useDeinterlacedPicture = false;

			if (fInputPicture->interlaced_frame) {
				// The decoded frame is interlaced: deinterlace it into a
				// temporary picture before the colorspace conversion.
				AVPicture source;
				source.data[0] = fInputPicture->data[0];
				source.data[1] = fInputPicture->data[1];
				source.data[2] = fInputPicture->data[2];
				source.data[3] = fInputPicture->data[3];
				source.linesize[0] = fInputPicture->linesize[0];
				source.linesize[1] = fInputPicture->linesize[1];
				source.linesize[2] = fInputPicture->linesize[2];
				source.linesize[3] = fInputPicture->linesize[3];

				avpicture_alloc(&deinterlacedPicture,
					fContext->pix_fmt, width, height);

				if (avpicture_deinterlace(&deinterlacedPicture, &source,
						fContext->pix_fmt, width, height) < 0) {
					TRACE("[v] avpicture_deinterlace() - error\n");
				} else
					useDeinterlacedPicture = true;
			}

#if DO_PROFILING
			bigtime_t formatConversionStart = system_time();
#endif
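			// Convert the decoded picture from the codec's native pixel
			// format into the negotiated output format, writing directly
			// into the caller's buffer.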
//			TRACE("ONE FRAME OUT !! len=%d size=%ld (%s)\n", len, size,
//				pixfmt_to_string(fContext->pix_fmt));

			// Some decoders do not set pix_fmt until they have decoded 1 frame
			if (fFormatConversionFunc == NULL) {
				fFormatConversionFunc = resolve_colorspace(
					fOutputVideoFormat.display.format, fContext->pix_fmt);
			}
			fOutputPicture->data[0] = (uint8_t*)outBuffer;
			fOutputPicture->linesize[0]
				= fOutputVideoFormat.display.bytes_per_row;

			if (fFormatConversionFunc != NULL) {
				if (useDeinterlacedPicture) {
					AVFrame inputFrame;
					inputFrame.data[0] = deinterlacedPicture.data[0];
					inputFrame.data[1] = deinterlacedPicture.data[1];
					inputFrame.data[2] = deinterlacedPicture.data[2];
					inputFrame.data[3] = deinterlacedPicture.data[3];
					inputFrame.linesize[0] = deinterlacedPicture.linesize[0];
					inputFrame.linesize[1] = deinterlacedPicture.linesize[1];
					inputFrame.linesize[2] = deinterlacedPicture.linesize[2];
					inputFrame.linesize[3] = deinterlacedPicture.linesize[3];

					(*fFormatConversionFunc)(&inputFrame,
						fOutputPicture, width, height);
				} else {
					(*fFormatConversionFunc)(fInputPicture, fOutputPicture,
						width, height);
				}
			}
			if (fInputPicture->interlaced_frame)
				avpicture_free(&deinterlacedPicture);
#ifdef DEBUG
			dump_ffframe(fInputPicture, "ffpict");
//			dump_ffframe(fOutputPicture, "opict");
#endif
			*outFrameCount = 1;
			fFrame++;

#if DO_PROFILING
			bigtime_t doneTime = system_time();
			decodingTime += formatConversionStart - startTime;
			conversionTime += doneTime - formatConversionStart;
			profileCounter++;
			if (!(fFrame % 10)) {
				if (info) {
					printf("[v] profile: d1 = %lld, d2 = %lld (%Ld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, info->time_to_decode);
				} else {
					printf("[v] profile: d1 = %lld, d2 = %lld (%Ld) required "
						"%Ld\n",
						decodingTime / profileCounter,
						conversionTime / profileCounter,
						fFrame, bigtime_t(1000000LL / fOutputFrameRate));
				}
			}
#endif
			return B_OK;
		} else {
			TRACE("frame %lld - no picture yet, len: %d, chunk size: %ld\n",
				fFrame, len, size);
		}
	}
}