1 /* 2 * Copyright (C) 2001 Carlos Hasan 3 * Copyright (C) 2001 François Revol 4 * Copyright (C) 2001 Axel Dörfler 5 * Copyright (C) 2004 Marcus Overhagen 6 * Copyright (C) 2009 Stephan Amßus <superstippi@gmx.de> 7 * Copyright (C) 2014 Colin Günther <coling@gmx.de> 8 * Copyright (C) 2015 Adrien Destugues <pulkomandy@pulkomandy.tk> 9 * 10 * All rights reserved. Distributed under the terms of the MIT License. 11 */ 12 13 //! libavcodec based decoder for Haiku 14 15 16 #include "AVCodecDecoder.h" 17 18 #include <new> 19 20 #include <assert.h> 21 #include <string.h> 22 23 #include <Bitmap.h> 24 #include <Debug.h> 25 #include <String.h> 26 27 #include "Utilities.h" 28 29 30 #undef TRACE 31 //#define TRACE_AV_CODEC 32 #ifdef TRACE_AV_CODEC 33 # define TRACE(x...) printf(x) 34 # define TRACE_AUDIO(x...) printf(x) 35 # define TRACE_VIDEO(x...) printf(x) 36 #else 37 # define TRACE(x...) 38 # define TRACE_AUDIO(x...) 39 # define TRACE_VIDEO(x...) 40 #endif 41 42 //#define LOG_STREAM_TO_FILE 43 #ifdef LOG_STREAM_TO_FILE 44 # include <File.h> 45 static BFile sAudioStreamLogFile( 46 "/boot/home/Desktop/AVCodecDebugAudioStream.raw", 47 B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY); 48 static BFile sVideoStreamLogFile( 49 "/boot/home/Desktop/AVCodecDebugVideoStream.raw", 50 B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY); 51 static int sDumpedPackets = 0; 52 #endif 53 54 typedef AVCodecID CodecID; 55 56 struct wave_format_ex { 57 uint16 format_tag; 58 uint16 channels; 59 uint32 frames_per_sec; 60 uint32 avg_bytes_per_sec; 61 uint16 block_align; 62 uint16 bits_per_sample; 63 uint16 extra_size; 64 // extra_data[extra_size] 65 } _PACKED; 66 67 struct avformat_codec_context { 68 int sample_rate; 69 int channels; 70 }; 71 72 73 // profiling related globals 74 #define DO_PROFILING 0 75 #if DO_PROFILING 76 static bigtime_t decodingTime = 0; 77 static bigtime_t conversionTime = 0; 78 static long profileCounter = 0; 79 #endif 80 81 82 AVCodecDecoder::AVCodecDecoder() 83 : 84 fHeader(), 85 
fInputFormat(), 86 fFrame(0), 87 fIsAudio(false), 88 fCodec(NULL), 89 fCodecContext(avcodec_alloc_context3(NULL)), 90 fResampleContext(NULL), 91 fDecodedData(NULL), 92 fDecodedDataSizeInBytes(0), 93 fPostProcessedDecodedPicture(av_frame_alloc()), 94 fRawDecodedPicture(av_frame_alloc()), 95 fRawDecodedAudio(av_frame_alloc()), 96 97 fCodecInitDone(false), 98 99 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 100 fSwsContext(NULL), 101 #else 102 fFormatConversionFunc(NULL), 103 #endif 104 105 fExtraData(NULL), 106 fExtraDataSize(0), 107 fBlockAlign(0), 108 109 fOutputColorSpace(B_NO_COLOR_SPACE), 110 fOutputFrameCount(0), 111 fOutputFrameRate(1.0), 112 fOutputFrameSize(0), 113 fInputFrameSize(0), 114 115 fChunkBuffer(NULL), 116 fChunkBufferSize(0), 117 fAudioDecodeError(false), 118 119 fDecodedDataBuffer(av_frame_alloc()), 120 fDecodedDataBufferOffset(0), 121 fDecodedDataBufferSize(0), 122 fBufferSinkContext(NULL), 123 fBufferSourceContext(NULL), 124 fFilterGraph(NULL), 125 fFilterFrame(NULL) 126 { 127 TRACE("AVCodecDecoder::AVCodecDecoder()\n"); 128 129 system_info info; 130 get_system_info(&info); 131 132 fCodecContext->err_recognition = AV_EF_CAREFUL; 133 fCodecContext->error_concealment = 3; 134 fCodecContext->thread_count = info.cpu_count; 135 } 136 137 138 AVCodecDecoder::~AVCodecDecoder() 139 { 140 TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v')); 141 142 #if DO_PROFILING 143 if (profileCounter > 0) { 144 printf("[%c] profile: d1 = %lld, d2 = %lld (%Ld)\n", 145 fIsAudio?('a'):('v'), decodingTime / profileCounter, 146 conversionTime / profileCounter, fFrame); 147 } 148 #endif 149 150 if (fCodecInitDone) 151 avcodec_close(fCodecContext); 152 153 swr_free(&fResampleContext); 154 free(fChunkBuffer); 155 free(fDecodedData); 156 157 av_free(fPostProcessedDecodedPicture); 158 av_free(fRawDecodedPicture); 159 av_free(fRawDecodedAudio->opaque); 160 av_free(fRawDecodedAudio); 161 av_free(fCodecContext); 162 av_free(fDecodedDataBuffer); 163 164 
av_frame_free(&fFilterFrame); 165 avfilter_graph_free(&fFilterGraph); 166 167 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 168 if (fSwsContext != NULL) 169 sws_freeContext(fSwsContext); 170 #endif 171 172 delete[] fExtraData; 173 } 174 175 176 void 177 AVCodecDecoder::GetCodecInfo(media_codec_info* mci) 178 { 179 snprintf(mci->short_name, 32, "%s", fCodec->name); 180 snprintf(mci->pretty_name, 96, "%s", fCodec->long_name); 181 mci->id = 0; 182 mci->sub_id = fCodec->id; 183 } 184 185 186 status_t 187 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer, 188 size_t infoSize) 189 { 190 if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO 191 && ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO) 192 return B_ERROR; 193 194 fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO); 195 TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v')); 196 197 #ifdef TRACE_AV_CODEC 198 char buffer[1024]; 199 string_for_format(*ioEncodedFormat, buffer, sizeof(buffer)); 200 TRACE("[%c] input_format = %s\n", fIsAudio?('a'):('v'), buffer); 201 TRACE("[%c] infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize); 202 TRACE("[%c] user_data_type = %08lx\n", fIsAudio?('a'):('v'), 203 ioEncodedFormat->user_data_type); 204 TRACE("[%c] meta_data_size = %ld\n", fIsAudio?('a'):('v'), 205 ioEncodedFormat->MetaDataSize()); 206 #endif 207 208 media_format_description description; 209 if (BMediaFormats().GetCodeFor(*ioEncodedFormat, 210 B_MISC_FORMAT_FAMILY, &description) == B_OK) { 211 if (description.u.misc.file_format != 'ffmp') 212 return B_NOT_SUPPORTED; 213 fCodec = avcodec_find_decoder(static_cast<CodecID>( 214 description.u.misc.codec)); 215 if (fCodec == NULL) { 216 TRACE(" unable to find the correct FFmpeg " 217 "decoder (id = %lu)\n", description.u.misc.codec); 218 return B_ERROR; 219 } 220 TRACE(" found decoder %s\n", fCodec->name); 221 222 const void* extraData = infoBuffer; 223 fExtraDataSize = infoSize; 224 if (description.family == B_WAV_FORMAT_FAMILY 225 && infoSize 
>= sizeof(wave_format_ex)) { 226 TRACE(" trying to use wave_format_ex\n"); 227 // Special case extra data in B_WAV_FORMAT_FAMILY 228 const wave_format_ex* waveFormatData 229 = (const wave_format_ex*)infoBuffer; 230 231 size_t waveFormatSize = infoSize; 232 if (waveFormatData != NULL && waveFormatSize > 0) { 233 fBlockAlign = waveFormatData->block_align; 234 TRACE(" found block align: %d\n", fBlockAlign); 235 fExtraDataSize = waveFormatData->extra_size; 236 // skip the wave_format_ex from the extra data. 237 extraData = waveFormatData + 1; 238 } 239 } else { 240 if (fIsAudio) { 241 fBlockAlign 242 = ioEncodedFormat->u.encoded_audio.output.buffer_size; 243 TRACE(" using buffer_size as block align: %d\n", 244 fBlockAlign); 245 } 246 } 247 if (extraData != NULL && fExtraDataSize > 0) { 248 TRACE("AVCodecDecoder: extra data size %ld\n", infoSize); 249 delete[] fExtraData; 250 fExtraData = new(std::nothrow) char[fExtraDataSize]; 251 if (fExtraData != NULL) 252 memcpy(fExtraData, infoBuffer, fExtraDataSize); 253 else 254 fExtraDataSize = 0; 255 } 256 257 fInputFormat = *ioEncodedFormat; 258 return B_OK; 259 } else { 260 TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n"); 261 } 262 263 printf("AVCodecDecoder::Setup failed!\n"); 264 return B_ERROR; 265 } 266 267 268 status_t 269 AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time) 270 { 271 status_t ret = B_OK; 272 // Reset the FFmpeg codec to flush buffers, so we keep the sync 273 if (fCodecInitDone) { 274 avcodec_flush_buffers(fCodecContext); 275 _ResetTempPacket(); 276 } 277 278 // Flush internal buffers as well. 
279 free(fChunkBuffer); 280 fChunkBuffer = NULL; 281 fChunkBufferSize = 0; 282 fDecodedDataBufferOffset = 0; 283 fDecodedDataBufferSize = 0; 284 fDecodedDataSizeInBytes = 0; 285 286 fFrame = frame; 287 288 return ret; 289 } 290 291 292 status_t 293 AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat) 294 { 295 TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n", 296 fIsAudio?('a'):('v')); 297 298 #ifdef TRACE_AV_CODEC 299 char buffer[1024]; 300 string_for_format(*inOutFormat, buffer, sizeof(buffer)); 301 TRACE(" [%c] requested format = %s\n", fIsAudio?('a'):('v'), buffer); 302 #endif 303 304 if (fIsAudio) 305 return _NegotiateAudioOutputFormat(inOutFormat); 306 else 307 return _NegotiateVideoOutputFormat(inOutFormat); 308 } 309 310 311 status_t 312 AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount, 313 media_header* mediaHeader, media_decode_info* info) 314 { 315 if (!fCodecInitDone) 316 return B_NO_INIT; 317 318 status_t ret; 319 if (fIsAudio) 320 ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info); 321 else 322 ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info); 323 324 return ret; 325 } 326 327 328 // #pragma mark - 329 330 331 void 332 AVCodecDecoder::_ResetTempPacket() 333 { 334 av_init_packet(&fTempPacket); 335 fTempPacket.size = 0; 336 fTempPacket.data = NULL; 337 } 338 339 340 status_t 341 AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat) 342 { 343 TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n"); 344 345 _ApplyEssentialAudioContainerPropertiesToContext(); 346 // This makes audio formats play that encode the audio properties in 347 // the audio container (e.g. WMA) and not in the audio frames 348 // themself (e.g. MP3). 349 // Note: Doing this step unconditionally is OK, because the first call 350 // to _DecodeNextAudioFrameChunk() will update the essential audio 351 // format properties accordingly regardless of the settings here. 
352 353 // close any previous instance 354 if (fCodecInitDone) { 355 fCodecInitDone = false; 356 avcodec_close(fCodecContext); 357 } 358 359 if (avcodec_open2(fCodecContext, fCodec, NULL) >= 0) 360 fCodecInitDone = true; 361 else { 362 TRACE("avcodec_open() failed to init codec!\n"); 363 return B_ERROR; 364 } 365 366 free(fChunkBuffer); 367 fChunkBuffer = NULL; 368 fChunkBufferSize = 0; 369 fAudioDecodeError = false; 370 fDecodedDataBufferOffset = 0; 371 fDecodedDataBufferSize = 0; 372 373 _ResetTempPacket(); 374 375 status_t statusOfDecodingFirstFrameChunk = _DecodeNextAudioFrameChunk(); 376 if (statusOfDecodingFirstFrameChunk != B_OK) { 377 TRACE("[a] decoding first audio frame chunk failed\n"); 378 return B_ERROR; 379 } 380 381 media_multi_audio_format outputAudioFormat; 382 outputAudioFormat = media_raw_audio_format::wildcard; 383 outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN; 384 outputAudioFormat.frame_rate = fCodecContext->sample_rate; 385 outputAudioFormat.channel_count = fCodecContext->channels; 386 ConvertAVSampleFormatToRawAudioFormat(fCodecContext->sample_fmt, 387 outputAudioFormat.format); 388 // Check that format is not still a wild card! 
389 if (outputAudioFormat.format == 0) { 390 TRACE(" format still a wild-card, assuming B_AUDIO_SHORT.\n"); 391 outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT; 392 } 393 outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size; 394 // Check that buffer_size has a sane value 395 size_t sampleSize = outputAudioFormat.format 396 & media_raw_audio_format::B_AUDIO_SIZE_MASK; 397 if (outputAudioFormat.buffer_size == 0) { 398 outputAudioFormat.buffer_size = 512 * sampleSize 399 * outputAudioFormat.channel_count; 400 } 401 402 inOutFormat->type = B_MEDIA_RAW_AUDIO; 403 inOutFormat->u.raw_audio = outputAudioFormat; 404 inOutFormat->require_flags = 0; 405 inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS; 406 407 // Initialize variables needed to manage decoding as much audio frames as 408 // needed to fill the buffer_size. 409 fOutputFrameSize = sampleSize * outputAudioFormat.channel_count; 410 fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize; 411 fOutputFrameRate = outputAudioFormat.frame_rate; 412 if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) 413 fInputFrameSize = sampleSize; 414 else 415 fInputFrameSize = fOutputFrameSize; 416 417 fRawDecodedAudio->opaque 418 = av_realloc(fRawDecodedAudio->opaque, sizeof(avformat_codec_context)); 419 if (fRawDecodedAudio->opaque == NULL) 420 return B_NO_MEMORY; 421 422 if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) { 423 fResampleContext = swr_alloc_set_opts(NULL, 424 fCodecContext->channel_layout, 425 fCodecContext->request_sample_fmt, 426 fCodecContext->sample_rate, 427 fCodecContext->channel_layout, 428 fCodecContext->sample_fmt, 429 fCodecContext->sample_rate, 430 0, NULL); 431 swr_init(fResampleContext); 432 } 433 434 TRACE(" bit_rate = %d, sample_rate = %d, channels = %d, " 435 "output frame size: %d, count: %ld, rate: %.2f\n", 436 fCodecContext->bit_rate, fCodecContext->sample_rate, fCodecContext->channels, 437 fOutputFrameSize, fOutputFrameCount, 
fOutputFrameRate); 438 439 return B_OK; 440 } 441 442 443 status_t 444 AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat) 445 { 446 TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n"); 447 448 TRACE(" requested video format 0x%x\n", 449 inOutFormat->u.raw_video.display.format); 450 451 _ApplyEssentialVideoContainerPropertiesToContext(); 452 // This makes video formats play that encode the video properties in 453 // the video container (e.g. WMV) and not in the video frames 454 // themself (e.g. MPEG2). 455 // Note: Doing this step unconditionally is OK, because the first call 456 // to _DecodeNextVideoFrame() will update the essential video format 457 // properties accordingly regardless of the settings here. 458 459 bool codecCanHandleIncompleteFrames 460 = (fCodec->capabilities & AV_CODEC_CAP_TRUNCATED) != 0; 461 if (codecCanHandleIncompleteFrames) { 462 // Expect and handle video frames to be splitted across consecutive 463 // data chunks. 464 fCodecContext->flags |= AV_CODEC_FLAG_TRUNCATED; 465 } 466 467 // close any previous instance 468 if (fCodecInitDone) { 469 fCodecInitDone = false; 470 avcodec_close(fCodecContext); 471 } 472 473 if (avcodec_open2(fCodecContext, fCodec, NULL) >= 0) 474 fCodecInitDone = true; 475 else { 476 TRACE("avcodec_open() failed to init codec!\n"); 477 return B_ERROR; 478 } 479 480 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 481 fOutputColorSpace = B_RGB32; 482 #else 483 // Make MediaPlayer happy (if not in rgb32 screen depth and no overlay, 484 // it will only ask for YCbCr, which DrawBitmap doesn't handle, so the 485 // default colordepth is RGB32). 
486 if (inOutFormat->u.raw_video.display.format == B_YCbCr422) 487 fOutputColorSpace = B_YCbCr422; 488 else 489 fOutputColorSpace = B_RGB32; 490 #endif 491 492 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 493 if (fSwsContext != NULL) 494 sws_freeContext(fSwsContext); 495 fSwsContext = NULL; 496 #else 497 fFormatConversionFunc = 0; 498 #endif 499 500 free(fChunkBuffer); 501 fChunkBuffer = NULL; 502 fChunkBufferSize = 0; 503 504 _ResetTempPacket(); 505 506 status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame(); 507 if (statusOfDecodingFirstFrame != B_OK) { 508 TRACE("[v] decoding first video frame failed\n"); 509 return B_ERROR; 510 } 511 512 // Note: fSwsContext / fFormatConversionFunc should have been initialized 513 // by first call to _DecodeNextVideoFrame() above. 514 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 515 if (fSwsContext == NULL) { 516 TRACE("No SWS Scale context or decoder has not set the pixel format " 517 "yet!\n"); 518 } 519 #else 520 if (fFormatConversionFunc == NULL) { 521 TRACE("no pixel format conversion function found or decoder has " 522 "not set the pixel format yet!\n"); 523 } 524 #endif 525 526 inOutFormat->type = B_MEDIA_RAW_VIDEO; 527 inOutFormat->require_flags = 0; 528 inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS; 529 inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output; 530 inOutFormat->u.raw_video.interlace = 1; 531 // Progressive (non-interlaced) video frames are delivered 532 inOutFormat->u.raw_video.first_active 533 = fHeader.u.raw_video.first_active_line; 534 inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count; 535 inOutFormat->u.raw_video.pixel_width_aspect 536 = fHeader.u.raw_video.pixel_width_aspect; 537 inOutFormat->u.raw_video.pixel_height_aspect 538 = fHeader.u.raw_video.pixel_height_aspect; 539 #if 0 540 // This was added by Colin Günther in order to handle streams with a 541 // variable frame rate. 
fOutputFrameRate is computed from the stream 542 // time_base, but it actually assumes a timebase equal to the FPS. As far 543 // as I can see, a stream with a variable frame rate would have a higher 544 // resolution time_base and increment the pts (presentation time) of each 545 // frame by a value bigger than one. 546 // 547 // Fixed rate stream: 548 // time_base = 1/50s, frame PTS = 1, 2, 3... (for 50Hz) 549 // 550 // Variable rate stream: 551 // time_base = 1/300s, frame PTS = 6, 12, 18, ... (for 50Hz) 552 // time_base = 1/300s, frame PTS = 5, 10, 15, ... (for 60Hz) 553 // 554 // The fOutputFrameRate currently does not take this into account and 555 // ignores the PTS. This results in playing the above sample at 300Hz 556 // instead of 50 or 60. 557 // 558 // However, comparing the PTS for two consecutive implies we have already 559 // decoded 2 frames, which may not be the case when this method is first 560 // called. 561 inOutFormat->u.raw_video.field_rate = fOutputFrameRate; 562 // Was calculated by first call to _DecodeNextVideoFrame() 563 #endif 564 inOutFormat->u.raw_video.display.format = fOutputColorSpace; 565 inOutFormat->u.raw_video.display.line_width 566 = fHeader.u.raw_video.display_line_width; 567 inOutFormat->u.raw_video.display.line_count 568 = fHeader.u.raw_video.display_line_count; 569 inOutFormat->u.raw_video.display.bytes_per_row 570 = fHeader.u.raw_video.bytes_per_row; 571 572 #ifdef TRACE_AV_CODEC 573 char buffer[1024]; 574 string_for_format(*inOutFormat, buffer, sizeof(buffer)); 575 TRACE("[v] outFormat = %s\n", buffer); 576 TRACE(" returned video format 0x%x\n", 577 inOutFormat->u.raw_video.display.format); 578 #endif 579 580 return B_OK; 581 } 582 583 584 /*! \brief Fills the outBuffer with one or more already decoded audio frames. 585 586 Besides the main duty described above, this method also fills out the other 587 output parameters as documented below. 
588 589 \param outBuffer Pointer to the output buffer to copy the decoded audio 590 frames to. 591 \param outFrameCount Pointer to the output variable to assign the number of 592 copied audio frames (usually several audio frames at once). 593 \param mediaHeader Pointer to the output media header that contains the 594 properties of the decoded audio frame being the first in the outBuffer. 595 \param info Specifies additional decoding parameters. (Note: unused). 596 597 \returns B_OK Decoding audio frames succeeded. 598 \returns B_LAST_BUFFER_ERROR There are no more audio frames available. 599 \returns Other error codes 600 */ 601 status_t 602 AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount, 603 media_header* mediaHeader, media_decode_info* info) 604 { 605 TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n", 606 mediaHeader->start_time / 1000000.0); 607 608 status_t audioDecodingStatus 609 = fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextAudioFrame(); 610 611 if (audioDecodingStatus != B_OK) 612 return audioDecodingStatus; 613 614 *outFrameCount = fDecodedDataSizeInBytes / fOutputFrameSize; 615 *mediaHeader = fHeader; 616 memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes); 617 618 fDecodedDataSizeInBytes = 0; 619 620 return B_OK; 621 } 622 623 624 /*! \brief Fills the outBuffer with an already decoded video frame. 625 626 Besides the main duty described above, this method also fills out the other 627 output parameters as documented below. 628 629 \param outBuffer Pointer to the output buffer to copy the decoded video 630 frame to. 631 \param outFrameCount Pointer to the output variable to assign the number of 632 copied video frames (usually one video frame). 633 \param mediaHeader Pointer to the output media header that contains the 634 decoded video frame properties. 635 \param info Specifies additional decoding parameters. (Note: unused). 636 637 \returns B_OK Decoding a video frame succeeded. 
638 \returns B_LAST_BUFFER_ERROR There are no more video frames available. 639 \returns Other error codes 640 */ 641 status_t 642 AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount, 643 media_header* mediaHeader, media_decode_info* info) 644 { 645 status_t videoDecodingStatus 646 = fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame(); 647 648 if (videoDecodingStatus != B_OK) 649 return videoDecodingStatus; 650 651 *outFrameCount = 1; 652 *mediaHeader = fHeader; 653 memcpy(outBuffer, fDecodedData, mediaHeader->size_used); 654 655 fDecodedDataSizeInBytes = 0; 656 657 return B_OK; 658 } 659 660 661 /*! \brief Decodes next audio frame. 662 663 We decode at least one audio frame into fDecodedData. To achieve this goal, 664 we might need to request several chunks of encoded data resulting in a 665 variable execution time of this function. 666 667 The length of the decoded audio frame(s) is stored in 668 fDecodedDataSizeInBytes. If this variable is greater than zero you can 669 assert that all audio frames in fDecodedData are valid. 670 671 It is assumed that the number of expected audio frames is stored in 672 fOutputFrameCount. So _DecodeNextAudioFrame() must be called only after 673 fOutputFrameCount has been set. 674 675 Note: fOutputFrameCount contains the maximum number of frames a caller 676 of BMediaDecoder::Decode() expects to receive. There is a direct 677 relationship between fOutputFrameCount and the buffer size a caller of 678 BMediaDecoder::Decode() will provide so we make sure to respect this limit 679 for fDecodedDataSizeInBytes. 680 681 On return with status code B_OK the following conditions hold true: 682 1. fDecodedData contains as much audio frames as the caller of 683 BMediaDecoder::Decode() expects. 684 2. fDecodedData contains lesser audio frames as the caller of 685 BMediaDecoder::Decode() expects only when one of the following 686 conditions hold true: 687 i No more audio frames left. 
Consecutive calls to 688 _DecodeNextAudioFrame() will then result in the return of 689 status code B_LAST_BUFFER_ERROR. 690 ii TODO: A change in the size of the audio frames. 691 3. fHeader is populated with the audio frame properties of the first 692 audio frame in fDecodedData. Especially the start_time field of 693 fHeader relates to that first audio frame. Start times of 694 consecutive audio frames in fDecodedData have to be calculated 695 manually (using the frame rate and the frame duration) if the 696 caller needs them. 697 698 TODO: Handle change of channel_count. Such a change results in a change of 699 the audio frame size and thus has different buffer requirements. 700 The most sane approach for implementing this is to return the audio frames 701 that were still decoded with the previous channel_count and inform the 702 client of BMediaDecoder::Decode() about the change so that it can adapt to 703 it. Furthermore we need to adapt our fDecodedData to the new buffer size 704 requirements accordingly. 705 706 \returns B_OK when we successfully decoded enough audio frames 707 \returns B_LAST_BUFFER_ERROR when there are no more audio frames available. 708 \returns Other Errors 709 */ 710 status_t 711 AVCodecDecoder::_DecodeNextAudioFrame() 712 { 713 assert(fTempPacket.size >= 0); 714 assert(fDecodedDataSizeInBytes == 0); 715 // _DecodeNextAudioFrame needs to be called on empty fDecodedData only! 716 // If this assert holds wrong we have a bug somewhere. 
717 718 status_t resetStatus = _ResetRawDecodedAudio(); 719 if (resetStatus != B_OK) 720 return resetStatus; 721 722 while (fRawDecodedAudio->nb_samples < fOutputFrameCount) { 723 _CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow(); 724 725 bool decodedDataBufferHasData = fDecodedDataBufferSize > 0; 726 if (decodedDataBufferHasData) { 727 _MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes(); 728 continue; 729 } 730 731 status_t decodeAudioChunkStatus = _DecodeNextAudioFrameChunk(); 732 if (decodeAudioChunkStatus != B_OK) 733 return decodeAudioChunkStatus; 734 } 735 736 fFrame += fRawDecodedAudio->nb_samples; 737 fDecodedDataSizeInBytes = fRawDecodedAudio->linesize[0]; 738 739 _UpdateMediaHeaderForAudioFrame(); 740 741 #ifdef DEBUG 742 dump_ffframe_audio(fRawDecodedAudio, "ffaudi"); 743 #endif 744 745 TRACE_AUDIO(" frame count: %ld current: %lld\n", 746 fRawDecodedAudio->nb_samples, fFrame); 747 748 return B_OK; 749 } 750 751 752 /*! \brief Applies all essential audio input properties to fCodecContext that were 753 passed to AVCodecDecoder when Setup() was called. 754 755 Note: This function must be called before the AVCodec is opened via 756 avcodec_open2(). Otherwise the behaviour of FFMPEG's audio decoding 757 function avcodec_decode_audio4() is undefined. 
758 759 Essential properties applied from fInputFormat.u.encoded_audio: 760 - bit_rate copied to fCodecContext->bit_rate 761 - frame_size copied to fCodecContext->frame_size 762 - output.format converted to fCodecContext->sample_fmt 763 - output.frame_rate copied to fCodecContext->sample_rate 764 - output.channel_count copied to fCodecContext->channels 765 766 Other essential properties being applied: 767 - fBlockAlign to fCodecContext->block_align 768 - fExtraData to fCodecContext->extradata 769 - fExtraDataSize to fCodecContext->extradata_size 770 771 TODO: Either the following documentation section should be removed or this 772 TODO when it is clear whether fInputFormat.MetaData() and 773 fInputFormat.MetaDataSize() have to be applied to fCodecContext. See the related 774 TODO in the method implementation. 775 Only applied when fInputFormat.MetaDataSize() is greater than zero: 776 - fInputFormat.MetaData() to fCodecContext->extradata 777 - fInputFormat.MetaDataSize() to fCodecContext->extradata_size 778 */ 779 void 780 AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext() 781 { 782 media_encoded_audio_format containerProperties 783 = fInputFormat.u.encoded_audio; 784 785 fCodecContext->bit_rate 786 = static_cast<int>(containerProperties.bit_rate); 787 fCodecContext->frame_size 788 = static_cast<int>(containerProperties.frame_size); 789 ConvertRawAudioFormatToAVSampleFormat( 790 containerProperties.output.format, fCodecContext->sample_fmt); 791 ConvertRawAudioFormatToAVSampleFormat( 792 containerProperties.output.format, fCodecContext->request_sample_fmt); 793 fCodecContext->sample_rate 794 = static_cast<int>(containerProperties.output.frame_rate); 795 fCodecContext->channels 796 = static_cast<int>(containerProperties.output.channel_count); 797 // Check that channel count is not still a wild card! 
798 if (fCodecContext->channels == 0) { 799 TRACE(" channel_count still a wild-card, assuming stereo.\n"); 800 fCodecContext->channels = 2; 801 } 802 803 fCodecContext->block_align = fBlockAlign; 804 fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData); 805 fCodecContext->extradata_size = fExtraDataSize; 806 807 // TODO: This probably needs to go away, there is some misconception 808 // about extra data / info buffer and meta data. See 809 // Reader::GetStreamInfo(). The AVFormatReader puts extradata and 810 // extradata_size into media_format::MetaData(), but used to ignore 811 // the infoBuffer passed to GetStreamInfo(). I think this may be why 812 // the code below was added. 813 if (fInputFormat.MetaDataSize() > 0) { 814 fCodecContext->extradata = static_cast<uint8_t*>( 815 const_cast<void*>(fInputFormat.MetaData())); 816 fCodecContext->extradata_size = fInputFormat.MetaDataSize(); 817 } 818 819 TRACE(" bit_rate %d, sample_rate %d, channels %d, block_align %d, " 820 "extradata_size %d\n", 821 fCodecContext->bit_rate, 822 fCodecContext->sample_rate, 823 fCodecContext->channels, 824 fCodecContext->block_align, 825 fCodecContext->extradata_size); 826 } 827 828 829 /*! \brief Resets important fields in fRawDecodedVideo to their default values. 830 831 Note: Also initializes fDecodedData if not done already. 832 833 \returns B_OK Resetting successfully completed. 834 \returns B_NO_MEMORY No memory left for correct operation. 
835 */ 836 status_t 837 AVCodecDecoder::_ResetRawDecodedAudio() 838 { 839 if (fDecodedData == NULL) { 840 size_t maximumSizeOfDecodedData = fOutputFrameCount * fOutputFrameSize; 841 fDecodedData 842 = static_cast<uint8_t*>(malloc(maximumSizeOfDecodedData)); 843 } 844 if (fDecodedData == NULL) 845 return B_NO_MEMORY; 846 847 fRawDecodedAudio->data[0] = fDecodedData; 848 fRawDecodedAudio->linesize[0] = 0; 849 fRawDecodedAudio->format = AV_SAMPLE_FMT_NONE; 850 fRawDecodedAudio->pkt_dts = AV_NOPTS_VALUE; 851 fRawDecodedAudio->nb_samples = 0; 852 memset(fRawDecodedAudio->opaque, 0, sizeof(avformat_codec_context)); 853 854 return B_OK; 855 } 856 857 858 /*! \brief Checks fDecodedDataBufferSize and fTempPacket for invalid values, 859 reports them and assigns valid values. 860 861 Note: This method is intended to be called before any code is executed that 862 deals with moving, loading or decoding any audio frames. 863 */ 864 void 865 AVCodecDecoder::_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow() 866 { 867 if (fDecodedDataBufferSize < 0) { 868 fprintf(stderr, "Decoding read past the end of the decoded data " 869 "buffer! %" B_PRId32 "\n", fDecodedDataBufferSize); 870 fDecodedDataBufferSize = 0; 871 } 872 if (fTempPacket.size < 0) { 873 fprintf(stderr, "Decoding read past the end of the temp packet! %d\n", 874 fTempPacket.size); 875 fTempPacket.size = 0; 876 } 877 } 878 879 880 /*! \brief Moves audio frames from fDecodedDataBuffer to fRawDecodedAudio (and 881 thus to fDecodedData) and updates the start times of fRawDecodedAudio, 882 fDecodedDataBuffer and fTempPacket accordingly. 
	When moving audio frames to fRawDecodedAudio this method also makes sure
	that the following important fields of fRawDecodedAudio are populated and
	updated with correct values:
		- fRawDecodedAudio->data[0]: Points to first free byte of fDecodedData
		- fRawDecodedAudio->linesize[0]: Total size of frames in fDecodedData
		- fRawDecodedAudio->format: Format of first audio frame
		- fRawDecodedAudio->pkt_dts: Start time of first audio frame
		- fRawDecodedAudio->nb_samples: Number of audio frames
		- fRawDecodedAudio->opaque: Contains the following fields for the first
		  audio frame:
		      - channels: Channel count of first audio frame
		      - sample_rate: Frame rate of first audio frame

	This function assumes to be called only when the following assumptions
	hold true:
		1. There are decoded audio frames available in fDecodedDataBuffer
		   meaning that fDecodedDataBufferSize is greater than zero.
		2. There is space left in fRawDecodedAudio to move some audio frames
		   in. This means that fRawDecodedAudio has lesser audio frames than
		   the maximum allowed (specified by fOutputFrameCount).
		3. The audio frame rate is known so that we can calculate the time
		   range (covered by the moved audio frames) to update the start times
		   accordingly.
		4. The field fRawDecodedAudio->opaque points to a memory block
		   representing a structure of type avformat_codec_context.

	After this function returns the caller can safely make the following
	assumptions:
		1. The number of decoded audio frames in fDecodedDataBuffer is
		   decreased though it may still be greater then zero.
		2. The number of frames in fRawDecodedAudio has increased and all
		   important fields are updated (see listing above).
		3. Start times of fDecodedDataBuffer and fTempPacket were increased
		   with the time range covered by the moved audio frames.

	Note: This function raises an exception (by calling the debugger), when
	fDecodedDataBufferSize is not a multiple of fOutputFrameSize.
*/
void
AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
{
	assert(fDecodedDataBufferSize > 0);
	assert(fRawDecodedAudio->nb_samples < fOutputFrameCount);
	assert(fOutputFrameRate > 0);

	int32 outFrames = fOutputFrameCount - fRawDecodedAudio->nb_samples;
	int32 inFrames = fDecodedDataBufferSize;

	// Move only as many frames as fit into the remaining output space.
	int32 frames = min_c(outFrames, inFrames);
	if (frames == 0)
		debugger("fDecodedDataBufferSize not multiple of frame size!");

	// Some decoders do not support format conversion on themselves, or use
	// "planar" audio (each channel separated instead of interleaved samples).
	// In that case, we use swresample to convert the data
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
#if 0
		const uint8_t* ptr[8];
		for (int i = 0; i < 8; i++) {
			if (fDecodedDataBuffer->data[i] == NULL)
				ptr[i] = NULL;
			else
				ptr[i] = fDecodedDataBuffer->data[i] + fDecodedDataBufferOffset;
		}

		// When there are more input frames than space in the output buffer,
		// we could feed everything to swr and it would buffer the extra data.
		// However, there is no easy way to flush that data without feeding more
		// input, and it makes our timestamp computations fail.
		// So, we feed only as much frames as we can get out, and handle the
		// buffering ourselves.
		// TODO Ideally, we should try to size our output buffer so that it can
		// always hold all the output (swr provides helper functions for this)
		inFrames = frames;
		frames = swr_convert(fResampleContext, fRawDecodedAudio->data,
			outFrames, ptr, inFrames);

		if (frames < 0)
			debugger("resampling failed");
#else
		// interleave planar audio with same format
		uintptr_t out = (uintptr_t)fRawDecodedAudio->data[0];
		int32 offset = fDecodedDataBufferOffset;
		for (int i = 0; i < frames; i++) {
			for (int j = 0; j < fCodecContext->channels; j++) {
				memcpy((void*)out, fDecodedDataBuffer->data[j]
					+ offset, fInputFrameSize);
				out += fInputFrameSize;
			}
			offset += fInputFrameSize;
		}
		outFrames = frames;
		inFrames = frames;
#endif
	} else {
		// Interleaved samples: one flat copy moves all channels at once.
		memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
			+ fDecodedDataBufferOffset, frames * fOutputFrameSize);
		outFrames = frames;
		inFrames = frames;
	}

	size_t remainingSize = inFrames * fInputFrameSize;
	size_t decodedSize = outFrames * fOutputFrameSize;
	fDecodedDataBufferSize -= inFrames;

	// fRawDecodedAudio->data[0] only equals fDecodedData before the first
	// move; in that case the first frame's properties must be published.
	bool firstAudioFramesCopiedToRawDecodedAudio
		= fRawDecodedAudio->data[0] != fDecodedData;
	if (!firstAudioFramesCopiedToRawDecodedAudio) {
		fRawDecodedAudio->format = fDecodedDataBuffer->format;
		fRawDecodedAudio->pkt_dts = fDecodedDataBuffer->pkt_dts;

		avformat_codec_context* codecContext
			= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
		codecContext->channels = fCodecContext->channels;
		codecContext->sample_rate = fCodecContext->sample_rate;
	}

	fRawDecodedAudio->data[0] += decodedSize;
	fRawDecodedAudio->linesize[0] += decodedSize;
	fRawDecodedAudio->nb_samples += outFrames;

	fDecodedDataBufferOffset += remainingSize;

	// Update start times accordingly
	bigtime_t framesTimeInterval = static_cast<bigtime_t>(
		(1000000LL * frames) / fOutputFrameRate);
	fDecodedDataBuffer->pkt_dts += framesTimeInterval;
		// Start time of buffer is updated in case that it contains
		// more audio frames to move.
	fTempPacket.dts += framesTimeInterval;
		// Start time of fTempPacket is updated in case the fTempPacket
		// contains more audio frames to decode.
}


/*!	\brief Decodes next chunk of audio frames.

	This method handles all the details of loading the input buffer
	(fChunkBuffer) at the right time and of calling FFMPEG often enough until
	some audio frames have been decoded.

	FFMPEG decides how much audio frames belong to a chunk. Because of that
	it is very likely that _DecodeNextAudioFrameChunk has to be called several
	times to decode enough audio frames to please the caller of
	BMediaDecoder::Decode().

	This function assumes to be called only when the following assumptions
	hold true:
		1. fDecodedDataBufferSize equals zero.

	After this function returns successfully the caller can safely make the
	following assumptions:
		1. fDecodedDataBufferSize is greater than zero.
		2. fDecodedDataBufferOffset is set to zero.
		3. fDecodedDataBuffer contains audio frames.


	\returns B_OK on successfully decoding one audio frame chunk.
	\returns B_LAST_BUFFER_ERROR No more audio frame chunks available. From
		this point on further calls will return this same error.
1044 \returns B_ERROR Decoding failed 1045 */ 1046 status_t 1047 AVCodecDecoder::_DecodeNextAudioFrameChunk() 1048 { 1049 assert(fDecodedDataBufferSize == 0); 1050 1051 while (fDecodedDataBufferSize == 0) { 1052 status_t loadingChunkStatus 1053 = _LoadNextChunkIfNeededAndAssignStartTime(); 1054 if (loadingChunkStatus != B_OK) 1055 return loadingChunkStatus; 1056 1057 status_t decodingStatus 1058 = _DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer(); 1059 if (decodingStatus != B_OK) { 1060 // Assume the audio decoded until now is broken so replace it with 1061 // some silence. 1062 memset(fDecodedData, 0, fRawDecodedAudio->linesize[0]); 1063 1064 if (!fAudioDecodeError) { 1065 // Report failure if not done already 1066 int32 chunkBufferOffset = fTempPacket.data - fChunkBuffer; 1067 printf("########### audio decode error, " 1068 "fTempPacket.size %d, fChunkBuffer data offset %" B_PRId32 1069 "\n", fTempPacket.size, chunkBufferOffset); 1070 fAudioDecodeError = true; 1071 } 1072 1073 // Assume that next audio chunk can be decoded so keep decoding. 1074 continue; 1075 } 1076 1077 fAudioDecodeError = false; 1078 } 1079 1080 return B_OK; 1081 } 1082 1083 1084 /*! \brief Tries to decode at least one audio frame and store it in the 1085 fDecodedDataBuffer. 1086 1087 This function assumes to be called only when the following assumptions 1088 hold true: 1089 1. fDecodedDataBufferSize equals zero. 1090 2. fTempPacket.size is greater than zero. 1091 1092 After this function returns successfully the caller can safely make the 1093 following assumptions: 1094 1. fDecodedDataBufferSize is greater than zero in the common case. 1095 Also see "Note" below. 1096 2. fTempPacket was updated to exclude the data chunk that was consumed 1097 by avcodec_decode_audio4(). 1098 3. fDecodedDataBufferOffset is set to zero. 1099 1100 When this function failed to decode at least one audio frame due to a 1101 decoding error the caller can safely make the following assumptions: 1102 1. 
fDecodedDataBufferSize equals zero. 1103 2. fTempPacket.size equals zero. 1104 1105 Note: It is possible that there wasn't any audio frame decoded into 1106 fDecodedDataBuffer after calling this function. This is normal and can 1107 happen when there was either a decoding error or there is some decoding 1108 delay in FFMPEGs audio decoder. Another call to this method is totally 1109 safe and is even expected as long as the calling assumptions hold true. 1110 1111 \returns B_OK Decoding successful. fDecodedDataBuffer contains decoded 1112 audio frames only when fDecodedDataBufferSize is greater than zero. 1113 fDecodedDataBuffer is empty, when avcodec_decode_audio4() didn't return 1114 audio frames due to delayed decoding or incomplete audio frames. 1115 \returns B_ERROR Decoding failed thus fDecodedDataBuffer contains no audio 1116 frames. 1117 */ 1118 status_t 1119 AVCodecDecoder::_DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer() 1120 { 1121 assert(fDecodedDataBufferSize == 0); 1122 1123 memset(fDecodedDataBuffer, 0, sizeof(AVFrame)); 1124 av_frame_unref(fDecodedDataBuffer); 1125 fDecodedDataBufferOffset = 0; 1126 int gotAudioFrame = 0; 1127 1128 int encodedDataSizeInBytes = avcodec_decode_audio4(fCodecContext, 1129 fDecodedDataBuffer, &gotAudioFrame, &fTempPacket); 1130 if (encodedDataSizeInBytes <= 0) { 1131 // Error or failure to produce decompressed output. 1132 // Skip the temp packet data entirely. 1133 fTempPacket.size = 0; 1134 return B_ERROR; 1135 } 1136 1137 fTempPacket.data += encodedDataSizeInBytes; 1138 fTempPacket.size -= encodedDataSizeInBytes; 1139 1140 bool gotNoAudioFrame = gotAudioFrame == 0; 1141 if (gotNoAudioFrame) 1142 return B_OK; 1143 1144 fDecodedDataBufferSize = fDecodedDataBuffer->nb_samples; 1145 if (fDecodedDataBufferSize < 0) 1146 fDecodedDataBufferSize = 0; 1147 1148 return B_OK; 1149 } 1150 1151 1152 /*! \brief Updates relevant fields of the class member fHeader with the 1153 properties of the most recently decoded audio frame. 
1154 1155 The following fields of fHeader are updated: 1156 - fHeader.type 1157 - fHeader.file_pos 1158 - fHeader.orig_size 1159 - fHeader.start_time 1160 - fHeader.size_used 1161 - fHeader.u.raw_audio.frame_rate 1162 - fHeader.u.raw_audio.channel_count 1163 1164 It is assumed that this function is called only when the following asserts 1165 hold true: 1166 1. We actually got a new audio frame decoded by the audio decoder. 1167 2. fHeader wasn't updated for the new audio frame yet. You MUST call 1168 this method only once per decoded audio frame. 1169 3. fRawDecodedAudio's fields relate to the first audio frame contained 1170 in fDecodedData. Especially the following fields are of importance: 1171 - fRawDecodedAudio->pkt_dts: Start time of first audio frame 1172 - fRawDecodedAudio->opaque: Contains the following fields for 1173 the first audio frame: 1174 - channels: Channel count of first audio frame 1175 - sample_rate: Frame rate of first audio frame 1176 */ 1177 void 1178 AVCodecDecoder::_UpdateMediaHeaderForAudioFrame() 1179 { 1180 fHeader.type = B_MEDIA_RAW_AUDIO; 1181 fHeader.file_pos = 0; 1182 fHeader.orig_size = 0; 1183 fHeader.start_time = fRawDecodedAudio->pkt_dts; 1184 fHeader.size_used = fRawDecodedAudio->linesize[0]; 1185 1186 avformat_codec_context* codecContext 1187 = static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque); 1188 fHeader.u.raw_audio.channel_count = codecContext->channels; 1189 fHeader.u.raw_audio.frame_rate = codecContext->sample_rate; 1190 } 1191 1192 1193 /*! \brief Decodes next video frame. 1194 1195 We decode exactly one video frame into fDecodedData. To achieve this goal, 1196 we might need to request several chunks of encoded data resulting in a 1197 variable execution time of this function. 1198 1199 The length of the decoded video frame is stored in 1200 fDecodedDataSizeInBytes. If this variable is greater than zero, you can 1201 assert that there is a valid video frame available in fDecodedData. 
	The decoded video frame in fDecodedData has color space conversion and
	deinterlacing already applied.

	To every decoded video frame there is a media_header populated in
	fHeader, containing the corresponding video frame properties.

	Normally every decoded video frame has a start_time field populated in the
	associated fHeader, that determines the presentation time of the frame.
	This relationship will only hold true, when each data chunk that is
	provided via GetNextChunk() contains data for exactly one encoded video
	frame (one complete frame) - not more and not less.

	We can decode data chunks that contain partial video frame data, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to establish a meaningful relationship
	between an incomplete frame and the start time it should be presented.
	Though this might change in the future.

	We can decode data chunks that contain more than one video frame, too. In
	that case, you cannot trust the value of the start_time field in fHeader.
	We simply have no logic in place to track the start_time across multiple
	video frames. So a meaningful relationship between the 2nd, 3rd, ... frame
	and the start time it should be presented isn't established at the moment.
	Though this might change in the future.

	Moreover, the fOutputFrameRate variable is updated for every decoded video
	frame.

	On first call the member variables fSwsContext / fFormatConversionFunc are
	initialized.

	\returns B_OK when we successfully decoded one video frame
	\returns B_LAST_BUFFER_ERROR when there are no more video frames available.
	\returns B_NO_MEMORY when we have no memory left for correct operation.
1237 \returns Other Errors 1238 */ 1239 status_t 1240 AVCodecDecoder::_DecodeNextVideoFrame() 1241 { 1242 int error; 1243 int send_error; 1244 1245 #if DO_PROFILING 1246 bigtime_t startTime = system_time(); 1247 #endif 1248 1249 error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture); 1250 1251 if (error == AVERROR_EOF) 1252 return B_LAST_BUFFER_ERROR; 1253 1254 if (error == AVERROR(EAGAIN)) { 1255 do { 1256 status_t loadingChunkStatus 1257 = _LoadNextChunkIfNeededAndAssignStartTime(); 1258 if (loadingChunkStatus == B_LAST_BUFFER_ERROR) 1259 return _FlushOneVideoFrameFromDecoderBuffer(); 1260 if (loadingChunkStatus != B_OK) { 1261 TRACE("[v] AVCodecDecoder::_DecodeNextVideoFrame(): error from " 1262 "GetNextChunk(): %s\n", strerror(loadingChunkStatus)); 1263 return loadingChunkStatus; 1264 } 1265 1266 char timestamp[AV_TS_MAX_STRING_SIZE]; 1267 av_ts_make_time_string(timestamp, 1268 fTempPacket.dts, &fCodecContext->time_base); 1269 TRACE("[v] Feed %d more bytes (dts %s)\n", fTempPacket.size, 1270 timestamp); 1271 1272 send_error = avcodec_send_packet(fCodecContext, &fTempPacket); 1273 if (send_error < 0 && send_error != AVERROR(EAGAIN)) { 1274 TRACE("[v] AVCodecDecoder: ignoring error in decoding frame " 1275 "%lld: %d\n", fFrame, error); 1276 } 1277 1278 // Packet is consumed, clear it 1279 fTempPacket.data = NULL; 1280 fTempPacket.size = 0; 1281 1282 error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture); 1283 if (error != 0 && error != AVERROR(EAGAIN)) { 1284 TRACE("[v] frame %lld - decoding error, error code: %d, " 1285 "chunk size: %ld\n", fFrame, error, fChunkBufferSize); 1286 } 1287 1288 } while (error != 0); 1289 } 1290 1291 #if DO_PROFILING 1292 bigtime_t formatConversionStart = system_time(); 1293 #endif 1294 1295 status_t handleStatus = _HandleNewVideoFrameAndUpdateSystemState(); 1296 if (handleStatus != B_OK) 1297 return handleStatus; 1298 1299 #if DO_PROFILING 1300 bigtime_t doneTime = system_time(); 1301 decodingTime += 
formatConversionStart - startTime; 1302 conversionTime += doneTime - formatConversionStart; 1303 profileCounter++; 1304 if (!(fFrame % 5)) { 1305 printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required %Ld\n", 1306 decodingTime / profileCounter, conversionTime / profileCounter, 1307 fFrame, bigtime_t(1000000LL / fOutputFrameRate)); 1308 decodingTime = 0; 1309 conversionTime = 0; 1310 profileCounter = 0; 1311 } 1312 #endif 1313 return error; 1314 } 1315 1316 1317 /*! \brief Applies all essential video input properties to fCodecContext that were 1318 passed to AVCodecDecoder when Setup() was called. 1319 1320 Note: This function must be called before the AVCodec is opened via 1321 avcodec_open2(). Otherwise the behaviour of FFMPEG's video decoding 1322 function avcodec_decode_video2() is undefined. 1323 1324 Essential properties applied from fInputFormat.u.encoded_video.output: 1325 - display.line_width copied to fCodecContext->width 1326 - display.line_count copied to fCodecContext->height 1327 - pixel_width_aspect and pixel_height_aspect converted to 1328 fCodecContext->sample_aspect_ratio 1329 - field_rate converted to fCodecContext->time_base and 1330 fCodecContext->ticks_per_frame 1331 1332 Other essential properties being applied: 1333 - fExtraData to fCodecContext->extradata 1334 - fExtraDataSize to fCodecContext->extradata_size 1335 */ 1336 void 1337 AVCodecDecoder::_ApplyEssentialVideoContainerPropertiesToContext() 1338 { 1339 media_raw_video_format containerProperties 1340 = fInputFormat.u.encoded_video.output; 1341 1342 fCodecContext->width = containerProperties.display.line_width; 1343 fCodecContext->height = containerProperties.display.line_count; 1344 1345 if (containerProperties.pixel_width_aspect > 0 1346 && containerProperties.pixel_height_aspect > 0) { 1347 ConvertVideoAspectWidthAndHeightToAVCodecContext( 1348 containerProperties.pixel_width_aspect, 1349 containerProperties.pixel_height_aspect, *fCodecContext); 1350 } 1351 1352 if 
(containerProperties.field_rate > 0.0) { 1353 ConvertVideoFrameRateToAVCodecContext(containerProperties.field_rate, 1354 *fCodecContext); 1355 } 1356 1357 fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData); 1358 fCodecContext->extradata_size = fExtraDataSize; 1359 } 1360 1361 1362 /*! \brief Loads the next chunk into fChunkBuffer and assigns it (including 1363 the start time) to fTempPacket but only if fTempPacket is empty. 1364 1365 \returns B_OK 1366 1. meaning: Next chunk is loaded. 1367 2. meaning: No need to load and assign anything. Proceed as usual. 1368 \returns B_LAST_BUFFER_ERROR No more chunks available. fChunkBuffer and 1369 fTempPacket are left untouched. 1370 \returns Other errors Caller should bail out because fChunkBuffer and 1371 fTempPacket are in unknown states. Normal operation cannot be 1372 guaranteed. 1373 */ 1374 status_t 1375 AVCodecDecoder::_LoadNextChunkIfNeededAndAssignStartTime() 1376 { 1377 if (fTempPacket.size > 0) 1378 return B_OK; 1379 1380 const void* chunkBuffer = NULL; 1381 size_t chunkBufferSize = 0; 1382 // In the case that GetNextChunk() returns an error fChunkBufferSize 1383 // should be left untouched. 1384 media_header chunkMediaHeader; 1385 1386 status_t getNextChunkStatus = GetNextChunk(&chunkBuffer, &chunkBufferSize, 1387 &chunkMediaHeader); 1388 if (getNextChunkStatus != B_OK) 1389 return getNextChunkStatus; 1390 1391 status_t chunkBufferPaddingStatus 1392 = _CopyChunkToChunkBufferAndAddPadding(chunkBuffer, chunkBufferSize); 1393 if (chunkBufferPaddingStatus != B_OK) 1394 return chunkBufferPaddingStatus; 1395 1396 fTempPacket.data = fChunkBuffer; 1397 fTempPacket.size = fChunkBufferSize; 1398 fTempPacket.dts = chunkMediaHeader.start_time; 1399 // Let FFMPEG handle the correct relationship between start_time and 1400 // decoded a/v frame. By doing so we are simply copying the way how it 1401 // is implemented in ffplay.c for video frames (for audio frames it 1402 // works, too, but isn't used by ffplay.c). 
1403 // \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502 1404 // 1405 // FIXME: Research how to establish a meaningful relationship between 1406 // start_time and decoded a/v frame when the received chunk buffer 1407 // contains partial a/v frames. Maybe some data formats do contain time 1408 // stamps (ake pts / dts fields) that can be evaluated by FFMPEG. But 1409 // as long as I don't have such video data to test it, it makes no 1410 // sense trying to implement it. 1411 // 1412 // FIXME: Implement tracking start_time of video frames originating in 1413 // data chunks that encode more than one video frame at a time. In that 1414 // case on would increment the start_time for each consecutive frame of 1415 // such a data chunk (like it is done for audio frame decoding). But as 1416 // long as I don't have such video data to test it, it makes no sense 1417 // to implement it. 1418 1419 #ifdef LOG_STREAM_TO_FILE 1420 BFile* logFile = fIsAudio ? &sAudioStreamLogFile : &sVideoStreamLogFile; 1421 if (sDumpedPackets < 100) { 1422 logFile->Write(chunkBuffer, fChunkBufferSize); 1423 printf("wrote %ld bytes\n", fChunkBufferSize); 1424 sDumpedPackets++; 1425 } else if (sDumpedPackets == 100) 1426 logFile->Unset(); 1427 #endif 1428 1429 return B_OK; 1430 } 1431 1432 1433 /*! \brief Copies a chunk into fChunkBuffer and adds a "safety net" of 1434 additional memory as required by FFMPEG for input buffers to video 1435 decoders. 1436 1437 This is needed so that some decoders can read safely a predefined number of 1438 bytes at a time for performance optimization purposes. 1439 1440 The additional memory has a size of AV_INPUT_BUFFER_PADDING_SIZE as defined 1441 in avcodec.h. 1442 1443 Ownership of fChunkBuffer memory is with the class so it needs to be freed 1444 at the right times (on destruction, on seeking). 
1445 1446 Also update fChunkBufferSize to reflect the size of the contained data 1447 (leaving out the padding). 1448 1449 \param chunk The chunk to copy. 1450 \param chunkSize Size of the chunk in bytes 1451 1452 \returns B_OK Padding was successful. You are responsible for releasing the 1453 allocated memory. fChunkBufferSize is set to chunkSize. 1454 \returns B_NO_MEMORY Padding failed. 1455 fChunkBuffer is set to NULL making it safe to call free() on it. 1456 fChunkBufferSize is set to 0 to reflect the size of fChunkBuffer. 1457 */ 1458 status_t 1459 AVCodecDecoder::_CopyChunkToChunkBufferAndAddPadding(const void* chunk, 1460 size_t chunkSize) 1461 { 1462 uint8_t* tmpBuffer = static_cast<uint8_t*>(realloc(fChunkBuffer, 1463 chunkSize + AV_INPUT_BUFFER_PADDING_SIZE)); 1464 if (tmpBuffer == NULL) { 1465 free(fChunkBuffer); 1466 fChunkBuffer = NULL; 1467 fChunkBufferSize = 0; 1468 return B_NO_MEMORY; 1469 } else { 1470 fChunkBuffer = tmpBuffer; 1471 } 1472 1473 memcpy(fChunkBuffer, chunk, chunkSize); 1474 memset(fChunkBuffer + chunkSize, 0, AV_INPUT_BUFFER_PADDING_SIZE); 1475 // Establish safety net, by zero'ing the padding area. 1476 1477 fChunkBufferSize = chunkSize; 1478 1479 return B_OK; 1480 } 1481 1482 1483 /*! \brief Executes all steps needed for a freshly decoded video frame. 1484 1485 \see _UpdateMediaHeaderForVideoFrame() and 1486 \see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to 1487 call this method. 1488 1489 \returns B_OK when video frame was handled successfully 1490 \returnb B_NO_MEMORY when no memory is left for correct operation. 
1491 */ 1492 status_t 1493 AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState() 1494 { 1495 _UpdateMediaHeaderForVideoFrame(); 1496 status_t postProcessStatus = _DeinterlaceAndColorConvertVideoFrame(); 1497 if (postProcessStatus != B_OK) 1498 return postProcessStatus; 1499 1500 ConvertAVCodecContextToVideoFrameRate(*fCodecContext, fOutputFrameRate); 1501 1502 #ifdef DEBUG 1503 dump_ffframe_video(fRawDecodedPicture, "ffpict"); 1504 #endif 1505 1506 fFrame++; 1507 1508 return B_OK; 1509 } 1510 1511 1512 /*! \brief Flushes one video frame - if any - still buffered by the decoder. 1513 1514 Some FFMPEG decoder are buffering video frames. To retrieve those buffered 1515 frames the decoder needs to be told so. 1516 1517 The intended use of this method is to call it, once there are no more data 1518 chunks for decoding left. Reframed in other words: Once GetNextChunk() 1519 returns with status B_LAST_BUFFER_ERROR it is time to start flushing. 1520 1521 \returns B_OK Retrieved one video frame, handled it accordingly and updated 1522 the system state accordingly. 1523 There maybe more video frames left. So it is valid for the client of 1524 AVCodecDecoder to call it one more time. 1525 1526 \returns B_LAST_BUFFER_ERROR No video frame left. 1527 The client of the AVCodecDecoder should stop calling it now. 1528 1529 \returns B_NO_MEMORY No memory left for correct operation. 1530 */ 1531 status_t 1532 AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer() 1533 { 1534 // Tell the decoder there is nothing to send anymore 1535 avcodec_send_packet(fCodecContext, NULL); 1536 1537 // Get any remaining frame 1538 int error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture); 1539 1540 if (error != 0 && error != AVERROR(EAGAIN)) { 1541 // video buffer is flushed successfully 1542 // (or there is an error, not much we can do about it) 1543 return B_LAST_BUFFER_ERROR; 1544 } 1545 1546 return _HandleNewVideoFrameAndUpdateSystemState(); 1547 } 1548 1549 1550 /*! 
\brief Updates relevant fields of the class member fHeader with the 1551 properties of the most recently decoded video frame. 1552 1553 It is assumed that this function is called only when the following asserts 1554 hold true: 1555 1. We actually got a new picture decoded by the video decoder. 1556 2. fHeader wasn't updated for the new picture yet. You MUST call this 1557 method only once per decoded video frame. 1558 3. This function MUST be called after 1559 _DeinterlaceAndColorConvertVideoFrame() as it relys on an updated 1560 fDecodedDataSizeInBytes. 1561 4. There will be at maximumn only one decoded video frame in our cache 1562 at any single point in time. Otherwise you couldn't tell to which 1563 cached decoded video frame the properties in fHeader relate to. 1564 5. AVCodecContext is still valid for this video frame (This is the case 1565 when this function is called after avcodec_decode_video2() and 1566 before the next call to avcodec_decode_video2(). 1567 */ 1568 void 1569 AVCodecDecoder::_UpdateMediaHeaderForVideoFrame() 1570 { 1571 fHeader.type = B_MEDIA_RAW_VIDEO; 1572 fHeader.file_pos = 0; 1573 fHeader.orig_size = 0; 1574 fHeader.start_time = fRawDecodedPicture->pkt_dts; 1575 // The pkt_dts is already in microseconds, even if ffmpeg docs says 1576 // 'in codec time_base units' 1577 fHeader.size_used = av_image_get_buffer_size( 1578 colorspace_to_pixfmt(fOutputColorSpace), fRawDecodedPicture->width, 1579 fRawDecodedPicture->height, 1); 1580 fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width; 1581 fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height; 1582 fHeader.u.raw_video.bytes_per_row 1583 = CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace, 1584 fRawDecodedPicture->width); 1585 fHeader.u.raw_video.field_gamma = 1.0; 1586 fHeader.u.raw_video.field_sequence = fFrame; 1587 fHeader.u.raw_video.field_number = 0; 1588 fHeader.u.raw_video.pulldown_number = 0; 1589 fHeader.u.raw_video.first_active_line = 1; 
1590 fHeader.u.raw_video.line_count = fRawDecodedPicture->height; 1591 1592 ConvertAVCodecContextToVideoAspectWidthAndHeight(*fCodecContext, 1593 fHeader.u.raw_video.pixel_width_aspect, 1594 fHeader.u.raw_video.pixel_height_aspect); 1595 1596 char timestamp[AV_TS_MAX_STRING_SIZE]; 1597 av_ts_make_time_string(timestamp, 1598 fRawDecodedPicture->best_effort_timestamp, &fCodecContext->time_base); 1599 1600 TRACE("[v] start_time=%s field_sequence=%lu\n", 1601 timestamp, fHeader.u.raw_video.field_sequence); 1602 } 1603 1604 1605 /*! \brief This function applies deinterlacing (only if needed) and color 1606 conversion to the video frame in fRawDecodedPicture. 1607 1608 It is assumed that fRawDecodedPicture wasn't deinterlaced and color 1609 converted yet (otherwise this function behaves in unknown manners). 1610 1611 This function MUST be called after _UpdateMediaHeaderForVideoFrame() as it 1612 relys on the fHeader.size_used and fHeader.u.raw_video.bytes_per_row fields 1613 for correct operation 1614 1615 You should only call this function when you got a new picture decoded by 1616 the video decoder. 1617 1618 When this function finishes the postprocessed video frame will be available 1619 in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes 1620 will be set accordingly). 1621 1622 \returns B_OK video frame successfully deinterlaced and color converted. 1623 \returns B_NO_MEMORY Not enough memory available for correct operation. 
1624 */ 1625 status_t 1626 AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame() 1627 { 1628 int displayWidth = fRawDecodedPicture->width; 1629 int displayHeight = fRawDecodedPicture->height; 1630 AVFrame deinterlacedPicture; 1631 bool useDeinterlacedPicture = false; 1632 1633 if (fRawDecodedPicture->interlaced_frame) { 1634 AVFrame rawPicture; 1635 rawPicture.data[0] = fRawDecodedPicture->data[0]; 1636 rawPicture.data[1] = fRawDecodedPicture->data[1]; 1637 rawPicture.data[2] = fRawDecodedPicture->data[2]; 1638 rawPicture.data[3] = fRawDecodedPicture->data[3]; 1639 rawPicture.linesize[0] = fRawDecodedPicture->linesize[0]; 1640 rawPicture.linesize[1] = fRawDecodedPicture->linesize[1]; 1641 rawPicture.linesize[2] = fRawDecodedPicture->linesize[2]; 1642 rawPicture.linesize[3] = fRawDecodedPicture->linesize[3]; 1643 1644 if (av_image_alloc(deinterlacedPicture.data, 1645 deinterlacedPicture.linesize, displayWidth, displayHeight, 1646 fCodecContext->pix_fmt, 1) < 0) 1647 return B_NO_MEMORY; 1648 1649 // deinterlace implemented using avfilter 1650 _ProcessFilterGraph(&deinterlacedPicture, &rawPicture, 1651 fCodecContext->pix_fmt, displayWidth, displayHeight); 1652 useDeinterlacedPicture = true; 1653 } 1654 1655 // Some decoders do not set pix_fmt until they have decoded 1 frame 1656 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 1657 if (fSwsContext == NULL) { 1658 fSwsContext = sws_getContext(displayWidth, displayHeight, 1659 fCodecContext->pix_fmt, displayWidth, displayHeight, 1660 colorspace_to_pixfmt(fOutputColorSpace), 1661 SWS_FAST_BILINEAR, NULL, NULL, NULL); 1662 } 1663 #else 1664 if (fFormatConversionFunc == NULL) { 1665 fFormatConversionFunc = resolve_colorspace(fOutputColorSpace, 1666 fCodecContext->pix_fmt, displayWidth, displayHeight); 1667 } 1668 #endif 1669 1670 fDecodedDataSizeInBytes = fHeader.size_used; 1671 1672 if (fDecodedData == NULL) { 1673 const size_t kOptimalAlignmentForColorConversion = 32; 1674 posix_memalign(reinterpret_cast<void**>(&fDecodedData), 
1675 kOptimalAlignmentForColorConversion, fDecodedDataSizeInBytes); 1676 } 1677 if (fDecodedData == NULL) 1678 return B_NO_MEMORY; 1679 1680 fPostProcessedDecodedPicture->data[0] = fDecodedData; 1681 fPostProcessedDecodedPicture->linesize[0] 1682 = fHeader.u.raw_video.bytes_per_row; 1683 1684 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 1685 if (fSwsContext != NULL) { 1686 #else 1687 if (fFormatConversionFunc != NULL) { 1688 #endif 1689 if (useDeinterlacedPicture) { 1690 AVFrame deinterlacedFrame; 1691 deinterlacedFrame.data[0] = deinterlacedPicture.data[0]; 1692 deinterlacedFrame.data[1] = deinterlacedPicture.data[1]; 1693 deinterlacedFrame.data[2] = deinterlacedPicture.data[2]; 1694 deinterlacedFrame.data[3] = deinterlacedPicture.data[3]; 1695 deinterlacedFrame.linesize[0] 1696 = deinterlacedPicture.linesize[0]; 1697 deinterlacedFrame.linesize[1] 1698 = deinterlacedPicture.linesize[1]; 1699 deinterlacedFrame.linesize[2] 1700 = deinterlacedPicture.linesize[2]; 1701 deinterlacedFrame.linesize[3] 1702 = deinterlacedPicture.linesize[3]; 1703 1704 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 1705 sws_scale(fSwsContext, deinterlacedFrame.data, 1706 deinterlacedFrame.linesize, 0, displayHeight, 1707 fPostProcessedDecodedPicture->data, 1708 fPostProcessedDecodedPicture->linesize); 1709 #else 1710 (*fFormatConversionFunc)(&deinterlacedFrame, 1711 fPostProcessedDecodedPicture, displayWidth, displayHeight); 1712 #endif 1713 } else { 1714 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 1715 sws_scale(fSwsContext, fRawDecodedPicture->data, 1716 fRawDecodedPicture->linesize, 0, displayHeight, 1717 fPostProcessedDecodedPicture->data, 1718 fPostProcessedDecodedPicture->linesize); 1719 #else 1720 (*fFormatConversionFunc)(fRawDecodedPicture, 1721 fPostProcessedDecodedPicture, displayWidth, displayHeight); 1722 #endif 1723 } 1724 } 1725 1726 if (fRawDecodedPicture->interlaced_frame) 1727 av_freep(&deinterlacedPicture.data[0]); 1728 1729 return B_OK; 1730 } 1731 1732 1733 /*! 
\brief Init the deinterlace filter graph. 1734 1735 \returns B_OK the filter graph could be built. 1736 \returns B_BAD_VALUE something was wrong with building the graph. 1737 */ 1738 status_t 1739 AVCodecDecoder::_InitFilterGraph(enum AVPixelFormat pixfmt, int32 width, 1740 int32 height) 1741 { 1742 if (fFilterGraph != NULL) { 1743 av_frame_free(&fFilterFrame); 1744 avfilter_graph_free(&fFilterGraph); 1745 } 1746 1747 fFilterGraph = avfilter_graph_alloc(); 1748 1749 BString arguments; 1750 arguments.SetToFormat("buffer=video_size=%" B_PRId32 "x%" B_PRId32 1751 ":pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];[in]yadif[out];" 1752 "[out]buffersink", width, height, 1753 pixfmt); 1754 AVFilterInOut* inputs = NULL; 1755 AVFilterInOut* outputs = NULL; 1756 TRACE("[v] _InitFilterGraph(): %s\n", arguments.String()); 1757 int ret = avfilter_graph_parse2(fFilterGraph, arguments.String(), &inputs, 1758 &outputs); 1759 if (ret < 0) { 1760 fprintf(stderr, "avfilter_graph_parse2() failed\n"); 1761 return B_BAD_VALUE; 1762 } 1763 1764 ret = avfilter_graph_config(fFilterGraph, NULL); 1765 if (ret < 0) { 1766 fprintf(stderr, "avfilter_graph_config() failed\n"); 1767 return B_BAD_VALUE; 1768 } 1769 1770 fBufferSourceContext = avfilter_graph_get_filter(fFilterGraph, 1771 "Parsed_buffer_0"); 1772 fBufferSinkContext = avfilter_graph_get_filter(fFilterGraph, 1773 "Parsed_buffersink_2"); 1774 if (fBufferSourceContext == NULL || fBufferSinkContext == NULL) { 1775 fprintf(stderr, "avfilter_graph_get_filter() failed\n"); 1776 return B_BAD_VALUE; 1777 } 1778 fFilterFrame = av_frame_alloc(); 1779 fLastWidth = width; 1780 fLastHeight = height; 1781 fLastPixfmt = pixfmt; 1782 1783 return B_OK; 1784 } 1785 1786 1787 /*! \brief Process an AVPicture with the deinterlace filter graph. 1788 1789 We decode exactly one video frame into dst. 1790 Equivalent function for avpicture_deinterlace() from version 2.x. 1791 1792 \returns B_OK video frame successfully deinterlaced. 
1793 \returns B_BAD_DATA No frame could be output. 1794 \returns B_NO_MEMORY Not enough memory available for correct operation. 1795 */ 1796 status_t 1797 AVCodecDecoder::_ProcessFilterGraph(AVFrame *dst, const AVFrame *src, 1798 enum AVPixelFormat pixfmt, int32 width, int32 height) 1799 { 1800 if (fFilterGraph == NULL || width != fLastWidth 1801 || height != fLastHeight || pixfmt != fLastPixfmt) { 1802 1803 status_t err = _InitFilterGraph(pixfmt, width, height); 1804 if (err != B_OK) 1805 return err; 1806 } 1807 1808 memcpy(fFilterFrame->data, src->data, sizeof(src->data)); 1809 memcpy(fFilterFrame->linesize, src->linesize, sizeof(src->linesize)); 1810 fFilterFrame->width = width; 1811 fFilterFrame->height = height; 1812 fFilterFrame->format = pixfmt; 1813 1814 int ret = av_buffersrc_add_frame(fBufferSourceContext, fFilterFrame); 1815 if (ret < 0) 1816 return B_NO_MEMORY; 1817 1818 ret = av_buffersink_get_frame(fBufferSinkContext, fFilterFrame); 1819 if (ret < 0) 1820 return B_BAD_DATA; 1821 1822 av_image_copy(dst->data, dst->linesize, (const uint8**)fFilterFrame->data, 1823 fFilterFrame->linesize, pixfmt, width, height); 1824 av_frame_unref(fFilterFrame); 1825 return B_OK; 1826 } 1827