/*
 * Copyright (C) 2001 Carlos Hasan
 * Copyright (C) 2001 François Revol
 * Copyright (C) 2001 Axel Dörfler
 * Copyright (C) 2004 Marcus Overhagen
 * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
 * Copyright (C) 2014 Colin Günther <coling@gmx.de>
 * Copyright (C) 2015 Adrien Destugues <pulkomandy@pulkomandy.tk>
 *
 * All rights reserved. Distributed under the terms of the MIT License.
 */

//! libavcodec based decoder for Haiku


#include "AVCodecDecoder.h"

#include <new>

#include <assert.h>
#include <string.h>

#include <Bitmap.h>
#include <Debug.h>
#include <String.h>

#include "Utilities.h"


#undef TRACE
//#define TRACE_AV_CODEC
#ifdef TRACE_AV_CODEC
#	define TRACE(x...)	printf(x)
#	define TRACE_AUDIO(x...)	printf(x)
#	define TRACE_VIDEO(x...)	printf(x)
#else
#	define TRACE(x...)
#	define TRACE_AUDIO(x...)
#	define TRACE_VIDEO(x...)
#endif

//#define LOG_STREAM_TO_FILE
#ifdef LOG_STREAM_TO_FILE
#	include <File.h>
static BFile sAudioStreamLogFile(
	"/boot/home/Desktop/AVCodecDebugAudioStream.raw",
	B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
static BFile sVideoStreamLogFile(
	"/boot/home/Desktop/AVCodecDebugVideoStream.raw",
	B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
static int sDumpedPackets = 0;
#endif

// Alias FFmpeg's AVCodecID under the older CodecID name used in Setup().
typedef AVCodecID CodecID;

// Layout of the codec info buffer for B_WAV_FORMAT_FAMILY streams. The
// codec-specific extra data directly follows this header (see Setup()).
// NOTE(review): presumably mirrors Microsoft's WAVEFORMATEX — confirm.
struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;
	// extra_data[extra_size]
} _PACKED;

// Stored in fRawDecodedAudio->opaque: audio properties of the first frame
// currently accumulated in fDecodedData (filled in by
// _MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()).
struct avformat_codec_context {
	int sample_rate;
	int channels;
};


// profiling related globals
#define DO_PROFILING 0
#if DO_PROFILING
static bigtime_t decodingTime = 0;
static bigtime_t conversionTime = 0;
static long profileCounter = 0;
#endif


/*!	\brief Allocates the libavcodec context and the AVFrame buffers used for
	decoding and post-processing, and applies conservative error-handling
	defaults to the codec context.
*/
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fCodecContext(avcodec_alloc_context3(NULL)),
	fResampleContext(NULL),
	fDecodedData(NULL),
	fDecodedDataSizeInBytes(0),
	fPostProcessedDecodedPicture(av_frame_alloc()),
	fRawDecodedPicture(av_frame_alloc()),
	fRawDecodedAudio(av_frame_alloc()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fOutputColorSpace(B_NO_COLOR_SPACE),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),
	fInputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fDecodedDataBuffer(av_frame_alloc()),
	fDecodedDataBufferOffset(0),
	fDecodedDataBufferSize(0),
	fBufferSinkContext(NULL),
	fBufferSourceContext(NULL),
	fFilterGraph(NULL),
	fFilterFrame(NULL)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	system_info info;
	get_system_info(&info);

	// Only report errors that can be reliably detected, and let the codec
	// conceal decode errors instead of aborting on them.
	fCodecContext->err_recognition = AV_EF_CAREFUL;
	fCodecContext->error_concealment = 3;
	// Use one decoding thread per CPU.
	fCodecContext->thread_count = info.cpu_count;
}


/*!	\brief Releases the codec and every FFmpeg-allocated buffer/context that
	the constructor (or later decoding) acquired.
*/
AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio?('a'):('v'));

#if DO_PROFILING
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%lld)\n",
			fIsAudio?('a'):('v'), decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	// Close the codec before freeing its context below.
	if (fCodecInitDone)
		avcodec_close(fCodecContext);

	swr_free(&fResampleContext);
	free(fChunkBuffer);
	free(fDecodedData);

	av_free(fPostProcessedDecodedPicture);
	av_free(fRawDecodedPicture);
	av_free(fRawDecodedAudio->opaque);
	av_free(fRawDecodedAudio);
	av_free(fCodecContext);
	av_free(fDecodedDataBuffer);

av_frame_free(&fFilterFrame); 165 avfilter_graph_free(&fFilterGraph); 166 167 #if USE_SWS_FOR_COLOR_SPACE_CONVERSION 168 if (fSwsContext != NULL) 169 sws_freeContext(fSwsContext); 170 #endif 171 172 delete[] fExtraData; 173 } 174 175 176 void 177 AVCodecDecoder::GetCodecInfo(media_codec_info* mci) 178 { 179 snprintf(mci->short_name, 32, "%s", fCodec->name); 180 snprintf(mci->pretty_name, 96, "%s", fCodec->long_name); 181 mci->id = 0; 182 mci->sub_id = fCodec->id; 183 } 184 185 186 status_t 187 AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer, 188 size_t infoSize) 189 { 190 if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO 191 && ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO) 192 return B_ERROR; 193 194 fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO); 195 TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio?('a'):('v')); 196 197 #ifdef TRACE_AV_CODEC 198 char buffer[1024]; 199 string_for_format(*ioEncodedFormat, buffer, sizeof(buffer)); 200 TRACE("[%c] input_format = %s\n", fIsAudio?('a'):('v'), buffer); 201 TRACE("[%c] infoSize = %ld\n", fIsAudio?('a'):('v'), infoSize); 202 TRACE("[%c] user_data_type = %08lx\n", fIsAudio?('a'):('v'), 203 ioEncodedFormat->user_data_type); 204 TRACE("[%c] meta_data_size = %ld\n", fIsAudio?('a'):('v'), 205 ioEncodedFormat->MetaDataSize()); 206 #endif 207 208 media_format_description description; 209 if (BMediaFormats().GetCodeFor(*ioEncodedFormat, 210 B_MISC_FORMAT_FAMILY, &description) == B_OK) { 211 if (description.u.misc.file_format != 'ffmp') 212 return B_NOT_SUPPORTED; 213 fCodec = avcodec_find_decoder(static_cast<CodecID>( 214 description.u.misc.codec)); 215 if (fCodec == NULL) { 216 TRACE(" unable to find the correct FFmpeg " 217 "decoder (id = %lu)\n", description.u.misc.codec); 218 return B_ERROR; 219 } 220 TRACE(" found decoder %s\n", fCodec->name); 221 222 const void* extraData = infoBuffer; 223 fExtraDataSize = infoSize; 224 if (description.family == B_WAV_FORMAT_FAMILY 225 && infoSize 
>= sizeof(wave_format_ex)) { 226 TRACE(" trying to use wave_format_ex\n"); 227 // Special case extra data in B_WAV_FORMAT_FAMILY 228 const wave_format_ex* waveFormatData 229 = (const wave_format_ex*)infoBuffer; 230 231 size_t waveFormatSize = infoSize; 232 if (waveFormatData != NULL && waveFormatSize > 0) { 233 fBlockAlign = waveFormatData->block_align; 234 TRACE(" found block align: %d\n", fBlockAlign); 235 fExtraDataSize = waveFormatData->extra_size; 236 // skip the wave_format_ex from the extra data. 237 extraData = waveFormatData + 1; 238 } 239 } else { 240 if (fIsAudio) { 241 fBlockAlign 242 = ioEncodedFormat->u.encoded_audio.output.buffer_size; 243 TRACE(" using buffer_size as block align: %d\n", 244 fBlockAlign); 245 } 246 } 247 if (extraData != NULL && fExtraDataSize > 0) { 248 TRACE("AVCodecDecoder: extra data size %ld\n", infoSize); 249 delete[] fExtraData; 250 fExtraData = new(std::nothrow) char[fExtraDataSize]; 251 if (fExtraData != NULL) 252 memcpy(fExtraData, infoBuffer, fExtraDataSize); 253 else 254 fExtraDataSize = 0; 255 } 256 257 fInputFormat = *ioEncodedFormat; 258 return B_OK; 259 } else { 260 TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n"); 261 } 262 263 printf("AVCodecDecoder::Setup failed!\n"); 264 return B_ERROR; 265 } 266 267 268 status_t 269 AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time) 270 { 271 status_t ret = B_OK; 272 // Reset the FFmpeg codec to flush buffers, so we keep the sync 273 if (fCodecInitDone) { 274 avcodec_flush_buffers(fCodecContext); 275 _ResetTempPacket(); 276 } 277 278 // Flush internal buffers as well. 
	// Drop any partially consumed chunk and decoded data left from before
	// the seek.
	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;
	fDecodedDataBufferOffset = 0;
	fDecodedDataBufferSize = 0;
	fDecodedDataSizeInBytes = 0;

	fFrame = frame;

	return ret;
}


/*!	\brief Dispatches output format negotiation to the audio or video path,
	depending on the stream type established in Setup().
*/
status_t
AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
		fIsAudio?('a'):('v'));

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("  [%c]  requested format = %s\n", fIsAudio?('a'):('v'), buffer);
#endif

	if (fIsAudio)
		return _NegotiateAudioOutputFormat(inOutFormat);
	else
		return _NegotiateVideoOutputFormat(inOutFormat);
}


/*!	\brief Public decode entry point: fills outBuffer with decoded audio or
	video data.

	Returns B_NO_INIT until a codec was opened by NegotiateOutputFormat().
*/
status_t
AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	if (!fCodecInitDone)
		return B_NO_INIT;

	status_t ret;
	if (fIsAudio)
		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
	else
		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);

	return ret;
}


// #pragma mark -


// Re-initializes fTempPacket to an empty packet so the next decode run
// starts from a clean state.
void
AVCodecDecoder::_ResetTempPacket()
{
	av_init_packet(&fTempPacket);
	fTempPacket.size = 0;
	fTempPacket.data = NULL;
}


status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	_ApplyEssentialAudioContainerPropertiesToContext();
		// This makes audio formats play that encode the audio properties in
		// the audio container (e.g. WMA) and not in the audio frames
		// themself (e.g. MP3).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextAudioFrameChunk() will update the essential audio
		// format properties accordingly regardless of the settings here.

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fCodecContext);
	}

	// (Re)open the codec with the container properties applied above.
	if (avcodec_open2(fCodecContext, fCodec, NULL) >= 0)
		fCodecInitDone = true;
	else {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fDecodedDataBufferOffset = 0;
	fDecodedDataBufferSize = 0;

	_ResetTempPacket();

	// Decode one chunk up front so that sample format / rate / channel count
	// queried below reflect what the codec actually produces.
	status_t statusOfDecodingFirstFrameChunk = _DecodeNextAudioFrameChunk();
	if (statusOfDecodingFirstFrameChunk != B_OK) {
		TRACE("[a] decoding first audio frame chunk failed\n");
		return B_ERROR;
	}

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate = fCodecContext->sample_rate;
	outputAudioFormat.channel_count = fCodecContext->channels;
	ConvertAVSampleFormatToRawAudioFormat(fCodecContext->sample_fmt,
		outputAudioFormat.format);
	// Check that format is not still a wild card!
	if (outputAudioFormat.format == 0) {
		TRACE("  format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size;
	// Check that buffer_size has a sane value
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	if (outputAudioFormat.buffer_size == 0) {
		outputAudioFormat.buffer_size = 512 * sampleSize
			* outputAudioFormat.channel_count;
	}

	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	// Initialize variables needed to manage decoding as much audio frames as
	// needed to fill the buffer_size.
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt))
		fInputFrameSize = sampleSize;
	else
		fInputFrameSize = fOutputFrameSize;

	fRawDecodedAudio->opaque
		= av_realloc(fRawDecodedAudio->opaque, sizeof(avformat_codec_context));
	if (fRawDecodedAudio->opaque == NULL)
		return B_NO_MEMORY;

	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
		fResampleContext = swr_alloc_set_opts(NULL,
			fCodecContext->channel_layout,
			fCodecContext->request_sample_fmt,
			fCodecContext->sample_rate,
			fCodecContext->channel_layout,
			fCodecContext->sample_fmt,
			fCodecContext->sample_rate,
			0, NULL);
		swr_init(fResampleContext);
	}

	TRACE("  bit_rate = %d, sample_rate = %d, channels = %d, "
		"output frame size: %d, count: %ld, rate: %.2f\n",
		fCodecContext->bit_rate, fCodecContext->sample_rate, fCodecContext->channels,
		fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	return B_OK;
}


status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	_ApplyEssentialVideoContainerPropertiesToContext();
		// This makes video formats play that encode the video properties in
		// the video container (e.g. WMV) and not in the video frames
		// themself (e.g. MPEG2).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextVideoFrame() will update the essential video format
		// properties accordingly regardless of the settings here.

	bool codecCanHandleIncompleteFrames
		= (fCodec->capabilities & AV_CODEC_CAP_TRUNCATED) != 0;
	if (codecCanHandleIncompleteFrames) {
		// Expect and handle video frames to be split across consecutive
		// data chunks.
		fCodecContext->flags |= AV_CODEC_FLAG_TRUNCATED;
	}

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fCodecContext);
	}

	if (avcodec_open2(fCodecContext, fCodec, NULL) >= 0)
		fCodecInitDone = true;
	else {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fOutputColorSpace = B_RGB32;
#else
	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;
#endif

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;

	_ResetTempPacket();

	// Decode the first frame up front so fHeader below carries the actual
	// frame geometry reported by the codec.
	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	// Base the raw output format on the container's video parameters, then
	// override with what the first decoded frame reported (fHeader).
	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;
	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active
		= fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect
		= fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect
		= fHeader.u.raw_video.pixel_height_aspect;
#if 0
	// This was added by Colin Günther in order to handle streams with a
	// variable frame rate. fOutputFrameRate is computed from the stream
	// time_base, but it actually assumes a timebase equal to the FPS. As far
	// as I can see, a stream with a variable frame rate would have a higher
	// resolution time_base and increment the pts (presentation time) of each
	// frame by a value bigger than one.
	//
	// Fixed rate stream:
	// time_base = 1/50s, frame PTS = 1, 2, 3... (for 50Hz)
	//
	// Variable rate stream:
	// time_base = 1/300s, frame PTS = 6, 12, 18, ... (for 50Hz)
	// time_base = 1/300s, frame PTS = 5, 10, 15, ... (for 60Hz)
	//
	// The fOutputFrameRate currently does not take this into account and
	// ignores the PTS. This results in playing the above sample at 300Hz
	// instead of 50 or 60.
	//
	// However, comparing the PTS for two consecutive implies we have already
	// decoded 2 frames, which may not be the case when this method is first
	// called.
	inOutFormat->u.raw_video.field_rate = fOutputFrameRate;
		// Was calculated by first call to _DecodeNextVideoFrame()
#endif
	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width
		= fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count
		= fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row
		= fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v] outFormat = %s\n", buffer);
	TRACE("  returned video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}


/*!	\brief Fills the outBuffer with one or more already decoded audio frames.

	Besides the main duty described above, this method also fills out the other
	output parameters as documented below.

	\param outBuffer Pointer to the output buffer to copy the decoded audio
		frames to.
	\param outFrameCount Pointer to the output variable to assign the number of
		copied audio frames (usually several audio frames at once).
	\param mediaHeader Pointer to the output media header that contains the
		properties of the decoded audio frame being the first in the outBuffer.
	\param info Specifies additional decoding parameters. (Note: unused).

	\returns B_OK Decoding audio frames succeeded.
	\returns B_LAST_BUFFER_ERROR There are no more audio frames available.
	\returns Other error codes
*/
status_t
AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
		mediaHeader->start_time / 1000000.0);

	// Reuse data decoded by a previous call if there is any; otherwise
	// decode a fresh batch of frames.
	status_t audioDecodingStatus
		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextAudioFrame();

	if (audioDecodingStatus != B_OK)
		return audioDecodingStatus;

	*outFrameCount = fDecodedDataSizeInBytes / fOutputFrameSize;
	*mediaHeader = fHeader;
	memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes);

	fDecodedDataSizeInBytes = 0;

	return B_OK;
}


/*!	\brief Fills the outBuffer with an already decoded video frame.

	Besides the main duty described above, this method also fills out the other
	output parameters as documented below.

	\param outBuffer Pointer to the output buffer to copy the decoded video
		frame to.
	\param outFrameCount Pointer to the output variable to assign the number of
		copied video frames (usually one video frame).
	\param mediaHeader Pointer to the output media header that contains the
		decoded video frame properties.
	\param info Specifies additional decoding parameters. (Note: unused).

	\returns B_OK Decoding a video frame succeeded.
	\returns B_LAST_BUFFER_ERROR There are no more video frames available.
	\returns Other error codes
*/
status_t
AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	status_t videoDecodingStatus
		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame();

	if (videoDecodingStatus != B_OK)
		return videoDecodingStatus;

	*outFrameCount = 1;
	*mediaHeader = fHeader;
	memcpy(outBuffer, fDecodedData, mediaHeader->size_used);

	fDecodedDataSizeInBytes = 0;

	return B_OK;
}


/*!	\brief Decodes next audio frame.

	We decode at least one audio frame into fDecodedData. To achieve this goal,
	we might need to request several chunks of encoded data resulting in a
	variable execution time of this function.

	The length of the decoded audio frame(s) is stored in
	fDecodedDataSizeInBytes. If this variable is greater than zero you can
	assert that all audio frames in fDecodedData are valid.

	It is assumed that the number of expected audio frames is stored in
	fOutputFrameCount. So _DecodeNextAudioFrame() must be called only after
	fOutputFrameCount has been set.

	Note: fOutputFrameCount contains the maximum number of frames a caller
	of BMediaDecoder::Decode() expects to receive. There is a direct
	relationship between fOutputFrameCount and the buffer size a caller of
	BMediaDecoder::Decode() will provide so we make sure to respect this limit
	for fDecodedDataSizeInBytes.

	On return with status code B_OK the following conditions hold true:
		1. fDecodedData contains as many audio frames as the caller of
		   BMediaDecoder::Decode() expects.
		2. fDecodedData contains fewer audio frames than the caller of
		   BMediaDecoder::Decode() expects only when one of the following
		   conditions hold true:
		       i  No more audio frames left. Consecutive calls to
		          _DecodeNextAudioFrame() will then result in the return of
		          status code B_LAST_BUFFER_ERROR.
		       ii TODO: A change in the size of the audio frames.
		3. fHeader is populated with the audio frame properties of the first
		   audio frame in fDecodedData. Especially the start_time field of
		   fHeader relates to that first audio frame. Start times of
		   consecutive audio frames in fDecodedData have to be calculated
		   manually (using the frame rate and the frame duration) if the
		   caller needs them.

	TODO: Handle change of channel_count. Such a change results in a change of
	the audio frame size and thus has different buffer requirements.
	The most sane approach for implementing this is to return the audio frames
	that were still decoded with the previous channel_count and inform the
	client of BMediaDecoder::Decode() about the change so that it can adapt to
	it. Furthermore we need to adapt our fDecodedData to the new buffer size
	requirements accordingly.

	\returns B_OK when we successfully decoded enough audio frames
	\returns B_LAST_BUFFER_ERROR when there are no more audio frames available.
	\returns Other Errors
*/
status_t
AVCodecDecoder::_DecodeNextAudioFrame()
{
	assert(fTempPacket.size >= 0);
	assert(fDecodedDataSizeInBytes == 0);
		// _DecodeNextAudioFrame needs to be called on empty fDecodedData only!
		// If this assert holds wrong we have a bug somewhere.

	status_t resetStatus = _ResetRawDecodedAudio();
	if (resetStatus != B_OK)
		return resetStatus;

	while (fRawDecodedAudio->nb_samples < fOutputFrameCount) {
		_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow();

		// First drain frames already decoded into fDecodedDataBuffer before
		// asking the codec for more.
		bool decodedDataBufferHasData = fDecodedDataBufferSize > 0;
		if (decodedDataBufferHasData) {
			_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes();
			continue;
		}

		status_t decodeAudioChunkStatus = _DecodeNextAudioFrameChunk();
		if (decodeAudioChunkStatus == B_LAST_BUFFER_ERROR
				&& fRawDecodedAudio->nb_samples > 0)
			break;
				// Out of input but some frames were decoded: return what we
				// have; the next call will report B_LAST_BUFFER_ERROR.
		if (decodeAudioChunkStatus != B_OK)
			return decodeAudioChunkStatus;
	}

	fFrame += fRawDecodedAudio->nb_samples;
	fDecodedDataSizeInBytes = fRawDecodedAudio->linesize[0];

	_UpdateMediaHeaderForAudioFrame();

#ifdef DEBUG
	dump_ffframe_audio(fRawDecodedAudio, "ffaudi");
#endif

	TRACE_AUDIO("  frame count: %ld current: %lld\n",
		fRawDecodedAudio->nb_samples, fFrame);

	return B_OK;
}


/*!	\brief Applies all essential audio input properties to fCodecContext that were
	passed to AVCodecDecoder when Setup() was called.

	Note: This function must be called before the AVCodec is opened via
	avcodec_open2(). Otherwise the behaviour of FFMPEG's audio decoding
	function avcodec_receive_frame() is undefined.

	Essential properties applied from fInputFormat.u.encoded_audio:
		- bit_rate copied to fCodecContext->bit_rate
		- frame_size copied to fCodecContext->frame_size
		- output.format converted to fCodecContext->sample_fmt
		- output.frame_rate copied to fCodecContext->sample_rate
		- output.channel_count copied to fCodecContext->channels

	Other essential properties being applied:
		- fBlockAlign to fCodecContext->block_align
		- fExtraData to fCodecContext->extradata
		- fExtraDataSize to fCodecContext->extradata_size

	TODO: Either the following documentation section should be removed or this
	TODO when it is clear whether fInputFormat.MetaData() and
	fInputFormat.MetaDataSize() have to be applied to fCodecContext. See the related
	TODO in the method implementation.
	Only applied when fInputFormat.MetaDataSize() is greater than zero:
		- fInputFormat.MetaData() to fCodecContext->extradata
		- fInputFormat.MetaDataSize() to fCodecContext->extradata_size
*/
void
AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
{
	media_encoded_audio_format containerProperties
		= fInputFormat.u.encoded_audio;

	fCodecContext->bit_rate
		= static_cast<int>(containerProperties.bit_rate);
	fCodecContext->frame_size
		= static_cast<int>(containerProperties.frame_size);
	ConvertRawAudioFormatToAVSampleFormat(
		containerProperties.output.format, fCodecContext->sample_fmt);
	ConvertRawAudioFormatToAVSampleFormat(
		containerProperties.output.format, fCodecContext->request_sample_fmt);
	fCodecContext->sample_rate
		= static_cast<int>(containerProperties.output.frame_rate);
	fCodecContext->channels
		= static_cast<int>(containerProperties.output.channel_count);
	// Check that channel count is not still a wild card!
	if (fCodecContext->channels == 0) {
		TRACE("  channel_count still a wild-card, assuming stereo.\n");
		fCodecContext->channels = 2;
	}

	fCodecContext->block_align = fBlockAlign;
	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
	fCodecContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fCodecContext->extradata = static_cast<uint8_t*>(
			const_cast<void*>(fInputFormat.MetaData()));
		fCodecContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE("  bit_rate %d, sample_rate %d, channels %d, block_align %d, "
		"extradata_size %d\n",
		fCodecContext->bit_rate,
		fCodecContext->sample_rate,
		fCodecContext->channels,
		fCodecContext->block_align,
		fCodecContext->extradata_size);
}


/*!	\brief Resets important fields in fRawDecodedAudio to their default values.

	Note: Also initializes fDecodedData if not done already.

	\returns B_OK Resetting successfully completed.
	\returns B_NO_MEMORY No memory left for correct operation.
*/
status_t
AVCodecDecoder::_ResetRawDecodedAudio()
{
	// fDecodedData is sized once for the worst case: fOutputFrameCount
	// frames of fOutputFrameSize bytes each.
	if (fDecodedData == NULL) {
		size_t maximumSizeOfDecodedData = fOutputFrameCount * fOutputFrameSize;
		fDecodedData
			= static_cast<uint8_t*>(malloc(maximumSizeOfDecodedData));
	}
	if (fDecodedData == NULL)
		return B_NO_MEMORY;

	fRawDecodedAudio->data[0] = fDecodedData;
	fRawDecodedAudio->linesize[0] = 0;
	fRawDecodedAudio->format = AV_SAMPLE_FMT_NONE;
	fRawDecodedAudio->pkt_dts = AV_NOPTS_VALUE;
	fRawDecodedAudio->nb_samples = 0;
	memset(fRawDecodedAudio->opaque, 0, sizeof(avformat_codec_context));

	return B_OK;
}


/*!	\brief Checks fDecodedDataBufferSize and fTempPacket for invalid values,
	reports them and assigns valid values.

	Note: This method is intended to be called before any code is executed that
	deals with moving, loading or decoding any audio frames.
*/
void
AVCodecDecoder::_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow()
{
	if (fDecodedDataBufferSize < 0) {
		fprintf(stderr, "Decoding read past the end of the decoded data "
			"buffer! %" B_PRId32 "\n", fDecodedDataBufferSize);
		fDecodedDataBufferSize = 0;
	}
	if (fTempPacket.size < 0) {
		fprintf(stderr, "Decoding read past the end of the temp packet! %d\n",
			fTempPacket.size);
		fTempPacket.size = 0;
	}
}


/*!	\brief Moves audio frames from fDecodedDataBuffer to fRawDecodedAudio (and
	thus to fDecodedData) and updates the start times of fRawDecodedAudio,
	fDecodedDataBuffer and fTempPacket accordingly.

	When moving audio frames to fRawDecodedAudio this method also makes sure
	that the following important fields of fRawDecodedAudio are populated and
	updated with correct values:
		- fRawDecodedAudio->data[0]: Points to first free byte of fDecodedData
		- fRawDecodedAudio->linesize[0]: Total size of frames in fDecodedData
		- fRawDecodedAudio->format: Format of first audio frame
		- fRawDecodedAudio->pkt_dts: Start time of first audio frame
		- fRawDecodedAudio->nb_samples: Number of audio frames
		- fRawDecodedAudio->opaque: Contains the following fields for the first
		  audio frame:
		      - channels: Channel count of first audio frame
		      - sample_rate: Frame rate of first audio frame

	This function assumes to be called only when the following assumptions
	hold true:
		1. There are decoded audio frames available in fDecodedDataBuffer
		   meaning that fDecodedDataBufferSize is greater than zero.
		2. There is space left in fRawDecodedAudio to move some audio frames
		   in. This means that fRawDecodedAudio has lesser audio frames than
		   the maximum allowed (specified by fOutputFrameCount).
		3. The audio frame rate is known so that we can calculate the time
		   range (covered by the moved audio frames) to update the start times
		   accordingly.
		4. The field fRawDecodedAudio->opaque points to a memory block
		   representing a structure of type avformat_codec_context.

	After this function returns the caller can safely make the following
	assumptions:
		1. The number of decoded audio frames in fDecodedDataBuffer is
		   decreased though it may still be greater then zero.
		2. The number of frames in fRawDecodedAudio has increased and all
		   important fields are updated (see listing above).
		3. Start times of fDecodedDataBuffer and fTempPacket were increased
		   with the time range covered by the moved audio frames.

	Note: This function raises an exception (by calling the debugger), when
	fDecodedDataBufferSize is not a multiple of fOutputFrameSize.
*/
void
AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
{
	assert(fDecodedDataBufferSize > 0);
	assert(fRawDecodedAudio->nb_samples < fOutputFrameCount);
	assert(fOutputFrameRate > 0);

	int32 outFrames = fOutputFrameCount - fRawDecodedAudio->nb_samples;
	int32 inFrames = fDecodedDataBufferSize;

	int32 frames = min_c(outFrames, inFrames);
	if (frames == 0)
		debugger("fDecodedDataBufferSize not multiple of frame size!");

	// Some decoders do not support format conversion on themselves, or use
	// "planar" audio (each channel separated instead of interleaved samples).
	// In that case, we use swresample to convert the data
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
#if 0
		const uint8_t* ptr[8];
		for (int i = 0; i < 8; i++) {
			if (fDecodedDataBuffer->data[i] == NULL)
				ptr[i] = NULL;
			else
				ptr[i] = fDecodedDataBuffer->data[i] + fDecodedDataBufferOffset;
		}

		// When there are more input frames than space in the output buffer,
		// we could feed everything to swr and it would buffer the extra data.
		// However, there is no easy way to flush that data without feeding more
		// input, and it makes our timestamp computations fail.
		// So, we feed only as much frames as we can get out, and handle the
		// buffering ourselves.
		// TODO Ideally, we should try to size our output buffer so that it can
		// always hold all the output (swr provides helper functions for this)
		inFrames = frames;
		frames = swr_convert(fResampleContext, fRawDecodedAudio->data,
			outFrames, ptr, inFrames);

		if (frames < 0)
			debugger("resampling failed");
#else
		// interleave planar audio with same format
		uintptr_t out = (uintptr_t)fRawDecodedAudio->data[0];
		int32 offset = fDecodedDataBufferOffset;
		for (int i = 0; i < frames; i++) {
			for (int j = 0; j < fCodecContext->channels; j++) {
				memcpy((void*)out, fDecodedDataBuffer->data[j]
					+ offset, fInputFrameSize);
				out += fInputFrameSize;
			}
			offset += fInputFrameSize;
		}
		outFrames = frames;
		inFrames = frames;
#endif
	} else {
		memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
			+ fDecodedDataBufferOffset, frames * fOutputFrameSize);
		outFrames = frames;
		inFrames = frames;
	}

	size_t remainingSize = inFrames * fInputFrameSize;
	size_t decodedSize = outFrames * fOutputFrameSize;
	fDecodedDataBufferSize -= inFrames;

	// Only the first batch moved into fDecodedData defines the format,
	// start time and (via opaque) channel/rate properties of the output.
	bool firstAudioFramesCopiedToRawDecodedAudio
		= fRawDecodedAudio->data[0] != fDecodedData;
	if (!firstAudioFramesCopiedToRawDecodedAudio) {
		fRawDecodedAudio->format = fDecodedDataBuffer->format;
		fRawDecodedAudio->pkt_dts = fDecodedDataBuffer->pkt_dts;

		avformat_codec_context* codecContext
			= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
		codecContext->channels = fCodecContext->channels;
		codecContext->sample_rate = fCodecContext->sample_rate;
	}

	fRawDecodedAudio->data[0] += decodedSize;
	fRawDecodedAudio->linesize[0] += decodedSize;
	fRawDecodedAudio->nb_samples += outFrames;

	fDecodedDataBufferOffset += remainingSize;

	// Update start times accordingly
	bigtime_t framesTimeInterval = static_cast<bigtime_t>(
		(1000000LL * frames) / fOutputFrameRate);
	fDecodedDataBuffer->pkt_dts += framesTimeInterval;
		// Start time of buffer is updated in case that it contains
		// more audio frames to move.
	fTempPacket.dts += framesTimeInterval;
		// Start time of fTempPacket is updated in case the fTempPacket
		// contains more audio frames to decode.
}


/*!	\brief Decodes next chunk of audio frames.

	This method handles all the details of loading the input buffer
	(fChunkBuffer) at the right time and of calling FFMPEG often enough until
	some audio frames have been decoded.

	FFMPEG decides how many audio frames belong to a chunk. Because of that
	it is very likely that _DecodeNextAudioFrameChunk has to be called several
	times to decode enough audio frames to please the caller of
	BMediaDecoder::Decode().

	This function assumes to be called only when the following assumptions
	hold true:
		1. fDecodedDataBufferSize equals zero.

	After this function returns successfully the caller can safely make the
	following assumptions:
		1. fDecodedDataBufferSize is greater than zero.
		2. fDecodedDataBufferOffset is set to zero.
		3. fDecodedDataBuffer contains audio frames.


	\returns B_OK on successfully decoding one audio frame chunk.
	\returns B_LAST_BUFFER_ERROR No more audio frame chunks available. From
		this point on further calls will return this same error.
	\returns B_ERROR Decoding failed
*/
status_t
AVCodecDecoder::_DecodeNextAudioFrameChunk()
{
	assert(fDecodedDataBufferSize == 0);

	// Keep loading chunks and feeding the decoder until at least one audio
	// frame landed in fDecodedDataBuffer (or the stream ran out of chunks).
	while (fDecodedDataBufferSize == 0) {
		status_t loadingChunkStatus
			= _LoadNextChunkIfNeededAndAssignStartTime();
		if (loadingChunkStatus != B_OK)
			return loadingChunkStatus;

		status_t decodingStatus
			= _DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer();
		if (decodingStatus != B_OK) {
			// Assume the audio decoded until now is broken so replace it with
			// some silence.
			memset(fDecodedData, 0, fRawDecodedAudio->linesize[0]);

			if (!fAudioDecodeError) {
				// Report failure if not done already
				int32 chunkBufferOffset = fTempPacket.data - fChunkBuffer;
				printf("########### audio decode error, "
					"fTempPacket.size %d, fChunkBuffer data offset %" B_PRId32
					"\n", fTempPacket.size, chunkBufferOffset);
				fAudioDecodeError = true;
			}

			// Assume that next audio chunk can be decoded so keep decoding.
			continue;
		}

		// A successful decode clears the sticky error flag again.
		fAudioDecodeError = false;
	}

	return B_OK;
}


/*!	\brief Tries to decode at least one audio frame and store it in the
	fDecodedDataBuffer.

	This function assumes to be called only when the following assumptions
	hold true:
		1. fDecodedDataBufferSize equals zero.
		2. fTempPacket.size is greater than zero.

	After this function returns successfully the caller can safely make the
	following assumptions:
		1. fDecodedDataBufferSize is greater than zero in the common case.
		   Also see "Note" below.
		2. fTempPacket was updated to exclude the data chunk that was consumed
		   by avcodec_send_packet().
		3. fDecodedDataBufferOffset is set to zero.

	When this function failed to decode at least one audio frame due to a
	decoding error the caller can safely make the following assumptions:
		1. fDecodedDataBufferSize equals zero.
		2. fTempPacket.size equals zero.

	Note: It is possible that there wasn't any audio frame decoded into
	fDecodedDataBuffer after calling this function. This is normal and can
	happen when there was either a decoding error or there is some decoding
	delay in FFMPEGs audio decoder. Another call to this method is totally
	safe and is even expected as long as the calling assumptions hold true.

	\returns B_OK Decoding successful. fDecodedDataBuffer contains decoded
		audio frames only when fDecodedDataBufferSize is greater than zero.
		fDecodedDataBuffer is empty, when avcodec_receive_frame() didn't return
		audio frames due to delayed decoding or incomplete audio frames.
	\returns B_ERROR Decoding failed thus fDecodedDataBuffer contains no audio
		frames.
*/
status_t
AVCodecDecoder::_DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer()
{
	assert(fDecodedDataBufferSize == 0);

	// Reset the reusable output frame before handing it to FFMPEG.
	memset(fDecodedDataBuffer, 0, sizeof(AVFrame));
	av_frame_unref(fDecodedDataBuffer);
	fDecodedDataBufferOffset = 0;

	int error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
	if (error == AVERROR_EOF)
		return B_LAST_BUFFER_ERROR;

	if (error == AVERROR(EAGAIN)) {
		// We need to feed more data into the decoder
		avcodec_send_packet(fCodecContext, &fTempPacket);
			// NOTE(review): the return value of avcodec_send_packet() is
			// ignored here; if the decoder's input queue were full (EAGAIN)
			// this packet would be silently dropped — confirm this cannot
			// happen with the receive-before-send pattern used above.

		// All the data is always consumed by avcodec_send_packet
		fTempPacket.size = 0;

		// Try again to see if we can get some decoded audio out now
		error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
	}

	// nb_samples is the number of decoded audio frames now buffered;
	// clamp negative values (no/invalid frame) to zero.
	fDecodedDataBufferSize = fDecodedDataBuffer->nb_samples;
	if (fDecodedDataBufferSize < 0)
		fDecodedDataBufferSize = 0;

	if (error == 0)
		return B_OK;
	else
		return B_ERROR;
}


/*!
\brief Updates relevant fields of the class member fHeader with the 1157 properties of the most recently decoded audio frame. 1158 1159 The following fields of fHeader are updated: 1160 - fHeader.type 1161 - fHeader.file_pos 1162 - fHeader.orig_size 1163 - fHeader.start_time 1164 - fHeader.size_used 1165 - fHeader.u.raw_audio.frame_rate 1166 - fHeader.u.raw_audio.channel_count 1167 1168 It is assumed that this function is called only when the following asserts 1169 hold true: 1170 1. We actually got a new audio frame decoded by the audio decoder. 1171 2. fHeader wasn't updated for the new audio frame yet. You MUST call 1172 this method only once per decoded audio frame. 1173 3. fRawDecodedAudio's fields relate to the first audio frame contained 1174 in fDecodedData. Especially the following fields are of importance: 1175 - fRawDecodedAudio->pkt_dts: Start time of first audio frame 1176 - fRawDecodedAudio->opaque: Contains the following fields for 1177 the first audio frame: 1178 - channels: Channel count of first audio frame 1179 - sample_rate: Frame rate of first audio frame 1180 */ 1181 void 1182 AVCodecDecoder::_UpdateMediaHeaderForAudioFrame() 1183 { 1184 fHeader.type = B_MEDIA_RAW_AUDIO; 1185 fHeader.file_pos = 0; 1186 fHeader.orig_size = 0; 1187 fHeader.start_time = fRawDecodedAudio->pkt_dts; 1188 fHeader.size_used = fRawDecodedAudio->linesize[0]; 1189 1190 avformat_codec_context* codecContext 1191 = static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque); 1192 fHeader.u.raw_audio.channel_count = codecContext->channels; 1193 fHeader.u.raw_audio.frame_rate = codecContext->sample_rate; 1194 } 1195 1196 1197 /*! \brief Decodes next video frame. 1198 1199 We decode exactly one video frame into fDecodedData. To achieve this goal, 1200 we might need to request several chunks of encoded data resulting in a 1201 variable execution time of this function. 1202 1203 The length of the decoded video frame is stored in 1204 fDecodedDataSizeInBytes. 
If this variable is greater than zero, you can 1205 assert that there is a valid video frame available in fDecodedData. 1206 1207 The decoded video frame in fDecodedData has color space conversion and 1208 deinterlacing already applied. 1209 1210 To every decoded video frame there is a media_header populated in 1211 fHeader, containing the corresponding video frame properties. 1212 1213 Normally every decoded video frame has a start_time field populated in the 1214 associated fHeader, that determines the presentation time of the frame. 1215 This relationship will only hold true, when each data chunk that is 1216 provided via GetNextChunk() contains data for exactly one encoded video 1217 frame (one complete frame) - not more and not less. 1218 1219 We can decode data chunks that contain partial video frame data, too. In 1220 that case, you cannot trust the value of the start_time field in fHeader. 1221 We simply have no logic in place to establish a meaningful relationship 1222 between an incomplete frame and the start time it should be presented. 1223 Though this might change in the future. 1224 1225 We can decode data chunks that contain more than one video frame, too. In 1226 that case, you cannot trust the value of the start_time field in fHeader. 1227 We simply have no logic in place to track the start_time across multiple 1228 video frames. So a meaningful relationship between the 2nd, 3rd, ... frame 1229 and the start time it should be presented isn't established at the moment. 1230 Though this might change in the future. 1231 1232 More over the fOutputFrameRate variable is updated for every decoded video 1233 frame. 1234 1235 On first call the member variables fSwsContext / fFormatConversionFunc are 1236 initialized. 1237 1238 \returns B_OK when we successfully decoded one video frame 1239 \returns B_LAST_BUFFER_ERROR when there are no more video frames available. 1240 \returns B_NO_MEMORY when we have no memory left for correct operation. 
	\returns Other Errors
*/
status_t
AVCodecDecoder::_DecodeNextVideoFrame()
{
	int error;
	int send_error;

#if DO_PROFILING
	bigtime_t startTime = system_time();
#endif

	// First try to drain an already-decoded frame from the codec.
	error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);

	if (error == AVERROR_EOF)
		return B_LAST_BUFFER_ERROR;

	if (error == AVERROR(EAGAIN)) {
		// The decoder wants more input; feed chunks until a frame comes out.
		do {
			status_t loadingChunkStatus
				= _LoadNextChunkIfNeededAndAssignStartTime();
			if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
				return _FlushOneVideoFrameFromDecoderBuffer();
			if (loadingChunkStatus != B_OK) {
				TRACE("[v] AVCodecDecoder::_DecodeNextVideoFrame(): error from "
					"GetNextChunk(): %s\n", strerror(loadingChunkStatus));
				return loadingChunkStatus;
			}

			char timestamp[AV_TS_MAX_STRING_SIZE];
			av_ts_make_time_string(timestamp,
				fTempPacket.dts, &fCodecContext->time_base);
			TRACE("[v] Feed %d more bytes (dts %s)\n", fTempPacket.size,
				timestamp);

			send_error = avcodec_send_packet(fCodecContext, &fTempPacket);
			if (send_error < 0 && send_error != AVERROR(EAGAIN)) {
				// NOTE(review): this logs 'error' (the receive status)
				// although the failure came from 'send_error' — confirm
				// which value was intended.
				TRACE("[v] AVCodecDecoder: ignoring error in decoding frame "
					"%lld: %d\n", fFrame, error);
			}

			// Packet is consumed, clear it
			fTempPacket.data = NULL;
			fTempPacket.size = 0;

			error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
			if (error != 0 && error != AVERROR(EAGAIN)) {
				TRACE("[v] frame %lld - decoding error, error code: %d, "
					"chunk size: %ld\n", fFrame, error, fChunkBufferSize);
			}

		} while (error != 0);
	}

#if DO_PROFILING
	bigtime_t formatConversionStart = system_time();
#endif

	// Deinterlace / color convert the frame and fill in fHeader.
	status_t handleStatus = _HandleNewVideoFrameAndUpdateSystemState();
	if (handleStatus != B_OK)
		return handleStatus;

#if DO_PROFILING
	bigtime_t doneTime = system_time();
	decodingTime += formatConversionStart - startTime;
	conversionTime += doneTime - formatConversionStart;
	profileCounter++;
	if (!(fFrame % 5)) {
		printf("[v] profile: d1 = %lld, d2 = %lld (%lld) required %lld\n",
			decodingTime / profileCounter, conversionTime / profileCounter,
			fFrame, bigtime_t(1000000LL / fOutputFrameRate));
		decodingTime = 0;
		conversionTime = 0;
		profileCounter = 0;
	}
#endif
	return error;
		// error is always 0 at this point (the receive loop above only exits
		// on success), which is equivalent to B_OK.
}


/*!	\brief Applies all essential video input properties to fCodecContext that
	were passed to AVCodecDecoder when Setup() was called.

	Note: This function must be called before the AVCodec is opened via
	avcodec_open2(). Otherwise the behaviour of FFMPEG's video decoding
	function avcodec_decode_video2() is undefined.

	Essential properties applied from fInputFormat.u.encoded_video.output:
		- display.line_width copied to fCodecContext->width
		- display.line_count copied to fCodecContext->height
		- pixel_width_aspect and pixel_height_aspect converted to
		  fCodecContext->sample_aspect_ratio
		- field_rate converted to fCodecContext->time_base and
		  fCodecContext->ticks_per_frame

	Other essential properties being applied:
		- fExtraData to fCodecContext->extradata
		- fExtraDataSize to fCodecContext->extradata_size
*/
void
AVCodecDecoder::_ApplyEssentialVideoContainerPropertiesToContext()
{
	media_raw_video_format containerProperties
		= fInputFormat.u.encoded_video.output;

	fCodecContext->width = containerProperties.display.line_width;
	fCodecContext->height = containerProperties.display.line_count;

	// Only forward aspect / frame rate information the container actually
	// provides; zero values would be meaningless to FFMPEG.
	if (containerProperties.pixel_width_aspect > 0
		&& containerProperties.pixel_height_aspect > 0) {
		ConvertVideoAspectWidthAndHeightToAVCodecContext(
			containerProperties.pixel_width_aspect,
			containerProperties.pixel_height_aspect, *fCodecContext);
	}

	if (containerProperties.field_rate > 0.0) {
		ConvertVideoFrameRateToAVCodecContext(containerProperties.field_rate,
			*fCodecContext);
	}

	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
	fCodecContext->extradata_size = fExtraDataSize;
}


/*!	\brief Loads the next chunk into fChunkBuffer and assigns it (including
	the start time) to fTempPacket but only if fTempPacket is empty.

	\returns B_OK
		1. meaning: Next chunk is loaded.
		2. meaning: No need to load and assign anything. Proceed as usual.
	\returns B_LAST_BUFFER_ERROR No more chunks available. fChunkBuffer and
		fTempPacket are left untouched.
	\returns Other errors Caller should bail out because fChunkBuffer and
		fTempPacket are in unknown states. Normal operation cannot be
		guaranteed.
*/
status_t
AVCodecDecoder::_LoadNextChunkIfNeededAndAssignStartTime()
{
	// Nothing to do while the previous chunk hasn't been consumed yet.
	if (fTempPacket.size > 0)
		return B_OK;

	const void* chunkBuffer = NULL;
	size_t chunkBufferSize = 0;
		// In the case that GetNextChunk() returns an error fChunkBufferSize
		// should be left untouched.
	media_header chunkMediaHeader;

	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer, &chunkBufferSize,
		&chunkMediaHeader);
	if (getNextChunkStatus != B_OK)
		return getNextChunkStatus;

	status_t chunkBufferPaddingStatus
		= _CopyChunkToChunkBufferAndAddPadding(chunkBuffer, chunkBufferSize);
	if (chunkBufferPaddingStatus != B_OK)
		return chunkBufferPaddingStatus;

	fTempPacket.data = fChunkBuffer;
	fTempPacket.size = fChunkBufferSize;
	fTempPacket.dts = chunkMediaHeader.start_time;
		// Let FFMPEG handle the correct relationship between start_time and
		// decoded a/v frame. By doing so we are simply copying the way how it
		// is implemented in ffplay.c for video frames (for audio frames it
		// works, too, but isn't used by ffplay.c).
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502
		//
		// FIXME: Research how to establish a meaningful relationship between
		// start_time and decoded a/v frame when the received chunk buffer
		// contains partial a/v frames. Maybe some data formats do contain time
		// stamps (ake pts / dts fields) that can be evaluated by FFMPEG. But
		// as long as I don't have such video data to test it, it makes no
		// sense trying to implement it.
		//
		// FIXME: Implement tracking start_time of video frames originating in
		// data chunks that encode more than one video frame at a time. In that
		// case on would increment the start_time for each consecutive frame of
		// such a data chunk (like it is done for audio frame decoding). But as
		// long as I don't have such video data to test it, it makes no sense
		// to implement it.

#ifdef LOG_STREAM_TO_FILE
	BFile* logFile = fIsAudio ? &sAudioStreamLogFile : &sVideoStreamLogFile;
	if (sDumpedPackets < 100) {
		logFile->Write(chunkBuffer, fChunkBufferSize);
		printf("wrote %ld bytes\n", fChunkBufferSize);
		sDumpedPackets++;
	} else if (sDumpedPackets == 100)
		logFile->Unset();
#endif

	return B_OK;
}


/*!	\brief Copies a chunk into fChunkBuffer and adds a "safety net" of
	additional memory as required by FFMPEG for input buffers to video
	decoders.

	This is needed so that some decoders can read safely a predefined number of
	bytes at a time for performance optimization purposes.

	The additional memory has a size of AV_INPUT_BUFFER_PADDING_SIZE as defined
	in avcodec.h.

	Ownership of fChunkBuffer memory is with the class so it needs to be freed
	at the right times (on destruction, on seeking).
1449 1450 Also update fChunkBufferSize to reflect the size of the contained data 1451 (leaving out the padding). 1452 1453 \param chunk The chunk to copy. 1454 \param chunkSize Size of the chunk in bytes 1455 1456 \returns B_OK Padding was successful. You are responsible for releasing the 1457 allocated memory. fChunkBufferSize is set to chunkSize. 1458 \returns B_NO_MEMORY Padding failed. 1459 fChunkBuffer is set to NULL making it safe to call free() on it. 1460 fChunkBufferSize is set to 0 to reflect the size of fChunkBuffer. 1461 */ 1462 status_t 1463 AVCodecDecoder::_CopyChunkToChunkBufferAndAddPadding(const void* chunk, 1464 size_t chunkSize) 1465 { 1466 uint8_t* tmpBuffer = static_cast<uint8_t*>(realloc(fChunkBuffer, 1467 chunkSize + AV_INPUT_BUFFER_PADDING_SIZE)); 1468 if (tmpBuffer == NULL) { 1469 free(fChunkBuffer); 1470 fChunkBuffer = NULL; 1471 fChunkBufferSize = 0; 1472 return B_NO_MEMORY; 1473 } else { 1474 fChunkBuffer = tmpBuffer; 1475 } 1476 1477 memcpy(fChunkBuffer, chunk, chunkSize); 1478 memset(fChunkBuffer + chunkSize, 0, AV_INPUT_BUFFER_PADDING_SIZE); 1479 // Establish safety net, by zero'ing the padding area. 1480 1481 fChunkBufferSize = chunkSize; 1482 1483 return B_OK; 1484 } 1485 1486 1487 /*! \brief Executes all steps needed for a freshly decoded video frame. 1488 1489 \see _UpdateMediaHeaderForVideoFrame() and 1490 \see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to 1491 call this method. 1492 1493 \returns B_OK when video frame was handled successfully 1494 \returnb B_NO_MEMORY when no memory is left for correct operation. 
1495 */ 1496 status_t 1497 AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState() 1498 { 1499 _UpdateMediaHeaderForVideoFrame(); 1500 status_t postProcessStatus = _DeinterlaceAndColorConvertVideoFrame(); 1501 if (postProcessStatus != B_OK) 1502 return postProcessStatus; 1503 1504 ConvertAVCodecContextToVideoFrameRate(*fCodecContext, fOutputFrameRate); 1505 1506 #ifdef DEBUG 1507 dump_ffframe_video(fRawDecodedPicture, "ffpict"); 1508 #endif 1509 1510 fFrame++; 1511 1512 return B_OK; 1513 } 1514 1515 1516 /*! \brief Flushes one video frame - if any - still buffered by the decoder. 1517 1518 Some FFMPEG decoder are buffering video frames. To retrieve those buffered 1519 frames the decoder needs to be told so. 1520 1521 The intended use of this method is to call it, once there are no more data 1522 chunks for decoding left. Reframed in other words: Once GetNextChunk() 1523 returns with status B_LAST_BUFFER_ERROR it is time to start flushing. 1524 1525 \returns B_OK Retrieved one video frame, handled it accordingly and updated 1526 the system state accordingly. 1527 There maybe more video frames left. So it is valid for the client of 1528 AVCodecDecoder to call it one more time. 1529 1530 \returns B_LAST_BUFFER_ERROR No video frame left. 1531 The client of the AVCodecDecoder should stop calling it now. 1532 1533 \returns B_NO_MEMORY No memory left for correct operation. 1534 */ 1535 status_t 1536 AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer() 1537 { 1538 // Tell the decoder there is nothing to send anymore 1539 avcodec_send_packet(fCodecContext, NULL); 1540 1541 // Get any remaining frame 1542 int error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture); 1543 1544 if (error != 0 && error != AVERROR(EAGAIN)) { 1545 // video buffer is flushed successfully 1546 // (or there is an error, not much we can do about it) 1547 return B_LAST_BUFFER_ERROR; 1548 } 1549 1550 return _HandleNewVideoFrameAndUpdateSystemState(); 1551 } 1552 1553 1554 /*! 
\brief Updates relevant fields of the class member fHeader with the 1555 properties of the most recently decoded video frame. 1556 1557 It is assumed that this function is called only when the following asserts 1558 hold true: 1559 1. We actually got a new picture decoded by the video decoder. 1560 2. fHeader wasn't updated for the new picture yet. You MUST call this 1561 method only once per decoded video frame. 1562 3. This function MUST be called after 1563 _DeinterlaceAndColorConvertVideoFrame() as it relys on an updated 1564 fDecodedDataSizeInBytes. 1565 4. There will be at maximumn only one decoded video frame in our cache 1566 at any single point in time. Otherwise you couldn't tell to which 1567 cached decoded video frame the properties in fHeader relate to. 1568 5. AVCodecContext is still valid for this video frame (This is the case 1569 when this function is called after avcodec_decode_video2() and 1570 before the next call to avcodec_decode_video2(). 1571 */ 1572 void 1573 AVCodecDecoder::_UpdateMediaHeaderForVideoFrame() 1574 { 1575 fHeader.type = B_MEDIA_RAW_VIDEO; 1576 fHeader.file_pos = 0; 1577 fHeader.orig_size = 0; 1578 fHeader.start_time = fRawDecodedPicture->pkt_dts; 1579 // The pkt_dts is already in microseconds, even if ffmpeg docs says 1580 // 'in codec time_base units' 1581 fHeader.size_used = av_image_get_buffer_size( 1582 colorspace_to_pixfmt(fOutputColorSpace), fRawDecodedPicture->width, 1583 fRawDecodedPicture->height, 1); 1584 fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width; 1585 fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height; 1586 fHeader.u.raw_video.bytes_per_row 1587 = CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace, 1588 fRawDecodedPicture->width); 1589 fHeader.u.raw_video.field_gamma = 1.0; 1590 fHeader.u.raw_video.field_sequence = fFrame; 1591 fHeader.u.raw_video.field_number = 0; 1592 fHeader.u.raw_video.pulldown_number = 0; 1593 fHeader.u.raw_video.first_active_line = 1; 
1594 fHeader.u.raw_video.line_count = fRawDecodedPicture->height; 1595 1596 ConvertAVCodecContextToVideoAspectWidthAndHeight(*fCodecContext, 1597 fHeader.u.raw_video.pixel_width_aspect, 1598 fHeader.u.raw_video.pixel_height_aspect); 1599 1600 char timestamp[AV_TS_MAX_STRING_SIZE]; 1601 av_ts_make_time_string(timestamp, 1602 fRawDecodedPicture->best_effort_timestamp, &fCodecContext->time_base); 1603 1604 TRACE("[v] start_time=%s field_sequence=%lu\n", 1605 timestamp, fHeader.u.raw_video.field_sequence); 1606 } 1607 1608 1609 /*! \brief This function applies deinterlacing (only if needed) and color 1610 conversion to the video frame in fRawDecodedPicture. 1611 1612 It is assumed that fRawDecodedPicture wasn't deinterlaced and color 1613 converted yet (otherwise this function behaves in unknown manners). 1614 1615 This function MUST be called after _UpdateMediaHeaderForVideoFrame() as it 1616 relys on the fHeader.size_used and fHeader.u.raw_video.bytes_per_row fields 1617 for correct operation 1618 1619 You should only call this function when you got a new picture decoded by 1620 the video decoder. 1621 1622 When this function finishes the postprocessed video frame will be available 1623 in fPostProcessedDecodedPicture and fDecodedData (fDecodedDataSizeInBytes 1624 will be set accordingly). 1625 1626 \returns B_OK video frame successfully deinterlaced and color converted. 1627 \returns B_NO_MEMORY Not enough memory available for correct operation. 
*/
status_t
AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
{
	int displayWidth = fRawDecodedPicture->width;
	int displayHeight = fRawDecodedPicture->height;
	AVFrame deinterlacedPicture;
	bool useDeinterlacedPicture = false;

	if (fRawDecodedPicture->interlaced_frame) {
		// Shallow copy of the decoded picture's plane pointers; the filter
		// graph reads from this copy.
		AVFrame rawPicture;
		rawPicture.data[0] = fRawDecodedPicture->data[0];
		rawPicture.data[1] = fRawDecodedPicture->data[1];
		rawPicture.data[2] = fRawDecodedPicture->data[2];
		rawPicture.data[3] = fRawDecodedPicture->data[3];
		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];

		// Allocate the target planes; released via av_freep() at the end of
		// this function (only reached on the interlaced path).
		if (av_image_alloc(deinterlacedPicture.data,
				deinterlacedPicture.linesize, displayWidth, displayHeight,
				fCodecContext->pix_fmt, 1) < 0)
			return B_NO_MEMORY;

		// deinterlace implemented using avfilter
		// NOTE(review): the _ProcessFilterGraph() return value is ignored;
		// on failure the deinterlaced planes may be uninitialized — confirm
		// whether that is acceptable here.
		_ProcessFilterGraph(&deinterlacedPicture, &rawPicture,
			fCodecContext->pix_fmt, displayWidth, displayHeight);
		useDeinterlacedPicture = true;
	}

	// Some decoders do not set pix_fmt until they have decoded 1 frame
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		fSwsContext = sws_getContext(displayWidth, displayHeight,
			fCodecContext->pix_fmt, displayWidth, displayHeight,
			colorspace_to_pixfmt(fOutputColorSpace),
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	}
#else
	if (fFormatConversionFunc == NULL) {
		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
			fCodecContext->pix_fmt, displayWidth, displayHeight);
	}
#endif

	// size_used was computed by _UpdateMediaHeaderForVideoFrame() for the
	// output color space and dimensions.
	fDecodedDataSizeInBytes = fHeader.size_used;

	if (fDecodedData == NULL) {
		const size_t kOptimalAlignmentForColorConversion = 32;
		posix_memalign(reinterpret_cast<void**>(&fDecodedData),
			kOptimalAlignmentForColorConversion, fDecodedDataSizeInBytes);
			// Return value intentionally unchecked: fDecodedData was NULL
			// before this call, so the NULL test below catches the failure
			// case.
	}
	if (fDecodedData == NULL)
		return B_NO_MEMORY;

	fPostProcessedDecodedPicture->data[0] = fDecodedData;
	fPostProcessedDecodedPicture->linesize[0]
		= fHeader.u.raw_video.bytes_per_row;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL) {
#else
	if (fFormatConversionFunc != NULL) {
#endif
		if (useDeinterlacedPicture) {
			// Convert from the deinterlaced copy instead of the raw picture.
			AVFrame deinterlacedFrame;
			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
			deinterlacedFrame.linesize[0]
				= deinterlacedPicture.linesize[0];
			deinterlacedFrame.linesize[1]
				= deinterlacedPicture.linesize[1];
			deinterlacedFrame.linesize[2]
				= deinterlacedPicture.linesize[2];
			deinterlacedFrame.linesize[3]
				= deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, deinterlacedFrame.data,
				deinterlacedFrame.linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(&deinterlacedFrame,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, fRawDecodedPicture->data,
				fRawDecodedPicture->linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(fRawDecodedPicture,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		}
	}

	// Release the planes allocated by av_image_alloc() above.
	if (fRawDecodedPicture->interlaced_frame)
		av_freep(&deinterlacedPicture.data[0]);

	return B_OK;
}


/*!
	\brief Init the deinterlace filter graph.

	\returns B_OK the filter graph could be built.
	\returns B_BAD_VALUE something was wrong with building the graph.
*/
status_t
AVCodecDecoder::_InitFilterGraph(enum AVPixelFormat pixfmt, int32 width,
	int32 height)
{
	// Tear down any previously built graph (e.g. after a format change).
	if (fFilterGraph != NULL) {
		av_frame_free(&fFilterFrame);
		avfilter_graph_free(&fFilterGraph);
	}

	fFilterGraph = avfilter_graph_alloc();
		// NOTE(review): the result of avfilter_graph_alloc() (and of
		// av_frame_alloc() below) is not NULL-checked — confirm out-of-memory
		// handling is acceptable here.

	// Graph: buffer source -> yadif deinterlacer -> buffer sink.
	BString arguments;
	arguments.SetToFormat("buffer=video_size=%" B_PRId32 "x%" B_PRId32
		":pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];[in]yadif[out];"
		"[out]buffersink", width, height,
		pixfmt);
	AVFilterInOut* inputs = NULL;
	AVFilterInOut* outputs = NULL;
	TRACE("[v] _InitFilterGraph(): %s\n", arguments.String());
	int ret = avfilter_graph_parse2(fFilterGraph, arguments.String(), &inputs,
		&outputs);
	if (ret < 0) {
		fprintf(stderr, "avfilter_graph_parse2() failed\n");
		return B_BAD_VALUE;
	}

	ret = avfilter_graph_config(fFilterGraph, NULL);
	if (ret < 0) {
		fprintf(stderr, "avfilter_graph_config() failed\n");
		return B_BAD_VALUE;
	}

	// Filter instance names follow avfilter_graph_parse2()'s
	// "Parsed_<name>_<index>" naming of the graph description above.
	fBufferSourceContext = avfilter_graph_get_filter(fFilterGraph,
		"Parsed_buffer_0");
	fBufferSinkContext = avfilter_graph_get_filter(fFilterGraph,
		"Parsed_buffersink_2");
	if (fBufferSourceContext == NULL || fBufferSinkContext == NULL) {
		fprintf(stderr, "avfilter_graph_get_filter() failed\n");
		return B_BAD_VALUE;
	}
	fFilterFrame = av_frame_alloc();
	// Remember the configuration so _ProcessFilterGraph() can detect when
	// a rebuild is needed.
	fLastWidth = width;
	fLastHeight = height;
	fLastPixfmt = pixfmt;

	return B_OK;
}


/*!	\brief Process an AVPicture with the deinterlace filter graph.

	We decode exactly one video frame into dst.
	Equivalent function for avpicture_deinterlace() from version 2.x.

	\param dst Destination frame; its planes must already be allocated.
	\param src Source frame to deinterlace; only its plane pointers and line
		sizes are read.
	\param pixfmt Pixel format of both src and dst.
	\param width Frame width in pixels.
	\param height Frame height in pixels.

	\returns B_OK video frame successfully deinterlaced.
	\returns B_BAD_DATA No frame could be output.
	\returns B_NO_MEMORY Not enough memory available for correct operation.
*/
status_t
AVCodecDecoder::_ProcessFilterGraph(AVFrame *dst, const AVFrame *src,
	enum AVPixelFormat pixfmt, int32 width, int32 height)
{
	// (Re)build the graph lazily and whenever the frame geometry or pixel
	// format changed since the last call.
	if (fFilterGraph == NULL || width != fLastWidth
		|| height != fLastHeight || pixfmt != fLastPixfmt) {

		status_t err = _InitFilterGraph(pixfmt, width, height);
		if (err != B_OK)
			return err;
	}

	// Borrow the source's plane pointers; fFilterFrame does not own them.
	memcpy(fFilterFrame->data, src->data, sizeof(src->data));
	memcpy(fFilterFrame->linesize, src->linesize, sizeof(src->linesize));
	fFilterFrame->width = width;
	fFilterFrame->height = height;
	fFilterFrame->format = pixfmt;

	int ret = av_buffersrc_add_frame(fBufferSourceContext, fFilterFrame);
	if (ret < 0)
		return B_NO_MEMORY;

	// fFilterFrame is reused to receive the filtered output.
	ret = av_buffersink_get_frame(fBufferSinkContext, fFilterFrame);
	if (ret < 0)
		return B_BAD_DATA;

	av_image_copy(dst->data, dst->linesize, (const uint8**)fFilterFrame->data,
		fFilterFrame->linesize, pixfmt, width, height);
	av_frame_unref(fFilterFrame);
	return B_OK;
}