/*
 * Copyright (C) 2001 Carlos Hasan
 * Copyright (C) 2001 François Revol
 * Copyright (C) 2001 Axel Dörfler
 * Copyright (C) 2004 Marcus Overhagen
 * Copyright (C) 2009 Stephan Aßmus <superstippi@gmx.de>
 * Copyright (C) 2014 Colin Günther <coling@gmx.de>
 * Copyright (C) 2015 Adrien Destugues <pulkomandy@pulkomandy.tk>
 *
 * All rights reserved. Distributed under the terms of the MIT License.
 */

//! libavcodec based decoder for Haiku


#include "AVCodecDecoder.h"

#include <new>

#include <assert.h>
#include <string.h>

#include <Bitmap.h>
#include <Debug.h>
#include <OS.h>
#include <String.h>

#include "Utilities.h"


#undef TRACE
//#define TRACE_AV_CODEC
#ifdef TRACE_AV_CODEC
#	define TRACE(x...)	printf(x)
#	define TRACE_AUDIO(x...)	printf(x)
#	define TRACE_VIDEO(x...)	printf(x)
#else
#	define TRACE(x...)
#	define TRACE_AUDIO(x...)
#	define TRACE_VIDEO(x...)
#endif

//#define LOG_STREAM_TO_FILE
#ifdef LOG_STREAM_TO_FILE
#	include <File.h>
static BFile sAudioStreamLogFile(
	"/boot/home/Desktop/AVCodecDebugAudioStream.raw",
	B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
static BFile sVideoStreamLogFile(
	"/boot/home/Desktop/AVCodecDebugVideoStream.raw",
	B_CREATE_FILE | B_ERASE_FILE | B_WRITE_ONLY);
static int sDumpedPackets = 0;
#endif

typedef AVCodecID CodecID;

struct wave_format_ex {
	uint16 format_tag;
	uint16 channels;
	uint32 frames_per_sec;
	uint32 avg_bytes_per_sec;
	uint16 block_align;
	uint16 bits_per_sample;
	uint16 extra_size;
	// extra_data[extra_size]
} _PACKED;

struct avformat_codec_context {
	int sample_rate;
	int channels;
};


// profiling related globals
#define DO_PROFILING 0
#if DO_PROFILING
static bigtime_t decodingTime = 0;
static bigtime_t conversionTime = 0;
static long profileCounter = 0;
#endif


AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fCodecContext(NULL),
	fResampleContext(NULL),
	fDecodedData(NULL),
	fDecodedDataSizeInBytes(0),
	fPostProcessedDecodedPicture(av_frame_alloc()),
	fRawDecodedPicture(av_frame_alloc()),
	fRawDecodedAudio(av_frame_alloc()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fOutputColorSpace(B_NO_COLOR_SPACE),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),
	fInputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fDecodedDataBuffer(av_frame_alloc()),
	fDecodedDataBufferOffset(0),
	fDecodedDataBufferSize(0),
	fTempPacket(NULL),
	fBufferSinkContext(NULL),
	fBufferSourceContext(NULL),
	fFilterGraph(NULL),
	fFilterFrame(NULL)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");
}


AVCodecDecoder::~AVCodecDecoder()
{
	TRACE("[%c] AVCodecDecoder::~AVCodecDecoder()\n", fIsAudio ? 'a' : 'v');

#if DO_PROFILING
	if (profileCounter > 0) {
		printf("[%c] profile: d1 = %lld, d2 = %lld (%lld)\n",
			fIsAudio ?
				'a' : 'v', decodingTime / profileCounter,
			conversionTime / profileCounter, fFrame);
	}
#endif

	swr_free(&fResampleContext);
	free(fChunkBuffer);
	free(fDecodedData);

	av_frame_free(&fPostProcessedDecodedPicture);
	av_frame_free(&fRawDecodedPicture);
	av_free(fRawDecodedAudio->opaque);
	av_frame_free(&fRawDecodedAudio);
	if (fCodecContext != NULL) {
		fCodecContext->extradata = NULL;
		avcodec_free_context(&fCodecContext);
	}
	av_frame_free(&fDecodedDataBuffer);

	av_frame_free(&fFilterFrame);
	avfilter_graph_free(&fFilterGraph);

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
#endif

	delete[] fExtraData;

	av_packet_free(&fTempPacket);
}


void
AVCodecDecoder::GetCodecInfo(media_codec_info* mci)
{
	snprintf(mci->short_name, 32, "%s", fCodec->name);
	snprintf(mci->pretty_name, 96, "%s", fCodec->long_name);
	mci->id = 0;
	mci->sub_id = fCodec->id;
}


status_t
AVCodecDecoder::Setup(media_format* ioEncodedFormat, const void* infoBuffer,
	size_t infoSize)
{
	if (ioEncodedFormat->type != B_MEDIA_ENCODED_AUDIO
		&& ioEncodedFormat->type != B_MEDIA_ENCODED_VIDEO)
		return B_ERROR;

	fIsAudio = (ioEncodedFormat->type == B_MEDIA_ENCODED_AUDIO);
	TRACE("[%c] AVCodecDecoder::Setup()\n", fIsAudio ? 'a' : 'v');

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*ioEncodedFormat, buffer, sizeof(buffer));
	TRACE("[%c] input_format = %s\n", fIsAudio ? 'a' : 'v', buffer);
	TRACE("[%c] infoSize = %ld\n", fIsAudio ? 'a' : 'v', infoSize);
	TRACE("[%c] user_data_type = %08" B_PRIx32 "\n", fIsAudio ? 'a' : 'v',
		ioEncodedFormat->user_data_type);
	TRACE("[%c] meta_data_size = %" B_PRId32 "\n", fIsAudio ? 'a' : 'v',
		ioEncodedFormat->MetaDataSize());
#endif

	media_format_description description;
	if (BMediaFormats().GetCodeFor(*ioEncodedFormat,
			B_MISC_FORMAT_FAMILY, &description) == B_OK) {
		if (description.u.misc.file_format != 'ffmp')
			return B_NOT_SUPPORTED;
		fCodec = avcodec_find_decoder(static_cast<CodecID>(
			description.u.misc.codec));
		if (fCodec == NULL) {
			TRACE(" unable to find the correct FFmpeg decoder (id = %"
				B_PRIu32 ")\n", description.u.misc.codec);
			return B_ERROR;
		}
		TRACE(" found decoder %s\n", fCodec->name);

		const void* extraData = infoBuffer;
		fExtraDataSize = infoSize;
		if (description.family == B_WAV_FORMAT_FAMILY
				&& infoSize >= sizeof(wave_format_ex)) {
			TRACE(" trying to use wave_format_ex\n");
			// Special case extra data in B_WAV_FORMAT_FAMILY
			const wave_format_ex* waveFormatData
				= (const wave_format_ex*)infoBuffer;

			size_t waveFormatSize = infoSize;
			if (waveFormatData != NULL && waveFormatSize > 0) {
				fBlockAlign = waveFormatData->block_align;
				TRACE(" found block align: %d\n", fBlockAlign);
				fExtraDataSize = waveFormatData->extra_size;
				// skip the wave_format_ex from the extra data.
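				// The pointer math on the next line advances by exactly
				// one wave_format_ex (sizeof(wave_format_ex) bytes), so
				// extraData then points at the first byte of the
				// extra_data[] bytes that directly follow the packed
				// header in the info buffer.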
				extraData = waveFormatData + 1;
			}
		} else {
			if (fIsAudio) {
				fBlockAlign
					= ioEncodedFormat->u.encoded_audio.output.buffer_size;
				TRACE(" using buffer_size as block align: %d\n",
					fBlockAlign);
			}
		}
		if (extraData != NULL && fExtraDataSize > 0) {
			TRACE("AVCodecDecoder: extra data size %ld\n", infoSize);
			delete[] fExtraData;
			fExtraData = new(std::nothrow) char[fExtraDataSize];
			if (fExtraData != NULL)
				memcpy(fExtraData, infoBuffer, fExtraDataSize);
			else
				fExtraDataSize = 0;
		}

		fInputFormat = *ioEncodedFormat;
		return B_OK;
	} else {
		TRACE("AVCodecDecoder: BMediaFormats().GetCodeFor() failed.\n");
	}

	printf("AVCodecDecoder::Setup failed!\n");
	return B_ERROR;
}


status_t
AVCodecDecoder::SeekedTo(int64 frame, bigtime_t time)
{
	status_t ret = B_OK;
	// Reset the FFmpeg codec to flush buffers, so we keep the sync
	if (fCodecInitDone) {
		avcodec_flush_buffers(fCodecContext);
		_ResetTempPacket();
	}

	// Flush internal buffers as well.
	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;
	fDecodedDataBufferOffset = 0;
	fDecodedDataBufferSize = 0;
	fDecodedDataSizeInBytes = 0;

	fFrame = frame;

	return ret;
}


status_t
AVCodecDecoder::NegotiateOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::NegotiateOutputFormat() [%c] \n",
		fIsAudio ? 'a' : 'v');

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE(" [%c] requested format = %s\n", fIsAudio ? 'a' : 'v', buffer);
#endif

	// close any previous instance
	if (fCodecContext != NULL) {
		fCodecContext->extradata = NULL;
		avcodec_free_context(&fCodecContext);
	}

	fCodecContext = avcodec_alloc_context3(fCodec);
	fCodecInitDone = false;

	system_info info;
	get_system_info(&info);

	fCodecContext->err_recognition = AV_EF_CAREFUL;
	fCodecContext->error_concealment = 3;
	fCodecContext->thread_count = info.cpu_count;

	if (fIsAudio)
		return _NegotiateAudioOutputFormat(inOutFormat);
	else
		return _NegotiateVideoOutputFormat(inOutFormat);
}


status_t
AVCodecDecoder::Decode(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	if (!fCodecInitDone)
		return B_NO_INIT;

	status_t ret;
	if (fIsAudio)
		ret = _DecodeAudio(outBuffer, outFrameCount, mediaHeader, info);
	else
		ret = _DecodeVideo(outBuffer, outFrameCount, mediaHeader, info);

	return ret;
}


// #pragma mark -


void
AVCodecDecoder::_ResetTempPacket()
{
	if (fTempPacket == NULL)
		fTempPacket = av_packet_alloc();
	fTempPacket->size = 0;
	fTempPacket->data = NULL;
}


static int
get_channel_count(AVCodecContext* context)
{
	return context->ch_layout.nb_channels;
}


static void
set_channel_count(AVCodecContext* context, int count)
{
	context->ch_layout.nb_channels = count;
}


status_t
AVCodecDecoder::_NegotiateAudioOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateAudioOutputFormat()\n");

	_ApplyEssentialAudioContainerPropertiesToContext();
		// This enables playback of audio formats that store the audio
		// properties in the audio container (e.g.
		// WMA) rather than in the audio frames themselves (e.g. MP3).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextAudioFrameChunk() will update the essential audio
		// format properties accordingly regardless of the settings here.

	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
		TRACE("avcodec_open2() failed to init codec!\n");
		return B_ERROR;
	}
	fCodecInitDone = true;

	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;
	fAudioDecodeError = false;
	fDecodedDataBufferOffset = 0;
	fDecodedDataBufferSize = 0;

	_ResetTempPacket();

	status_t statusOfDecodingFirstFrameChunk = _DecodeNextAudioFrameChunk();
	if (statusOfDecodingFirstFrameChunk != B_OK) {
		TRACE("[a] decoding first audio frame chunk failed\n");
		return B_ERROR;
	}

	media_multi_audio_format outputAudioFormat;
	outputAudioFormat = media_raw_audio_format::wildcard;
	outputAudioFormat.byte_order = B_MEDIA_HOST_ENDIAN;
	outputAudioFormat.frame_rate = fCodecContext->sample_rate;
	outputAudioFormat.channel_count = get_channel_count(fCodecContext);
	ConvertAVSampleFormatToRawAudioFormat(fCodecContext->sample_fmt,
		outputAudioFormat.format);
	// Check that the format is not still a wildcard!
	if (outputAudioFormat.format == 0) {
		TRACE(" format still a wild-card, assuming B_AUDIO_SHORT.\n");
		outputAudioFormat.format = media_raw_audio_format::B_AUDIO_SHORT;
	}
	outputAudioFormat.buffer_size = inOutFormat->u.raw_audio.buffer_size;
	// Check that buffer_size has a sane value
	size_t sampleSize = outputAudioFormat.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	if (outputAudioFormat.buffer_size == 0) {
		outputAudioFormat.buffer_size = 512 * sampleSize
			* outputAudioFormat.channel_count;
	}

	inOutFormat->type = B_MEDIA_RAW_AUDIO;
	inOutFormat->u.raw_audio = outputAudioFormat;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	// Initialize variables needed to manage decoding as many audio frames
	// as needed to fill the buffer_size.
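	// Worked example (illustrative numbers only): B_AUDIO_SHORT masked with
	// B_AUDIO_SIZE_MASK yields sampleSize = 2 bytes, so with stereo output
	// fOutputFrameSize = 2 * 2 = 4 bytes, and a hypothetical 4096 byte
	// buffer holds fOutputFrameCount = 4096 / 4 = 1024 frames. For planar
	// sample formats each channel lives in its own plane, so the per-plane
	// input frame is a single sample (fInputFrameSize = sampleSize below);
	// for interleaved formats it equals the full output frame.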
	fOutputFrameSize = sampleSize * outputAudioFormat.channel_count;
	fOutputFrameCount = outputAudioFormat.buffer_size / fOutputFrameSize;
	fOutputFrameRate = outputAudioFormat.frame_rate;
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt))
		fInputFrameSize = sampleSize;
	else
		fInputFrameSize = fOutputFrameSize;

	fRawDecodedAudio->opaque
		= av_realloc(fRawDecodedAudio->opaque, sizeof(avformat_codec_context));
	if (fRawDecodedAudio->opaque == NULL)
		return B_NO_MEMORY;

	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
		fResampleContext = NULL;
		swr_alloc_set_opts2(&fResampleContext,
			&fCodecContext->ch_layout,
			fCodecContext->request_sample_fmt,
			fCodecContext->sample_rate,
			&fCodecContext->ch_layout,
			fCodecContext->sample_fmt,
			fCodecContext->sample_rate,
			0, NULL);
		swr_init(fResampleContext);
	}

	TRACE(" bit_rate = %" PRId64 ", sample_rate = %d, channels = %d, "
		"output frame size: %d, count: %" B_PRId32 ", rate: %.2f\n",
		fCodecContext->bit_rate, fCodecContext->sample_rate,
		fCodecContext->ch_layout.nb_channels,
		fOutputFrameSize, fOutputFrameCount, fOutputFrameRate);

	return B_OK;
}


status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	TRACE(" requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	_ApplyEssentialVideoContainerPropertiesToContext();
		// This enables playback of video formats that store the video
		// properties in the video container (e.g. WMV) rather than in the
		// video frames themselves (e.g. MPEG2).
		// Note: Doing this step unconditionally is OK, because the first call
		// to _DecodeNextVideoFrame() will update the essential video format
		// properties accordingly regardless of the settings here.

	if (avcodec_open2(fCodecContext, fCodec, NULL) < 0) {
		TRACE("avcodec_open2() failed to init codec!\n");
		return B_ERROR;
	}
	fCodecInitDone = true;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fOutputColorSpace = B_RGB32;
#else
	// Make MediaPlayer happy (if not in RGB32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default color depth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;
#endif

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	free(fChunkBuffer);
	fChunkBuffer = NULL;
	fChunkBufferSize = 0;

	_ResetTempPacket();

	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by the first call to _DecodeNextVideoFrame() above.
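	// If they are still unset here, this is not fatal: some decoders only
	// publish their pixel format after decoding the first frame, and the
	// conversion context is then created lazily in
	// _DeinterlaceAndColorConvertVideoFrame().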
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;
	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active
		= fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect
		= fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect
		= fHeader.u.raw_video.pixel_height_aspect;
	// The framerate in fCodecContext is not always equivalent to the field
	// rate. Instead it can be some internal value of the codec, for example
	// mpeg4 uses a framerate of 90000 and then the video frames have
	// timestamps that are several hundred values apart. This allows for
	// example mixing 50 and 60Hz video in the same stream.
	//
	// Normally in ffmpeg, one would use av_guess_frame_rate to compute this,
	// but we can't do that here because we don't have direct access to the
	// AVFormatContext nor the AVStream (in our architecture these are only
	// available in the AVFormatReader class). So we provide a similar
	// implementation here, trying to guess from the input format properties
	// and the info for the first frame which we just decoded (that updates
	// fCodecContext inside ffmpeg).
	//
	// If we don't know, the field can also be set to 0, and we will still
	// provide correct presentation timestamps for each individual frame.
	//
	// TODO: The field_rate is twice the frame rate for interlaced streams,
	// so we need to determine whether we are decoding an interlaced stream,
	// and whether ffmpeg delivers every half-frame or not in that case
	// (since we let ffmpeg do the deinterlacing).
	float fromFormat = fInputFormat.u.encoded_video.output.field_rate;
	if (fromFormat < 70)
		inOutFormat->u.raw_video.field_rate = fromFormat;
	// See if the codec knows better (adapted from av_guess_frame_rate in
	// ffmpeg)
	AVRational codec_fr = fCodecContext->framerate;
	if (codec_fr.num > 0 && codec_fr.den > 0
		&& (fromFormat == 0 || av_q2d(codec_fr) < fromFormat * 0.7)) {
		inOutFormat->u.raw_video.field_rate = av_q2d(fCodecContext->framerate);
	}
	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width
		= fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count
		= fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row
		= fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v] outFormat = %s\n", buffer);
	TRACE(" returned video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}


/*! \brief Fills the outBuffer with one or more already decoded audio frames.

	Besides the main duty described above, this method also fills out the
	other output parameters as documented below.

	\param outBuffer Pointer to the output buffer to copy the decoded audio
		frames to.
	\param outFrameCount Pointer to the output variable to assign the number
		of copied audio frames (usually several audio frames at once).
	\param mediaHeader Pointer to the output media header that contains the
		properties of the first decoded audio frame in the outBuffer.
	\param info Specifies additional decoding parameters. (Note: unused).

	\returns B_OK Decoding audio frames succeeded.
	\returns B_LAST_BUFFER_ERROR There are no more audio frames available.
	\returns Other error codes
*/
status_t
AVCodecDecoder::_DecodeAudio(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
		mediaHeader->start_time / 1000000.0);

	status_t audioDecodingStatus
		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextAudioFrame();

	if (audioDecodingStatus != B_OK)
		return audioDecodingStatus;

	*outFrameCount = fDecodedDataSizeInBytes / fOutputFrameSize;
	*mediaHeader = fHeader;
	memcpy(outBuffer, fDecodedData, fDecodedDataSizeInBytes);

	fDecodedDataSizeInBytes = 0;

	return B_OK;
}


/*! \brief Fills the outBuffer with an already decoded video frame.

	Besides the main duty described above, this method also fills out the
	other output parameters as documented below.

	\param outBuffer Pointer to the output buffer to copy the decoded video
		frame to.
	\param outFrameCount Pointer to the output variable to assign the number
		of copied video frames (usually one video frame).
	\param mediaHeader Pointer to the output media header that contains the
		decoded video frame properties.
	\param info Specifies additional decoding parameters. (Note: unused).

	\returns B_OK Decoding a video frame succeeded.
	\returns B_LAST_BUFFER_ERROR There are no more video frames available.
	\returns Other error codes
*/
status_t
AVCodecDecoder::_DecodeVideo(void* outBuffer, int64* outFrameCount,
	media_header* mediaHeader, media_decode_info* info)
{
	status_t videoDecodingStatus
		= fDecodedDataSizeInBytes > 0 ? B_OK : _DecodeNextVideoFrame();

	if (videoDecodingStatus != B_OK)
		return videoDecodingStatus;

	*outFrameCount = 1;
	*mediaHeader = fHeader;
	memcpy(outBuffer, fDecodedData, mediaHeader->size_used);

	fDecodedDataSizeInBytes = 0;

	return B_OK;
}


/*! \brief Decodes the next audio frame.

	We decode at least one audio frame into fDecodedData. To achieve this
	goal, we might need to request several chunks of encoded data, resulting
	in a variable execution time of this function.

	The length of the decoded audio frame(s) is stored in
	fDecodedDataSizeInBytes. If this variable is greater than zero you can
	assert that all audio frames in fDecodedData are valid.

	It is assumed that the number of expected audio frames is stored in
	fOutputFrameCount. So _DecodeNextAudioFrame() must be called only after
	fOutputFrameCount has been set.

	Note: fOutputFrameCount contains the maximum number of frames a caller
	of BMediaDecoder::Decode() expects to receive.
	There is a direct relationship between fOutputFrameCount and the buffer
	size a caller of BMediaDecoder::Decode() will provide, so we make sure
	to respect this limit for fDecodedDataSizeInBytes.

	On return with status code B_OK the following conditions hold true:
		1. fDecodedData contains as many audio frames as the caller of
		   BMediaDecoder::Decode() expects.
		2. fDecodedData contains fewer audio frames than the caller of
		   BMediaDecoder::Decode() expects only when one of the following
		   conditions holds true:
		       i  No more audio frames left. Consecutive calls to
		          _DecodeNextAudioFrame() will then result in the return of
		          status code B_LAST_BUFFER_ERROR.
		       ii TODO: A change in the size of the audio frames.
		3. fHeader is populated with the audio frame properties of the first
		   audio frame in fDecodedData. Especially the start_time field of
		   fHeader relates to that first audio frame. Start times of
		   consecutive audio frames in fDecodedData have to be calculated
		   manually (using the frame rate and the frame duration) if the
		   caller needs them.

	TODO: Handle change of channel_count. Such a change results in a change
	of the audio frame size and thus has different buffer requirements.
	The most sane approach for implementing this is to return the audio
	frames that were still decoded with the previous channel_count and to
	inform the client of BMediaDecoder::Decode() about the change so that it
	can adapt to it. Furthermore we need to adapt our fDecodedData to the
	new buffer size requirements accordingly.

	\returns B_OK when we successfully decoded enough audio frames
	\returns B_LAST_BUFFER_ERROR when there are no more audio frames
		available.
	\returns Other Errors
*/
status_t
AVCodecDecoder::_DecodeNextAudioFrame()
{
	assert(fTempPacket->size >= 0);
	assert(fDecodedDataSizeInBytes == 0);
		// _DecodeNextAudioFrame needs to be called on empty fDecodedData
		// only! If this assert does not hold, we have a bug somewhere.

	status_t resetStatus = _ResetRawDecodedAudio();
	if (resetStatus != B_OK)
		return resetStatus;

	while (fRawDecodedAudio->nb_samples < fOutputFrameCount) {
		_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow();

		bool decodedDataBufferHasData = fDecodedDataBufferSize > 0;
		if (decodedDataBufferHasData) {
			_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes();
			continue;
		}

		status_t decodeAudioChunkStatus = _DecodeNextAudioFrameChunk();
		if (decodeAudioChunkStatus == B_LAST_BUFFER_ERROR
				&& fRawDecodedAudio->nb_samples > 0)
			break;
		if (decodeAudioChunkStatus != B_OK)
			return decodeAudioChunkStatus;
	}

	fFrame += fRawDecodedAudio->nb_samples;
	fDecodedDataSizeInBytes = fRawDecodedAudio->linesize[0];

	_UpdateMediaHeaderForAudioFrame();

#ifdef DEBUG
	dump_ffframe_audio(fRawDecodedAudio, "ffaudi");
#endif

	TRACE_AUDIO(" frame count: %d current: %" B_PRId64 "\n",
		fRawDecodedAudio->nb_samples, fFrame);

	return B_OK;
}


/*! \brief Applies all essential audio input properties to fCodecContext
	that were passed to AVCodecDecoder when Setup() was called.

	Note: This function must be called before the AVCodec is opened via
	avcodec_open2(). Otherwise the behaviour of FFMPEG's audio decoding
	function avcodec_receive_frame() is undefined.

	Essential properties applied from fInputFormat.u.encoded_audio:
		- bit_rate copied to fCodecContext->bit_rate
		- frame_size copied to fCodecContext->frame_size
		- output.format converted to fCodecContext->sample_fmt
		- output.frame_rate copied to fCodecContext->sample_rate
		- output.channel_count applied to fCodecContext->ch_layout

	Other essential properties being applied:
		- fBlockAlign to fCodecContext->block_align
		- fExtraData to fCodecContext->extradata
		- fExtraDataSize to fCodecContext->extradata_size

	TODO: Either the following documentation section should be removed or
	this TODO when it is clear whether fInputFormat.MetaData() and
	fInputFormat.MetaDataSize() have to be applied to fCodecContext. See the
	related TODO in the method implementation.
	Only applied when fInputFormat.MetaDataSize() is greater than zero:
		- fInputFormat.MetaData() to fCodecContext->extradata
		- fInputFormat.MetaDataSize() to fCodecContext->extradata_size
*/
void
AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
{
	media_encoded_audio_format containerProperties
		= fInputFormat.u.encoded_audio;

	fCodecContext->bit_rate
		= static_cast<int>(containerProperties.bit_rate);
	fCodecContext->frame_size
		= static_cast<int>(containerProperties.frame_size);
	ConvertRawAudioFormatToAVSampleFormat(
		containerProperties.output.format, fCodecContext->sample_fmt);
	ConvertRawAudioFormatToAVSampleFormat(
		containerProperties.output.format, fCodecContext->request_sample_fmt);
	fCodecContext->sample_rate
		= static_cast<int>(containerProperties.output.frame_rate);
	int channel_count
		= static_cast<int>(containerProperties.output.channel_count);
	// Check that the channel count is not still a wildcard!
	if (channel_count == 0) {
		TRACE(" channel_count still a wild-card, assuming stereo.\n");
		set_channel_count(fCodecContext, 2);
	} else
		set_channel_count(fCodecContext, channel_count);

	fCodecContext->block_align = fBlockAlign;
	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
	fCodecContext->extradata_size = fExtraDataSize;

	// TODO: This probably needs to go away, there is some misconception
	// about extra data / info buffer and meta data. See
	// Reader::GetStreamInfo(). The AVFormatReader puts extradata and
	// extradata_size into media_format::MetaData(), but used to ignore
	// the infoBuffer passed to GetStreamInfo(). I think this may be why
	// the code below was added.
	if (fInputFormat.MetaDataSize() > 0) {
		fCodecContext->extradata = static_cast<uint8_t*>(
			const_cast<void*>(fInputFormat.MetaData()));
		fCodecContext->extradata_size = fInputFormat.MetaDataSize();
	}

	TRACE(" bit_rate %" PRId64 ", sample_rate %d, channels %d, "
		"block_align %d, extradata_size %d\n",
		fCodecContext->bit_rate,
		fCodecContext->sample_rate,
		fCodecContext->ch_layout.nb_channels,
		fCodecContext->block_align,
		fCodecContext->extradata_size);
}


/*! \brief Resets important fields in fRawDecodedAudio to their default
	values.

	Note: Also initializes fDecodedData if not done already.

	\returns B_OK Resetting successfully completed.
	\returns B_NO_MEMORY No memory left for correct operation.
*/
status_t
AVCodecDecoder::_ResetRawDecodedAudio()
{
	if (fDecodedData == NULL) {
		size_t maximumSizeOfDecodedData = fOutputFrameCount * fOutputFrameSize;
		fDecodedData
			= static_cast<uint8_t*>(malloc(maximumSizeOfDecodedData));
	}
	if (fDecodedData == NULL)
		return B_NO_MEMORY;

	fRawDecodedAudio->data[0] = fDecodedData;
	fRawDecodedAudio->linesize[0] = 0;
	fRawDecodedAudio->format = AV_SAMPLE_FMT_NONE;
	fRawDecodedAudio->pkt_dts = AV_NOPTS_VALUE;
	fRawDecodedAudio->nb_samples = 0;
	memset(fRawDecodedAudio->opaque, 0, sizeof(avformat_codec_context));

	return B_OK;
}


/*! \brief Checks fDecodedDataBufferSize and fTempPacket for invalid values,
	reports them and assigns valid values.

	Note: This method is intended to be called before any code is executed
	that deals with moving, loading or decoding any audio frames.
*/
void
AVCodecDecoder::_CheckAndFixConditionsThatHintAtBrokenAudioCodeBelow()
{
	if (fDecodedDataBufferSize < 0) {
		fprintf(stderr, "Decoding read past the end of the decoded data "
			"buffer! %" B_PRId32 "\n", fDecodedDataBufferSize);
		fDecodedDataBufferSize = 0;
	}
	if (fTempPacket->size < 0) {
		fprintf(stderr, "Decoding read past the end of the temp packet! %d\n",
			fTempPacket->size);
		fTempPacket->size = 0;
	}
}


/*! \brief Moves audio frames from fDecodedDataBuffer to fRawDecodedAudio (and
	thus to fDecodedData) and updates the start times of fRawDecodedAudio,
	fDecodedDataBuffer and fTempPacket accordingly.

	When moving audio frames to fRawDecodedAudio this method also makes sure
	that the following important fields of fRawDecodedAudio are populated and
	updated with correct values:
		- fRawDecodedAudio->data[0]: Points to first free byte of fDecodedData
		- fRawDecodedAudio->linesize[0]: Total size of frames in fDecodedData
		- fRawDecodedAudio->format: Format of first audio frame
		- fRawDecodedAudio->pkt_dts: Start time of first audio frame
		- fRawDecodedAudio->nb_samples: Number of audio frames
		- fRawDecodedAudio->opaque: Contains the following fields for the
		  first audio frame:
		      - channels: Channel count of first audio frame
		      - sample_rate: Frame rate of first audio frame

	This function assumes that the following conditions hold true when it is
	called:
		1. There are decoded audio frames available in fDecodedDataBuffer,
		   meaning that fDecodedDataBufferSize is greater than zero.
		2. There is space left in fRawDecodedAudio to move some audio frames
		   in. This means that fRawDecodedAudio has fewer audio frames than
		   the maximum allowed (specified by fOutputFrameCount).
		3. The audio frame rate is known so that we can calculate the time
		   range (covered by the moved audio frames) to update the start
		   times accordingly.
		4. The field fRawDecodedAudio->opaque points to a memory block
		   representing a structure of type avformat_codec_context.

	After this function returns the caller can safely make the following
	assumptions:
		1. The number of decoded audio frames in fDecodedDataBuffer is
		   decreased though it may still be greater than zero.
		2. The number of frames in fRawDecodedAudio has increased and all
		   important fields are updated (see listing above).
		3. Start times of fDecodedDataBuffer and fTempPacket were increased
		   by the time range covered by the moved audio frames.
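
	Illustration with made-up numbers: moving 512 frames at an output frame
	rate of 44100 Hz advances both start times by (1000000 * 512) / 44100,
	i.e. roughly 11610 microseconds.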

	Note: This function raises an exception (by calling the debugger), when
	fDecodedDataBufferSize is not a multiple of fOutputFrameSize.
*/
void
AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
{
	assert(fDecodedDataBufferSize > 0);
	assert(fRawDecodedAudio->nb_samples < fOutputFrameCount);
	assert(fOutputFrameRate > 0);

	int32 outFrames = fOutputFrameCount - fRawDecodedAudio->nb_samples;
	int32 inFrames = fDecodedDataBufferSize;

	int32 frames = min_c(outFrames, inFrames);
	if (frames == 0)
		debugger("fDecodedDataBufferSize not multiple of frame size!");

	// Some decoders do not support format conversion by themselves, or use
	// "planar" audio (each channel separated instead of interleaved samples).
	// In that case, we use swresample to convert the data.
	if (av_sample_fmt_is_planar(fCodecContext->sample_fmt)) {
#if 0
		const uint8_t* ptr[8];
		for (int i = 0; i < 8; i++) {
			if (fDecodedDataBuffer->data[i] == NULL)
				ptr[i] = NULL;
			else
				ptr[i] = fDecodedDataBuffer->data[i] + fDecodedDataBufferOffset;
		}

		// When there are more input frames than space in the output buffer,
		// we could feed everything to swr and it would buffer the extra data.
		// However, there is no easy way to flush that data without feeding
		// more input, and it makes our timestamp computations fail.
		// So, we feed only as many frames as we can get out, and handle the
		// buffering ourselves.
		// TODO: Ideally, we should try to size our output buffer so that it
		// can always hold all the output (swr provides helper functions for
		// this)
		inFrames = frames;
		frames = swr_convert(fResampleContext, fRawDecodedAudio->data,
			outFrames, ptr, inFrames);

		if (frames < 0)
			debugger("resampling failed");
#else
		// interleave planar audio with same format
		uintptr_t out = (uintptr_t)fRawDecodedAudio->data[0];
		int32 offset = fDecodedDataBufferOffset;
		for (int i = 0; i < frames; i++) {
			for (int j = 0; j < get_channel_count(fCodecContext); j++) {
				memcpy((void*)out, fDecodedDataBuffer->data[j]
					+ offset, fInputFrameSize);
				out += fInputFrameSize;
			}
			offset += fInputFrameSize;
		}
		outFrames = frames;
		inFrames = frames;
#endif
	} else {
		memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
			+ fDecodedDataBufferOffset, frames * fOutputFrameSize);
		outFrames = frames;
		inFrames = frames;
	}

	size_t remainingSize = inFrames * fInputFrameSize;
	size_t decodedSize = outFrames * fOutputFrameSize;
	fDecodedDataBufferSize -= inFrames;

	bool firstAudioFramesCopiedToRawDecodedAudio
		= fRawDecodedAudio->data[0] != fDecodedData;
	if (!firstAudioFramesCopiedToRawDecodedAudio) {
		fRawDecodedAudio->format = fDecodedDataBuffer->format;
		fRawDecodedAudio->pkt_dts = fDecodedDataBuffer->pkt_dts;

		avformat_codec_context* codecContext
			= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
		codecContext->channels = get_channel_count(fCodecContext);
		codecContext->sample_rate = fCodecContext->sample_rate;
	}

	fRawDecodedAudio->data[0] += decodedSize;
	fRawDecodedAudio->linesize[0] += decodedSize;
	fRawDecodedAudio->nb_samples += outFrames;

	fDecodedDataBufferOffset += remainingSize;

	// Update start times accordingly
	bigtime_t framesTimeInterval = static_cast<bigtime_t>(
		(1000000LL * frames) /
		fOutputFrameRate);
	fDecodedDataBuffer->pkt_dts += framesTimeInterval;
		// Start time of the buffer is updated in case it contains
		// more audio frames to move.
	fTempPacket->dts += framesTimeInterval;
		// Start time of fTempPacket is updated in case the fTempPacket
		// contains more audio frames to decode.
}


/*! \brief Decodes the next chunk of audio frames.

	This method handles all the details of loading the input buffer
	(fChunkBuffer) at the right time and of calling FFMPEG often enough
	until some audio frames have been decoded.

	FFMPEG decides how many audio frames belong to a chunk. Because of that
	it is very likely that _DecodeNextAudioFrameChunk has to be called
	several times to decode enough audio frames to please the caller of
	BMediaDecoder::Decode().

	This function assumes that the following condition holds true when it
	is called:
		1. fDecodedDataBufferSize equals zero.

	After this function returns successfully the caller can safely make the
	following assumptions:
		1. fDecodedDataBufferSize is greater than zero.
		2. fDecodedDataBufferOffset is set to zero.
		3. fDecodedDataBuffer contains audio frames.


	\returns B_OK on successfully decoding one audio frame chunk.
	\returns B_LAST_BUFFER_ERROR No more audio frame chunks available. From
		this point on further calls will return this same error.
	\returns B_ERROR Decoding failed
*/
status_t
AVCodecDecoder::_DecodeNextAudioFrameChunk()
{
	assert(fDecodedDataBufferSize == 0);

	while (fDecodedDataBufferSize == 0) {
		status_t loadingChunkStatus
			= _LoadNextChunkIfNeededAndAssignStartTime();
		if (loadingChunkStatus != B_OK)
			return loadingChunkStatus;

		status_t decodingStatus
			= _DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer();
		if (decodingStatus != B_OK) {
			// Assume the audio decoded until now is broken so replace it
			// with some silence.
			memset(fDecodedData, 0, fRawDecodedAudio->linesize[0]);

			if (!fAudioDecodeError) {
				// Report failure if not done already
				int32 chunkBufferOffset = fTempPacket->data - fChunkBuffer;
				printf("########### audio decode error, "
					"fTempPacket->size %d, fChunkBuffer data offset %" B_PRId32
					"\n", fTempPacket->size, chunkBufferOffset);
				fAudioDecodeError = true;
			}

			// Assume that the next audio chunk can be decoded so keep
			// decoding.
			continue;
		}

		fAudioDecodeError = false;
	}

	return B_OK;
}


/*! \brief Tries to decode at least one audio frame and store it in the
	fDecodedDataBuffer.

	This function assumes that the following conditions hold true when it is
	called:
		1. fDecodedDataBufferSize equals zero.
		2. fTempPacket->size is greater than zero.

	After this function returns successfully the caller can safely make the
	following assumptions:
		1. fDecodedDataBufferSize is greater than zero in the common case.
		   Also see "Note" below.
		2. fTempPacket was updated to exclude the data chunk that was
		   consumed by avcodec_send_packet().
		3. fDecodedDataBufferOffset is set to zero.

	When this function failed to decode at least one audio frame due to a
	decoding error the caller can safely make the following assumptions:
		1. fDecodedDataBufferSize equals zero.
		2. fTempPacket->size equals zero.
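
	Implementation note: this follows FFMPEG's push/pull decoding model.
	avcodec_send_packet() feeds compressed data into the decoder and
	avcodec_receive_frame() drains decoded frames from it; a return value
	of AVERROR(EAGAIN) from avcodec_receive_frame() means the decoder needs
	more input before it can produce another frame.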

	Note: It is possible that there wasn't any audio frame decoded into
	fDecodedDataBuffer after calling this function. This is normal and can
	happen when there was either a decoding error or there is some decoding
	delay in FFMPEG's audio decoder. Another call to this method is totally
	safe and is even expected as long as the calling assumptions hold true.

	\returns B_OK Decoding successful. fDecodedDataBuffer contains decoded
		audio frames only when fDecodedDataBufferSize is greater than zero.
		fDecodedDataBuffer is empty, when avcodec_receive_frame() didn't
		return audio frames due to delayed decoding or incomplete audio
		frames.
	\returns B_ERROR Decoding failed thus fDecodedDataBuffer contains no
		audio frames.
*/
status_t
AVCodecDecoder::_DecodeSomeAudioFramesIntoEmptyDecodedDataBuffer()
{
	assert(fDecodedDataBufferSize == 0);

	av_frame_unref(fDecodedDataBuffer);
	fDecodedDataBufferOffset = 0;

	int error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
	if (error == AVERROR_EOF)
		return B_LAST_BUFFER_ERROR;

	if (error == AVERROR(EAGAIN)) {
		// We need to feed more data into the decoder
		avcodec_send_packet(fCodecContext, fTempPacket);

		// All the data is always consumed by avcodec_send_packet
		fTempPacket->size = 0;

		// Try again to see if we can get some decoded audio out now
		error = avcodec_receive_frame(fCodecContext, fDecodedDataBuffer);
	}

	fDecodedDataBufferSize = fDecodedDataBuffer->nb_samples;
	if (fDecodedDataBufferSize < 0)
		fDecodedDataBufferSize = 0;

	if (error == 0)
		return B_OK;
	else
		return B_ERROR;
}


/*! \brief Updates relevant fields of the class member fHeader with the
	properties of the most recently decoded audio frame.

	The following fields of fHeader are updated:
		- fHeader.type
		- fHeader.file_pos
		- fHeader.orig_size
		- fHeader.start_time
		- fHeader.size_used
		- fHeader.u.raw_audio.frame_rate
		- fHeader.u.raw_audio.channel_count

	It is assumed that this function is called only when the following
	asserts hold true:
		1. We actually got a new audio frame decoded by the audio decoder.
		2. fHeader wasn't updated for the new audio frame yet. You MUST call
		   this method only once per decoded audio frame.
		3. fRawDecodedAudio's fields relate to the first audio frame
		   contained in fDecodedData. Especially the following fields are
		   of importance:
		       - fRawDecodedAudio->pkt_dts: Start time of first audio frame
		       - fRawDecodedAudio->opaque: Contains the following fields for
		         the first audio frame:
		             - channels: Channel count of first audio frame
		             - sample_rate: Frame rate of first audio frame
*/
void
AVCodecDecoder::_UpdateMediaHeaderForAudioFrame()
{
	fHeader.type = B_MEDIA_RAW_AUDIO;
	fHeader.file_pos = 0;
	fHeader.orig_size = 0;
	fHeader.start_time = fRawDecodedAudio->pkt_dts;
	fHeader.size_used = fRawDecodedAudio->linesize[0];

	avformat_codec_context* codecContext
		= static_cast<avformat_codec_context*>(fRawDecodedAudio->opaque);
	fHeader.u.raw_audio.channel_count = codecContext->channels;
	fHeader.u.raw_audio.frame_rate = codecContext->sample_rate;
}


/*! \brief Decodes the next video frame.

	We decode exactly one video frame into fDecodedData.
	To achieve this goal, we might need to request several chunks of encoded
	data, resulting in a variable execution time of this function.

	The length of the decoded video frame is stored in
	fDecodedDataSizeInBytes. If this variable is greater than zero, you can
	assert that there is a valid video frame available in fDecodedData.

	The decoded video frame in fDecodedData has color space conversion and
	deinterlacing already applied.

	For every decoded video frame a media_header is populated in fHeader,
	containing the corresponding video frame properties.

	Normally every decoded video frame has a start_time field populated in
	the associated fHeader, that determines the presentation time of the
	frame. This relationship will only hold true when each data chunk that
	is provided via GetNextChunk() contains data for exactly one encoded
	video frame (one complete frame) - not more and not less.

	We can decode data chunks that contain partial video frame data, too.
	In that case, you cannot trust the value of the start_time field in
	fHeader. We simply have no logic in place to establish a meaningful
	relationship between an incomplete frame and the start time at which it
	should be presented. Though this might change in the future.

	We can decode data chunks that contain more than one video frame, too.
	In that case, you cannot trust the value of the start_time field in
	fHeader. We simply have no logic in place to track the start_time across
	multiple video frames. So a meaningful relationship between the 2nd,
	3rd, ... frame and the start time it should be presented at isn't
	established at the moment. Though this might change in the future.

	On first call the member variables fSwsContext / fFormatConversionFunc
	are initialized.

	\returns B_OK when we successfully decoded one video frame
	\returns B_LAST_BUFFER_ERROR when there are no more video frames
		available.
	\returns B_NO_MEMORY when we have no memory left for correct operation.
	\returns Other Errors
*/
status_t
AVCodecDecoder::_DecodeNextVideoFrame()
{
	int error;
	int send_error;

#if DO_PROFILING
	bigtime_t startTime = system_time();
#endif

	error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);

	if (error == AVERROR_EOF)
		return B_LAST_BUFFER_ERROR;

	if (error == AVERROR(EAGAIN)) {
		do {
			status_t loadingChunkStatus
				= _LoadNextChunkIfNeededAndAssignStartTime();
			if (loadingChunkStatus == B_LAST_BUFFER_ERROR)
				return _FlushOneVideoFrameFromDecoderBuffer();
			if (loadingChunkStatus != B_OK) {
				TRACE("[v] AVCodecDecoder::_DecodeNextVideoFrame(): error "
					"from GetNextChunk(): %s\n", strerror(loadingChunkStatus));
				return loadingChunkStatus;
			}

			char timestamp[AV_TS_MAX_STRING_SIZE];
			av_ts_make_time_string(timestamp,
				fTempPacket->dts, &fCodecContext->time_base);
			TRACE("[v] Feed %d more bytes (dts %s)\n", fTempPacket->size,
				timestamp);

			send_error = avcodec_send_packet(fCodecContext, fTempPacket);
			if (send_error < 0 && send_error != AVERROR(EAGAIN)) {
				TRACE("[v] AVCodecDecoder: ignoring error in decoding frame "
					"%" B_PRId64 ": %d\n", fFrame, send_error);
			}

			// Packet is consumed, clear it
			fTempPacket->data = NULL;
			fTempPacket->size = 0;

			error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);
			if (error != 0 && error != AVERROR(EAGAIN)) {
				TRACE("[v] frame %" B_PRId64 " decoding error: error code: "
					"%d, chunk size: %ld\n", fFrame, error, fChunkBufferSize);
			}

		} while (error != 0);
	}

#if DO_PROFILING
	bigtime_t formatConversionStart = system_time();
#endif

	status_t handleStatus = _HandleNewVideoFrameAndUpdateSystemState();
	if (handleStatus != B_OK)
		return handleStatus;

#if DO_PROFILING
	bigtime_t doneTime = system_time();
	decodingTime += formatConversionStart - startTime;
	conversionTime += doneTime - formatConversionStart;
	profileCounter++;
	if (!(fFrame % 5)) {
		printf("[v] profile: d1 = %lld, d2 = %lld (%lld)\n",
			decodingTime / profileCounter, conversionTime / profileCounter,
			fFrame);
		decodingTime = 0;
		conversionTime = 0;
		profileCounter = 0;
	}
#endif
	return error;
}


/*! \brief Applies all essential video input properties to fCodecContext
	that were passed to AVCodecDecoder when Setup() was called.

	Note: This function must be called before the AVCodec is opened via
	avcodec_open2(). Otherwise the behaviour of FFMPEG's video decoding
	function avcodec_receive_frame() is undefined.

	Essential properties applied from fInputFormat.u.encoded_video.output:
		- display.line_width copied to fCodecContext->width
		- display.line_count copied to fCodecContext->height
		- pixel_width_aspect and pixel_height_aspect converted to
		  fCodecContext->sample_aspect_ratio
		- field_rate converted to fCodecContext->time_base and
		  fCodecContext->ticks_per_frame

	Other essential properties being applied:
		- fExtraData to fCodecContext->extradata
		- fExtraDataSize to fCodecContext->extradata_size
*/
void
AVCodecDecoder::_ApplyEssentialVideoContainerPropertiesToContext()
{
	media_raw_video_format containerProperties
		= fInputFormat.u.encoded_video.output;

	fCodecContext->width = containerProperties.display.line_width;
	fCodecContext->height = containerProperties.display.line_count;

	if (containerProperties.pixel_width_aspect > 0
			&& containerProperties.pixel_height_aspect > 0) {
		ConvertVideoAspectWidthAndHeightToAVCodecContext(
			containerProperties.pixel_width_aspect,
			containerProperties.pixel_height_aspect, *fCodecContext);
	}

	if (containerProperties.field_rate > 0.0) {
		ConvertVideoFrameRateToAVCodecContext(containerProperties.field_rate,
			*fCodecContext);
	}

	fCodecContext->extradata = reinterpret_cast<uint8_t*>(fExtraData);
	fCodecContext->extradata_size = fExtraDataSize;
}


/*! \brief Loads the next chunk into fChunkBuffer and assigns it (including
	the start time) to fTempPacket, but only if fTempPacket is empty.

	\returns B_OK
		1. meaning: Next chunk is loaded.
		2. meaning: No need to load and assign anything. Proceed as usual.
	\returns B_LAST_BUFFER_ERROR No more chunks available. fChunkBuffer and
		fTempPacket are left untouched.
	\returns Other errors Caller should bail out because fChunkBuffer and
		fTempPacket are in unknown states. Normal operation cannot be
		guaranteed.
*/
status_t
AVCodecDecoder::_LoadNextChunkIfNeededAndAssignStartTime()
{
	if (fTempPacket->size > 0)
		return B_OK;

	const void* chunkBuffer = NULL;
	size_t chunkBufferSize = 0;
		// In the case that GetNextChunk() returns an error, fChunkBufferSize
		// should be left untouched.
	media_header chunkMediaHeader;

	status_t getNextChunkStatus = GetNextChunk(&chunkBuffer, &chunkBufferSize,
		&chunkMediaHeader);
	if (getNextChunkStatus != B_OK)
		return getNextChunkStatus;

	status_t chunkBufferPaddingStatus
		= _CopyChunkToChunkBufferAndAddPadding(chunkBuffer, chunkBufferSize);
	if (chunkBufferPaddingStatus != B_OK)
		return chunkBufferPaddingStatus;

	fTempPacket->data = fChunkBuffer;
	fTempPacket->size = fChunkBufferSize;

	fTempPacket->dts = chunkMediaHeader.start_time;
		// Let FFMPEG handle the correct relationship between start_time and
		// decoded a/v frame. By doing so we are simply copying the way it
		// is implemented in ffplay.c for video frames (for audio frames it
		// works, too, but isn't used by ffplay.c).
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=ffplay.c;h=09623db374e5289ed20b7cc28c262c4375a8b2e4;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1502

	if (chunkMediaHeader.user_data_type == AVPACKET_USER_DATA_TYPE) {
		avpacket_user_data* data
			= (avpacket_user_data*)&chunkMediaHeader.user_data;
		fTempPacket->pts = data->pts;
		fTempPacket->dts = data->dts;
		fTempPacket->stream_index = data->stream_index;
		fTempPacket->flags = data->flags;
		fTempPacket->duration = data->duration;
		fTempPacket->pos = data->pos;
	}

#ifdef LOG_STREAM_TO_FILE
	BFile* logFile = fIsAudio ? &sAudioStreamLogFile : &sVideoStreamLogFile;
	if (sDumpedPackets < 100) {
		logFile->Write(chunkBuffer, fChunkBufferSize);
		printf("wrote %ld bytes\n", fChunkBufferSize);
		sDumpedPackets++;
	} else if (sDumpedPackets == 100)
		logFile->Unset();
#endif

	return B_OK;
}


/*! \brief Copies a chunk into fChunkBuffer and adds a "safety net" of
	additional memory as required by FFMPEG for input buffers to video
	decoders.

	This is needed so that some decoders can safely read a predefined number
	of bytes at a time for performance optimization purposes.

	The additional memory has a size of AV_INPUT_BUFFER_PADDING_SIZE as
	defined in avcodec.h.

	Ownership of fChunkBuffer memory is with the class so it needs to be
	freed at the right times (on destruction, on seeking).

	Also updates fChunkBufferSize to reflect the size of the contained data
	(leaving out the padding).

	\param chunk The chunk to copy.
	\param chunkSize Size of the chunk in bytes

	\returns B_OK Padding was successful. You are responsible for releasing
		the allocated memory. fChunkBufferSize is set to chunkSize.
	\returns B_NO_MEMORY Padding failed.
		fChunkBuffer is set to NULL making it safe to call free() on it.
		fChunkBufferSize is set to 0 to reflect the size of fChunkBuffer.
*/
status_t
AVCodecDecoder::_CopyChunkToChunkBufferAndAddPadding(const void* chunk,
	size_t chunkSize)
{
	uint8_t* tmpBuffer = static_cast<uint8_t*>(realloc(fChunkBuffer,
		chunkSize + AV_INPUT_BUFFER_PADDING_SIZE));
	if (tmpBuffer == NULL) {
		free(fChunkBuffer);
		fChunkBuffer = NULL;
		fChunkBufferSize = 0;
		return B_NO_MEMORY;
	} else {
		fChunkBuffer = tmpBuffer;
	}

	memcpy(fChunkBuffer, chunk, chunkSize);
	memset(fChunkBuffer + chunkSize, 0, AV_INPUT_BUFFER_PADDING_SIZE);
		// Establish safety net, by zero'ing the padding area.

	fChunkBufferSize = chunkSize;

	return B_OK;
}


/*! \brief Executes all steps needed for a freshly decoded video frame.

	\see _UpdateMediaHeaderForVideoFrame() and
	\see _DeinterlaceAndColorConvertVideoFrame() for when you are allowed to
	call this method.

	\returns B_OK when the video frame was handled successfully
	\returns B_NO_MEMORY when no memory is left for correct operation.
*/
status_t
AVCodecDecoder::_HandleNewVideoFrameAndUpdateSystemState()
{
	_UpdateMediaHeaderForVideoFrame();
	status_t postProcessStatus = _DeinterlaceAndColorConvertVideoFrame();
	if (postProcessStatus != B_OK)
		return postProcessStatus;

#ifdef DEBUG
	dump_ffframe_video(fRawDecodedPicture, "ffpict");
#endif

	fFrame++;

	return B_OK;
}


/*!
	\brief Flushes one video frame - if any - still buffered by the decoder.

	Some FFMPEG decoders buffer video frames. To retrieve those buffered
	frames the decoder needs to be told to flush.

	The intended use of this method is to call it once there are no more
	data chunks for decoding left. In other words: Once GetNextChunk()
	returns with status B_LAST_BUFFER_ERROR it is time to start flushing.

	\returns B_OK Retrieved one video frame, handled it accordingly and
		updated the system state accordingly.
		There may be more video frames left. So it is valid for the client
		of AVCodecDecoder to call it one more time.

	\returns B_LAST_BUFFER_ERROR No video frame left.
		The client of the AVCodecDecoder should stop calling it now.

	\returns B_NO_MEMORY No memory left for correct operation.
*/
status_t
AVCodecDecoder::_FlushOneVideoFrameFromDecoderBuffer()
{
	// Tell the decoder there is nothing to send anymore
	avcodec_send_packet(fCodecContext, NULL);

	// Get any remaining frame
	int error = avcodec_receive_frame(fCodecContext, fRawDecodedPicture);

	if (error != 0 && error != AVERROR(EAGAIN)) {
		// video buffer is flushed successfully
		// (or there is an error, not much we can do about it)
		return B_LAST_BUFFER_ERROR;
	}

	return _HandleNewVideoFrameAndUpdateSystemState();
}


/*! \brief Updates relevant fields of the class member fHeader with the
	properties of the most recently decoded video frame.

	It is assumed that this function is called only when the following
	asserts hold true:
		1. We actually got a new picture decoded by the video decoder.
		2. fHeader wasn't updated for the new picture yet. You MUST call
		   this method only once per decoded video frame.
		3. This function MUST be called after
		   _DeinterlaceAndColorConvertVideoFrame() as it relies on an
		   updated fDecodedDataSizeInBytes.
		4. There will be at most one decoded video frame in our cache at
		   any single point in time. Otherwise you couldn't tell to which
		   cached decoded video frame the properties in fHeader relate.
		5. AVCodecContext is still valid for this video frame (this is the
		   case when this function is called after avcodec_receive_frame()
		   and before the next call to avcodec_receive_frame()).


/*!	\brief Updates relevant fields of the class member fHeader with the
		properties of the most recently decoded video frame.

	It is assumed that this function is called only when the following
	conditions hold true:
		1. We actually got a new picture decoded by the video decoder.
		2. fHeader wasn't updated for the new picture yet. You MUST call
		   this method only once per decoded video frame.
		3. This function MUST be called before
		   _DeinterlaceAndColorConvertVideoFrame(), which relies on the
		   fHeader fields updated here.
		4. There will be at maximum only one decoded video frame in our
		   cache at any single point in time. Otherwise you couldn't tell
		   to which cached decoded video frame the properties in fHeader
		   relate.
		5. The AVCodecContext is still valid for this video frame (this is
		   the case when this function is called after
		   avcodec_receive_frame() and before the next call to
		   avcodec_receive_frame()).
*/
void
AVCodecDecoder::_UpdateMediaHeaderForVideoFrame()
{
	fHeader.type = B_MEDIA_RAW_VIDEO;
	fHeader.file_pos = 0;
	fHeader.orig_size = 0;
	fHeader.start_time = fRawDecodedPicture->best_effort_timestamp;
	fHeader.size_used = av_image_get_buffer_size(
		colorspace_to_pixfmt(fOutputColorSpace), fRawDecodedPicture->width,
		fRawDecodedPicture->height, 1);
	fHeader.u.raw_video.display_line_width = fRawDecodedPicture->width;
	fHeader.u.raw_video.display_line_count = fRawDecodedPicture->height;
	fHeader.u.raw_video.bytes_per_row
		= CalculateBytesPerRowWithColorSpaceAndVideoWidth(fOutputColorSpace,
			fRawDecodedPicture->width);
	fHeader.u.raw_video.field_gamma = 1.0;
	fHeader.u.raw_video.field_sequence = fFrame;
	fHeader.u.raw_video.field_number = 0;
	fHeader.u.raw_video.pulldown_number = 0;
	fHeader.u.raw_video.first_active_line = 1;
	fHeader.u.raw_video.line_count = fRawDecodedPicture->height;

	ConvertAVCodecContextToVideoAspectWidthAndHeight(*fCodecContext,
		fHeader.u.raw_video.pixel_width_aspect,
		fHeader.u.raw_video.pixel_height_aspect);

	char timestamp[AV_TS_MAX_STRING_SIZE];
	av_ts_make_time_string(timestamp,
		fRawDecodedPicture->best_effort_timestamp, &fCodecContext->time_base);

	TRACE("[v] start_time=%s field_sequence=%" B_PRIu32 "\n",
		timestamp, fHeader.u.raw_video.field_sequence);
}
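

/*	A worked example of the buffer geometry above (illustrative numbers;
	the mapping of B_RGB32 to a 4-bytes-per-pixel packed FFmpeg format is
	an assumption of this example): for a 720x576 frame requested as
	B_RGB32, one would expect

		bytes_per_row = 720 * 4 = 2880
		size_used     = av_image_get_buffer_size(pixfmt, 720, 576, 1)
		              = 2880 * 576 = 1658880 bytes

	i.e. for packed RGB formats with an alignment of 1, size_used equals
	bytes_per_row * line_count.
*/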


/*!	\brief Applies deinterlacing (only if needed) and color conversion to
		the video frame in fRawDecodedPicture.

	It is assumed that fRawDecodedPicture wasn't deinterlaced and color
	converted yet (otherwise this function behaves in unknown ways).

	This function MUST be called after _UpdateMediaHeaderForVideoFrame() as
	it relies on the fHeader.size_used and fHeader.u.raw_video.bytes_per_row
	fields for correct operation.

	You should only call this function when you got a new picture decoded
	by the video decoder.

	When this function finishes, the postprocessed video frame will be
	available in fPostProcessedDecodedPicture and fDecodedData
	(fDecodedDataSizeInBytes will be set accordingly).

	\returns B_OK The video frame was successfully deinterlaced and color
		converted.
	\returns B_NO_MEMORY Not enough memory available for correct operation.
*/
status_t
AVCodecDecoder::_DeinterlaceAndColorConvertVideoFrame()
{
	int displayWidth = fRawDecodedPicture->width;
	int displayHeight = fRawDecodedPicture->height;
	AVFrame deinterlacedPicture;
	bool useDeinterlacedPicture = false;

	if (fRawDecodedPicture->flags & AV_FRAME_FLAG_INTERLACED) {
		AVFrame rawPicture;
		rawPicture.data[0] = fRawDecodedPicture->data[0];
		rawPicture.data[1] = fRawDecodedPicture->data[1];
		rawPicture.data[2] = fRawDecodedPicture->data[2];
		rawPicture.data[3] = fRawDecodedPicture->data[3];
		rawPicture.linesize[0] = fRawDecodedPicture->linesize[0];
		rawPicture.linesize[1] = fRawDecodedPicture->linesize[1];
		rawPicture.linesize[2] = fRawDecodedPicture->linesize[2];
		rawPicture.linesize[3] = fRawDecodedPicture->linesize[3];

		if (av_image_alloc(deinterlacedPicture.data,
				deinterlacedPicture.linesize, displayWidth, displayHeight,
				fCodecContext->pix_fmt, 1) < 0)
			return B_NO_MEMORY;

		// deinterlace implemented using avfilter
		_ProcessFilterGraph(&deinterlacedPicture, &rawPicture,
			fCodecContext->pix_fmt, displayWidth, displayHeight);
		useDeinterlacedPicture = true;
	}

	// Some decoders do not set pix_fmt until they have decoded one frame.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		fSwsContext = sws_getContext(displayWidth, displayHeight,
			fCodecContext->pix_fmt, displayWidth, displayHeight,
			colorspace_to_pixfmt(fOutputColorSpace),
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	}
#else
	if (fFormatConversionFunc == NULL) {
		fFormatConversionFunc = resolve_colorspace(fOutputColorSpace,
			fCodecContext->pix_fmt, displayWidth, displayHeight);
	}
#endif

	fDecodedDataSizeInBytes = fHeader.size_used;

	if (fDecodedData == NULL) {
		const size_t kOptimalAlignmentForColorConversion = 32;
		posix_memalign(reinterpret_cast<void**>(&fDecodedData),
			kOptimalAlignmentForColorConversion, fDecodedDataSizeInBytes);
	}
	if (fDecodedData == NULL)
		return B_NO_MEMORY;

	fPostProcessedDecodedPicture->data[0] = fDecodedData;
	fPostProcessedDecodedPicture->linesize[0]
		= fHeader.u.raw_video.bytes_per_row;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL) {
#else
	if (fFormatConversionFunc != NULL) {
#endif
		if (useDeinterlacedPicture) {
			AVFrame deinterlacedFrame;
			deinterlacedFrame.data[0] = deinterlacedPicture.data[0];
			deinterlacedFrame.data[1] = deinterlacedPicture.data[1];
			deinterlacedFrame.data[2] = deinterlacedPicture.data[2];
			deinterlacedFrame.data[3] = deinterlacedPicture.data[3];
			deinterlacedFrame.linesize[0] = deinterlacedPicture.linesize[0];
			deinterlacedFrame.linesize[1] = deinterlacedPicture.linesize[1];
			deinterlacedFrame.linesize[2] = deinterlacedPicture.linesize[2];
			deinterlacedFrame.linesize[3] = deinterlacedPicture.linesize[3];

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, deinterlacedFrame.data,
				deinterlacedFrame.linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(&deinterlacedFrame,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		} else {
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
			sws_scale(fSwsContext, fRawDecodedPicture->data,
				fRawDecodedPicture->linesize, 0, displayHeight,
				fPostProcessedDecodedPicture->data,
				fPostProcessedDecodedPicture->linesize);
#else
			(*fFormatConversionFunc)(fRawDecodedPicture,
				fPostProcessedDecodedPicture, displayWidth, displayHeight);
#endif
		}
	}

	if (fRawDecodedPicture->flags & AV_FRAME_FLAG_INTERLACED)
		av_freep(&deinterlacedPicture.data[0]);

	return B_OK;
}
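

/*	The swscale conversion above in isolation, as a minimal sketch
	(illustration only; the geometry, formats, and the sourceFrame /
	destinationFrame AVFrame pointers with valid planes are hypothetical):

		extern "C" {
		#include <libswscale/swscale.h>
		}

		SwsContext* context = sws_getContext(
			720, 576, AV_PIX_FMT_YUV420P,	// source geometry and format
			720, 576, AV_PIX_FMT_BGRA,		// destination geometry and format
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
		if (context != NULL) {
			sws_scale(context, sourceFrame->data, sourceFrame->linesize,
				0, 576, destinationFrame->data, destinationFrame->linesize);
			sws_freeContext(context);
		}

	SWS_FAST_BILINEAR is the cheapest scaler; since source and destination
	sizes match here, only the pixel format conversion path is exercised.
*/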


/*!	\brief Init the deinterlace filter graph.

	\returns B_OK The filter graph could be built.
	\returns B_BAD_VALUE Something went wrong with building the graph.
*/
status_t
AVCodecDecoder::_InitFilterGraph(enum AVPixelFormat pixfmt, int32 width,
	int32 height)
{
	if (fFilterGraph != NULL) {
		av_frame_free(&fFilterFrame);
		avfilter_graph_free(&fFilterGraph);
	}

	fFilterGraph = avfilter_graph_alloc();

	BString arguments;
	arguments.SetToFormat("buffer=video_size=%" B_PRId32 "x%" B_PRId32
		":pix_fmt=%d:time_base=1/1:pixel_aspect=0/1[in];[in]yadif[out];"
		"[out]buffersink", width, height, pixfmt);
	AVFilterInOut* inputs = NULL;
	AVFilterInOut* outputs = NULL;
	TRACE("[v] _InitFilterGraph(): %s\n", arguments.String());
	int ret = avfilter_graph_parse2(fFilterGraph, arguments.String(), &inputs,
		&outputs);
	if (ret < 0) {
		fprintf(stderr, "avfilter_graph_parse2() failed\n");
		return B_BAD_VALUE;
	}

	ret = avfilter_graph_config(fFilterGraph, NULL);
	if (ret < 0) {
		fprintf(stderr, "avfilter_graph_config() failed\n");
		return B_BAD_VALUE;
	}

	fBufferSourceContext = avfilter_graph_get_filter(fFilterGraph,
		"Parsed_buffer_0");
	fBufferSinkContext = avfilter_graph_get_filter(fFilterGraph,
		"Parsed_buffersink_2");
	if (fBufferSourceContext == NULL || fBufferSinkContext == NULL) {
		fprintf(stderr, "avfilter_graph_get_filter() failed\n");
		return B_BAD_VALUE;
	}
	fFilterFrame = av_frame_alloc();
	fLastWidth = width;
	fLastHeight = height;
	fLastPixfmt = pixfmt;

	return B_OK;
}
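

/*	For a 720x576 YUV420P frame (AV_PIX_FMT_YUV420P currently has the
	numeric value 0, an assumption of this illustration), the generated
	graph description reads:

		buffer=video_size=720x576:pix_fmt=0:time_base=1/1:pixel_aspect=0/1[in];
		[in]yadif[out];[out]buffersink

	avfilter_graph_parse2() names the three unlabeled filter instances
	"Parsed_buffer_0", "Parsed_yadif_1" and "Parsed_buffersink_2", which is
	why the source and sink are looked up under those names above.
*/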


/*!	\brief Process an AVFrame with the deinterlace filter graph.

	We filter exactly one video frame into dst.
	Equivalent to avpicture_deinterlace() from FFmpeg 2.x.

	\returns B_OK The video frame was successfully deinterlaced.
	\returns B_BAD_DATA No frame could be output.
	\returns B_NO_MEMORY Not enough memory available for correct operation.
*/
status_t
AVCodecDecoder::_ProcessFilterGraph(AVFrame* dst, const AVFrame* src,
	enum AVPixelFormat pixfmt, int32 width, int32 height)
{
	if (fFilterGraph == NULL || width != fLastWidth
		|| height != fLastHeight || pixfmt != fLastPixfmt) {
		status_t err = _InitFilterGraph(pixfmt, width, height);
		if (err != B_OK)
			return err;
	}

	memcpy(fFilterFrame->data, src->data, sizeof(src->data));
	memcpy(fFilterFrame->linesize, src->linesize, sizeof(src->linesize));
	fFilterFrame->width = width;
	fFilterFrame->height = height;
	fFilterFrame->format = pixfmt;

	int ret = av_buffersrc_add_frame(fBufferSourceContext, fFilterFrame);
	if (ret < 0)
		return B_NO_MEMORY;

	ret = av_buffersink_get_frame(fBufferSinkContext, fFilterFrame);
	if (ret < 0)
		return B_BAD_DATA;

	av_image_copy(dst->data, dst->linesize, (const uint8**)fFilterFrame->data,
		fFilterFrame->linesize, pixfmt, width, height);
	av_frame_unref(fFilterFrame);
	return B_OK;
}
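

/*	The expected caller pattern, as already exercised by
	_DeinterlaceAndColorConvertVideoFrame() above (sketch only; error
	handling elided; sourceFrame, pixfmt, width and height are assumed to
	describe a decoded frame): the destination planes must be allocated
	with the same geometry before filtering and released afterwards:

		AVFrame destination;
		if (av_image_alloc(destination.data, destination.linesize,
				width, height, pixfmt, 1) >= 0) {
			_ProcessFilterGraph(&destination, sourceFrame, pixfmt,
				width, height);
			// ... consume the deinterlaced planes in destination ...
			av_freep(&destination.data[0]);
		}
*/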