/*
 * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
 * All rights reserved. Distributed under the terms of the MIT license.
 */


#include "AVCodecEncoder.h"

#include <new>

#include <stdio.h>
#include <string.h>

#include <Application.h>
#include <Roster.h>

extern "C" {
	#include "rational.h"
}

#include "EncoderTable.h"
#include "gfx_util.h"


#undef TRACE
//#define TRACE_AV_CODEC_ENCODER
#ifdef TRACE_AV_CODEC_ENCODER
#	define TRACE	printf
#	define TRACE_IO(a...)
#else
#	define TRACE(a...)
#	define TRACE_IO(a...)
#endif


static const size_t kDefaultChunkBufferSize = 2 * 1024 * 1024;


AVCodecEncoder::AVCodecEncoder(uint32 codecID, int bitRateScale)
	:
	Encoder(),
	fBitRateScale(bitRateScale),
	fCodecID((CodecID)codecID),
	fCodec(NULL),
	fOwnContext(avcodec_alloc_context3(NULL)),
	fContext(fOwnContext),
	fCodecInitStatus(CODEC_INIT_NEEDED),
	fFrame(avcodec_alloc_frame()),
	fSwsContext(NULL),
	fFramesWritten(0)
{
	TRACE("AVCodecEncoder::AVCodecEncoder()\n");
	_Init();
}


void
AVCodecEncoder::_Init()
{
	fChunkBuffer = new(std::nothrow) uint8[kDefaultChunkBufferSize];
	if (fCodecID > 0) {
		fCodec = avcodec_find_encoder(fCodecID);
		TRACE(" found AVCodec for %u: %p\n", fCodecID, fCodec);
	}

	memset(&fInputFormat, 0, sizeof(media_format));

	fAudioFifo = av_fifo_alloc(0);

	fDstFrame.data[0] = NULL;
	fDstFrame.data[1] = NULL;
	fDstFrame.data[2] = NULL;
	fDstFrame.data[3] = NULL;

	fDstFrame.linesize[0] = 0;
	fDstFrame.linesize[1] = 0;
	fDstFrame.linesize[2] = 0;
	fDstFrame.linesize[3] = 0;

	// Initial parameters, so we know if the user changed them
	fEncodeParameters.avg_field_size = 0;
	fEncodeParameters.max_field_size = 0;
	fEncodeParameters.quality = 1.0f;
}


AVCodecEncoder::~AVCodecEncoder()
{
	TRACE("AVCodecEncoder::~AVCodecEncoder()\n");

	_CloseCodecIfNeeded();

	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);

	av_fifo_free(fAudioFifo);

	avpicture_free(&fDstFrame);
	// NOTE: Do not use avpicture_free() on fSrcFrame!! We fill the picture
	// data on the fly with the media buffer data passed to Encode().

	if (fFrame != NULL) {
		fFrame->data[0] = NULL;
		fFrame->data[1] = NULL;
		fFrame->data[2] = NULL;
		fFrame->data[3] = NULL;

		fFrame->linesize[0] = 0;
		fFrame->linesize[1] = 0;
		fFrame->linesize[2] = 0;
		fFrame->linesize[3] = 0;
		av_free(fFrame);
	}

	av_free(fOwnContext);

	delete[] fChunkBuffer;
}


status_t
AVCodecEncoder::AcceptedFormat(const media_format* proposedInputFormat,
	media_format* _acceptedInputFormat)
{
	TRACE("AVCodecEncoder::AcceptedFormat(%p, %p)\n", proposedInputFormat,
		_acceptedInputFormat);

	if (proposedInputFormat == NULL)
		return B_BAD_VALUE;

	if (_acceptedInputFormat != NULL) {
		memcpy(_acceptedInputFormat, proposedInputFormat,
			sizeof(media_format));
	}

	return B_OK;
}


status_t
AVCodecEncoder::SetUp(const media_format* inputFormat)
{
	TRACE("AVCodecEncoder::SetUp()\n");

	if (fContext == NULL)
		return B_NO_INIT;

	if (inputFormat == NULL)
		return B_BAD_VALUE;

	// Codec IDs for raw-formats may need to be figured out here.
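	// (raw_audio_codec_id_for() is presumably declared in EncoderTable.h;
	// it appears to map a raw media_format to the matching PCM CODEC_ID,
	// so raw audio can pass through FFmpeg's PCM "encoders".)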
	if (fCodec == NULL && fCodecID == CODEC_ID_NONE) {
		fCodecID = raw_audio_codec_id_for(*inputFormat);
		if (fCodecID != CODEC_ID_NONE)
			fCodec = avcodec_find_encoder(fCodecID);
	}
	if (fCodec == NULL) {
		TRACE(" encoder not found!\n");
		return B_NO_INIT;
	}

	_CloseCodecIfNeeded();

	fInputFormat = *inputFormat;
	fFramesWritten = 0;

	const uchar* userData = inputFormat->user_data;
	if (*(uint32*)userData == 'ffmp') {
		userData += sizeof(uint32);
		// The Writer plugin used is the FFmpeg plugin. It stores the
		// AVCodecContext pointer in the user data section. Use this
		// context instead of our own. Of course, this requires the Writer
		// to live in the same team.
		app_info appInfo;
		if (be_app->GetAppInfo(&appInfo) == B_OK
			&& *(team_id*)userData == appInfo.team) {
			userData += sizeof(team_id);
			// Use the AVCodecContext from the Writer. This works better
			// than using our own context with some encoders.
			fContext = *(AVCodecContext**)userData;
		}
	}

	return _Setup();
}


status_t
AVCodecEncoder::GetEncodeParameters(encode_parameters* parameters) const
{
	TRACE("AVCodecEncoder::GetEncodeParameters(%p)\n", parameters);

	// TODO: Implement maintaining an automatically calculated bit_rate versus
	// a user specified (via SetEncodeParameters()) bit_rate. At this point,
	// fContext->bit_rate may not have been specified yet (_Setup() may not
	// have been called), so it cannot work like the code below. In any case,
	// it shows how to convert between the values (albeit untested).
//	int avgBytesPerSecond = fContext->bit_rate / 8;
//	int maxBytesPerSecond = (fContext->bit_rate
//		+ fContext->bit_rate_tolerance) / 8;
//
//	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
//		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
//			/ fInputFormat.u.raw_audio.frame_rate);
//		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
//			/ fInputFormat.u.raw_audio.frame_rate);
//	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
//		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
//			/ fInputFormat.u.raw_video.field_rate);
//		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
//			/ fInputFormat.u.raw_video.field_rate);
//	}

	parameters->quality = fEncodeParameters.quality;

	return B_OK;
}


status_t
AVCodecEncoder::SetEncodeParameters(encode_parameters* parameters)
{
	TRACE("AVCodecEncoder::SetEncodeParameters(%p)\n", parameters);

	if (fFramesWritten > 0)
		return B_NOT_SUPPORTED;

	fEncodeParameters.quality = parameters->quality;
	TRACE(" quality: %.5f\n", parameters->quality);
	if (fEncodeParameters.quality == 0.0f) {
		TRACE(" using default quality (1.0)\n");
		fEncodeParameters.quality = 1.0f;
	}

	// TODO: Auto-bit_rate versus user supplied. See above.
//	int avgBytesPerSecond = 0;
//	int maxBytesPerSecond = 0;
//
//	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
//		avgBytesPerSecond = (int)(parameters->avg_field_size
//			* fInputFormat.u.raw_audio.frame_rate);
//		maxBytesPerSecond = (int)(parameters->max_field_size
//			* fInputFormat.u.raw_audio.frame_rate);
//	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
//		avgBytesPerSecond = (int)(parameters->avg_field_size
//			* fInputFormat.u.raw_video.field_rate);
//		maxBytesPerSecond = (int)(parameters->max_field_size
//			* fInputFormat.u.raw_video.field_rate);
//	}
//
//	if (maxBytesPerSecond < avgBytesPerSecond)
//		maxBytesPerSecond = avgBytesPerSecond;
//
//	// Reset these, so we can tell the difference between uninitialized
//	// and initialized...
//	if (avgBytesPerSecond > 0) {
//		fContext->bit_rate = avgBytesPerSecond * 8;
//		fContext->bit_rate_tolerance = (maxBytesPerSecond
//			- avgBytesPerSecond) * 8;
//		fBitRateControlledByUser = true;
//	}

	return _Setup();
}


status_t
AVCodecEncoder::Encode(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE("AVCodecEncoder::Encode(%p, %lld, %p)\n", buffer, frameCount, info);

	if (!_OpenCodecIfNeeded())
		return B_NO_INIT;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO)
		return _EncodeAudio(buffer, frameCount, info);
	else if (fInputFormat.type == B_MEDIA_RAW_VIDEO)
		return _EncodeVideo(buffer, frameCount, info);
	else
		return B_NO_INIT;
}


// #pragma mark -


status_t
AVCodecEncoder::_Setup()
{
	TRACE("AVCodecEncoder::_Setup\n");

	int rawBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
		TRACE(" B_MEDIA_RAW_VIDEO\n");
		// frame rate
		fContext->time_base.den = (int)fInputFormat.u.raw_video.field_rate;
		fContext->time_base.num = 1;
		// video size
		fContext->width = fInputFormat.u.raw_video.display.line_width;
		fContext->height = fInputFormat.u.raw_video.display.line_count;
		fContext->gop_size = 12;

		// TODO: Fix pixel format or setup conversion method...
		if (fCodec->pix_fmts != NULL) {
			for (int i = 0; fCodec->pix_fmts[i] != PIX_FMT_NONE; i++) {
				// Use the last supported pixel format, which we hope is the
				// one with the best quality.
				fContext->pix_fmt = fCodec->pix_fmts[i];
			}
		}

		// TODO: Setup rate control:
//		fContext->rate_emu = 0;
//		fContext->rc_eq = NULL;
//		fContext->rc_max_rate = 0;
//		fContext->rc_min_rate = 0;
		// TODO: Try to calculate a good bit rate...
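		// The "raw" bit rate below is a rough estimate: it assumes about
		// 2 bytes per pixel of uncompressed video (e.g. a 4:2:2 layout),
		// times the field rate, converted to bits per second.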
		rawBitRate = (int)(fContext->width * fContext->height * 2
			* fInputFormat.u.raw_video.field_rate) * 8;

		// Pixel aspect ratio
		fContext->sample_aspect_ratio.num
			= fInputFormat.u.raw_video.pixel_width_aspect;
		fContext->sample_aspect_ratio.den
			= fInputFormat.u.raw_video.pixel_height_aspect;
		if (fContext->sample_aspect_ratio.num == 0
			|| fContext->sample_aspect_ratio.den == 0) {
			av_reduce(&fContext->sample_aspect_ratio.num,
				&fContext->sample_aspect_ratio.den, fContext->width,
				fContext->height, 255);
		}

		// TODO: This should already happen in AcceptedFormat()
		if (fInputFormat.u.raw_video.display.bytes_per_row == 0) {
			fInputFormat.u.raw_video.display.bytes_per_row
				= fContext->width * 4;
		}

		fFrame->pts = 0;

		// Allocate space for colorspace converted AVPicture
		// TODO: Check allocations...
		avpicture_alloc(&fDstFrame, fContext->pix_fmt, fContext->width,
			fContext->height);

		// Make the frame point to the data in the converted AVPicture
		fFrame->data[0] = fDstFrame.data[0];
		fFrame->data[1] = fDstFrame.data[1];
		fFrame->data[2] = fDstFrame.data[2];
		fFrame->data[3] = fDstFrame.data[3];

		fFrame->linesize[0] = fDstFrame.linesize[0];
		fFrame->linesize[1] = fDstFrame.linesize[1];
		fFrame->linesize[2] = fDstFrame.linesize[2];
		fFrame->linesize[3] = fDstFrame.linesize[3];

		fSwsContext = sws_getContext(fContext->width, fContext->height,
			colorspace_to_pixfmt(fInputFormat.u.raw_video.display.format),
			fContext->width, fContext->height,
			fContext->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);

	} else if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		TRACE(" B_MEDIA_RAW_AUDIO\n");
		// frame rate
		fContext->sample_rate = (int)fInputFormat.u.raw_audio.frame_rate;
		// channels
		fContext->channels = fInputFormat.u.raw_audio.channel_count;
		// raw bitrate
		rawBitRate = fContext->sample_rate * fContext->channels
			* (fInputFormat.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK) * 8;
		// sample format
		switch (fInputFormat.u.raw_audio.format) {
			case media_raw_audio_format::B_AUDIO_FLOAT:
				fContext->sample_fmt = AV_SAMPLE_FMT_FLT;
				break;
			case media_raw_audio_format::B_AUDIO_DOUBLE:
				fContext->sample_fmt = AV_SAMPLE_FMT_DBL;
				break;
			case media_raw_audio_format::B_AUDIO_INT:
				fContext->sample_fmt = AV_SAMPLE_FMT_S32;
				break;
			case media_raw_audio_format::B_AUDIO_SHORT:
				fContext->sample_fmt = AV_SAMPLE_FMT_S16;
				break;
			case media_raw_audio_format::B_AUDIO_UCHAR:
				fContext->sample_fmt = AV_SAMPLE_FMT_U8;
				break;

			case media_raw_audio_format::B_AUDIO_CHAR:
			default:
				return B_MEDIA_BAD_FORMAT;
				break;
		}
		if (fInputFormat.u.raw_audio.channel_mask == 0) {
			// guess the channel mask...
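			// This is a best-effort mapping; ambiguous channel counts are
			// assumed to use the most common FFmpeg layout (e.g. 3 channels
			// are treated as L/R/C surround rather than 2.1).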
			switch (fInputFormat.u.raw_audio.channel_count) {
				default:
				case 2:
					fContext->channel_layout = AV_CH_LAYOUT_STEREO;
					break;
				case 1:
					fContext->channel_layout = AV_CH_LAYOUT_MONO;
					break;
				case 3:
					fContext->channel_layout = AV_CH_LAYOUT_SURROUND;
					break;
				case 4:
					fContext->channel_layout = AV_CH_LAYOUT_QUAD;
					break;
				case 5:
					fContext->channel_layout = AV_CH_LAYOUT_5POINT0;
					break;
				case 6:
					fContext->channel_layout = AV_CH_LAYOUT_5POINT1;
					break;
				case 8:
					fContext->channel_layout = AV_CH_LAYOUT_7POINT1;
					break;
				case 10:
					fContext->channel_layout = AV_CH_LAYOUT_7POINT1_WIDE;
					break;
			}
		} else {
			// The bits match 1:1 for media_multi_channels and FFmpeg defines.
			fContext->channel_layout = fInputFormat.u.raw_audio.channel_mask;
		}
	} else {
		TRACE(" UNSUPPORTED MEDIA TYPE!\n");
		return B_NOT_SUPPORTED;
	}

	// TODO: Support letting the user overwrite this via
	// SetEncodeParameters(). See comments there...
	int wantedBitRate = (int)(rawBitRate / fBitRateScale
		* fEncodeParameters.quality);
	if (wantedBitRate == 0)
		wantedBitRate = (int)(rawBitRate / fBitRateScale);

	fContext->bit_rate = wantedBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		// Some audio encoders support only certain bitrates. Use the
		// closest match to the wantedBitRate.
		const int kBitRates[] = {
			32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000,
			160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000,
			576000, 640000
		};
		int diff = wantedBitRate;
		for (unsigned int i = 0; i < sizeof(kBitRates) / sizeof(int); i++) {
			int currentDiff = abs(wantedBitRate - kBitRates[i]);
			if (currentDiff < diff) {
				fContext->bit_rate = kBitRates[i];
				diff = currentDiff;
			} else
				break;
		}
	}

	TRACE(" rawBitRate: %d, wantedBitRate: %d (%.1f), "
		"context bitrate: %d\n", rawBitRate, wantedBitRate,
		fEncodeParameters.quality, fContext->bit_rate);

	// Add some known fixes from the FFmpeg API example:
	if (fContext->codec_id == CODEC_ID_MPEG2VIDEO) {
		// Just for testing, we also add B frames.
		fContext->max_b_frames = 2;
	} else if (fContext->codec_id == CODEC_ID_MPEG1VIDEO) {
		// Needed to avoid using macroblocks in which some coeffs overflow.
		// This does not happen with normal video, it just happens here as
		// the motion of the chroma plane does not match the luma plane.
		fContext->mb_decision = 2;
	}

	// Unfortunately, we may still fail later, when we try to open the codec
	// for real... but we need to delay opening it because we still allow
	// parameter/quality changes.
	return B_OK;
}


bool
AVCodecEncoder::_OpenCodecIfNeeded()
{
	if (fContext != fOwnContext) {
		// We are using the AVCodecContext of the AVFormatWriter plugin,
		// and don't maintain its open/close state.
		return true;
	}

	if (fCodecInitStatus == CODEC_INIT_DONE)
		return true;

	if (fCodecInitStatus == CODEC_INIT_FAILED)
		return false;

	fContext->strict_std_compliance = -2;

	// Open the codec
	int result = avcodec_open2(fContext, fCodec, NULL);
	if (result >= 0)
		fCodecInitStatus = CODEC_INIT_DONE;
	else
		fCodecInitStatus = CODEC_INIT_FAILED;

	TRACE(" avcodec_open2(%p, %p): %d\n", fContext, fCodec, result);

	return fCodecInitStatus == CODEC_INIT_DONE;
}


void
AVCodecEncoder::_CloseCodecIfNeeded()
{
	if (fContext != fOwnContext) {
		// See _OpenCodecIfNeeded().
		return;
	}

	if (fCodecInitStatus == CODEC_INIT_DONE) {
		avcodec_close(fContext);
		fCodecInitStatus = CODEC_INIT_NEEDED;
	}
}


static const int64 kNoPTSValue = 0x8000000000000000LL;
	// NOTE: For some reason, I have trouble with the avcodec.h define:
	// #define AV_NOPTS_VALUE INT64_C(0x8000000000000000)
	// INT64_C is not defined here.

status_t
AVCodecEncoder::_EncodeAudio(const void* _buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE("AVCodecEncoder::_EncodeAudio(%p, %lld, %p)\n", _buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	const uint8* buffer = reinterpret_cast<const uint8*>(_buffer);

	size_t inputSampleSize = fInputFormat.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t inputFrameSize = inputSampleSize
		* fInputFormat.u.raw_audio.channel_count;

	size_t bufferSize = frameCount * inputFrameSize;
	bufferSize = min_c(bufferSize, kDefaultChunkBufferSize);

	if (fContext->frame_size > 1) {
		// Encoded audio. Things work differently from raw audio. We need
		// the fAudioFifo to pipe data, since the encoder consumes
		// fixed-size frames.
		if (av_fifo_realloc2(fAudioFifo,
				av_fifo_size(fAudioFifo) + bufferSize) < 0) {
			TRACE(" av_fifo_realloc2() failed\n");
			return B_NO_MEMORY;
		}
		av_fifo_generic_write(fAudioFifo, const_cast<uint8*>(buffer),
			bufferSize, NULL);

		int frameBytes = fContext->frame_size * inputFrameSize;
		uint8* tempBuffer = new(std::nothrow) uint8[frameBytes];
		if (tempBuffer == NULL)
			return B_NO_MEMORY;

		// Encode as many chunks as can be read from the FIFO.
		while (av_fifo_size(fAudioFifo) >= frameBytes) {
			av_fifo_generic_read(fAudioFifo, tempBuffer, frameBytes, NULL);

			ret = _EncodeAudio(tempBuffer, frameBytes, fContext->frame_size,
				info);
			if (ret != B_OK)
				break;
		}

		delete[] tempBuffer;
	} else {
		// Raw audio. The number of bytes returned from avcodec_encode_audio()
		// is always the same as the number of input bytes.
		return _EncodeAudio(buffer, bufferSize, frameCount, info);
	}

	return ret;
}


status_t
AVCodecEncoder::_EncodeAudio(const uint8* buffer, size_t bufferSize,
	int64 frameCount, media_encode_info* info)
{
	status_t ret;

	// Encode one audio chunk/frame.
	AVPacket packet;
	av_init_packet(&packet);
	// By leaving these NULL, we let the encoder allocate memory as it needs.
	// This way we don't risk giving it a buffer that is too small.
	packet.data = NULL;
	packet.size = 0;

	// We need to wrap our input data into an AVFrame structure.
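	// The buffer is assumed to hold packed (interleaved) samples in
	// fContext->sample_fmt; avcodec_fill_audio_frame() only points the
	// frame's data pointers into it (align = 1, i.e. no extra padding).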
	AVFrame frame;
	int gotPacket = 0;

	if (buffer) {
		avcodec_get_frame_defaults(&frame);

		frame.nb_samples = frameCount;

		ret = avcodec_fill_audio_frame(&frame, fContext->channels,
			fContext->sample_fmt, (const uint8_t*)buffer, bufferSize, 1);

		if (ret != 0)
			return B_ERROR;

		// Set the presentation time of the frame.
		frame.pts = (bigtime_t)(fFramesWritten * 1000000LL
			/ fInputFormat.u.raw_audio.frame_rate);

		ret = avcodec_encode_audio2(fContext, &packet, &frame, &gotPacket);
	} else {
		// If called with NULL, ask the encoder to flush any buffers it may
		// have pending.
		ret = avcodec_encode_audio2(fContext, &packet, NULL, &gotPacket);
	}

	if (buffer && frame.extended_data != frame.data)
		av_freep(&frame.extended_data);

	if (ret != 0) {
		TRACE(" avcodec_encode_audio2() failed: %ld\n", ret);
		return B_ERROR;
	}

	fFramesWritten += frameCount;

	if (gotPacket) {
		if (fContext->coded_frame) {
			// Store information about the coded frame in the context.
			fContext->coded_frame->pts = packet.pts;
			fContext->coded_frame->key_frame
				= !!(packet.flags & AV_PKT_FLAG_KEY);
		}

		// Setup media_encode_info, most important is the time stamp.
		info->start_time = packet.pts;

		if (packet.flags & AV_PKT_FLAG_KEY)
			info->flags = B_MEDIA_KEY_FRAME;
		else
			info->flags = 0;

		// We got a packet out of the encoder, write it to the output stream.
		ret = WriteChunk(packet.data, packet.size, info);
		if (ret != B_OK) {
			TRACE(" error writing chunk: %s\n", strerror(ret));
			av_free_packet(&packet);
			return ret;
		}
	}

	av_free_packet(&packet);
	return B_OK;
}


status_t
AVCodecEncoder::_EncodeVideo(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE_IO("AVCodecEncoder::_EncodeVideo(%p, %lld, %p)\n", buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	while (frameCount > 0) {
		size_t bpr = fInputFormat.u.raw_video.display.bytes_per_row;
		size_t bufferSize = fInputFormat.u.raw_video.display.line_count * bpr;

		// We should always get chunky bitmaps, so this code should be safe.
		fSrcFrame.data[0] = (uint8_t*)buffer;
		fSrcFrame.linesize[0] = bpr;

		// Run the pixel format conversion
		sws_scale(fSwsContext, fSrcFrame.data, fSrcFrame.linesize, 0,
			fInputFormat.u.raw_video.display.line_count, fDstFrame.data,
			fDstFrame.linesize);

		// Encode one video chunk/frame.
		int usedBytes = avcodec_encode_video(fContext, fChunkBuffer,
			kDefaultChunkBufferSize, fFrame);

		// avcodec.h says we need to set it.
		fFrame->pts++;

		if (usedBytes < 0) {
			TRACE(" avcodec_encode_video() failed: %d\n", usedBytes);
			return B_ERROR;
		}

		// Maybe we need to use this PTS to calculate start_time:
		if (fContext->coded_frame->pts != kNoPTSValue) {
			TRACE(" codec frame PTS: %lld (codec time_base: %d/%d)\n",
				fContext->coded_frame->pts, fContext->time_base.num,
				fContext->time_base.den);
		} else {
			TRACE(" codec frame PTS: N/A (codec time_base: %d/%d)\n",
				fContext->time_base.num, fContext->time_base.den);
		}

		// Setup media_encode_info, most important is the time stamp.
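		// start_time is a bigtime_t, i.e. microseconds; derive it from the
		// number of fields written so far and the field rate.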
		info->start_time = (bigtime_t)(fFramesWritten * 1000000LL
			/ fInputFormat.u.raw_video.field_rate);

		info->flags = 0;
		if (fContext->coded_frame->key_frame)
			info->flags |= B_MEDIA_KEY_FRAME;

		// Write the chunk
		ret = WriteChunk(fChunkBuffer, usedBytes, info);
		if (ret != B_OK) {
			TRACE(" error writing chunk: %s\n", strerror(ret));
			break;
		}

		// Skip to the next frame (but usually, there is only one to encode
		// for video).
		frameCount--;
		fFramesWritten++;
		buffer = (const void*)((const uint8*)buffer + bufferSize);
	}

	return ret;
}