/*
 * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
 * All rights reserved. Distributed under the terms of the MIT license.
 */


#include "AVCodecEncoder.h"

#include <new>

#include <stdio.h>
#include <string.h>

#include <Application.h>
#include <Roster.h>

extern "C" {
	#include "rational.h"
}

#include "EncoderTable.h"
#include "gfx_util.h"


#undef TRACE
//#define TRACE_AV_CODEC_ENCODER
#ifdef TRACE_AV_CODEC_ENCODER
#	define TRACE printf
#	define TRACE_IO(a...)
#else
#	define TRACE(a...)
#	define TRACE_IO(a...)
#endif


static const size_t kDefaultChunkBufferSize = 2 * 1024 * 1024;


AVCodecEncoder::AVCodecEncoder(uint32 codecID, int bitRateScale)
	:
	Encoder(),
	fBitRateScale(bitRateScale),
	fCodecID((enum CodecID)codecID),
	fCodec(NULL),
	fOwnContext(avcodec_alloc_context()),
	fContext(fOwnContext),
	fCodecInitStatus(CODEC_INIT_NEEDED),

	fFrame(avcodec_alloc_frame()),
	fSwsContext(NULL),

	fFramesWritten(0),

	fChunkBuffer(new(std::nothrow) uint8[kDefaultChunkBufferSize])
{
	TRACE("AVCodecEncoder::AVCodecEncoder()\n");

	if (fCodecID > 0) {
		fCodec = avcodec_find_encoder(fCodecID);
		TRACE(" found AVCodec for %u: %p\n", fCodecID, fCodec);
	}

	memset(&fInputFormat, 0, sizeof(media_format));

	fAudioFifo = av_fifo_alloc(0);

	fDstFrame.data[0] = NULL;
	fDstFrame.data[1] = NULL;
	fDstFrame.data[2] = NULL;
	fDstFrame.data[3] = NULL;

	fDstFrame.linesize[0] = 0;
	fDstFrame.linesize[1] = 0;
	fDstFrame.linesize[2] = 0;
	fDstFrame.linesize[3] = 0;

	// Initial parameters, so we know if the user changed them
	fEncodeParameters.avg_field_size = 0;
	fEncodeParameters.max_field_size = 0;
	fEncodeParameters.quality = 1.0f;
}


AVCodecEncoder::~AVCodecEncoder()
{
	TRACE("AVCodecEncoder::~AVCodecEncoder()\n");

	_CloseCodecIfNeeded();

	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);

	av_fifo_free(fAudioFifo);

	avpicture_free(&fDstFrame);
	// NOTE: Do not use avpicture_free() on fSrcFrame!! We fill the picture
	// data on the fly with the media buffer data passed to Encode().

	if (fFrame != NULL) {
		fFrame->data[0] = NULL;
		fFrame->data[1] = NULL;
		fFrame->data[2] = NULL;
		fFrame->data[3] = NULL;

		fFrame->linesize[0] = 0;
		fFrame->linesize[1] = 0;
		fFrame->linesize[2] = 0;
		fFrame->linesize[3] = 0;
		av_free(fFrame);
	}

	av_free(fOwnContext);

	delete[] fChunkBuffer;
}


status_t
AVCodecEncoder::AcceptedFormat(const media_format* proposedInputFormat,
	media_format* _acceptedInputFormat)
{
	TRACE("AVCodecEncoder::AcceptedFormat(%p, %p)\n", proposedInputFormat,
		_acceptedInputFormat);

	if (proposedInputFormat == NULL)
		return B_BAD_VALUE;

	if (_acceptedInputFormat != NULL) {
		memcpy(_acceptedInputFormat, proposedInputFormat,
			sizeof(media_format));
	}

	return B_OK;
}


status_t
AVCodecEncoder::SetUp(const media_format* inputFormat)
{
	TRACE("AVCodecEncoder::SetUp()\n");

	if (fContext == NULL)
		return B_NO_INIT;

	if (inputFormat == NULL)
		return B_BAD_VALUE;

	// Codec IDs for raw-formats may need to be figured out here.
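	// (When the encoder was instantiated with CODEC_ID_NONE, presumably the
	// case for raw PCM output formats, the actual codec ID depends on the
	// sample format in inputFormat, so it is resolved below via the
	// raw_audio_codec_id_for() lookup from EncoderTable.h.)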
	if (fCodec == NULL && fCodecID == CODEC_ID_NONE) {
		fCodecID = raw_audio_codec_id_for(*inputFormat);
		if (fCodecID != CODEC_ID_NONE)
			fCodec = avcodec_find_encoder(fCodecID);
	}
	if (fCodec == NULL) {
		TRACE(" encoder not found!\n");
		return B_NO_INIT;
	}

	_CloseCodecIfNeeded();

	fInputFormat = *inputFormat;
	fFramesWritten = 0;

	const uchar* userData = inputFormat->user_data;
	if (*(uint32*)userData == 'ffmp') {
		userData += sizeof(uint32);
		// The Writer plugin used is the FFmpeg plugin. It stores the
		// AVCodecContext pointer in the user data section. Use this
		// context instead of our own. It requires the Writer living in
		// the same team, of course.
		app_info appInfo;
		if (be_app->GetAppInfo(&appInfo) == B_OK
			&& *(team_id*)userData == appInfo.team) {
			userData += sizeof(team_id);
			// Use the AVCodecContext from the Writer. This works better
			// than using our own context with some encoders.
			fContext = *(AVCodecContext**)userData;
		}
	}

	return _Setup();
}


status_t
AVCodecEncoder::GetEncodeParameters(encode_parameters* parameters) const
{
	TRACE("AVCodecEncoder::GetEncodeParameters(%p)\n", parameters);

	// TODO: Implement maintaining an automatically calculated bit_rate versus
	// a user specified (via SetEncodeParameters()) bit_rate. At this point, the
	// fContext->bit_rate may not yet have been specified (_Setup() was never
	// called yet). So it cannot work like the code below, but in any case, it's
	// showing how to convert between the values (albeit untested).
//	int avgBytesPerSecond = fContext->bit_rate / 8;
//	int maxBytesPerSecond = (fContext->bit_rate
//		+ fContext->bit_rate_tolerance) / 8;
//
//	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
//		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
//			/ fInputFormat.u.raw_audio.frame_rate);
//		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
//			/ fInputFormat.u.raw_audio.frame_rate);
//	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
//		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
//			/ fInputFormat.u.raw_video.field_rate);
//		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
//			/ fInputFormat.u.raw_video.field_rate);
//	}

	parameters->quality = fEncodeParameters.quality;

	return B_OK;
}


status_t
AVCodecEncoder::SetEncodeParameters(encode_parameters* parameters)
{
	TRACE("AVCodecEncoder::SetEncodeParameters(%p)\n", parameters);

	if (fFramesWritten > 0)
		return B_NOT_SUPPORTED;

	fEncodeParameters.quality = parameters->quality;
	TRACE(" quality: %.5f\n", parameters->quality);
	if (fEncodeParameters.quality == 0.0f) {
		TRACE(" using default quality (1.0)\n");
		fEncodeParameters.quality = 1.0f;
	}

	// TODO: Auto-bit_rate versus user supplied. See above.
//	int avgBytesPerSecond = 0;
//	int maxBytesPerSecond = 0;
//
//	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
//		avgBytesPerSecond = (int)(parameters->avg_field_size
//			* fInputFormat.u.raw_audio.frame_rate);
//		maxBytesPerSecond = (int)(parameters->max_field_size
//			* fInputFormat.u.raw_audio.frame_rate);
//	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
//		avgBytesPerSecond = (int)(parameters->avg_field_size
//			* fInputFormat.u.raw_video.field_rate);
//		maxBytesPerSecond = (int)(parameters->max_field_size
//			* fInputFormat.u.raw_video.field_rate);
//	}
//
//	if (maxBytesPerSecond < avgBytesPerSecond)
//		maxBytesPerSecond = avgBytesPerSecond;
//
//	// Reset these, so we can tell the difference between uninitialized
//	// and initialized...
//	if (avgBytesPerSecond > 0) {
//		fContext->bit_rate = avgBytesPerSecond * 8;
//		fContext->bit_rate_tolerance = (maxBytesPerSecond
//			- avgBytesPerSecond) * 8;
//		fBitRateControlledByUser = true;
//	}

	return _Setup();
}


status_t
AVCodecEncoder::Encode(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE("AVCodecEncoder::Encode(%p, %lld, %p)\n", buffer, frameCount, info);

	if (!_OpenCodecIfNeeded())
		return B_NO_INIT;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO)
		return _EncodeAudio(buffer, frameCount, info);
	else if (fInputFormat.type == B_MEDIA_RAW_VIDEO)
		return _EncodeVideo(buffer, frameCount, info);
	else
		return B_NO_INIT;
}


// #pragma mark -


status_t
AVCodecEncoder::_Setup()
{
	TRACE("AVCodecEncoder::_Setup\n");

	int rawBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
		TRACE(" B_MEDIA_RAW_VIDEO\n");
		// frame rate
		fContext->time_base.den = (int)fInputFormat.u.raw_video.field_rate;
		fContext->time_base.num = 1;
		// video size
		fContext->width = fInputFormat.u.raw_video.display.line_width;
		fContext->height = fInputFormat.u.raw_video.display.line_count;
		fContext->gop_size = 12;
		// TODO: Fix pixel format or setup conversion method...
		for (int i = 0; fCodec->pix_fmts[i] != PIX_FMT_NONE; i++) {
			// Use the last supported pixel format, which we hope is the
			// one with the best quality.
			fContext->pix_fmt = fCodec->pix_fmts[i];
		}

		// TODO: Setup rate control:
//		fContext->rate_emu = 0;
//		fContext->rc_eq = NULL;
//		fContext->rc_max_rate = 0;
//		fContext->rc_min_rate = 0;
		// TODO: Try to calculate a good bit rate...
		rawBitRate = (int)(fContext->width * fContext->height * 2
			* fInputFormat.u.raw_video.field_rate) * 8;

		// Pixel aspect ratio
		fContext->sample_aspect_ratio.num
			= fInputFormat.u.raw_video.pixel_width_aspect;
		fContext->sample_aspect_ratio.den
			= fInputFormat.u.raw_video.pixel_height_aspect;
		if (fContext->sample_aspect_ratio.num == 0
			|| fContext->sample_aspect_ratio.den == 0) {
			av_reduce(&fContext->sample_aspect_ratio.num,
				&fContext->sample_aspect_ratio.den, fContext->width,
				fContext->height, 255);
		}

		// TODO: This should already happen in AcceptFormat()
		if (fInputFormat.u.raw_video.display.bytes_per_row == 0) {
			fInputFormat.u.raw_video.display.bytes_per_row
				= fContext->width * 4;
		}

		fFrame->pts = 0;

		// Allocate space for colorspace converted AVPicture
		// TODO: Check allocations...
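		// (The TODO above presumably means checking the return values:
		// avpicture_alloc() returns a negative value on failure, and
		// sws_getContext() further below returns NULL. Neither result is
		// checked here, so an allocation failure would only show up later,
		// when Encode() runs.)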
		avpicture_alloc(&fDstFrame, fContext->pix_fmt, fContext->width,
			fContext->height);

		// Make the frame point to the data in the converted AVPicture
		fFrame->data[0] = fDstFrame.data[0];
		fFrame->data[1] = fDstFrame.data[1];
		fFrame->data[2] = fDstFrame.data[2];
		fFrame->data[3] = fDstFrame.data[3];

		fFrame->linesize[0] = fDstFrame.linesize[0];
		fFrame->linesize[1] = fDstFrame.linesize[1];
		fFrame->linesize[2] = fDstFrame.linesize[2];
		fFrame->linesize[3] = fDstFrame.linesize[3];

		fSwsContext = sws_getContext(fContext->width, fContext->height,
			colorspace_to_pixfmt(fInputFormat.u.raw_video.display.format),
			fContext->width, fContext->height,
			fContext->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);

	} else if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		TRACE(" B_MEDIA_RAW_AUDIO\n");
		// frame rate
		fContext->sample_rate = (int)fInputFormat.u.raw_audio.frame_rate;
		// channels
		fContext->channels = fInputFormat.u.raw_audio.channel_count;
		// raw bitrate
		rawBitRate = fContext->sample_rate * fContext->channels
			* (fInputFormat.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK) * 8;
		// sample format
		switch (fInputFormat.u.raw_audio.format) {
			case media_raw_audio_format::B_AUDIO_FLOAT:
				fContext->sample_fmt = AV_SAMPLE_FMT_FLT;
				break;
			case media_raw_audio_format::B_AUDIO_DOUBLE:
				fContext->sample_fmt = AV_SAMPLE_FMT_DBL;
				break;
			case media_raw_audio_format::B_AUDIO_INT:
				fContext->sample_fmt = AV_SAMPLE_FMT_S32;
				break;
			case media_raw_audio_format::B_AUDIO_SHORT:
				fContext->sample_fmt = AV_SAMPLE_FMT_S16;
				break;
			case media_raw_audio_format::B_AUDIO_UCHAR:
				fContext->sample_fmt = AV_SAMPLE_FMT_U8;
				break;

			case media_raw_audio_format::B_AUDIO_CHAR:
			default:
				return B_MEDIA_BAD_FORMAT;
				break;
		}
		if (fInputFormat.u.raw_audio.channel_mask == 0) {
			// guess the channel mask...
			switch (fInputFormat.u.raw_audio.channel_count) {
				default:
				case 2:
					fContext->channel_layout = AV_CH_LAYOUT_STEREO;
					break;
				case 1:
					fContext->channel_layout = AV_CH_LAYOUT_MONO;
					break;
				case 3:
					fContext->channel_layout = AV_CH_LAYOUT_SURROUND;
					break;
				case 4:
					fContext->channel_layout = AV_CH_LAYOUT_QUAD;
					break;
				case 5:
					fContext->channel_layout = AV_CH_LAYOUT_5POINT0;
					break;
				case 6:
					fContext->channel_layout = AV_CH_LAYOUT_5POINT1;
					break;
				case 8:
					fContext->channel_layout = AV_CH_LAYOUT_7POINT1;
					break;
				case 10:
					fContext->channel_layout = AV_CH_LAYOUT_7POINT1_WIDE;
					break;
			}
		} else {
			// The bits match 1:1 for media_multi_channels and FFmpeg defines.
			fContext->channel_layout = fInputFormat.u.raw_audio.channel_mask;
		}
	} else {
		TRACE(" UNSUPPORTED MEDIA TYPE!\n");
		return B_NOT_SUPPORTED;
	}

	// TODO: Support letting the user overwrite this via
	// SetEncodeParameters(). See comments there...
	int wantedBitRate = (int)(rawBitRate / fBitRateScale
		* fEncodeParameters.quality);
	if (wantedBitRate == 0)
		wantedBitRate = (int)(rawBitRate / fBitRateScale);

	fContext->bit_rate = wantedBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		// Some audio encoders support certain bitrates only. Use the
		// closest match to the wantedBitRate.
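		// (The table below is sorted in ascending order; the loop keeps the
		// entry with the smallest distance to wantedBitRate and stops as
		// soon as the distance starts growing again. The initial tolerance
		// is wantedBitRate itself, so a very low wanted rate may keep the
		// computed value instead of snapping to the first table entry.)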
		const int kBitRates[] = {
			32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000,
			160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000,
			576000, 640000
		};
		int diff = wantedBitRate;
		for (unsigned int i = 0; i < sizeof(kBitRates) / sizeof(int); i++) {
			int currentDiff = abs(wantedBitRate - kBitRates[i]);
			if (currentDiff < diff) {
				fContext->bit_rate = kBitRates[i];
				diff = currentDiff;
			} else
				break;
		}
	}

	TRACE(" rawBitRate: %d, wantedBitRate: %d (%.1f), "
		"context bitrate: %d\n", rawBitRate, wantedBitRate,
		fEncodeParameters.quality, fContext->bit_rate);

	// Add some known fixes from the FFmpeg API example:
	if (fContext->codec_id == CODEC_ID_MPEG2VIDEO) {
		// Just for testing, we also add B frames.
		fContext->max_b_frames = 2;
	} else if (fContext->codec_id == CODEC_ID_MPEG1VIDEO) {
		// Needed to avoid using macroblocks in which some coeffs overflow.
		// This does not happen with normal video, it just happens here as
		// the motion of the chroma plane does not match the luma plane.
		fContext->mb_decision = 2;
	}

	// Unfortunately, we may fail later, when we try to open the codec
	// for real... but we need to delay this because we still allow
	// parameter/quality changes.
	return B_OK;
}


bool
AVCodecEncoder::_OpenCodecIfNeeded()
{
	if (fContext != fOwnContext) {
		// We are using the AVCodecContext of the AVFormatWriter plugin,
		// and don't maintain its open/close state.
		return true;
	}

	if (fCodecInitStatus == CODEC_INIT_DONE)
		return true;

	if (fCodecInitStatus == CODEC_INIT_FAILED)
		return false;

	fContext->strict_std_compliance = -2;

	// Open the codec
	int result = avcodec_open(fContext, fCodec);
	if (result >= 0)
		fCodecInitStatus = CODEC_INIT_DONE;
	else
		fCodecInitStatus = CODEC_INIT_FAILED;

	TRACE(" avcodec_open(%p, %p): %d\n", fContext, fCodec, result);

	return fCodecInitStatus == CODEC_INIT_DONE;
}


void
AVCodecEncoder::_CloseCodecIfNeeded()
{
	if (fContext != fOwnContext) {
		// See _OpenCodecIfNeeded().
		return;
	}

	if (fCodecInitStatus == CODEC_INIT_DONE) {
		avcodec_close(fContext);
		fCodecInitStatus = CODEC_INIT_NEEDED;
	}
}


static const int64 kNoPTSValue = 0x8000000000000000LL;
	// NOTE: For some reason, I have trouble with the avcodec.h define:
	// #define AV_NOPTS_VALUE INT64_C(0x8000000000000000)
	// INT64_C is not defined here.

status_t
AVCodecEncoder::_EncodeAudio(const void* _buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE("AVCodecEncoder::_EncodeAudio(%p, %lld, %p)\n", _buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	const uint8* buffer = reinterpret_cast<const uint8*>(_buffer);

	size_t inputSampleSize = fInputFormat.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t inputFrameSize = inputSampleSize
		* fInputFormat.u.raw_audio.channel_count;

	size_t bufferSize = frameCount * inputFrameSize;
	bufferSize = min_c(bufferSize, kDefaultChunkBufferSize);

	if (fContext->frame_size > 1) {
		// Encoded audio. Things work differently from raw audio. We need
		// the fAudioFifo to pipe data.
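		// (Encoders with a fixed frame_size, e.g. MP3 or AAC, consume exactly
		// frame_size samples per avcodec_encode_audio() call, while Encode()
		// may be handed buffers of arbitrary length. The incoming data is
		// therefore appended to the FIFO and drained below in chunks of
		// frame_size frames; any remainder stays in the FIFO until the next
		// Encode() call.)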
		if (av_fifo_realloc2(fAudioFifo,
				av_fifo_size(fAudioFifo) + bufferSize) < 0) {
			TRACE(" av_fifo_realloc2() failed\n");
			return B_NO_MEMORY;
		}
		av_fifo_generic_write(fAudioFifo, const_cast<uint8*>(buffer),
			bufferSize, NULL);

		int frameBytes = fContext->frame_size * inputFrameSize;
		uint8* tempBuffer = new(std::nothrow) uint8[frameBytes];
		if (tempBuffer == NULL)
			return B_NO_MEMORY;

		// Encode as many chunks as can be read from the FIFO.
		while (av_fifo_size(fAudioFifo) >= frameBytes) {
			av_fifo_generic_read(fAudioFifo, tempBuffer, frameBytes, NULL);

			ret = _EncodeAudio(tempBuffer, frameBytes, fContext->frame_size,
				info);
			if (ret != B_OK)
				break;
		}

		delete[] tempBuffer;
	} else {
		// Raw audio. The number of bytes returned from avcodec_encode_audio()
		// is always the same as the number of input bytes.
		return _EncodeAudio(buffer, bufferSize, frameCount,
			info);
	}

	return ret;
}


status_t
AVCodecEncoder::_EncodeAudio(const uint8* buffer, size_t bufferSize,
	int64 frameCount, media_encode_info* info)
{
	// Encode one audio chunk/frame. The bufferSize has already been adapted
	// to the needed size for fContext->frame_size, or we are writing raw
	// audio.
	int usedBytes = avcodec_encode_audio(fContext, fChunkBuffer,
		bufferSize, reinterpret_cast<const short*>(buffer));

	if (usedBytes < 0) {
		TRACE(" avcodec_encode_audio() failed: %d\n", usedBytes);
		return B_ERROR;
	}
	if (usedBytes == 0)
		return B_OK;

//	// Maybe we need to use this PTS to calculate start_time:
//	if (fContext->coded_frame->pts != kNoPTSValue) {
//		TRACE(" codec frame PTS: %lld (codec time_base: %d/%d)\n",
//			fContext->coded_frame->pts, fContext->time_base.num,
//			fContext->time_base.den);
//	} else {
//		TRACE(" codec frame PTS: N/A (codec time_base: %d/%d)\n",
//			fContext->time_base.num, fContext->time_base.den);
//	}

	// Setup media_encode_info, most important is the time stamp.
	info->start_time = (bigtime_t)(fFramesWritten * 1000000LL
		/ fInputFormat.u.raw_audio.frame_rate);
	info->flags = B_MEDIA_KEY_FRAME;

	// Write the chunk
	status_t ret = WriteChunk(fChunkBuffer, usedBytes, info);
	if (ret != B_OK) {
		TRACE(" error writing chunk: %s\n", strerror(ret));
		return ret;
	}

	fFramesWritten += frameCount;

	return B_OK;
}


status_t
AVCodecEncoder::_EncodeVideo(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE_IO("AVCodecEncoder::_EncodeVideo(%p, %lld, %p)\n", buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	while (frameCount > 0) {
		size_t bpr = fInputFormat.u.raw_video.display.bytes_per_row;
		size_t bufferSize = fInputFormat.u.raw_video.display.line_count * bpr;

		// We should always get chunky bitmaps, so this code should be safe.
		fSrcFrame.data[0] = (uint8_t*)buffer;
		fSrcFrame.linesize[0] = bpr;

		// Run the pixel format conversion
		sws_scale(fSwsContext, fSrcFrame.data, fSrcFrame.linesize, 0,
			fInputFormat.u.raw_video.display.line_count, fDstFrame.data,
			fDstFrame.linesize);

		// Encode one video chunk/frame.
		int usedBytes = avcodec_encode_video(fContext, fChunkBuffer,
			kDefaultChunkBufferSize, fFrame);

		// avcodec.h says we need to set it.
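		// (The pts is counted in fContext->time_base units, which _Setup()
		// configured as 1 / field_rate, so incrementing it once per encoded
		// frame should advance the timestamp by exactly one video frame.)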
		fFrame->pts++;

		if (usedBytes < 0) {
			TRACE(" avcodec_encode_video() failed: %d\n", usedBytes);
			return B_ERROR;
		}

		// Maybe we need to use this PTS to calculate start_time:
		if (fContext->coded_frame->pts != kNoPTSValue) {
			TRACE(" codec frame PTS: %lld (codec time_base: %d/%d)\n",
				fContext->coded_frame->pts, fContext->time_base.num,
				fContext->time_base.den);
		} else {
			TRACE(" codec frame PTS: N/A (codec time_base: %d/%d)\n",
				fContext->time_base.num, fContext->time_base.den);
		}

		// Setup media_encode_info, most important is the time stamp.
		info->start_time = (bigtime_t)(fFramesWritten * 1000000LL
			/ fInputFormat.u.raw_video.field_rate);

		info->flags = 0;
		if (fContext->coded_frame->key_frame)
			info->flags |= B_MEDIA_KEY_FRAME;

		// Write the chunk
		ret = WriteChunk(fChunkBuffer, usedBytes, info);
		if (ret != B_OK) {
			TRACE(" error writing chunk: %s\n", strerror(ret));
			break;
		}

		// Skip to the next frame (but usually, there is only one to encode
		// for video).
		frameCount--;
		fFramesWritten++;
		buffer = (const void*)((const uint8*)buffer + bufferSize);
	}

	return ret;
}