xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecEncoder.cpp (revision 385ee03ba83b7a40d315e17b03031b3ca37820c0)
1 /*
2  * Copyright 2009-2010, Stephan Amßus <superstippi@gmx.de>
3  * All rights reserved. Distributed under the terms of the MIT license.
4  */
5 
6 
7 #include "AVCodecEncoder.h"
8 
9 #include <new>
10 
11 #include <stdio.h>
12 #include <string.h>
13 
14 #include <Application.h>
15 #include <Roster.h>
16 
17 extern "C" {
18 	#include "rational.h"
19 }
20 
21 #include "EncoderTable.h"
22 #include "gfx_util.h"
23 
24 
25 #undef TRACE
26 //#define TRACE_AV_CODEC_ENCODER
27 #ifdef TRACE_AV_CODEC_ENCODER
28 #	define TRACE	printf
29 #	define TRACE_IO(a...)
30 #else
31 #	define TRACE(a...)
32 #	define TRACE_IO(a...)
33 #endif
34 
35 
36 static const size_t kDefaultChunkBufferSize = 2 * 1024 * 1024;
37 
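// Compatibility shims: when building against an older libavcodec, map the
// newer identifiers used below onto their legacy FFmpeg names.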
#if LIBAVCODEC_VERSION_INT < ((54 << 16) | (50 << 8))
#define AV_PIX_FMT_NONE PIX_FMT_NONE
#define AV_CODEC_ID_NONE CODEC_ID_NONE
#define AV_CODEC_ID_MPEG1VIDEO CODEC_ID_MPEG1VIDEO
#define AV_CODEC_ID_MPEG2VIDEO CODEC_ID_MPEG2VIDEO
#endif
#if LIBAVCODEC_VERSION_INT < ((55 << 16) | (45 << 8))
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_unref avcodec_get_frame_defaults
#define av_frame_free avcodec_free_frame
#endif


AVCodecEncoder::AVCodecEncoder(uint32 codecID, int bitRateScale)
	:
	Encoder(),
	fBitRateScale(bitRateScale),
	fCodecID((CodecID)codecID),
	fCodec(NULL),
	fOwnContext(avcodec_alloc_context3(NULL)),
	fContext(fOwnContext),
	fCodecInitStatus(CODEC_INIT_NEEDED),
	fFrame(av_frame_alloc()),
	fSwsContext(NULL),
	fFramesWritten(0)
{
	TRACE("AVCodecEncoder::AVCodecEncoder()\n");
	_Init();
}


void
AVCodecEncoder::_Init()
{
	fChunkBuffer = new(std::nothrow) uint8[kDefaultChunkBufferSize];
	if (fCodecID > 0) {
		fCodec = avcodec_find_encoder(fCodecID);
		TRACE("  found AVCodec for %u: %p\n", fCodecID, fCodec);
	}

	memset(&fInputFormat, 0, sizeof(media_format));

	fAudioFifo = av_fifo_alloc(0);

	fDstFrame.data[0] = NULL;
	fDstFrame.data[1] = NULL;
	fDstFrame.data[2] = NULL;
	fDstFrame.data[3] = NULL;

	fDstFrame.linesize[0] = 0;
	fDstFrame.linesize[1] = 0;
	fDstFrame.linesize[2] = 0;
	fDstFrame.linesize[3] = 0;

	// Initial parameters, so we know if the user changed them
	fEncodeParameters.avg_field_size = 0;
	fEncodeParameters.max_field_size = 0;
	fEncodeParameters.quality = 1.0f;
}


AVCodecEncoder::~AVCodecEncoder()
{
	TRACE("AVCodecEncoder::~AVCodecEncoder()\n");

	_CloseCodecIfNeeded();

	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);

	av_fifo_free(fAudioFifo);

	avpicture_free(&fDstFrame);
	// NOTE: Do not use avpicture_free() on fSrcFrame!! We fill the picture
	// data on the fly with the media buffer data passed to Encode().

	if (fFrame != NULL) {
		fFrame->data[0] = NULL;
		fFrame->data[1] = NULL;
		fFrame->data[2] = NULL;
		fFrame->data[3] = NULL;

		fFrame->linesize[0] = 0;
		fFrame->linesize[1] = 0;
		fFrame->linesize[2] = 0;
		fFrame->linesize[3] = 0;
		av_free(fFrame);
	}

	av_free(fOwnContext);

	delete[] fChunkBuffer;
}


status_t
AVCodecEncoder::AcceptedFormat(const media_format* proposedInputFormat,
	media_format* _acceptedInputFormat)
{
	TRACE("AVCodecEncoder::AcceptedFormat(%p, %p)\n", proposedInputFormat,
		_acceptedInputFormat);

	if (proposedInputFormat == NULL)
		return B_BAD_VALUE;

	if (_acceptedInputFormat != NULL) {
		memcpy(_acceptedInputFormat, proposedInputFormat,
			sizeof(media_format));
	}

	return B_OK;
}


status_t
AVCodecEncoder::SetUp(const media_format* inputFormat)
{
	TRACE("AVCodecEncoder::SetUp()\n");

	if (fContext == NULL)
		return B_NO_INIT;

	if (inputFormat == NULL)
		return B_BAD_VALUE;

	// Codec IDs for raw-formats may need to be figured out here.
	if (fCodec == NULL && fCodecID == AV_CODEC_ID_NONE) {
		fCodecID = raw_audio_codec_id_for(*inputFormat);
		if (fCodecID != AV_CODEC_ID_NONE)
			fCodec = avcodec_find_encoder(fCodecID);
	}
	if (fCodec == NULL) {
		TRACE("  encoder not found!\n");
		return B_NO_INIT;
	}

	_CloseCodecIfNeeded();

	fInputFormat = *inputFormat;
	fFramesWritten = 0;

	const uchar* userData = inputFormat->user_data;
	if (*(uint32*)userData == 'ffmp') {
		userData += sizeof(uint32);
		// The Writer plugin in use is the FFmpeg plugin. It stores its
		// AVCodecContext pointer in the user data section. Use that
		// context instead of our own. Of course, this requires the Writer
		// to live in the same team.
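		// Expected user_data layout, as written by that plugin:
		// the uint32 magic 'ffmp', followed by the writer's team_id,
		// followed by the AVCodecContext pointer itself.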
		app_info appInfo;
		if (be_app->GetAppInfo(&appInfo) == B_OK
			&& *(team_id*)userData == appInfo.team) {
			userData += sizeof(team_id);
			// Use the AVCodecContext from the Writer. This works better
			// than using our own context with some encoders.
			fContext = *(AVCodecContext**)userData;
		}
	}

	return _Setup();
}


status_t
AVCodecEncoder::GetEncodeParameters(encode_parameters* parameters) const
{
	TRACE("AVCodecEncoder::GetEncodeParameters(%p)\n", parameters);

// TODO: Implement maintaining an automatically calculated bit_rate versus
// a user specified (via SetEncodeParameters()) bit_rate. At this point, the
// fContext->bit_rate may not yet have been specified (_Setup() was never
// called yet). So it cannot work like the code below, but in any case, it's
// showing how to convert between the values (albeit untested).
//	int avgBytesPerSecond = fContext->bit_rate / 8;
//	int maxBytesPerSecond = (fContext->bit_rate
//		+ fContext->bit_rate_tolerance) / 8;
//
//	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
//		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
//			/ fInputFormat.u.raw_audio.frame_rate);
//		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
//			/ fInputFormat.u.raw_audio.frame_rate);
//	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
//		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
//			/ fInputFormat.u.raw_video.field_rate);
//		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
//			/ fInputFormat.u.raw_video.field_rate);
//	}

	parameters->quality = fEncodeParameters.quality;

	return B_OK;
}


status_t
AVCodecEncoder::SetEncodeParameters(encode_parameters* parameters)
{
	TRACE("AVCodecEncoder::SetEncodeParameters(%p)\n", parameters);

	if (fFramesWritten > 0)
		return B_NOT_SUPPORTED;

	fEncodeParameters.quality = parameters->quality;
	TRACE("  quality: %.5f\n", parameters->quality);
	if (fEncodeParameters.quality == 0.0f) {
		TRACE("  using default quality (1.0)\n");
		fEncodeParameters.quality = 1.0f;
	}

// TODO: Auto-bit_rate versus user supplied. See above.
//	int avgBytesPerSecond = 0;
//	int maxBytesPerSecond = 0;
//
//	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
//		avgBytesPerSecond = (int)(parameters->avg_field_size
//			* fInputFormat.u.raw_audio.frame_rate);
//		maxBytesPerSecond = (int)(parameters->max_field_size
//			* fInputFormat.u.raw_audio.frame_rate);
//	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
//		avgBytesPerSecond = (int)(parameters->avg_field_size
//			* fInputFormat.u.raw_video.field_rate);
//		maxBytesPerSecond = (int)(parameters->max_field_size
//			* fInputFormat.u.raw_video.field_rate);
//	}
//
//	if (maxBytesPerSecond < avgBytesPerSecond)
//		maxBytesPerSecond = avgBytesPerSecond;
//
//	// Reset these, so we can tell the difference between uninitialized
//	// and initialized...
//	if (avgBytesPerSecond > 0) {
//		fContext->bit_rate = avgBytesPerSecond * 8;
//		fContext->bit_rate_tolerance = (maxBytesPerSecond
//			- avgBytesPerSecond) * 8;
//		fBitRateControlledByUser = true;
//	}

	return _Setup();
}


status_t
AVCodecEncoder::Encode(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE("AVCodecEncoder::Encode(%p, %lld, %p)\n", buffer, frameCount, info);

	if (!_OpenCodecIfNeeded())
		return B_NO_INIT;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO)
		return _EncodeAudio(buffer, frameCount, info);
	else if (fInputFormat.type == B_MEDIA_RAW_VIDEO)
		return _EncodeVideo(buffer, frameCount, info);
	else
		return B_NO_INIT;
}


// #pragma mark -


status_t
AVCodecEncoder::_Setup()
{
	TRACE("AVCodecEncoder::_Setup\n");

	int rawBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
		TRACE("  B_MEDIA_RAW_VIDEO\n");
		// frame rate
		fContext->time_base.den = (int)fInputFormat.u.raw_video.field_rate;
		fContext->time_base.num = 1;
		// video size
		fContext->width = fInputFormat.u.raw_video.display.line_width;
		fContext->height = fInputFormat.u.raw_video.display.line_count;
		fContext->gop_size = 12;

		// TODO: Fix the pixel format or set up a conversion method...
		if (fCodec->pix_fmts != NULL) {
			for (int i = 0; fCodec->pix_fmts[i] != AV_PIX_FMT_NONE; i++) {
				// Use the last supported pixel format, which we hope is the
				// one with the best quality.
				fContext->pix_fmt = fCodec->pix_fmts[i];
			}
		}

		// TODO: Set up rate control:
//		fContext->rate_emu = 0;
//		fContext->rc_eq = NULL;
//		fContext->rc_max_rate = 0;
//		fContext->rc_min_rate = 0;
		// TODO: Try to calculate a good bit rate...
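		// (width * height * 2) approximates the byte size of one
		// uncompressed frame (assuming roughly 2 bytes per pixel);
		// multiplied by the field rate and by 8 this yields a raw bit rate
		// in bits per second.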
		rawBitRate = (int)(fContext->width * fContext->height * 2
			* fInputFormat.u.raw_video.field_rate) * 8;

		// Pixel aspect ratio
		fContext->sample_aspect_ratio.num
			= fInputFormat.u.raw_video.pixel_width_aspect;
		fContext->sample_aspect_ratio.den
			= fInputFormat.u.raw_video.pixel_height_aspect;
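		// If the input format carries no pixel aspect ratio, derive one
		// from the frame dimensions instead.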
		if (fContext->sample_aspect_ratio.num == 0
			|| fContext->sample_aspect_ratio.den == 0) {
			av_reduce(&fContext->sample_aspect_ratio.num,
				&fContext->sample_aspect_ratio.den, fContext->width,
				fContext->height, 255);
		}

		// TODO: This should already happen in AcceptedFormat()
		if (fInputFormat.u.raw_video.display.bytes_per_row == 0) {
			fInputFormat.u.raw_video.display.bytes_per_row
				= fContext->width * 4;
		}

		fFrame->pts = 0;

		// Allocate space for colorspace converted AVPicture
		// TODO: Check allocations...
		avpicture_alloc(&fDstFrame, fContext->pix_fmt, fContext->width,
			fContext->height);

		// Make the frame point to the data in the converted AVPicture
		fFrame->data[0] = fDstFrame.data[0];
		fFrame->data[1] = fDstFrame.data[1];
		fFrame->data[2] = fDstFrame.data[2];
		fFrame->data[3] = fDstFrame.data[3];

		fFrame->linesize[0] = fDstFrame.linesize[0];
		fFrame->linesize[1] = fDstFrame.linesize[1];
		fFrame->linesize[2] = fDstFrame.linesize[2];
		fFrame->linesize[3] = fDstFrame.linesize[3];

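		// Create the swscale context that converts the incoming raw video
		// from its Media Kit colorspace into the codec's pixel format.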
		fSwsContext = sws_getContext(fContext->width, fContext->height,
			colorspace_to_pixfmt(fInputFormat.u.raw_video.display.format),
			fContext->width, fContext->height,
			fContext->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);

	} else if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		TRACE("  B_MEDIA_RAW_AUDIO\n");
		// frame rate
		fContext->sample_rate = (int)fInputFormat.u.raw_audio.frame_rate;
		// channels
		fContext->channels = fInputFormat.u.raw_audio.channel_count;
		// raw bitrate
		rawBitRate = fContext->sample_rate * fContext->channels
			* (fInputFormat.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK) * 8;
		// sample format
		switch (fInputFormat.u.raw_audio.format) {
			case media_raw_audio_format::B_AUDIO_FLOAT:
				fContext->sample_fmt = AV_SAMPLE_FMT_FLT;
				break;
			case media_raw_audio_format::B_AUDIO_DOUBLE:
				fContext->sample_fmt = AV_SAMPLE_FMT_DBL;
				break;
			case media_raw_audio_format::B_AUDIO_INT:
				fContext->sample_fmt = AV_SAMPLE_FMT_S32;
				break;
			case media_raw_audio_format::B_AUDIO_SHORT:
				fContext->sample_fmt = AV_SAMPLE_FMT_S16;
				break;
			case media_raw_audio_format::B_AUDIO_UCHAR:
				fContext->sample_fmt = AV_SAMPLE_FMT_U8;
				break;

			case media_raw_audio_format::B_AUDIO_CHAR:
			default:
				return B_MEDIA_BAD_FORMAT;
				break;
		}
		if (fInputFormat.u.raw_audio.channel_mask == 0) {
			// guess the channel mask...
			switch (fInputFormat.u.raw_audio.channel_count) {
				default:
				case 2:
					fContext->channel_layout = AV_CH_LAYOUT_STEREO;
					break;
				case 1:
					fContext->channel_layout = AV_CH_LAYOUT_MONO;
					break;
				case 3:
					fContext->channel_layout = AV_CH_LAYOUT_SURROUND;
					break;
				case 4:
					fContext->channel_layout = AV_CH_LAYOUT_QUAD;
					break;
				case 5:
					fContext->channel_layout = AV_CH_LAYOUT_5POINT0;
					break;
				case 6:
					fContext->channel_layout = AV_CH_LAYOUT_5POINT1;
					break;
				case 8:
					fContext->channel_layout = AV_CH_LAYOUT_7POINT1;
					break;
				case 10:
					fContext->channel_layout = AV_CH_LAYOUT_7POINT1_WIDE;
					break;
			}
		} else {
			// The bits match 1:1 for media_multi_channels and FFmpeg defines.
			fContext->channel_layout = fInputFormat.u.raw_audio.channel_mask;
		}
	} else {
		TRACE("  UNSUPPORTED MEDIA TYPE!\n");
		return B_NOT_SUPPORTED;
	}

	// TODO: Support letting the user overwrite this via
	// SetEncodeParameters(). See comments there...
	int wantedBitRate = (int)(rawBitRate / fBitRateScale
		* fEncodeParameters.quality);
	if (wantedBitRate == 0)
		wantedBitRate = (int)(rawBitRate / fBitRateScale);

	fContext->bit_rate = wantedBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		// Some audio encoders support certain bitrates only. Use the
		// closest match to the wantedBitRate.
		const int kBitRates[] = {
			32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000,
			160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000,
			576000, 640000
		};
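		// kBitRates is sorted in ascending order, so once the distance to
		// wantedBitRate starts growing again we have passed the closest
		// entry and can stop searching.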
		int diff = wantedBitRate;
		for (unsigned int i = 0; i < sizeof(kBitRates) / sizeof(int); i++) {
			int currentDiff = abs(wantedBitRate - kBitRates[i]);
			if (currentDiff < diff) {
				fContext->bit_rate = kBitRates[i];
				diff = currentDiff;
			} else
				break;
		}
	}

	TRACE("  rawBitRate: %d, wantedBitRate: %d (%.1f), "
		"context bitrate: %d\n", rawBitRate, wantedBitRate,
		fEncodeParameters.quality, fContext->bit_rate);

	// Add some known fixes from the FFmpeg API example:
	if (fContext->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
		// Just for testing, we also add B-frames.
		fContext->max_b_frames = 2;
	} else if (fContext->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
		// Needed to avoid using macroblocks in which some coeffs overflow.
		// This does not happen with normal video, it just happens here as
		// the motion of the chroma plane does not match the luma plane.
		fContext->mb_decision = 2;
	}

	// Unfortunately, we may fail later, when we try to open the codec
	// for real... but we need to delay this because we still allow
	// parameter/quality changes.
	return B_OK;
}


bool
AVCodecEncoder::_OpenCodecIfNeeded()
{
	if (fContext != fOwnContext) {
		// We are using the AVCodecContext of the AVFormatWriter plugin,
		// and don't maintain its open/close state.
		return true;
	}

	if (fCodecInitStatus == CODEC_INIT_DONE)
		return true;

	if (fCodecInitStatus == CODEC_INIT_FAILED)
		return false;

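	// -2 is FF_COMPLIANCE_EXPERIMENTAL; it lets us open encoders that
	// FFmpeg still flags as experimental.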
	fContext->strict_std_compliance = -2;

	// Open the codec
	int result = avcodec_open2(fContext, fCodec, NULL);
	if (result >= 0)
		fCodecInitStatus = CODEC_INIT_DONE;
	else
		fCodecInitStatus = CODEC_INIT_FAILED;

	TRACE("  avcodec_open(%p, %p): %d\n", fContext, fCodec, result);

	return fCodecInitStatus == CODEC_INIT_DONE;
}


void
AVCodecEncoder::_CloseCodecIfNeeded()
{
	if (fContext != fOwnContext) {
		// See _OpenCodecIfNeeded().
		return;
	}

	if (fCodecInitStatus == CODEC_INIT_DONE) {
		avcodec_close(fContext);
		fCodecInitStatus = CODEC_INIT_NEEDED;
	}
}


static const int64 kNoPTSValue = 0x8000000000000000LL;
	// NOTE: For some reason, I have trouble with the avcodec.h define:
	// #define AV_NOPTS_VALUE          INT64_C(0x8000000000000000)
	// INT64_C is not defined here.

status_t
AVCodecEncoder::_EncodeAudio(const void* _buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE("AVCodecEncoder::_EncodeAudio(%p, %lld, %p)\n", _buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	const uint8* buffer = reinterpret_cast<const uint8*>(_buffer);

	size_t inputSampleSize = fInputFormat.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t inputFrameSize = inputSampleSize
		* fInputFormat.u.raw_audio.channel_count;

	size_t bufferSize = frameCount * inputFrameSize;
	bufferSize = min_c(bufferSize, kDefaultChunkBufferSize);

	if (fContext->frame_size > 1) {
		// Encoded audio. Things work differently from raw audio. We need
		// the fAudioFifo to pipe data.
		if (av_fifo_realloc2(fAudioFifo,
				av_fifo_size(fAudioFifo) + bufferSize) < 0) {
			TRACE("  av_fifo_realloc2() failed\n");
			return B_NO_MEMORY;
		}
		av_fifo_generic_write(fAudioFifo, const_cast<uint8*>(buffer),
			bufferSize, NULL);

		int frameBytes = fContext->frame_size * inputFrameSize;
		uint8* tempBuffer = new(std::nothrow) uint8[frameBytes];
		if (tempBuffer == NULL)
			return B_NO_MEMORY;

		// Encode as many chunks as can be read from the FIFO.
		while (av_fifo_size(fAudioFifo) >= frameBytes) {
			av_fifo_generic_read(fAudioFifo, tempBuffer, frameBytes, NULL);

			ret = _EncodeAudio(tempBuffer, frameBytes, fContext->frame_size,
				info);
			if (ret != B_OK)
				break;
		}

		delete[] tempBuffer;
	} else {
		// Raw audio. The number of bytes returned from avcodec_encode_audio()
		// is always the same as the number of input bytes.
		return _EncodeAudio(buffer, bufferSize, frameCount,
			info);
	}

	return ret;
}


status_t
AVCodecEncoder::_EncodeAudio(const uint8* buffer, size_t bufferSize,
	int64 frameCount, media_encode_info* info)
{
	status_t ret;

	// Encode one audio chunk/frame.
	AVPacket packet;
	av_init_packet(&packet);
	// By leaving these NULL, we let the encoder allocate memory as it needs.
	// This way we don't risk giving it a buffer that is too small.
	packet.data = NULL;
	packet.size = 0;

	// We need to wrap our input data into an AVFrame structure.
	AVFrame frame;
	int gotPacket = 0;

	if (buffer) {
		av_frame_unref(&frame);

		frame.nb_samples = frameCount;

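		// avcodec_fill_audio_frame() only points the frame's data pointers
		// at 'buffer'; the samples themselves are not copied.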
		ret = avcodec_fill_audio_frame(&frame, fContext->channels,
				fContext->sample_fmt, (const uint8_t *) buffer, bufferSize, 1);

		if (ret != 0)
			return B_ERROR;

		/* Set the presentation time of the frame */
		frame.pts = (bigtime_t)(fFramesWritten * 1000000LL
			/ fInputFormat.u.raw_audio.frame_rate);
		fFramesWritten += frame.nb_samples;

		ret = avcodec_encode_audio2(fContext, &packet, &frame, &gotPacket);
	} else {
		// If called with NULL, ask the encoder to flush any buffers it may
		// have pending.
		ret = avcodec_encode_audio2(fContext, &packet, NULL, &gotPacket);
	}

	if (buffer && frame.extended_data != frame.data)
		av_freep(&frame.extended_data);

	if (ret != 0) {
		TRACE("  avcodec_encode_audio() failed: %ld\n", ret);
		return B_ERROR;
	}

	if (gotPacket) {
		if (fContext->coded_frame) {
			// Store information about the coded frame in the context.
			fContext->coded_frame->pts = packet.pts;
			fContext->coded_frame->key_frame = !!(packet.flags & AV_PKT_FLAG_KEY);
		}

		// Set up media_encode_info; most important is the time stamp.
		info->start_time = packet.pts;

		if (packet.flags & AV_PKT_FLAG_KEY)
			info->flags = B_MEDIA_KEY_FRAME;
		else
			info->flags = 0;

		// We got a packet out of the encoder; write it to the output stream.
		ret = WriteChunk(packet.data, packet.size, info);
		if (ret != B_OK) {
			TRACE("  error writing chunk: %s\n", strerror(ret));
			av_free_packet(&packet);
			return ret;
		}
	}

	av_free_packet(&packet);
	return B_OK;
}


status_t
AVCodecEncoder::_EncodeVideo(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE_IO("AVCodecEncoder::_EncodeVideo(%p, %lld, %p)\n", buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	while (frameCount > 0) {
		size_t bpr = fInputFormat.u.raw_video.display.bytes_per_row;
		size_t bufferSize = fInputFormat.u.raw_video.display.line_count * bpr;

		// We should always get chunky bitmaps, so this code should be safe.
		fSrcFrame.data[0] = (uint8_t*)buffer;
		fSrcFrame.linesize[0] = bpr;

		// Run the pixel format conversion
		sws_scale(fSwsContext, fSrcFrame.data, fSrcFrame.linesize, 0,
			fInputFormat.u.raw_video.display.line_count, fDstFrame.data,
			fDstFrame.linesize);

		// Encode one video chunk/frame.
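		// With libavcodec older than 55.45 the old avcodec_encode_video()
		// writes into our own fChunkBuffer; newer versions use the packet
		// based avcodec_encode_video2(), where the encoder allocates the
		// output packet itself.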
#if LIBAVCODEC_VERSION_INT < ((55 << 16) | (45 << 8))
		int usedBytes = avcodec_encode_video(fContext, fChunkBuffer,
			kDefaultChunkBufferSize, fFrame);
#else
		int gotPacket;
		AVPacket pkt;
		pkt.data = NULL;
		pkt.size = 0;
		av_init_packet(&pkt);
		int usedBytes = avcodec_encode_video2(fContext, &pkt, fFrame, &gotPacket);
#endif
		// avcodec.h says we need to set the frame's PTS ourselves.
		fFrame->pts++;

		if (usedBytes < 0) {
			TRACE("  avcodec_encode_video() failed: %d\n", usedBytes);
			return B_ERROR;
		}

#if LIBAVCODEC_VERSION_INT < ((55 << 16) | (45 << 8))
		// Maybe we need to use this PTS to calculate start_time:
		if (fContext->coded_frame->pts != kNoPTSValue) {
			TRACE("  codec frame PTS: %lld (codec time_base: %d/%d)\n",
				fContext->coded_frame->pts, fContext->time_base.num,
				fContext->time_base.den);
		} else {
			TRACE("  codec frame PTS: N/A (codec time_base: %d/%d)\n",
				fContext->time_base.num, fContext->time_base.den);
		}
#else
		// Maybe we need to use this PTS to calculate start_time:
		if (pkt.pts != AV_NOPTS_VALUE) {
			TRACE("  codec frame PTS: %lld (codec time_base: %d/%d)\n",
				pkt.pts, fContext->time_base.num,
				fContext->time_base.den);
		} else {
			TRACE("  codec frame PTS: N/A (codec time_base: %d/%d)\n",
				fContext->time_base.num, fContext->time_base.den);
		}
#endif

		// Set up media_encode_info; most important is the time stamp.
		info->start_time = (bigtime_t)(fFramesWritten * 1000000LL
			/ fInputFormat.u.raw_video.field_rate);

		info->flags = 0;
		if (fContext->coded_frame->key_frame)
			info->flags |= B_MEDIA_KEY_FRAME;

		// Write the chunk
#if LIBAVCODEC_VERSION_INT < ((55 << 16) | (45 << 8))
		ret = WriteChunk(fChunkBuffer, usedBytes, info);
#else
		ret = WriteChunk(pkt.data, pkt.size, info);
#endif
		if (ret != B_OK) {
			TRACE("  error writing chunk: %s\n", strerror(ret));
			break;
		}

		// Skip to the next frame (but usually, there is only one to encode
		// for video).
		frameCount--;
		fFramesWritten++;
		buffer = (const void*)((const uint8*)buffer + bufferSize);
	}

	return ret;
}