xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecEncoder.cpp (revision a9b301871d06c0ebe42d22b31c685abed5107acd)
1 /*
2  * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
3  * Copyright 2018, Dario Casalinuovo
4  * All rights reserved. Distributed under the terms of the MIT license.
5  */
6 
7 
8 #include "AVCodecEncoder.h"
9 
10 #include <new>
11 
12 #include <stdio.h>
13 #include <string.h>
14 
15 #include <Application.h>
16 #include <Roster.h>
17 
18 extern "C" {
19 	#include "rational.h"
20 }
21 
22 #include "EncoderTable.h"
23 #include "gfx_util.h"
24 
25 
26 #undef TRACE
27 //#define TRACE_AV_CODEC_ENCODER
28 #ifdef TRACE_AV_CODEC_ENCODER
29 #	define TRACE	printf
30 #	define TRACE_IO(a...)
31 #else
32 #	define TRACE(a...)
33 #	define TRACE_IO(a...)
34 #endif
35 
36 
37 static const size_t kDefaultChunkBufferSize = 2 * 1024 * 1024;
38 
39 
40 AVCodecEncoder::AVCodecEncoder(uint32 codecID, int bitRateScale)
41 	:
42 	Encoder(),
43 	fBitRateScale(bitRateScale),
44 	fCodecID((CodecID)codecID),
45 	fCodec(NULL),
46 	fCodecContext(avcodec_alloc_context3(NULL)),
47 	fCodecInitStatus(CODEC_INIT_NEEDED),
48 	fFrame(av_frame_alloc()),
49 	fSwsContext(NULL),
50 	fFramesWritten(0)
51 {
52 	TRACE("AVCodecEncoder::AVCodecEncoder()\n");
53 	_Init();
54 }
55 
56 
57 void
58 AVCodecEncoder::_Init()
59 {
60 	fChunkBuffer = new(std::nothrow) uint8[kDefaultChunkBufferSize];
61 	if (fCodecID > 0) {
62 		fCodec = avcodec_find_encoder(fCodecID);
63 		TRACE("  found AVCodec for %u: %p\n", fCodecID, fCodec);
64 	}
65 
66 	memset(&fInputFormat, 0, sizeof(media_format));
67 
68 	fAudioFifo = av_fifo_alloc(0);
69 
70 	fDstFrame.data[0] = NULL;
71 	fDstFrame.data[1] = NULL;
72 	fDstFrame.data[2] = NULL;
73 	fDstFrame.data[3] = NULL;
74 
75 	fDstFrame.linesize[0] = 0;
76 	fDstFrame.linesize[1] = 0;
77 	fDstFrame.linesize[2] = 0;
78 	fDstFrame.linesize[3] = 0;
79 
80 	// Initial parameters, so we know if the user changed them
81 	fEncodeParameters.avg_field_size = 0;
82 	fEncodeParameters.max_field_size = 0;
83 	fEncodeParameters.quality = 1.0f;
84 }
85 
86 
87 AVCodecEncoder::~AVCodecEncoder()
88 {
89 	TRACE("AVCodecEncoder::~AVCodecEncoder()\n");
90 
91 	if (fSwsContext != NULL)
92 		sws_freeContext(fSwsContext);
93 
94 	av_fifo_free(fAudioFifo);
95 
96 	avpicture_free(&fDstFrame);
97 	// NOTE: Do not use avpicture_free() on fSrcFrame!! We fill the picture
98 	// data on the fly with the media buffer data passed to Encode().
99 
100 	if (fFrame != NULL) {
101 		fFrame->data[0] = NULL;
102 		fFrame->data[1] = NULL;
103 		fFrame->data[2] = NULL;
104 		fFrame->data[3] = NULL;
105 
106 		fFrame->linesize[0] = 0;
107 		fFrame->linesize[1] = 0;
108 		fFrame->linesize[2] = 0;
109 		fFrame->linesize[3] = 0;
110 		av_frame_free(&fFrame);
111 	}
112 
113 	avcodec_free_context(&fCodecContext);
114 
115 	delete[] fChunkBuffer;
116 }
117 
118 
119 status_t
120 AVCodecEncoder::AcceptedFormat(const media_format* proposedInputFormat,
121 	media_format* _acceptedInputFormat)
122 {
123 	TRACE("AVCodecEncoder::AcceptedFormat(%p, %p)\n", proposedInputFormat,
124 		_acceptedInputFormat);
125 
126 	if (proposedInputFormat == NULL)
127 		return B_BAD_VALUE;
128 
129 	if (_acceptedInputFormat != NULL) {
130 		memcpy(_acceptedInputFormat, proposedInputFormat,
131 			sizeof(media_format));
132 	}
133 
134 	return B_OK;
135 }
136 
137 
138 status_t
139 AVCodecEncoder::SetUp(const media_format* inputFormat)
140 {
141 	TRACE("AVCodecEncoder::SetUp()\n");
142 
143 	if (fCodecContext == NULL)
144 		return B_NO_INIT;
145 
146 	if (inputFormat == NULL)
147 		return B_BAD_VALUE;
148 
149 	// Codec IDs for raw-formats may need to be figured out here.
150 	if (fCodec == NULL && fCodecID == AV_CODEC_ID_NONE) {
151 		fCodecID = raw_audio_codec_id_for(*inputFormat);
152 		if (fCodecID != AV_CODEC_ID_NONE)
153 			fCodec = avcodec_find_encoder(fCodecID);
154 	}
155 	if (fCodec == NULL) {
156 		TRACE("  encoder not found!\n");
157 		return B_NO_INIT;
158 	}
159 
160 	fInputFormat = *inputFormat;
161 	fFramesWritten = 0;
162 
163 	return _Setup();
164 }
165 
166 
167 status_t
168 AVCodecEncoder::GetEncodeParameters(encode_parameters* parameters) const
169 {
170 	TRACE("AVCodecEncoder::GetEncodeParameters(%p)\n", parameters);
171 
172 // TODO: Implement maintaining an automatically calculated bit_rate versus
173 // a user specified (via SetEncodeParameters()) bit_rate. At this point,
174 // fCodecContext->bit_rate may not have been set yet (_Setup() may not have
175 // run), so the code below cannot work as is; it is kept as an (untested)
176 // reference for how to convert between the two sets of values.
177 //	int avgBytesPerSecond = fCodecContext->bit_rate / 8;
178 //	int maxBytesPerSecond = (fCodecContext->bit_rate
179 //		+ fCodecContext->bit_rate_tolerance) / 8;
180 //
181 //	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
182 //		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
183 //			/ fInputFormat.u.raw_audio.frame_rate);
184 //		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
185 //			/ fInputFormat.u.raw_audio.frame_rate);
186 //	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
187 //		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
188 //			/ fInputFormat.u.raw_video.field_rate);
189 //		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
190 //			/ fInputFormat.u.raw_video.field_rate);
191 //	}
192 
193 	parameters->quality = fEncodeParameters.quality;
194 
195 	return B_OK;
196 }
197 
198 
199 status_t
200 AVCodecEncoder::SetEncodeParameters(encode_parameters* parameters)
201 {
202 	TRACE("AVCodecEncoder::SetEncodeParameters(%p)\n", parameters);
203 
204 	if (fFramesWritten > 0)
205 		return B_NOT_SUPPORTED;
206 
207 	fEncodeParameters.quality = parameters->quality;
208 	TRACE("  quality: %.5f\n", parameters->quality);
209 	if (fEncodeParameters.quality == 0.0f) {
210 		TRACE("  using default quality (1.0)\n");
211 		fEncodeParameters.quality = 1.0f;
212 	}
213 
214 // TODO: Auto-bit_rate versus user supplied. See above.
215 //	int avgBytesPerSecond = 0;
216 //	int maxBytesPerSecond = 0;
217 //
218 //	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
219 //		avgBytesPerSecond = (int)(parameters->avg_field_size
220 //			* fInputFormat.u.raw_audio.frame_rate);
221 //		maxBytesPerSecond = (int)(parameters->max_field_size
222 //			* fInputFormat.u.raw_audio.frame_rate);
223 //	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
224 //		avgBytesPerSecond = (int)(parameters->avg_field_size
225 //			* fInputFormat.u.raw_video.field_rate);
226 //		maxBytesPerSecond = (int)(parameters->max_field_size
227 //			* fInputFormat.u.raw_video.field_rate);
228 //	}
229 //
230 //	if (maxBytesPerSecond < avgBytesPerSecond)
231 //		maxBytesPerSecond = avgBytesPerSecond;
232 //
233 //	// Reset these, so we can tell the difference between uninitialized
234 //	// and initialized...
235 //	if (avgBytesPerSecond > 0) {
236 //		fCodecContext->bit_rate = avgBytesPerSecond * 8;
237 //		fCodecContext->bit_rate_tolerance = (maxBytesPerSecond
238 //			- avgBytesPerSecond) * 8;
239 //		fBitRateControlledByUser = true;
240 //	}
241 
242 	return _Setup();
243 }
244 
245 
246 status_t
247 AVCodecEncoder::Encode(const void* buffer, int64 frameCount,
248 	media_encode_info* info)
249 {
250 	TRACE("AVCodecEncoder::Encode(%p, %lld, %p)\n", buffer, frameCount, info);
251 
252 	if (!_OpenCodecIfNeeded())
253 		return B_NO_INIT;
254 
255 	if (fInputFormat.type == B_MEDIA_RAW_AUDIO)
256 		return _EncodeAudio(buffer, frameCount, info);
257 	else if (fInputFormat.type == B_MEDIA_RAW_VIDEO)
258 		return _EncodeVideo(buffer, frameCount, info);
259 	else
260 		return B_NO_INIT;
261 }
262 
263 
264 // #pragma mark -
265 
266 
267 status_t
268 AVCodecEncoder::_Setup()
269 {
270 	TRACE("AVCodecEncoder::_Setup\n");
271 
272 	int rawBitRate;
273 
274 	if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
275 		TRACE("  B_MEDIA_RAW_VIDEO\n");
276 
277 		// Check input parameters
278 		AVPixelFormat pixFmt = colorspace_to_pixfmt(
279 			fInputFormat.u.raw_video.display.format);
280 		if (pixFmt == AV_PIX_FMT_NONE) {
281 			TRACE("Invalid input colorspace\n");
282 			return B_BAD_DATA;
283 		}
284 
285 		// frame rate
286 		fCodecContext->time_base = (AVRational){1,
			(int)fInputFormat.u.raw_video.field_rate};
287 		fCodecContext->framerate = (AVRational){
			(int)fInputFormat.u.raw_video.field_rate, 1};
288 
289 		// video size
290 		fCodecContext->width = fInputFormat.u.raw_video.display.line_width;
291 		fCodecContext->height = fInputFormat.u.raw_video.display.line_count;
292 		fCodecContext->gop_size = 12;
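		// (I.e. emit an intra/key frame at least every 12 frames.)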
293 
294 		// TODO: Fix pixel format or setup conversion method...
295 		if (fCodec->pix_fmts != NULL) {
296 			for (int i = 0; fCodec->pix_fmts[i] != AV_PIX_FMT_NONE; i++) {
297 				// Use the last supported pixel format, which we hope is the
298 				// one with the best quality.
299 				fCodecContext->pix_fmt = fCodec->pix_fmts[i];
300 			}
301 		}
302 
303 		// TODO: Setup rate control:
304 //		fCodecContext->rate_emu = 0;
305 //		fCodecContext->rc_eq = NULL;
306 //		fCodecContext->rc_max_rate = 0;
307 //		fCodecContext->rc_min_rate = 0;
308 		// TODO: Try to calculate a good bit rate...
309 		rawBitRate = (int)(fCodecContext->width * fCodecContext->height * 2
310 			* fInputFormat.u.raw_video.field_rate) * 8;
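		// (For a rough idea of the magnitude, assuming for example 1280x720 at
		// 30 fields per second: 1280 * 720 * 2 bytes * 30 * 8 is roughly
		// 442 Mbit/s of raw video, which is scaled down further below via
		// fBitRateScale and the quality setting.)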
311 
312 		// Pixel aspect ratio
313 		fCodecContext->sample_aspect_ratio.num
314 			= fInputFormat.u.raw_video.pixel_width_aspect;
315 		fCodecContext->sample_aspect_ratio.den
316 			= fInputFormat.u.raw_video.pixel_height_aspect;
317 		if (fCodecContext->sample_aspect_ratio.num == 0
318 			|| fCodecContext->sample_aspect_ratio.den == 0) {
319 			av_reduce(&fCodecContext->sample_aspect_ratio.num,
320 				&fCodecContext->sample_aspect_ratio.den, fCodecContext->width,
321 				fCodecContext->height, 255);
322 		}
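		// (E.g. a 720x576 input without aspect information ends up with a
		// sample aspect ratio of 5:4, the reduced frame dimensions.)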
323 
324 		// TODO: This should already happen in AcceptedFormat()
325 		if (fInputFormat.u.raw_video.display.bytes_per_row == 0) {
326 			fInputFormat.u.raw_video.display.bytes_per_row
327 				= fCodecContext->width * 4;
328 		}
329 
330 		fFrame->pts = 0;
331 
332 		// Allocate space for colorspace converted AVPicture
333 		// TODO: Check allocations...
334 		avpicture_alloc(&fDstFrame, fCodecContext->pix_fmt, fCodecContext->width,
335 			fCodecContext->height);
336 
337 		// Make the frame point to the data in the converted AVPicture
338 		fFrame->data[0] = fDstFrame.data[0];
339 		fFrame->data[1] = fDstFrame.data[1];
340 		fFrame->data[2] = fDstFrame.data[2];
341 		fFrame->data[3] = fDstFrame.data[3];
342 
343 		fFrame->linesize[0] = fDstFrame.linesize[0];
344 		fFrame->linesize[1] = fDstFrame.linesize[1];
345 		fFrame->linesize[2] = fDstFrame.linesize[2];
346 		fFrame->linesize[3] = fDstFrame.linesize[3];
347 
348 		fSwsContext = sws_getContext(fCodecContext->width,
349 			fCodecContext->height, pixFmt,
350 			fCodecContext->width, fCodecContext->height,
351 			fCodecContext->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
352 
353 	} else if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
354 		TRACE("  B_MEDIA_RAW_AUDIO\n");
355 		// sample rate (frame_rate in a raw audio format is the sample rate)
356 		fCodecContext->sample_rate = (int)fInputFormat.u.raw_audio.frame_rate;
357 		// channels
358 		fCodecContext->channels = fInputFormat.u.raw_audio.channel_count;
359 		// raw bitrate
360 		rawBitRate = fCodecContext->sample_rate * fCodecContext->channels
361 			* (fInputFormat.u.raw_audio.format
362 				& media_raw_audio_format::B_AUDIO_SIZE_MASK) * 8;
363 		// sample format
364 		switch (fInputFormat.u.raw_audio.format) {
365 			case media_raw_audio_format::B_AUDIO_FLOAT:
366 				fCodecContext->sample_fmt = AV_SAMPLE_FMT_FLT;
367 				break;
368 			case media_raw_audio_format::B_AUDIO_DOUBLE:
369 				fCodecContext->sample_fmt = AV_SAMPLE_FMT_DBL;
370 				break;
371 			case media_raw_audio_format::B_AUDIO_INT:
372 				fCodecContext->sample_fmt = AV_SAMPLE_FMT_S32;
373 				break;
374 			case media_raw_audio_format::B_AUDIO_SHORT:
375 				fCodecContext->sample_fmt = AV_SAMPLE_FMT_S16;
376 				break;
377 			case media_raw_audio_format::B_AUDIO_UCHAR:
378 				fCodecContext->sample_fmt = AV_SAMPLE_FMT_U8;
379 				break;
380 
381 			case media_raw_audio_format::B_AUDIO_CHAR:
382 			default:
383 				return B_MEDIA_BAD_FORMAT;
384 				break;
385 		}
386 		if (fInputFormat.u.raw_audio.channel_mask == 0) {
387 			// guess the channel mask...
388 			switch (fInputFormat.u.raw_audio.channel_count) {
389 				default:
390 				case 2:
391 					fCodecContext->channel_layout = AV_CH_LAYOUT_STEREO;
392 					break;
393 				case 1:
394 					fCodecContext->channel_layout = AV_CH_LAYOUT_MONO;
395 					break;
396 				case 3:
397 					fCodecContext->channel_layout = AV_CH_LAYOUT_SURROUND;
398 					break;
399 				case 4:
400 					fCodecContext->channel_layout = AV_CH_LAYOUT_QUAD;
401 					break;
402 				case 5:
403 					fCodecContext->channel_layout = AV_CH_LAYOUT_5POINT0;
404 					break;
405 				case 6:
406 					fCodecContext->channel_layout = AV_CH_LAYOUT_5POINT1;
407 					break;
408 				case 8:
409 					fCodecContext->channel_layout = AV_CH_LAYOUT_7POINT1;
410 					break;
411 				case 10:
412 					fCodecContext->channel_layout = AV_CH_LAYOUT_7POINT1_WIDE;
413 					break;
414 			}
415 		} else {
416 			// The bits match 1:1 for media_multi_channels and FFmpeg defines.
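			// (For instance, B_CHANNEL_LEFT and AV_CH_FRONT_LEFT are both 0x1,
			// B_CHANNEL_SUB and AV_CH_LOW_FREQUENCY both 0x8.)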
417 			fCodecContext->channel_layout = fInputFormat.u.raw_audio.channel_mask;
418 		}
419 	} else {
420 		TRACE("  UNSUPPORTED MEDIA TYPE!\n");
421 		return B_NOT_SUPPORTED;
422 	}
423 
424 	// TODO: Support letting the user overwrite this via
425 	// SetEncodeParameters(). See comments there...
426 	int wantedBitRate = (int)(rawBitRate / fBitRateScale
427 		* fEncodeParameters.quality);
428 	if (wantedBitRate == 0)
429 		wantedBitRate = (int)(rawBitRate / fBitRateScale);
430 
431 	fCodecContext->bit_rate = wantedBitRate;
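	// (Continuing the illustration above: ~442 Mbit/s of raw video with a
	// hypothetical fBitRateScale of 40 and a quality of 1.0 requests roughly
	// 11 Mbit/s; a quality of 0.5 would ask for about half of that.)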
432 
433 	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
434 		// Some audio encoders support certain bitrates only. Use the
435 		// closest match to the wantedBitRate.
436 		const int kBitRates[] = {
437 			32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000,
438 			160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000,
439 			576000, 640000
440 		};
441 		int diff = wantedBitRate;
442 		for (unsigned int i = 0; i < sizeof(kBitRates) / sizeof(int); i++) {
443 			int currentDiff = abs(wantedBitRate - kBitRates[i]);
444 			if (currentDiff < diff) {
445 				fCodecContext->bit_rate = kBitRates[i];
446 				diff = currentDiff;
447 			} else
448 				break;
449 		}
450 	}
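	// (Illustration: a wanted rate of 100 kbit/s snaps to the closest table
	// entry, 96 kbit/s.)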
451 
452 	TRACE("  rawBitRate: %d, wantedBitRate: %d (%.1f), "
453 		"context bitrate: %d\n", rawBitRate, wantedBitRate,
454 		fEncodeParameters.quality, fCodecContext->bit_rate);
455 
456 	// Add some known fixes from the FFmpeg API example:
457 	if (fCodecContext->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
458 		// Just for testing, we also add B frames
459 		fCodecContext->max_b_frames = 2;
460 	} else if (fCodecContext->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
461 		// Needed to avoid using macroblocks in which some coeffs overflow.
462 		// This does not happen with normal video, it just happens here as
463 		// the motion of the chroma plane does not match the luma plane.
464 		fCodecContext->mb_decision = 2;
465 	}
466 
467 	// Unfortunately, we may fail later, when we try to open the codec
468 	// for real... but we need to delay this because we still allow
469 	// parameter/quality changes.
470 	return B_OK;
471 }
472 
473 
474 bool
475 AVCodecEncoder::_OpenCodecIfNeeded()
476 {
477 	if (fCodecInitStatus == CODEC_INIT_DONE)
478 		return true;
479 
480 	if (fCodecInitStatus == CODEC_INIT_FAILED)
481 		return false;
482 
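	// Allow codecs and features that FFmpeg still marks as experimental;
	// otherwise avcodec_open2() below may refuse to open some encoders.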
483 	fCodecContext->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
484 
485 	// Some codecs need this to be set before open
486 	fFrame->format = fCodecContext->pix_fmt;
487 	fFrame->width = fCodecContext->width;
488 	fFrame->height = fCodecContext->height;
489 
490 	// Open the codec
491 	int result = avcodec_open2(fCodecContext, fCodec, NULL);
492 	if (result >= 0)
493 		fCodecInitStatus = CODEC_INIT_DONE;
494 	else
495 		fCodecInitStatus = CODEC_INIT_FAILED;
496 
497 	TRACE("  avcodec_open2(%p, %p): %d\n", fCodecContext, fCodec, result);
498 
499 	return fCodecInitStatus == CODEC_INIT_DONE;
501 }
502 
503 
504 status_t
505 AVCodecEncoder::_EncodeAudio(const void* _buffer, int64 frameCount,
506 	media_encode_info* info)
507 {
508 	TRACE("AVCodecEncoder::_EncodeAudio(%p, %lld, %p)\n", _buffer, frameCount,
509 		info);
510 
511 	if (fChunkBuffer == NULL)
512 		return B_NO_MEMORY;
513 
514 	status_t ret = B_OK;
515 
516 	const uint8* buffer = reinterpret_cast<const uint8*>(_buffer);
517 
518 	size_t inputSampleSize = fInputFormat.u.raw_audio.format
519 		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
520 	size_t inputFrameSize = inputSampleSize
521 		* fInputFormat.u.raw_audio.channel_count;
522 
523 	size_t bufferSize = frameCount * inputFrameSize;
524 	bufferSize = min_c(bufferSize, kDefaultChunkBufferSize);
525 
526 	if (fCodecContext->frame_size > 1) {
527 		// Encoded audio. Things work differently from raw audio. We need
528 		// the fAudioFifo to pipe data.
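		// For example (numbers purely illustrative): an encoder with a
		// frame_size of 1024 samples that is fed 1470 frames per call encodes
		// one full chunk per call and keeps the remaining 446 frames in the
		// FIFO for the next round.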
529 		if (av_fifo_realloc2(fAudioFifo,
530 				av_fifo_size(fAudioFifo) + bufferSize) < 0) {
531 			TRACE("  av_fifo_realloc2() failed\n");
532 			return B_NO_MEMORY;
533 		}
534 		av_fifo_generic_write(fAudioFifo, const_cast<uint8*>(buffer),
535 			bufferSize, NULL);
536 
537 		int frameBytes = fCodecContext->frame_size * inputFrameSize;
538 		uint8* tempBuffer = new(std::nothrow) uint8[frameBytes];
539 		if (tempBuffer == NULL)
540 			return B_NO_MEMORY;
541 
542 		// Encode as many chunks as can be read from the FIFO.
543 		while (av_fifo_size(fAudioFifo) >= frameBytes) {
544 			av_fifo_generic_read(fAudioFifo, tempBuffer, frameBytes, NULL);
545 
546 			ret = _EncodeAudio(tempBuffer, frameBytes, fCodecContext->frame_size,
547 				info);
548 			if (ret != B_OK)
549 				break;
550 		}
551 
552 		delete[] tempBuffer;
553 	} else {
554 		// Raw audio. The number of bytes produced by the encoder matches the
555 		// number of input bytes, so the buffer can be passed on directly and
		// no FIFO is needed.
556 		return _EncodeAudio(buffer, bufferSize, frameCount, info);
558 	}
559 
560 	return ret;
561 }
562 
563 
564 status_t
565 AVCodecEncoder::_EncodeAudio(const uint8* buffer, size_t bufferSize,
566 	int64 frameCount, media_encode_info* info)
567 {
568 	status_t ret;
569 
570 	// Encode one audio chunk/frame.
571 	AVPacket packet;
572 	av_init_packet(&packet);
573 	// By leaving these NULL, we let the encoder allocate memory as it needs.
574 	// This way we don't risk giving it a buffer that is too small.
575 	packet.data = NULL;
576 	packet.size = 0;
577 
578 	// We need to wrap our input data into an AVFrame structure. The frame
	// lives on the stack, so zero it before use: av_frame_unref() below must
	// not operate on uninitialized buffer pointers.
579 	AVFrame frame;
	memset(&frame, 0, sizeof(frame));
580 	int gotPacket = 0;
581 
582 	if (buffer) {
583 		av_frame_unref(&frame);
584 
585 		frame.nb_samples = frameCount;
586 
587 		ret = avcodec_fill_audio_frame(&frame, fCodecContext->channels,
588 				fCodecContext->sample_fmt, (const uint8_t *) buffer, bufferSize, 1);
589 
590 		if (ret != 0)
591 			return B_ERROR;
592 
593 		// Set the presentation time of the frame.
594 		frame.pts = (bigtime_t)(fFramesWritten * 1000000LL
595 			/ fInputFormat.u.raw_audio.frame_rate);
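		// (E.g. with a 44100 Hz stream, a fFramesWritten of 44100 yields a pts
		// of 1000000, i.e. one second expressed in microseconds.)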
596 		fFramesWritten += frame.nb_samples;
597 
598 		ret = avcodec_encode_audio2(fCodecContext, &packet, &frame, &gotPacket);
599 	} else {
600 		// If called with NULL, ask the encoder to flush any buffers it may
601 		// have pending.
602 		ret = avcodec_encode_audio2(fCodecContext, &packet, NULL, &gotPacket);
603 	}
604 
605 	if (buffer && frame.extended_data != frame.data)
606 		av_freep(&frame.extended_data);
607 
608 	if (ret != 0) {
609 		TRACE("  avcodec_encode_audio2() failed: %ld\n", ret);
610 		return B_ERROR;
611 	}
614 
615 	if (gotPacket) {
616 		if (fCodecContext->coded_frame) {
617 			// Store information about the coded frame in the context.
618 			fCodecContext->coded_frame->pts = packet.pts;
619 			// The double '!' turns the flag test into a plain 0 or 1 value.
620 			fCodecContext->coded_frame->key_frame
				= !!(packet.flags & AV_PKT_FLAG_KEY);
621 		}
622 
623 		// Setup media_encode_info, most important is the time stamp.
624 		info->start_time = packet.pts;
625 
626 		if (packet.flags & AV_PKT_FLAG_KEY)
627 			info->flags = B_MEDIA_KEY_FRAME;
628 		else
629 			info->flags = 0;
630 
631 		// We got a packet out of the encoder, write it to the output stream
632 		ret = WriteChunk(packet.data, packet.size, info);
633 		if (ret != B_OK) {
634 			TRACE("  error writing chunk: %s\n", strerror(ret));
635 			av_free_packet(&packet);
636 			return ret;
637 		}
638 	}
639 
640 	av_free_packet(&packet);
641 	return B_OK;
642 }
643 
644 
645 status_t
646 AVCodecEncoder::_EncodeVideo(const void* buffer, int64 frameCount,
647 	media_encode_info* info)
648 {
649 	TRACE_IO("AVCodecEncoder::_EncodeVideo(%p, %lld, %p)\n", buffer, frameCount,
650 		info);
651 
652 	if (fChunkBuffer == NULL)
653 		return B_NO_MEMORY;
654 
655 	status_t ret = B_OK;
656 
657 	AVPacket* pkt = av_packet_alloc();
658 	while (frameCount > 0) {
659 		size_t bpr = fInputFormat.u.raw_video.display.bytes_per_row;
660 		size_t bufferSize = fInputFormat.u.raw_video.display.line_count * bpr;
661 
662 		// We should always get chunky bitmaps, so this code should be safe.
663 		fSrcFrame.data[0] = (uint8_t*)buffer;
664 		fSrcFrame.linesize[0] = bpr;
665 
666 		// Run the pixel format conversion
667 		sws_scale(fSwsContext, fSrcFrame.data, fSrcFrame.linesize, 0,
668 			fInputFormat.u.raw_video.display.line_count, fDstFrame.data,
669 			fDstFrame.linesize);
670 
671 		ret = _EncodeVideoFrame(fFrame, pkt, info);
		if (ret != B_OK) {
			// Bail out instead of retrying the same frame forever.
			break;
		}

672 		// Skip to the next frame (but usually, there is only one to encode
673 		// for video).
674 		frameCount--;
675 		fFramesWritten++;
676 		buffer = (const void*)((const uint8*)buffer + bufferSize);
678 	}
679 
680 	// TODO: We should pass a NULL AVFrame to enter "draining" mode and flush
681 	// the buffers once there is no more input data. We cannot do that here,
682 	// though, since
683 	// 1. it would not be efficient, and
684 	// 2. it would be incorrect: many codecs need the "next" frame to optimize
	//    the current one, and a drained codec can no longer be fed that frame.
685 	//_EncodeVideoFrame(NULL, pkt, info);
686 	//avcodec_flush_buffers(fCodecContext);
687 	av_packet_free(&pkt);
688 	return ret;
689 }
690 
691 
692 status_t
693 AVCodecEncoder::_EncodeVideoFrame(AVFrame* frame, AVPacket* pkt, media_encode_info* info)
694 {
695 	// Encode one video chunk/frame.
696 	int result = avcodec_send_frame(fCodecContext, frame);
697 	if (result < 0) {
698 		TRACE("  avcodec_send_frame() failed: %d\n", result);
699 		return B_ERROR;
700 	}
701 
702 	// Increase the frame pts as in the ffmpeg sample code
703 	if (frame != NULL)
704 		frame->pts++;
705 
706 	while (result == 0) {
707 		result = avcodec_receive_packet(fCodecContext, pkt);
708 		if (result == 0) {
709 			TRACE("  avcodec_receive_packet: received one packet\n");
710 			// Maybe we need to use this PTS to calculate start_time:
711 			if (pkt->pts != AV_NOPTS_VALUE) {
712 				TRACE("  codec frame PTS: %lld (codec time_base: %d/%d)\n",
713 					pkt->pts, fCodecContext->time_base.num,
714 					fCodecContext->time_base.den);
715 			} else {
716 				TRACE("  codec frame PTS: N/A (codec time_base: %d/%d)\n",
717 					fCodecContext->time_base.num, fCodecContext->time_base.den);
718 			}
719 
720 			// Setup media_encode_info, most important is the time stamp.
721 			info->start_time = (bigtime_t)(fFramesWritten * 1000000LL
722 				/ fInputFormat.u.raw_video.field_rate);
723 
724 			info->flags = 0;
			// The key frame flag travels on the packet; coded_frame may not be
			// filled in (or may be NULL) with the send/receive API.
725 			if ((pkt->flags & AV_PKT_FLAG_KEY) != 0)
726 				info->flags |= B_MEDIA_KEY_FRAME;
727 
728 			// Write the chunk
729 			result = WriteChunk(pkt->data, pkt->size, info);
730 			if (result != B_OK) {
731 				TRACE("  error writing chunk: %s\n", strerror(result));
732 				break;
733 			}
734 		}
735 		av_packet_unref(pkt);
736 	}
737 	if (result == AVERROR(EAGAIN))
738 		return B_OK;
739 
740 	TRACE("   _EncodeVideoFrame(): returning...\n");
741 	return result;
742 }
743 
744