// xref: /haiku/src/add-ons/media/plugins/ffmpeg/AVCodecEncoder.cpp (revision 529cd177b573aaba391c8adc9c9f5ad76a14bf81)
/*
 * Copyright 2009-2010, Stephan Aßmus <superstippi@gmx.de>
 * All rights reserved. Distributed under the terms of the MIT license.
 */


#include "AVCodecEncoder.h"

#include <new>

#include <stdio.h>
#include <string.h>

#include <Application.h>
#include <Roster.h>

extern "C" {
	#include "rational.h"
}

#include "EncoderTable.h"
#include "gfx_util.h"


#undef TRACE
//#define TRACE_AV_CODEC_ENCODER
#ifdef TRACE_AV_CODEC_ENCODER
#	define TRACE	printf
#	define TRACE_IO(a...)
#else
#	define TRACE(a...)
#	define TRACE_IO(a...)
#endif


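// Scratch buffer handed to the codec for encoded output before it is passed
// on via WriteChunk(). 2 MiB is assumed to be generous enough for a single
// encoded video frame or audio packet at common settings.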
static const size_t kDefaultChunkBufferSize = 2 * 1024 * 1024;


AVCodecEncoder::AVCodecEncoder(uint32 codecID, int bitRateScale)
	:
	Encoder(),
	fBitRateScale(bitRateScale),
	fCodecID((enum CodecID)codecID),
	fCodec(NULL),
	fOwnContext(avcodec_alloc_context()),
	fContext(fOwnContext),
	fCodecInitStatus(CODEC_INIT_NEEDED),

	fFrame(avcodec_alloc_frame()),
	fSwsContext(NULL),

	fFramesWritten(0),

	fChunkBuffer(new(std::nothrow) uint8[kDefaultChunkBufferSize])
{
	TRACE("AVCodecEncoder::AVCodecEncoder()\n");

	if (fCodecID > 0) {
		fCodec = avcodec_find_encoder(fCodecID);
		TRACE("  found AVCodec for %u: %p\n", fCodecID, fCodec);
	}

	memset(&fInputFormat, 0, sizeof(media_format));

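	// The FIFO buffers incoming raw audio until enough samples for one full
	// codec frame (fContext->frame_size) have accumulated; see _EncodeAudio().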
	fAudioFifo = av_fifo_alloc(0);

	fDstFrame.data[0] = NULL;
	fDstFrame.data[1] = NULL;
	fDstFrame.data[2] = NULL;
	fDstFrame.data[3] = NULL;

	fDstFrame.linesize[0] = 0;
	fDstFrame.linesize[1] = 0;
	fDstFrame.linesize[2] = 0;
	fDstFrame.linesize[3] = 0;

	// Initial parameters, so we know if the user changed them
	fEncodeParameters.avg_field_size = 0;
	fEncodeParameters.max_field_size = 0;
	fEncodeParameters.quality = 1.0f;
}


AVCodecEncoder::~AVCodecEncoder()
{
	TRACE("AVCodecEncoder::~AVCodecEncoder()\n");

	_CloseCodecIfNeeded();

	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);

	av_fifo_free(fAudioFifo);

	avpicture_free(&fDstFrame);
	// NOTE: Do not use avpicture_free() on fSrcFrame!! We fill the picture
	// data on the fly with the media buffer data passed to Encode().

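	// For video encoding, fFrame's data pointers alias the fDstFrame buffers
	// released above by avpicture_free(); clear them before freeing the
	// AVFrame itself so no dangling pointers remain.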
	if (fFrame != NULL) {
		fFrame->data[0] = NULL;
		fFrame->data[1] = NULL;
		fFrame->data[2] = NULL;
		fFrame->data[3] = NULL;

		fFrame->linesize[0] = 0;
		fFrame->linesize[1] = 0;
		fFrame->linesize[2] = 0;
		fFrame->linesize[3] = 0;
		av_free(fFrame);
	}

	av_free(fOwnContext);

	delete[] fChunkBuffer;
}


status_t
AVCodecEncoder::AcceptedFormat(const media_format* proposedInputFormat,
	media_format* _acceptedInputFormat)
{
	TRACE("AVCodecEncoder::AcceptedFormat(%p, %p)\n", proposedInputFormat,
		_acceptedInputFormat);

	if (proposedInputFormat == NULL)
		return B_BAD_VALUE;

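	// No further restrictions are imposed here; the proposed format is
	// accepted and echoed back unchanged.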
	if (_acceptedInputFormat != NULL) {
		memcpy(_acceptedInputFormat, proposedInputFormat,
			sizeof(media_format));
	}

	return B_OK;
}


status_t
AVCodecEncoder::SetUp(const media_format* inputFormat)
{
	TRACE("AVCodecEncoder::SetUp()\n");

	if (fContext == NULL)
		return B_NO_INIT;

	if (inputFormat == NULL)
		return B_BAD_VALUE;

	// Codec IDs for raw formats may need to be figured out here.
	if (fCodec == NULL && fCodecID == CODEC_ID_NONE) {
		fCodecID = raw_audio_codec_id_for(*inputFormat);
		if (fCodecID != CODEC_ID_NONE)
			fCodec = avcodec_find_encoder(fCodecID);
	}
	if (fCodec == NULL) {
		TRACE("  encoder not found!\n");
		return B_NO_INIT;
	}

	_CloseCodecIfNeeded();

	fInputFormat = *inputFormat;
	fFramesWritten = 0;

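	// Layout of user_data as written by the FFmpeg Writer plugin: a 'ffmp'
	// magic (uint32), followed by the Writer's team_id, followed by a pointer
	// to its AVCodecContext.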
	const uchar* userData = inputFormat->user_data;
	if (*(uint32*)userData == 'ffmp') {
		userData += sizeof(uint32);
		// The Writer plugin used is the FFmpeg plugin. It stores the
		// AVCodecContext pointer in the user data section. Use this
		// context instead of our own. It requires the Writer to live in
		// the same team, of course.
		app_info appInfo;
		if (be_app->GetAppInfo(&appInfo) == B_OK
			&& *(team_id*)userData == appInfo.team) {
			userData += sizeof(team_id);
			// Use the AVCodecContext from the Writer. This works better
			// than using our own context with some encoders.
			fContext = *(AVCodecContext**)userData;
		}
	}

	return _Setup();
}


status_t
AVCodecEncoder::GetEncodeParameters(encode_parameters* parameters) const
{
	TRACE("AVCodecEncoder::GetEncodeParameters(%p)\n", parameters);

// TODO: Implement maintaining an automatically calculated bit_rate versus
// a user specified (via SetEncodeParameters()) bit_rate. At this point, the
// fContext->bit_rate may not yet have been specified (_Setup() was never
// called yet). So it cannot work like the code below, but in any case, it's
// showing how to convert between the values (albeit untested).
//	int avgBytesPerSecond = fContext->bit_rate / 8;
//	int maxBytesPerSecond = (fContext->bit_rate
//		+ fContext->bit_rate_tolerance) / 8;
//
//	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
//		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
//			/ fInputFormat.u.raw_audio.frame_rate);
//		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
//			/ fInputFormat.u.raw_audio.frame_rate);
//	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
//		fEncodeParameters.avg_field_size = (int32)(avgBytesPerSecond
//			/ fInputFormat.u.raw_video.field_rate);
//		fEncodeParameters.max_field_size = (int32)(maxBytesPerSecond
//			/ fInputFormat.u.raw_video.field_rate);
//	}

	parameters->quality = fEncodeParameters.quality;

	return B_OK;
}


status_t
AVCodecEncoder::SetEncodeParameters(encode_parameters* parameters)
{
	TRACE("AVCodecEncoder::SetEncodeParameters(%p)\n", parameters);

	if (fFramesWritten > 0)
		return B_NOT_SUPPORTED;

	fEncodeParameters.quality = parameters->quality;
	TRACE("  quality: %.5f\n", parameters->quality);
	if (fEncodeParameters.quality == 0.0f) {
		TRACE("  using default quality (1.0)\n");
		fEncodeParameters.quality = 1.0f;
	}

// TODO: Auto-bit_rate versus user supplied. See above.
//	int avgBytesPerSecond = 0;
//	int maxBytesPerSecond = 0;
//
//	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
//		avgBytesPerSecond = (int)(parameters->avg_field_size
//			* fInputFormat.u.raw_audio.frame_rate);
//		maxBytesPerSecond = (int)(parameters->max_field_size
//			* fInputFormat.u.raw_audio.frame_rate);
//	} else if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
//		avgBytesPerSecond = (int)(parameters->avg_field_size
//			* fInputFormat.u.raw_video.field_rate);
//		maxBytesPerSecond = (int)(parameters->max_field_size
//			* fInputFormat.u.raw_video.field_rate);
//	}
//
//	if (maxBytesPerSecond < avgBytesPerSecond)
//		maxBytesPerSecond = avgBytesPerSecond;
//
//	// Reset these, so we can tell the difference between uninitialized
//	// and initialized...
//	if (avgBytesPerSecond > 0) {
//		fContext->bit_rate = avgBytesPerSecond * 8;
//		fContext->bit_rate_tolerance = (maxBytesPerSecond
//			- avgBytesPerSecond) * 8;
//		fBitRateControlledByUser = true;
//	}

	return _Setup();
}


status_t
AVCodecEncoder::Encode(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE("AVCodecEncoder::Encode(%p, %lld, %p)\n", buffer, frameCount, info);

	if (!_OpenCodecIfNeeded())
		return B_NO_INIT;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO)
		return _EncodeAudio(buffer, frameCount, info);
	else if (fInputFormat.type == B_MEDIA_RAW_VIDEO)
		return _EncodeVideo(buffer, frameCount, info);
	else
		return B_NO_INIT;
}


// #pragma mark -


status_t
AVCodecEncoder::_Setup()
{
	TRACE("AVCodecEncoder::_Setup\n");

	int rawBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_VIDEO) {
		TRACE("  B_MEDIA_RAW_VIDEO\n");
		// frame rate
		fContext->time_base.den = (int)fInputFormat.u.raw_video.field_rate;
		fContext->time_base.num = 1;
		// video size
		fContext->width = fInputFormat.u.raw_video.display.line_width;
		fContext->height = fInputFormat.u.raw_video.display.line_count;
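		// With a GOP size of 12, an intra frame is forced at least every
		// twelve frames.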
		fContext->gop_size = 12;
		// TODO: Fix pixel format or setup conversion method...
		for (int i = 0; fCodec->pix_fmts[i] != PIX_FMT_NONE; i++) {
			// Use the last supported pixel format, which we hope is the
			// one with the best quality.
			fContext->pix_fmt = fCodec->pix_fmts[i];
		}

		// TODO: Setup rate control:
//		fContext->rate_emu = 0;
//		fContext->rc_eq = NULL;
//		fContext->rc_max_rate = 0;
//		fContext->rc_min_rate = 0;
		// TODO: Try to calculate a good bit rate...
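		// The estimate below assumes roughly 2 bytes per pixel of raw data,
		// e.g. 720x576 at 25 fps: 720 * 576 * 2 * 25 * 8 ~ 166 Mbit/s before
		// fBitRateScale and the quality factor are applied.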
		rawBitRate = (int)(fContext->width * fContext->height * 2
			* fInputFormat.u.raw_video.field_rate) * 8;

		// Pixel aspect ratio
		fContext->sample_aspect_ratio.num
			= fInputFormat.u.raw_video.pixel_width_aspect;
		fContext->sample_aspect_ratio.den
			= fInputFormat.u.raw_video.pixel_height_aspect;
		if (fContext->sample_aspect_ratio.num == 0
			|| fContext->sample_aspect_ratio.den == 0) {
			av_reduce(&fContext->sample_aspect_ratio.num,
				&fContext->sample_aspect_ratio.den, fContext->width,
				fContext->height, 255);
		}

		// TODO: This should already happen in AcceptedFormat()
		if (fInputFormat.u.raw_video.display.bytes_per_row == 0) {
			fInputFormat.u.raw_video.display.bytes_per_row
				= fContext->width * 4;
		}

		fFrame->pts = 0;

		// Allocate space for colorspace converted AVPicture
		// TODO: Check allocations...
		avpicture_alloc(&fDstFrame, fContext->pix_fmt, fContext->width,
			fContext->height);

		// Make the frame point to the data in the converted AVPicture
		fFrame->data[0] = fDstFrame.data[0];
		fFrame->data[1] = fDstFrame.data[1];
		fFrame->data[2] = fDstFrame.data[2];
		fFrame->data[3] = fDstFrame.data[3];

		fFrame->linesize[0] = fDstFrame.linesize[0];
		fFrame->linesize[1] = fDstFrame.linesize[1];
		fFrame->linesize[2] = fDstFrame.linesize[2];
		fFrame->linesize[3] = fDstFrame.linesize[3];

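		// The sws context only translates the Media Kit colorspace into the
		// codec's pixel format; source and destination dimensions are
		// identical, so no scaling takes place.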
		fSwsContext = sws_getContext(fContext->width, fContext->height,
			colorspace_to_pixfmt(fInputFormat.u.raw_video.display.format),
			fContext->width, fContext->height,
			fContext->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);

	} else if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		TRACE("  B_MEDIA_RAW_AUDIO\n");
		// frame rate
		fContext->sample_rate = (int)fInputFormat.u.raw_audio.frame_rate;
		// channels
		fContext->channels = fInputFormat.u.raw_audio.channel_count;
		// raw bitrate
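		// (e.g. 44.1 kHz stereo 16-bit PCM: 44100 * 2 * 2 * 8 ~ 1.41 Mbit/s)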
		rawBitRate = fContext->sample_rate * fContext->channels
			* (fInputFormat.u.raw_audio.format
				& media_raw_audio_format::B_AUDIO_SIZE_MASK) * 8;
		// sample format
		switch (fInputFormat.u.raw_audio.format) {
			case media_raw_audio_format::B_AUDIO_FLOAT:
				fContext->sample_fmt = AV_SAMPLE_FMT_FLT;
				break;
			case media_raw_audio_format::B_AUDIO_DOUBLE:
				fContext->sample_fmt = AV_SAMPLE_FMT_DBL;
				break;
			case media_raw_audio_format::B_AUDIO_INT:
				fContext->sample_fmt = AV_SAMPLE_FMT_S32;
				break;
			case media_raw_audio_format::B_AUDIO_SHORT:
				fContext->sample_fmt = AV_SAMPLE_FMT_S16;
				break;
			case media_raw_audio_format::B_AUDIO_UCHAR:
				fContext->sample_fmt = AV_SAMPLE_FMT_U8;
				break;

			case media_raw_audio_format::B_AUDIO_CHAR:
			default:
				return B_MEDIA_BAD_FORMAT;
				break;
		}
		if (fInputFormat.u.raw_audio.channel_mask == 0) {
			// guess the channel mask...
			switch (fInputFormat.u.raw_audio.channel_count) {
				default:
				case 2:
					fContext->channel_layout = AV_CH_LAYOUT_STEREO;
					break;
				case 1:
					fContext->channel_layout = AV_CH_LAYOUT_MONO;
					break;
				case 3:
					fContext->channel_layout = AV_CH_LAYOUT_SURROUND;
					break;
				case 4:
					fContext->channel_layout = AV_CH_LAYOUT_QUAD;
					break;
				case 5:
					fContext->channel_layout = AV_CH_LAYOUT_5POINT0;
					break;
				case 6:
					fContext->channel_layout = AV_CH_LAYOUT_5POINT1;
					break;
				case 8:
					fContext->channel_layout = AV_CH_LAYOUT_7POINT1;
					break;
				case 10:
					fContext->channel_layout = AV_CH_LAYOUT_7POINT1_WIDE;
					break;
			}
		} else {
			// The bits match 1:1 for media_multi_channels and FFmpeg defines.
			fContext->channel_layout = fInputFormat.u.raw_audio.channel_mask;
		}
	} else {
		TRACE("  UNSUPPORTED MEDIA TYPE!\n");
		return B_NOT_SUPPORTED;
	}

	// TODO: Support letting the user overwrite this via
	// SetEncodeParameters(). See comments there...
	int wantedBitRate = (int)(rawBitRate / fBitRateScale
		* fEncodeParameters.quality);
	if (wantedBitRate == 0)
		wantedBitRate = (int)(rawBitRate / fBitRateScale);

	fContext->bit_rate = wantedBitRate;

	if (fInputFormat.type == B_MEDIA_RAW_AUDIO) {
		// Some audio encoders support certain bitrates only. Use the
		// closest match to the wantedBitRate.
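		// The table below is sorted ascending, so the search can stop as soon
		// as the distance to wantedBitRate starts growing again.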
		const int kBitRates[] = {
			32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000,
			160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000,
			576000, 640000
		};
		int diff = wantedBitRate;
		for (unsigned int i = 0; i < sizeof(kBitRates) / sizeof(int); i++) {
			int currentDiff = abs(wantedBitRate - kBitRates[i]);
			if (currentDiff < diff) {
				fContext->bit_rate = kBitRates[i];
				diff = currentDiff;
			} else
				break;
		}
	}

	TRACE("  rawBitRate: %d, wantedBitRate: %d (%.1f), "
		"context bitrate: %d\n", rawBitRate, wantedBitRate,
		fEncodeParameters.quality, fContext->bit_rate);

	// Add some known fixes from the FFmpeg API example:
	if (fContext->codec_id == CODEC_ID_MPEG2VIDEO) {
		// Just for testing, we also add B frames
		fContext->max_b_frames = 2;
	} else if (fContext->codec_id == CODEC_ID_MPEG1VIDEO) {
		// Needed to avoid using macroblocks in which some coeffs overflow.
		// This does not happen with normal video, it just happens here as
		// the motion of the chroma plane does not match the luma plane.
		fContext->mb_decision = 2;
	}

	// Unfortunately, we may fail later, when we try to open the codec
	// for real... but we need to delay this because we still allow
	// parameter/quality changes.
	return B_OK;
}


bool
AVCodecEncoder::_OpenCodecIfNeeded()
{
	if (fContext != fOwnContext) {
		// We are using the AVCodecContext of the AVFormatWriter plugin,
		// and don't maintain its open/close state.
		return true;
	}

	if (fCodecInitStatus == CODEC_INIT_DONE)
		return true;

	if (fCodecInitStatus == CODEC_INIT_FAILED)
		return false;

	// Open the codec
	int result = avcodec_open(fContext, fCodec);
	if (result >= 0)
		fCodecInitStatus = CODEC_INIT_DONE;
	else
		fCodecInitStatus = CODEC_INIT_FAILED;

	TRACE("  avcodec_open(%p, %p): %d\n", fContext, fCodec, result);

	return fCodecInitStatus == CODEC_INIT_DONE;
}


void
AVCodecEncoder::_CloseCodecIfNeeded()
{
	if (fContext != fOwnContext) {
		// See _OpenCodecIfNeeded().
		return;
	}

	if (fCodecInitStatus == CODEC_INIT_DONE) {
		avcodec_close(fContext);
		fCodecInitStatus = CODEC_INIT_NEEDED;
	}
}


static const int64 kNoPTSValue = 0x8000000000000000LL;
	// NOTE: For some reason, I have trouble with the avcodec.h define:
	// #define AV_NOPTS_VALUE          INT64_C(0x8000000000000000)
	// INT64_C is not defined here.

status_t
AVCodecEncoder::_EncodeAudio(const void* _buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE("AVCodecEncoder::_EncodeAudio(%p, %lld, %p)\n", _buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	const uint8* buffer = reinterpret_cast<const uint8*>(_buffer);

	size_t inputSampleSize = fInputFormat.u.raw_audio.format
		& media_raw_audio_format::B_AUDIO_SIZE_MASK;
	size_t inputFrameSize = inputSampleSize
		* fInputFormat.u.raw_audio.channel_count;

	size_t bufferSize = frameCount * inputFrameSize;
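	// Clamp to the chunk buffer size; for raw formats this value is passed on
	// to avcodec_encode_audio() as the output buffer size below.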
	bufferSize = min_c(bufferSize, kDefaultChunkBufferSize);

	if (fContext->frame_size > 1) {
		// Encoded audio. Things work differently from raw audio. We need
		// the fAudioFifo to pipe data.
		if (av_fifo_realloc2(fAudioFifo,
				av_fifo_size(fAudioFifo) + bufferSize) < 0) {
			TRACE("  av_fifo_realloc2() failed\n");
			return B_NO_MEMORY;
		}
		av_fifo_generic_write(fAudioFifo, const_cast<uint8*>(buffer),
			bufferSize, NULL);

		int frameBytes = fContext->frame_size * inputFrameSize;
		uint8* tempBuffer = new(std::nothrow) uint8[frameBytes];
		if (tempBuffer == NULL)
			return B_NO_MEMORY;

		// Encode as many chunks as can be read from the FIFO.
		while (av_fifo_size(fAudioFifo) >= frameBytes) {
			av_fifo_generic_read(fAudioFifo, tempBuffer, frameBytes, NULL);

			ret = _EncodeAudio(tempBuffer, frameBytes, fContext->frame_size,
				info);
			if (ret != B_OK)
				break;
		}
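		// Any leftover samples (less than one full codec frame) stay in the
		// FIFO and are picked up by the next Encode() call.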

		delete[] tempBuffer;
	} else {
		// Raw audio. The number of bytes returned from avcodec_encode_audio()
		// is always the same as the number of input bytes.
		return _EncodeAudio(buffer, bufferSize, frameCount,
			info);
	}

	return ret;
}


status_t
AVCodecEncoder::_EncodeAudio(const uint8* buffer, size_t bufferSize,
	int64 frameCount, media_encode_info* info)
{
	// Encode one audio chunk/frame. The bufferSize has already been adapted
	// to the needed size for fContext->frame_size, or we are writing raw
	// audio.
	int usedBytes = avcodec_encode_audio(fContext, fChunkBuffer,
		bufferSize, reinterpret_cast<const short*>(buffer));

	if (usedBytes < 0) {
		TRACE("  avcodec_encode_audio() failed: %d\n", usedBytes);
		return B_ERROR;
	}
	if (usedBytes == 0)
		return B_OK;

//	// Maybe we need to use this PTS to calculate start_time:
//	if (fContext->coded_frame->pts != kNoPTSValue) {
//		TRACE("  codec frame PTS: %lld (codec time_base: %d/%d)\n",
//			fContext->coded_frame->pts, fContext->time_base.num,
//			fContext->time_base.den);
//	} else {
//		TRACE("  codec frame PTS: N/A (codec time_base: %d/%d)\n",
//			fContext->time_base.num, fContext->time_base.den);
//	}

	// Setup media_encode_info, most important is the time stamp.
	info->start_time = (bigtime_t)(fFramesWritten * 1000000LL
		/ fInputFormat.u.raw_audio.frame_rate);
	info->flags = B_MEDIA_KEY_FRAME;

	// Write the chunk
	status_t ret = WriteChunk(fChunkBuffer, usedBytes, info);
	if (ret != B_OK) {
		TRACE("  error writing chunk: %s\n", strerror(ret));
		return ret;
	}

	fFramesWritten += frameCount;

	return B_OK;
}


status_t
AVCodecEncoder::_EncodeVideo(const void* buffer, int64 frameCount,
	media_encode_info* info)
{
	TRACE_IO("AVCodecEncoder::_EncodeVideo(%p, %lld, %p)\n", buffer, frameCount,
		info);

	if (fChunkBuffer == NULL)
		return B_NO_MEMORY;

	status_t ret = B_OK;

	while (frameCount > 0) {
		size_t bpr = fInputFormat.u.raw_video.display.bytes_per_row;
		size_t bufferSize = fInputFormat.u.raw_video.display.line_count * bpr;

		// We should always get chunky bitmaps, so this code should be safe.
		fSrcFrame.data[0] = (uint8_t*)buffer;
		fSrcFrame.linesize[0] = bpr;

		// Run the pixel format conversion
		sws_scale(fSwsContext, fSrcFrame.data, fSrcFrame.linesize, 0,
			fInputFormat.u.raw_video.display.line_count, fDstFrame.data,
			fDstFrame.linesize);

		// Encode one video chunk/frame.
		int usedBytes = avcodec_encode_video(fContext, fChunkBuffer,
			kDefaultChunkBufferSize, fFrame);

		// avcodec.h says we need to set it.
		fFrame->pts++;

		if (usedBytes < 0) {
			TRACE("  avcodec_encode_video() failed: %d\n", usedBytes);
			return B_ERROR;
		}

		// Maybe we need to use this PTS to calculate start_time:
		if (fContext->coded_frame->pts != kNoPTSValue) {
			TRACE("  codec frame PTS: %lld (codec time_base: %d/%d)\n",
				fContext->coded_frame->pts, fContext->time_base.num,
				fContext->time_base.den);
		} else {
			TRACE("  codec frame PTS: N/A (codec time_base: %d/%d)\n",
				fContext->time_base.num, fContext->time_base.den);
		}

		// Setup media_encode_info, most important is the time stamp.
		info->start_time = (bigtime_t)(fFramesWritten * 1000000LL
			/ fInputFormat.u.raw_video.field_rate);

		info->flags = 0;
		if (fContext->coded_frame->key_frame)
			info->flags |= B_MEDIA_KEY_FRAME;

		// Write the chunk
		ret = WriteChunk(fChunkBuffer, usedBytes, info);
		if (ret != B_OK) {
			TRACE("  error writing chunk: %s\n", strerror(ret));
			break;
		}

		// Skip to the next frame (but usually, there is only one to encode
		// for video).
		frameCount--;
		fFramesWritten++;
		buffer = (const void*)((const uint8*)buffer + bufferSize);
	}

	return ret;
}